v3.5.6
   1/*
   2 * Copyright (c) 2006 Oracle.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/slab.h>
  35#include <linux/pci.h>
  36#include <linux/dma-mapping.h>
  37#include <rdma/rdma_cm.h>
  38
  39#include "rds.h"
  40#include "ib.h"
  41
  42static struct kmem_cache *rds_ib_incoming_slab;
  43static struct kmem_cache *rds_ib_frag_slab;
  44static atomic_t	rds_ib_allocation = ATOMIC_INIT(0);
  45
  46void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
  47{
  48	struct rds_ib_recv_work *recv;
  49	u32 i;
  50
  51	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
  52		struct ib_sge *sge;
  53
  54		recv->r_ibinc = NULL;
  55		recv->r_frag = NULL;
  56
  57		recv->r_wr.next = NULL;
  58		recv->r_wr.wr_id = i;
  59		recv->r_wr.sg_list = recv->r_sge;
  60		recv->r_wr.num_sge = RDS_IB_RECV_SGE;
  61
  62		sge = &recv->r_sge[0];
  63		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
  64		sge->length = sizeof(struct rds_header);
  65		sge->lkey = ic->i_mr->lkey;
  66
  67		sge = &recv->r_sge[1];
  68		sge->addr = 0;
  69		sge->length = RDS_FRAG_SIZE;
  70		sge->lkey = ic->i_mr->lkey;
  71	}
  72}
  73
  74/*
  75 * The entire 'from' list, including the from element itself, is put on
  76 * to the tail of the 'to' list.
  77 */
  78static void list_splice_entire_tail(struct list_head *from,
  79				    struct list_head *to)
  80{
  81	struct list_head *from_last = from->prev;
  82
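	/* The cache lists are "headless": 'from' is the first element of a
	 * circular list with no list_head anchor, so from->prev is its last
	 * element.  Using that last element as a stand-in list head splices
	 * every other element onto 'to'; adding it back by hand then moves
	 * the last element itself. */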
  83	list_splice_tail(from_last, to);
  84	list_add_tail(from_last, to);
  85}
  86
  87static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
  88{
  89	struct list_head *tmp;
  90
  91	tmp = xchg(&cache->xfer, NULL);
  92	if (tmp) {
  93		if (cache->ready)
  94			list_splice_entire_tail(tmp, cache->ready);
  95		else
  96			cache->ready = tmp;
  97	}
  98}
  99
 100static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
 101{
 102	struct rds_ib_cache_head *head;
 103	int cpu;
 104
 105	cache->percpu = alloc_percpu(struct rds_ib_cache_head);
 106	if (!cache->percpu)
 107	       return -ENOMEM;
 108
 109	for_each_possible_cpu(cpu) {
 110		head = per_cpu_ptr(cache->percpu, cpu);
 111		head->first = NULL;
 112		head->count = 0;
 113	}
 114	cache->xfer = NULL;
 115	cache->ready = NULL;
 116
 117	return 0;
 118}
 119
 120int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
 121{
 122	int ret;
 123
 124	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
 125	if (!ret) {
 126		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
 127		if (ret)
 128			free_percpu(ic->i_cache_incs.percpu);
 129	}
 130
 131	return ret;
 132}
 133
 134static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
 135					  struct list_head *caller_list)
 136{
 137	struct rds_ib_cache_head *head;
 138	int cpu;
 139
 140	for_each_possible_cpu(cpu) {
 141		head = per_cpu_ptr(cache->percpu, cpu);
 142		if (head->first) {
 143			list_splice_entire_tail(head->first, caller_list);
 144			head->first = NULL;
 145		}
 146	}
 147
 148	if (cache->ready) {
 149		list_splice_entire_tail(cache->ready, caller_list);
 150		cache->ready = NULL;
 151	}
 152}
 153
 154void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
 155{
 156	struct rds_ib_incoming *inc;
 157	struct rds_ib_incoming *inc_tmp;
 158	struct rds_page_frag *frag;
 159	struct rds_page_frag *frag_tmp;
 160	LIST_HEAD(list);
 161
 162	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
 163	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
 164	free_percpu(ic->i_cache_incs.percpu);
 165
 166	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
 167		list_del(&inc->ii_cache_entry);
 168		WARN_ON(!list_empty(&inc->ii_frags));
 169		kmem_cache_free(rds_ib_incoming_slab, inc);
 170	}
 171
 172	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
 173	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
 174	free_percpu(ic->i_cache_frags.percpu);
 175
 176	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
 177		list_del(&frag->f_cache_entry);
 178		WARN_ON(!list_empty(&frag->f_item));
 179		kmem_cache_free(rds_ib_frag_slab, frag);
 180	}
 181}
 182
 183/* fwd decl */
 184static void rds_ib_recv_cache_put(struct list_head *new_item,
 185				  struct rds_ib_refill_cache *cache);
 186static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
 187
 188
 189/* Recycle frag and attached recv buffer f_sg */
 190static void rds_ib_frag_free(struct rds_ib_connection *ic,
 191			     struct rds_page_frag *frag)
 192{
 193	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
 194
 195	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
 196}
 197
 198/* Recycle inc after freeing attached frags */
 199void rds_ib_inc_free(struct rds_incoming *inc)
 200{
 201	struct rds_ib_incoming *ibinc;
 202	struct rds_page_frag *frag;
 203	struct rds_page_frag *pos;
 204	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
 205
 206	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 207
 208	/* Free attached frags */
 209	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
 210		list_del_init(&frag->f_item);
 211		rds_ib_frag_free(ic, frag);
 212	}
 213	BUG_ON(!list_empty(&ibinc->ii_frags));
 214
 215	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
 216	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
 217}
 218
 219static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
 220				  struct rds_ib_recv_work *recv)
 221{
 222	if (recv->r_ibinc) {
 223		rds_inc_put(&recv->r_ibinc->ii_inc);
 224		recv->r_ibinc = NULL;
 225	}
 226	if (recv->r_frag) {
 227		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
 228		rds_ib_frag_free(ic, recv->r_frag);
 229		recv->r_frag = NULL;
 230	}
 231}
 232
 233void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
 234{
 235	u32 i;
 236
 237	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
 238		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
 239}
 240
 241static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
 242						     gfp_t slab_mask)
 243{
 244	struct rds_ib_incoming *ibinc;
 245	struct list_head *cache_item;
 246	int avail_allocs;
 247
 248	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
 249	if (cache_item) {
 250		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
 251	} else {
 252		avail_allocs = atomic_add_unless(&rds_ib_allocation,
 253						 1, rds_ib_sysctl_max_recv_allocation);
 254		if (!avail_allocs) {
 255			rds_ib_stats_inc(s_ib_rx_alloc_limit);
 256			return NULL;
 257		}
 258		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
 259		if (!ibinc) {
 260			atomic_dec(&rds_ib_allocation);
 261			return NULL;
 262		}
 263	}
 264	INIT_LIST_HEAD(&ibinc->ii_frags);
 265	rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
 266
 267	return ibinc;
 268}
 269
 270static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
 271						    gfp_t slab_mask, gfp_t page_mask)
 272{
 273	struct rds_page_frag *frag;
 274	struct list_head *cache_item;
 275	int ret;
 276
 277	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
 278	if (cache_item) {
 279		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
 280	} else {
 281		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
 282		if (!frag)
 283			return NULL;
 284
 285		sg_init_table(&frag->f_sg, 1);
 286		ret = rds_page_remainder_alloc(&frag->f_sg,
 287					       RDS_FRAG_SIZE, page_mask);
 288		if (ret) {
 289			kmem_cache_free(rds_ib_frag_slab, frag);
 290			return NULL;
 291		}
 292	}
 293
 294	INIT_LIST_HEAD(&frag->f_item);
 295
 296	return frag;
 297}
 298
 299static int rds_ib_recv_refill_one(struct rds_connection *conn,
 300				  struct rds_ib_recv_work *recv, int prefill)
 301{
 302	struct rds_ib_connection *ic = conn->c_transport_data;
 303	struct ib_sge *sge;
 304	int ret = -ENOMEM;
 305	gfp_t slab_mask = GFP_NOWAIT;
 306	gfp_t page_mask = GFP_NOWAIT;
 307
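	/* Callers that pass prefill may block, so sleeping allocations are
	 * used; the regular refill path runs from the recv tasklet below and
	 * therefore sticks with GFP_NOWAIT. */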
 308	if (prefill) {
 309		slab_mask = GFP_KERNEL;
 310		page_mask = GFP_HIGHUSER;
 311	}
 312
 313	if (!ic->i_cache_incs.ready)
 314		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
 315	if (!ic->i_cache_frags.ready)
 316		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
 317
 318	/*
 319	 * ibinc was taken from recv if recv contained the start of a message.
 320	 * recvs that were continuations will still have this allocated.
 321	 */
 322	if (!recv->r_ibinc) {
 323		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
 324		if (!recv->r_ibinc)
 325			goto out;
 326	}
 327
 328	WARN_ON(recv->r_frag); /* leak! */
 329	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
 330	if (!recv->r_frag)
 331		goto out;
 332
 333	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
 334			    1, DMA_FROM_DEVICE);
 335	WARN_ON(ret != 1);
 336
 337	sge = &recv->r_sge[0];
 338	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
 339	sge->length = sizeof(struct rds_header);
 340
 341	sge = &recv->r_sge[1];
 342	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
 343	sge->length = sg_dma_len(&recv->r_frag->f_sg);
 344
 345	ret = 0;
 346out:
 347	return ret;
 348}
 349
 350/*
 351 * This tries to allocate and post unused work requests after making sure that
 352 * they have all the allocations they need to queue received fragments into
 353 * sockets.
 354 *
 355 * Refilling simply stops early if posting fails due to temporary resource exhaustion.
 356 */
 357void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
 358{
 359	struct rds_ib_connection *ic = conn->c_transport_data;
 360	struct rds_ib_recv_work *recv;
 361	struct ib_recv_wr *failed_wr;
 362	unsigned int posted = 0;
 363	int ret = 0;
 364	u32 pos;
 365
 366	while ((prefill || rds_conn_up(conn)) &&
 367	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 368		if (pos >= ic->i_recv_ring.w_nr) {
 369			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
 370					pos);
 371			break;
 372		}
 373
 374		recv = &ic->i_recvs[pos];
 375		ret = rds_ib_recv_refill_one(conn, recv, prefill);
 376		if (ret) {
 377			break;
 378		}
 379
 380		/* XXX when can this fail? */
 381		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
 382		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
 383			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
 384			 (long) sg_dma_address(&recv->r_frag->f_sg), ret);
 385		if (ret) {
 386			rds_ib_conn_error(conn, "recv post on "
 387			       "%pI4 returned %d, disconnecting and "
 388			       "reconnecting\n", &conn->c_faddr,
 389			       ret);
 390			break;
 391		}
 392
 393		posted++;
 394	}
 395
 396	/* We're doing flow control - update the window. */
 397	if (ic->i_flowctl && posted)
 398		rds_ib_advertise_credits(conn, posted);
 399
 400	if (ret)
 401		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
 402}
 403
 404/*
 405 * We want to recycle several types of recv allocations, like incs and frags.
 406 * To use this, the *_free() function passes in the ptr to a list_head within
 407 * the recyclee, as well as the cache to put it on.
 408 *
 409 * First, we put the memory on a percpu list. When this reaches a certain size,
 410 * we move it to an intermediate non-percpu list in a lockless manner, with some
 411 * xchg/cmpxchg wizardry.
 412 *
 413 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 414 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 415 * list_empty() will return true even when one element is actually present.
 416 */
 417static void rds_ib_recv_cache_put(struct list_head *new_item,
 418				 struct rds_ib_refill_cache *cache)
 419{
 420	unsigned long flags;
 421	struct rds_ib_cache_head *chp;
 422	struct list_head *old;
 423
 424	local_irq_save(flags);
 425
 426	chp = per_cpu_ptr(cache->percpu, smp_processor_id());
 427	if (!chp->first)
 428		INIT_LIST_HEAD(new_item);
 429	else /* put on front */
 430		list_add_tail(new_item, chp->first);
 431	chp->first = new_item;
 432	chp->count++;
 433
 434	if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
 435		goto end;
 436
 437	/*
 438	 * Return our per-cpu first list to the cache's xfer by atomically
 439	 * grabbing the current xfer list, appending it to our per-cpu list,
 440	 * and then atomically returning that entire list back to the
 441	 * cache's xfer list as long as it's still empty.
 442	 */
 443	do {
 444		old = xchg(&cache->xfer, NULL);
 445		if (old)
 446			list_splice_entire_tail(old, chp->first);
 447		old = cmpxchg(&cache->xfer, NULL, chp->first);
 448	} while (old);
 449
 450	chp->first = NULL;
 451	chp->count = 0;
 452end:
 453	local_irq_restore(flags);
 454}
 455
 456static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
 457{
 458	struct list_head *head = cache->ready;
 459
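	/* Pop the first element of the headless 'ready' list.  If other
	 * elements remain, the next one becomes the new anchor; otherwise
	 * the cache is now empty and 'ready' goes back to NULL. */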
 460	if (head) {
 461		if (!list_empty(head)) {
 462			cache->ready = head->next;
 463			list_del_init(head);
 464		} else
 465			cache->ready = NULL;
 466	}
 467
 468	return head;
 469}
 470
 471int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
 472			    size_t size)
 473{
 474	struct rds_ib_incoming *ibinc;
 475	struct rds_page_frag *frag;
 476	struct iovec *iov = first_iov;
 477	unsigned long to_copy;
 478	unsigned long frag_off = 0;
 479	unsigned long iov_off = 0;
 480	int copied = 0;
 481	int ret;
 482	u32 len;
 483
 484	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 485	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 486	len = be32_to_cpu(inc->i_hdr.h_len);
 487
 488	while (copied < size && copied < len) {
 489		if (frag_off == RDS_FRAG_SIZE) {
 490			frag = list_entry(frag->f_item.next,
 491					  struct rds_page_frag, f_item);
 492			frag_off = 0;
 493		}
 494		while (iov_off == iov->iov_len) {
 495			iov_off = 0;
 496			iov++;
 497		}
 498
 499		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
 500		to_copy = min_t(size_t, to_copy, size - copied);
 501		to_copy = min_t(unsigned long, to_copy, len - copied);
 502
 503		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
 504			 "[%p, %u] + %lu\n",
 505			 to_copy, iov->iov_base, iov->iov_len, iov_off,
 506			 sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);
 507
 508		/* XXX needs + offset for multiple recvs per page */
 509		ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
 510					    frag->f_sg.offset + frag_off,
 511					    iov->iov_base + iov_off,
 512					    to_copy);
 513		if (ret) {
 514			copied = ret;
 515			break;
 516		}
 517
 518		iov_off += to_copy;
 519		frag_off += to_copy;
 520		copied += to_copy;
 521	}
 522
 523	return copied;
 524}
 525
 526/* ic starts out kzalloc()ed */
 527void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
 528{
 529	struct ib_send_wr *wr = &ic->i_ack_wr;
 530	struct ib_sge *sge = &ic->i_ack_sge;
 531
 532	sge->addr = ic->i_ack_dma;
 533	sge->length = sizeof(struct rds_header);
 534	sge->lkey = ic->i_mr->lkey;
 535
 536	wr->sg_list = sge;
 537	wr->num_sge = 1;
 538	wr->opcode = IB_WR_SEND;
 539	wr->wr_id = RDS_IB_ACK_WR_ID;
 540	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 541}
 542
 543/*
 544 * You'd think that with reliable IB connections you wouldn't need to ack
 545 * messages that have been received.  The problem is that IB hardware generates
 546 * an ack message before it has DMAed the message into memory.  This creates a
 547 * potential message loss if the HCA is disabled for any reason between when it
 548 * sends the ack and before the message is DMAed and processed.  This is only a
 549 * potential issue if another HCA is available for fail-over.
 550 *
 551 * When the remote host receives our ack they'll free the sent message from
 552 * their send queue.  To decrease the latency of this we always send an ack
 553 * immediately after we've received messages.
 554 *
 555 * For simplicity, we only have one ack in flight at a time.  This puts
 556 * pressure on senders to have deep enough send queues to absorb the latency of
 557 * a single ack frame being in flight.  This might not be good enough.
 558 *
 559 * This is implemented by having a long-lived send_wr and sge which point to a
 560 * statically allocated ack frame.  This ack wr does not fall under the ring
 561 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 562 * room for it beyond the ring size.  Send completion notices its special
 563 * wr_id and avoids working with the ring in that case.
 564 */
 565#ifndef KERNEL_HAS_ATOMIC64
 566static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
 567				int ack_required)
 568{
 569	unsigned long flags;
 570
 571	spin_lock_irqsave(&ic->i_ack_lock, flags);
 572	ic->i_ack_next = seq;
 573	if (ack_required)
 574		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 575	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
 576}
 577
 578static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 579{
 580	unsigned long flags;
 581	u64 seq;
 582
 583	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 584
 585	spin_lock_irqsave(&ic->i_ack_lock, flags);
 586	seq = ic->i_ack_next;
 587	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
 588
 589	return seq;
 590}
 591#else
 592static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
 593				int ack_required)
 594{
 595	atomic64_set(&ic->i_ack_next, seq);
 596	if (ack_required) {
 597		smp_mb__before_clear_bit();
 598		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 599	}
 600}
 601
 602static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 603{
 604	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 605	smp_mb__after_clear_bit();
 606
 607	return atomic64_read(&ic->i_ack_next);
 608}
 609#endif
 610
 611
 612static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
 613{
 614	struct rds_header *hdr = ic->i_ack;
 615	struct ib_send_wr *failed_wr;
 616	u64 seq;
 617	int ret;
 618
 619	seq = rds_ib_get_ack(ic);
 620
 621	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
 622	rds_message_populate_header(hdr, 0, 0, 0);
 623	hdr->h_ack = cpu_to_be64(seq);
 624	hdr->h_credit = adv_credits;
 625	rds_message_make_checksum(hdr);
 626	ic->i_ack_queued = jiffies;
 627
 628	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
 629	if (unlikely(ret)) {
 630		/* Failed to send. Release the WR, and
 631		 * force another ACK.
 632		 */
 633		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 634		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 635
 636		rds_ib_stats_inc(s_ib_ack_send_failure);
 637
 638		rds_ib_conn_error(ic->conn, "sending ack failed\n");
 639	} else
 640		rds_ib_stats_inc(s_ib_ack_sent);
 641}
 642
 643/*
 644 * There are 3 ways of getting acknowledgements to the peer:
 645 *  1.	We call rds_ib_attempt_ack from the recv completion handler
 646 *	to send an ACK-only frame.
 647 *	However, there can be only one such frame in the send queue
 648 *	at any time, so we may have to postpone it.
 649 *  2.	When another (data) packet is transmitted while there's
 650 *	an ACK in the queue, we piggyback the ACK sequence number
 651 *	on the data packet.
 652 *  3.	If the ACK WR is done sending, we get called from the
 653 *	send queue completion handler, and check whether there's
 654 *	another ACK pending (postponed because the WR was on the
 655 *	queue). If so, we transmit it.
 656 *
 657 * We maintain 2 variables:
 658 *  -	i_ack_flags, which keeps track of whether the ACK WR
 659 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 660 *  -	i_ack_next, which is the last sequence number we received
 661 *
 662 * Potentially, send queue and receive queue handlers can run concurrently.
 663 * It would be nice to not have to use a spinlock to synchronize things,
 664 * but the one problem that rules this out is that 64bit updates are
 665 * not atomic on all platforms. Things would be a lot simpler if
 666 * we had atomic64 or maybe cmpxchg64 everywhere.
 667 *
 668 * Reconnecting complicates this picture just slightly. When we
 669 * reconnect, we may be seeing duplicate packets. The peer
 670 * is retransmitting them, because it hasn't seen an ACK for
 671 * them. It is important that we ACK these.
 672 *
 673 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 674 * this flag set *MUST* be acknowledged immediately.
 675 */
 676
 677/*
 678 * When we get here, we're called from the recv queue handler.
 679 * Check whether we ought to transmit an ACK.
 680 */
 681void rds_ib_attempt_ack(struct rds_ib_connection *ic)
 682{
 683	unsigned int adv_credits;
 684
 685	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
 686		return;
 687
 688	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
 689		rds_ib_stats_inc(s_ib_ack_send_delayed);
 690		return;
 691	}
 692
 693	/* Can we get a send credit? */
 694	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
 695		rds_ib_stats_inc(s_ib_tx_throttle);
 696		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 697		return;
 698	}
 699
 700	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 701	rds_ib_send_ack(ic, adv_credits);
 702}
 703
 704/*
 705 * We get here from the send completion handler, when the
 706 * adapter tells us the ACK frame was sent.
 707 */
 708void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
 709{
 710	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 711	rds_ib_attempt_ack(ic);
 712}
 713
 714/*
 715 * This is called by the regular xmit code when it wants to piggyback
 716 * an ACK on an outgoing frame.
 717 */
 718u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
 719{
 720	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
 721		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
 722	return rds_ib_get_ack(ic);
 723}
 724
 725/*
 726 * It's kind of lame that we're copying from the posted receive pages into
 727 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 728 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 729 * hopefully we won't need to invest that complexity in making it more
 730 * efficient.  By copying we can share a simpler core with TCP which has to
 731 * copy.
 732 */
 733static void rds_ib_cong_recv(struct rds_connection *conn,
 734			      struct rds_ib_incoming *ibinc)
 735{
 736	struct rds_cong_map *map;
 737	unsigned int map_off;
 738	unsigned int map_page;
 739	struct rds_page_frag *frag;
 740	unsigned long frag_off;
 741	unsigned long to_copy;
 742	unsigned long copied;
 743	uint64_t uncongested = 0;
 744	void *addr;
 745
 746	/* catch completely corrupt packets */
 747	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
 748		return;
 749
 750	map = conn->c_fcong;
 751	map_page = 0;
 752	map_off = 0;
 753
 754	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 755	frag_off = 0;
 756
 757	copied = 0;
 758
 759	while (copied < RDS_CONG_MAP_BYTES) {
 760		uint64_t *src, *dst;
 761		unsigned int k;
 762
 763		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
 764		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
 765
 766		addr = kmap_atomic(sg_page(&frag->f_sg));
 767
 768		src = addr + frag_off;
 769		dst = (void *)map->m_page_addrs[map_page] + map_off;
 770		for (k = 0; k < to_copy; k += 8) {
 771			/* Record ports that became uncongested, ie
 772			 * bits that changed from 0 to 1. */
 773			uncongested |= ~(*src) & *dst;
 774			*dst++ = *src++;
 775		}
 776		kunmap_atomic(addr);
 777
 778		copied += to_copy;
 779
 780		map_off += to_copy;
 781		if (map_off == PAGE_SIZE) {
 782			map_off = 0;
 783			map_page++;
 784		}
 785
 786		frag_off += to_copy;
 787		if (frag_off == RDS_FRAG_SIZE) {
 788			frag = list_entry(frag->f_item.next,
 789					  struct rds_page_frag, f_item);
 790			frag_off = 0;
 791		}
 792	}
 793
 794	/* the congestion map is in little endian order */
 795	uncongested = le64_to_cpu(uncongested);
 796
 797	rds_cong_map_updated(map, uncongested);
 798}
 799
 800/*
 801 * Rings are posted with all the allocations they'll need to queue the
 802 * incoming message to the receiving socket so this can't fail.
 803 * All fragments start with a header, so we can make sure we're not receiving
 804 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 805 */
 806struct rds_ib_ack_state {
 807	u64		ack_next;
 808	u64		ack_recv;
 809	unsigned int	ack_required:1;
 810	unsigned int	ack_next_valid:1;
 811	unsigned int	ack_recv_valid:1;
 812};
 813
 814static void rds_ib_process_recv(struct rds_connection *conn,
 815				struct rds_ib_recv_work *recv, u32 data_len,
 816				struct rds_ib_ack_state *state)
 817{
 818	struct rds_ib_connection *ic = conn->c_transport_data;
 819	struct rds_ib_incoming *ibinc = ic->i_ibinc;
 820	struct rds_header *ihdr, *hdr;
 821
 822	/* XXX shut down the connection if port 0,0 are seen? */
 823
 824	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
 825		 data_len);
 826
 827	if (data_len < sizeof(struct rds_header)) {
 828		rds_ib_conn_error(conn, "incoming message "
 829		       "from %pI4 didn't include a "
 830		       "header, disconnecting and "
 831		       "reconnecting\n",
 832		       &conn->c_faddr);
 833		return;
 834	}
 835	data_len -= sizeof(struct rds_header);
 836
 837	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
 838
 839	/* Validate the checksum. */
 840	if (!rds_message_verify_checksum(ihdr)) {
 841		rds_ib_conn_error(conn, "incoming message "
 842		       "from %pI4 has corrupted header - "
 843		       "forcing a reconnect\n",
 844		       &conn->c_faddr);
 845		rds_stats_inc(s_recv_drop_bad_checksum);
 846		return;
 847	}
 848
 849	/* Process the ACK sequence which comes with every packet */
 850	state->ack_recv = be64_to_cpu(ihdr->h_ack);
 851	state->ack_recv_valid = 1;
 852
 853	/* Process the credits update if there was one */
 854	if (ihdr->h_credit)
 855		rds_ib_send_add_credits(conn, ihdr->h_credit);
 856
 857	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
 858		/* This is an ACK-only packet. The reason it gets
 859		 * special treatment here is that historically, ACKs
 860		 * were rather special beasts.
 861		 */
 862		rds_ib_stats_inc(s_ib_ack_received);
 863
 864		/*
 865		 * Usually the frags make their way on to incs and are then freed as
 866		 * the inc is freed.  We don't go that route, so we have to drop the
 867		 * page ref ourselves.  We can't just leave the page on the recv
 868		 * because that confuses the dma mapping of pages and each recv's use
 869		 * of a partial page.
 870		 *
 871		 * FIXME: Fold this into the code path below.
 872		 */
 873		rds_ib_frag_free(ic, recv->r_frag);
 874		recv->r_frag = NULL;
 875		return;
 876	}
 877
 878	/*
 879	 * If we don't already have an inc on the connection then this
 880	 * fragment has a header and starts a message; copy its header
 881	 * into the inc and save the inc so we can hang upcoming fragments
 882	 * off its list.
 883	 */
 884	if (!ibinc) {
 885		ibinc = recv->r_ibinc;
 886		recv->r_ibinc = NULL;
 887		ic->i_ibinc = ibinc;
 888
 889		hdr = &ibinc->ii_inc.i_hdr;
 890		memcpy(hdr, ihdr, sizeof(*hdr));
 891		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
 892
 893		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
 894			 ic->i_recv_data_rem, hdr->h_flags);
 895	} else {
 896		hdr = &ibinc->ii_inc.i_hdr;
 897		/* We can't just use memcmp here; fragments of a
 898		 * single message may carry different ACKs */
 899		if (hdr->h_sequence != ihdr->h_sequence ||
 900		    hdr->h_len != ihdr->h_len ||
 901		    hdr->h_sport != ihdr->h_sport ||
 902		    hdr->h_dport != ihdr->h_dport) {
 903			rds_ib_conn_error(conn,
 904				"fragment header mismatch; forcing reconnect\n");
 905			return;
 906		}
 907	}
 908
 909	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
 910	recv->r_frag = NULL;
 911
 912	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
 913		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
 914	else {
 915		ic->i_recv_data_rem = 0;
 916		ic->i_ibinc = NULL;
 917
 918		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
 919			rds_ib_cong_recv(conn, ibinc);
 920		else {
 921			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
 922					  &ibinc->ii_inc, GFP_ATOMIC);
 923			state->ack_next = be64_to_cpu(hdr->h_sequence);
 924			state->ack_next_valid = 1;
 925		}
 926
 927		/* Evaluate the ACK_REQUIRED flag *after* we received
 928		 * the complete frame, and after bumping the next_rx
 929		 * sequence. */
 930		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
 931			rds_stats_inc(s_recv_ack_required);
 932			state->ack_required = 1;
 933		}
 934
 935		rds_inc_put(&ibinc->ii_inc);
 936	}
 937}
 938
 939/*
 940 * Plucking the oldest entry from the ring can be done concurrently with
 941 * the thread refilling the ring.  Each ring operation is protected by
 942 * spinlocks and the transient state of refilling doesn't change the
 943 * recording of which entry is oldest.
 944 *
 945 * This relies on IB only calling one cq comp_handler for each cq so that
 946 * there will only be one caller of rds_recv_incoming() per RDS connection.
 947 */
 948void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
 949{
 950	struct rds_connection *conn = context;
 951	struct rds_ib_connection *ic = conn->c_transport_data;
 952
 953	rdsdebug("conn %p cq %p\n", conn, cq);
 954
 955	rds_ib_stats_inc(s_ib_rx_cq_call);
 956
 957	tasklet_schedule(&ic->i_recv_tasklet);
 958}
 959
 960static inline void rds_poll_cq(struct rds_ib_connection *ic,
 961			       struct rds_ib_ack_state *state)
 962{
 963	struct rds_connection *conn = ic->conn;
 964	struct ib_wc wc;
 965	struct rds_ib_recv_work *recv;
 966
 967	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
 968		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
 969			 (unsigned long long)wc.wr_id, wc.status,
 970			 rds_ib_wc_status_str(wc.status), wc.byte_len,
 971			 be32_to_cpu(wc.ex.imm_data));
 972		rds_ib_stats_inc(s_ib_rx_cq_event);
 973
 974		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
 975
 976		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
 977
 978		/*
 979		 * Also process recvs in connecting state because it is possible
 980		 * to get a recv completion _before_ the rdmacm ESTABLISHED
 981		 * event is processed.
 982		 */
 983		if (wc.status == IB_WC_SUCCESS) {
 984			rds_ib_process_recv(conn, recv, wc.byte_len, state);
 985		} else {
 986			/* We expect errors as the qp is drained during shutdown */
 987			if (rds_conn_up(conn) || rds_conn_connecting(conn))
 988				rds_ib_conn_error(conn, "recv completion on %pI4 had "
 989						  "status %u (%s), disconnecting and "
 990						  "reconnecting\n", &conn->c_faddr,
 991						  wc.status,
 992						  rds_ib_wc_status_str(wc.status));
 993		}
 994
 995		/*
 996		 * It's very important that we only free this ring entry if we've truly
 997		 * freed the resources allocated to the entry.  The refilling path can
 998		 * leak if we don't.
 999		 */
1000		rds_ib_ring_free(&ic->i_recv_ring, 1);
1001	}
1002}
1003
1004void rds_ib_recv_tasklet_fn(unsigned long data)
1005{
1006	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
1007	struct rds_connection *conn = ic->conn;
1008	struct rds_ib_ack_state state = { 0, };
1009
1010	rds_poll_cq(ic, &state);
1011	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
1012	rds_poll_cq(ic, &state);
1013
1014	if (state.ack_next_valid)
1015		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
1016	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
1017		rds_send_drop_acked(conn, state.ack_recv, NULL);
1018		ic->i_ack_recv = state.ack_recv;
1019	}
1020	if (rds_conn_up(conn))
1021		rds_ib_attempt_ack(ic);
1022
1023	/* If we ever end up with a really empty receive ring, we're
1024	 * in deep trouble, as the sender will definitely see RNR
1025	 * timeouts. */
1026	if (rds_ib_ring_empty(&ic->i_recv_ring))
1027		rds_ib_stats_inc(s_ib_rx_ring_empty);
1028
1029	if (rds_ib_ring_low(&ic->i_recv_ring))
1030		rds_ib_recv_refill(conn, 0);
1031}
1032
1033int rds_ib_recv(struct rds_connection *conn)
1034{
1035	struct rds_ib_connection *ic = conn->c_transport_data;
1036	int ret = 0;
1037
1038	rdsdebug("conn %p\n", conn);
1039	if (rds_conn_up(conn))
1040		rds_ib_attempt_ack(ic);
1041
1042	return ret;
1043}
1044
1045int rds_ib_recv_init(void)
1046{
1047	struct sysinfo si;
1048	int ret = -ENOMEM;
1049
1050	/* Default to roughly 1/3 of all available RAM for recv memory */
1051	si_meminfo(&si);
1052	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
1053
1054	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
1055					sizeof(struct rds_ib_incoming),
1056					0, SLAB_HWCACHE_ALIGN, NULL);
1057	if (!rds_ib_incoming_slab)
1058		goto out;
1059
1060	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
1061					sizeof(struct rds_page_frag),
1062					0, SLAB_HWCACHE_ALIGN, NULL);
1063	if (!rds_ib_frag_slab)
1064		kmem_cache_destroy(rds_ib_incoming_slab);
1065	else
1066		ret = 0;
1067out:
1068	return ret;
1069}
1070
1071void rds_ib_recv_exit(void)
1072{
1073	kmem_cache_destroy(rds_ib_incoming_slab);
1074	kmem_cache_destroy(rds_ib_frag_slab);
1075}
v5.14.15
   1/*
   2 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/slab.h>
  35#include <linux/pci.h>
  36#include <linux/dma-mapping.h>
  37#include <rdma/rdma_cm.h>
  38
  39#include "rds_single_path.h"
  40#include "rds.h"
  41#include "ib.h"
  42
  43static struct kmem_cache *rds_ib_incoming_slab;
  44static struct kmem_cache *rds_ib_frag_slab;
  45static atomic_t	rds_ib_allocation = ATOMIC_INIT(0);
  46
  47void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
  48{
  49	struct rds_ib_recv_work *recv;
  50	u32 i;
  51
  52	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
  53		struct ib_sge *sge;
  54
  55		recv->r_ibinc = NULL;
  56		recv->r_frag = NULL;
  57
  58		recv->r_wr.next = NULL;
  59		recv->r_wr.wr_id = i;
  60		recv->r_wr.sg_list = recv->r_sge;
  61		recv->r_wr.num_sge = RDS_IB_RECV_SGE;
  62
  63		sge = &recv->r_sge[0];
  64		sge->addr = ic->i_recv_hdrs_dma[i];
  65		sge->length = sizeof(struct rds_header);
  66		sge->lkey = ic->i_pd->local_dma_lkey;
  67
  68		sge = &recv->r_sge[1];
  69		sge->addr = 0;
  70		sge->length = RDS_FRAG_SIZE;
  71		sge->lkey = ic->i_pd->local_dma_lkey;
  72	}
  73}
  74
  75/*
  76 * The entire 'from' list, including the from element itself, is put on
  77 * to the tail of the 'to' list.
  78 */
  79static void list_splice_entire_tail(struct list_head *from,
  80				    struct list_head *to)
  81{
  82	struct list_head *from_last = from->prev;
  83
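	/* The cache lists are "headless": 'from' is the first element of a
	 * circular list with no list_head anchor, so from->prev is its last
	 * element.  Using that last element as a stand-in list head splices
	 * every other element onto 'to'; adding it back by hand then moves
	 * the last element itself. */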
  84	list_splice_tail(from_last, to);
  85	list_add_tail(from_last, to);
  86}
  87
  88static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
  89{
  90	struct list_head *tmp;
  91
  92	tmp = xchg(&cache->xfer, NULL);
  93	if (tmp) {
  94		if (cache->ready)
  95			list_splice_entire_tail(tmp, cache->ready);
  96		else
  97			cache->ready = tmp;
  98	}
  99}
 100
 101static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
 102{
 103	struct rds_ib_cache_head *head;
 104	int cpu;
 105
 106	cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
 107	if (!cache->percpu)
 108	       return -ENOMEM;
 109
 110	for_each_possible_cpu(cpu) {
 111		head = per_cpu_ptr(cache->percpu, cpu);
 112		head->first = NULL;
 113		head->count = 0;
 114	}
 115	cache->xfer = NULL;
 116	cache->ready = NULL;
 117
 118	return 0;
 119}
 120
 121int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
 122{
 123	int ret;
 124
 125	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
 126	if (!ret) {
 127		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
 128		if (ret)
 129			free_percpu(ic->i_cache_incs.percpu);
 130	}
 131
 132	return ret;
 133}
 134
 135static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
 136					  struct list_head *caller_list)
 137{
 138	struct rds_ib_cache_head *head;
 139	int cpu;
 140
 141	for_each_possible_cpu(cpu) {
 142		head = per_cpu_ptr(cache->percpu, cpu);
 143		if (head->first) {
 144			list_splice_entire_tail(head->first, caller_list);
 145			head->first = NULL;
 146		}
 147	}
 148
 149	if (cache->ready) {
 150		list_splice_entire_tail(cache->ready, caller_list);
 151		cache->ready = NULL;
 152	}
 153}
 154
 155void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
 156{
 157	struct rds_ib_incoming *inc;
 158	struct rds_ib_incoming *inc_tmp;
 159	struct rds_page_frag *frag;
 160	struct rds_page_frag *frag_tmp;
 161	LIST_HEAD(list);
 162
 163	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
 164	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
 165	free_percpu(ic->i_cache_incs.percpu);
 166
 167	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
 168		list_del(&inc->ii_cache_entry);
 169		WARN_ON(!list_empty(&inc->ii_frags));
 170		kmem_cache_free(rds_ib_incoming_slab, inc);
 171		atomic_dec(&rds_ib_allocation);
 172	}
 173
 174	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
 175	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
 176	free_percpu(ic->i_cache_frags.percpu);
 177
 178	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
 179		list_del(&frag->f_cache_entry);
 180		WARN_ON(!list_empty(&frag->f_item));
 181		kmem_cache_free(rds_ib_frag_slab, frag);
 182	}
 183}
 184
 185/* fwd decl */
 186static void rds_ib_recv_cache_put(struct list_head *new_item,
 187				  struct rds_ib_refill_cache *cache);
 188static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
 189
 190
 191/* Recycle frag and attached recv buffer f_sg */
 192static void rds_ib_frag_free(struct rds_ib_connection *ic,
 193			     struct rds_page_frag *frag)
 194{
 195	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
 196
 197	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
 198	atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
 199	rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
 200}
 201
 202/* Recycle inc after freeing attached frags */
 203void rds_ib_inc_free(struct rds_incoming *inc)
 204{
 205	struct rds_ib_incoming *ibinc;
 206	struct rds_page_frag *frag;
 207	struct rds_page_frag *pos;
 208	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
 209
 210	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 211
 212	/* Free attached frags */
 213	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
 214		list_del_init(&frag->f_item);
 215		rds_ib_frag_free(ic, frag);
 216	}
 217	BUG_ON(!list_empty(&ibinc->ii_frags));
 218
 219	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
 220	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
 221}
 222
 223static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
 224				  struct rds_ib_recv_work *recv)
 225{
 226	if (recv->r_ibinc) {
 227		rds_inc_put(&recv->r_ibinc->ii_inc);
 228		recv->r_ibinc = NULL;
 229	}
 230	if (recv->r_frag) {
 231		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
 232		rds_ib_frag_free(ic, recv->r_frag);
 233		recv->r_frag = NULL;
 234	}
 235}
 236
 237void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
 238{
 239	u32 i;
 240
 241	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
 242		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
 243}
 244
 245static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
 246						     gfp_t slab_mask)
 247{
 248	struct rds_ib_incoming *ibinc;
 249	struct list_head *cache_item;
 250	int avail_allocs;
 251
 252	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
 253	if (cache_item) {
 254		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
 255	} else {
 256		avail_allocs = atomic_add_unless(&rds_ib_allocation,
 257						 1, rds_ib_sysctl_max_recv_allocation);
 258		if (!avail_allocs) {
 259			rds_ib_stats_inc(s_ib_rx_alloc_limit);
 260			return NULL;
 261		}
 262		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
 263		if (!ibinc) {
 264			atomic_dec(&rds_ib_allocation);
 265			return NULL;
 266		}
 267		rds_ib_stats_inc(s_ib_rx_total_incs);
 268	}
 269	INIT_LIST_HEAD(&ibinc->ii_frags);
 270	rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);
 271
 272	return ibinc;
 273}
 274
 275static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
 276						    gfp_t slab_mask, gfp_t page_mask)
 277{
 278	struct rds_page_frag *frag;
 279	struct list_head *cache_item;
 280	int ret;
 281
 282	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
 283	if (cache_item) {
 284		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
 285		atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
 286		rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
 287	} else {
 288		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
 289		if (!frag)
 290			return NULL;
 291
 292		sg_init_table(&frag->f_sg, 1);
 293		ret = rds_page_remainder_alloc(&frag->f_sg,
 294					       RDS_FRAG_SIZE, page_mask);
 295		if (ret) {
 296			kmem_cache_free(rds_ib_frag_slab, frag);
 297			return NULL;
 298		}
 299		rds_ib_stats_inc(s_ib_rx_total_frags);
 300	}
 301
 302	INIT_LIST_HEAD(&frag->f_item);
 303
 304	return frag;
 305}
 306
 307static int rds_ib_recv_refill_one(struct rds_connection *conn,
 308				  struct rds_ib_recv_work *recv, gfp_t gfp)
 309{
 310	struct rds_ib_connection *ic = conn->c_transport_data;
 311	struct ib_sge *sge;
 312	int ret = -ENOMEM;
 313	gfp_t slab_mask = gfp;
 314	gfp_t page_mask = gfp;
 315
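	/* If the caller's gfp allows direct reclaim it may block, so the
	 * masks are upgraded to full GFP_KERNEL/GFP_HIGHUSER; otherwise the
	 * caller's non-blocking gfp is used as-is. */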
 316	if (gfp & __GFP_DIRECT_RECLAIM) {
 317		slab_mask = GFP_KERNEL;
 318		page_mask = GFP_HIGHUSER;
 319	}
 320
 321	if (!ic->i_cache_incs.ready)
 322		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
 323	if (!ic->i_cache_frags.ready)
 324		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
 325
 326	/*
 327	 * ibinc was taken from recv if recv contained the start of a message.
 328	 * recvs that were continuations will still have this allocated.
 329	 */
 330	if (!recv->r_ibinc) {
 331		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
 332		if (!recv->r_ibinc)
 333			goto out;
 334	}
 335
 336	WARN_ON(recv->r_frag); /* leak! */
 337	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
 338	if (!recv->r_frag)
 339		goto out;
 340
 341	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
 342			    1, DMA_FROM_DEVICE);
 343	WARN_ON(ret != 1);
 344
 345	sge = &recv->r_sge[0];
 346	sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
 347	sge->length = sizeof(struct rds_header);
 348
 349	sge = &recv->r_sge[1];
 350	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
 351	sge->length = sg_dma_len(&recv->r_frag->f_sg);
 352
 353	ret = 0;
 354out:
 355	return ret;
 356}
 357
 358static int acquire_refill(struct rds_connection *conn)
 359{
 360	return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
 361}
 362
 363static void release_refill(struct rds_connection *conn)
 364{
 365	clear_bit(RDS_RECV_REFILL, &conn->c_flags);
 366
 367	/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 368	 * hot path and finding waiters is very rare.  We don't want to walk
 369	 * the system-wide hashed waitqueue buckets in the fast path only to
 370	 * almost never find waiters.
 371	 */
 372	if (waitqueue_active(&conn->c_waitq))
 373		wake_up_all(&conn->c_waitq);
 374}
 375
 376/*
 377 * This tries to allocate and post unused work requests after making sure that
 378 * they have all the allocations they need to queue received fragments into
 379 * sockets.
 380 */
 381void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
 382{
 383	struct rds_ib_connection *ic = conn->c_transport_data;
 384	struct rds_ib_recv_work *recv;
 385	unsigned int posted = 0;
 386	int ret = 0;
 387	bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
 388	bool must_wake = false;
 389	u32 pos;
 390
 391	/* the goal here is to just make sure that someone, somewhere
 392	 * is posting buffers.  If we can't get the refill lock,
 393	 * let them do their thing
 394	 */
 395	if (!acquire_refill(conn))
 396		return;
 397
 398	while ((prefill || rds_conn_up(conn)) &&
 399	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 400		if (pos >= ic->i_recv_ring.w_nr) {
 401			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
 402					pos);
 403			break;
 404		}
 405
 406		recv = &ic->i_recvs[pos];
 407		ret = rds_ib_recv_refill_one(conn, recv, gfp);
 408		if (ret) {
 409			must_wake = true;
 410			break;
 411		}
 412
 413		rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
 414			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
 415			 (long)sg_dma_address(&recv->r_frag->f_sg));
 416
 417		/* XXX when can this fail? */
 418		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
 419		if (ret) {
 420			rds_ib_conn_error(conn, "recv post on "
 421			       "%pI6c returned %d, disconnecting and "
 422			       "reconnecting\n", &conn->c_faddr,
 423			       ret);
 424			break;
 425		}
 426
 427		posted++;
 428
 429		if ((posted > 128 && need_resched()) || posted > 8192) {
 430			must_wake = true;
 431			break;
 432		}
 433	}
 434
 435	/* We're doing flow control - update the window. */
 436	if (ic->i_flowctl && posted)
 437		rds_ib_advertise_credits(conn, posted);
 438
 439	if (ret)
 440		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
 441
 442	release_refill(conn);
 443
 444	/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
 445	 * in this case the ring being low is going to lead to more interrupts
 446	 * and we can safely let the softirq code take care of it unless the
 447	 * ring is completely empty.
 448	 *
 449	 * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
 450	 * we might have raced with the softirq code while we had the refill
 451	 * lock held.  Use rds_ib_ring_low() instead of ring_empty to decide
 452	 * if we should requeue.
 453	 */
 454	if (rds_conn_up(conn) &&
 455	    (must_wake ||
 456	    (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
 457	    rds_ib_ring_empty(&ic->i_recv_ring))) {
 458		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
 459	}
 460	if (can_wait)
 461		cond_resched();
 462}
 463
 464/*
 465 * We want to recycle several types of recv allocations, like incs and frags.
 466 * To use this, the *_free() function passes in the ptr to a list_head within
 467 * the recyclee, as well as the cache to put it on.
 468 *
 469 * First, we put the memory on a percpu list. When this reaches a certain size,
 470 * we move it to an intermediate non-percpu list in a lockless manner, with some
 471 * xchg/cmpxchg wizardry.
 472 *
 473 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 474 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 475 * list_empty() will return true even when one element is actually present.
 476 */
 477static void rds_ib_recv_cache_put(struct list_head *new_item,
 478				 struct rds_ib_refill_cache *cache)
 479{
 480	unsigned long flags;
 481	struct list_head *old, *chpfirst;
 482
 483	local_irq_save(flags);
 484
 485	chpfirst = __this_cpu_read(cache->percpu->first);
 486	if (!chpfirst)
 487		INIT_LIST_HEAD(new_item);
 488	else /* put on front */
 489		list_add_tail(new_item, chpfirst);
 490
 491	__this_cpu_write(cache->percpu->first, new_item);
 492	__this_cpu_inc(cache->percpu->count);
 493
 494	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
 495		goto end;
 496
 497	/*
 498	 * Return our per-cpu first list to the cache's xfer by atomically
 499	 * grabbing the current xfer list, appending it to our per-cpu list,
 500	 * and then atomically returning that entire list back to the
 501	 * cache's xfer list as long as it's still empty.
 502	 */
 503	do {
 504		old = xchg(&cache->xfer, NULL);
 505		if (old)
 506			list_splice_entire_tail(old, chpfirst);
 507		old = cmpxchg(&cache->xfer, NULL, chpfirst);
 508	} while (old);
 509
 510
 511	__this_cpu_write(cache->percpu->first, NULL);
 512	__this_cpu_write(cache->percpu->count, 0);
 513end:
 514	local_irq_restore(flags);
 515}
 516
 517static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
 518{
 519	struct list_head *head = cache->ready;
 520
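	/* Pop the first element of the headless 'ready' list.  If other
	 * elements remain, the next one becomes the new anchor; otherwise
	 * the cache is now empty and 'ready' goes back to NULL. */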
 521	if (head) {
 522		if (!list_empty(head)) {
 523			cache->ready = head->next;
 524			list_del_init(head);
 525		} else
 526			cache->ready = NULL;
 527	}
 528
 529	return head;
 530}
 531
 532int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 533{
 534	struct rds_ib_incoming *ibinc;
 535	struct rds_page_frag *frag;
 536	unsigned long to_copy;
 537	unsigned long frag_off = 0;
 538	int copied = 0;
 539	int ret;
 540	u32 len;
 541
 542	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 543	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 544	len = be32_to_cpu(inc->i_hdr.h_len);
 545
 546	while (iov_iter_count(to) && copied < len) {
 547		if (frag_off == RDS_FRAG_SIZE) {
 548			frag = list_entry(frag->f_item.next,
 549					  struct rds_page_frag, f_item);
 550			frag_off = 0;
 551		}
 552		to_copy = min_t(unsigned long, iov_iter_count(to),
 553				RDS_FRAG_SIZE - frag_off);
 554		to_copy = min_t(unsigned long, to_copy, len - copied);
 555
 556		/* XXX needs + offset for multiple recvs per page */
 557		rds_stats_add(s_copy_to_user, to_copy);
 558		ret = copy_page_to_iter(sg_page(&frag->f_sg),
 559					frag->f_sg.offset + frag_off,
 560					to_copy,
 561					to);
 562		if (ret != to_copy)
 563			return -EFAULT;
 564
 565		frag_off += to_copy;
 566		copied += to_copy;
 567	}
 568
 569	return copied;
 570}
 571
 572/* ic starts out kzalloc()ed */
 573void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
 574{
 575	struct ib_send_wr *wr = &ic->i_ack_wr;
 576	struct ib_sge *sge = &ic->i_ack_sge;
 577
 578	sge->addr = ic->i_ack_dma;
 579	sge->length = sizeof(struct rds_header);
 580	sge->lkey = ic->i_pd->local_dma_lkey;
 581
 582	wr->sg_list = sge;
 583	wr->num_sge = 1;
 584	wr->opcode = IB_WR_SEND;
 585	wr->wr_id = RDS_IB_ACK_WR_ID;
 586	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 587}
 588
 589/*
 590 * You'd think that with reliable IB connections you wouldn't need to ack
 591 * messages that have been received.  The problem is that IB hardware generates
 592 * an ack message before it has DMAed the message into memory.  This creates a
 593 * potential message loss if the HCA is disabled for any reason between when it
 594 * sends the ack and before the message is DMAed and processed.  This is only a
 595 * potential issue if another HCA is available for fail-over.
 596 *
 597 * When the remote host receives our ack they'll free the sent message from
 598 * their send queue.  To decrease the latency of this we always send an ack
 599 * immediately after we've received messages.
 600 *
 601 * For simplicity, we only have one ack in flight at a time.  This puts
 602 * pressure on senders to have deep enough send queues to absorb the latency of
 603 * a single ack frame being in flight.  This might not be good enough.
 604 *
 605 * This is implemented by having a long-lived send_wr and sge which point to a
 606 * statically allocated ack frame.  This ack wr does not fall under the ring
 607 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 608 * room for it beyond the ring size.  Send completion notices its special
 609 * wr_id and avoids working with the ring in that case.
 610 */
 611#ifndef KERNEL_HAS_ATOMIC64
 612void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 613{
 614	unsigned long flags;
 615
 616	spin_lock_irqsave(&ic->i_ack_lock, flags);
 617	ic->i_ack_next = seq;
 618	if (ack_required)
 619		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 620	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
 621}
 622
 623static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 624{
 625	unsigned long flags;
 626	u64 seq;
 627
 628	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 629
 630	spin_lock_irqsave(&ic->i_ack_lock, flags);
 631	seq = ic->i_ack_next;
 632	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
 633
 634	return seq;
 635}
 636#else
 637void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 638{
 639	atomic64_set(&ic->i_ack_next, seq);
 640	if (ack_required) {
 641		smp_mb__before_atomic();
 642		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 643	}
 644}
 645
 646static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 647{
 648	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 649	smp_mb__after_atomic();
 650
 651	return atomic64_read(&ic->i_ack_next);
 652}
 653#endif
 654
 655
 656static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
 657{
 658	struct rds_header *hdr = ic->i_ack;
 659	u64 seq;
 660	int ret;
 661
 662	seq = rds_ib_get_ack(ic);
 663
 664	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
 665
 666	ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, ic->i_ack_dma,
 667				   sizeof(*hdr), DMA_TO_DEVICE);
 668	rds_message_populate_header(hdr, 0, 0, 0);
 669	hdr->h_ack = cpu_to_be64(seq);
 670	hdr->h_credit = adv_credits;
 671	rds_message_make_checksum(hdr);
 672	ib_dma_sync_single_for_device(ic->rds_ibdev->dev, ic->i_ack_dma,
 673				      sizeof(*hdr), DMA_TO_DEVICE);
 674
 675	ic->i_ack_queued = jiffies;
 676
 677	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
 678	if (unlikely(ret)) {
 679		/* Failed to send. Release the WR, and
 680		 * force another ACK.
 681		 */
 682		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 683		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 684
 685		rds_ib_stats_inc(s_ib_ack_send_failure);
 686
 687		rds_ib_conn_error(ic->conn, "sending ack failed\n");
 688	} else
 689		rds_ib_stats_inc(s_ib_ack_sent);
 690}
 691
 692/*
 693 * There are 3 ways of getting acknowledgements to the peer:
 694 *  1.	We call rds_ib_attempt_ack from the recv completion handler
 695 *	to send an ACK-only frame.
 696 *	However, there can be only one such frame in the send queue
 697 *	at any time, so we may have to postpone it.
 698 *  2.	When another (data) packet is transmitted while there's
 699 *	an ACK in the queue, we piggyback the ACK sequence number
 700 *	on the data packet.
 701 *  3.	If the ACK WR is done sending, we get called from the
 702 *	send queue completion handler, and check whether there's
 703 *	another ACK pending (postponed because the WR was on the
 704 *	queue). If so, we transmit it.
 705 *
 706 * We maintain 2 variables:
 707 *  -	i_ack_flags, which keeps track of whether the ACK WR
 708 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 709 *  -	i_ack_next, which is the last sequence number we received
 710 *
 711 * Potentially, send queue and receive queue handlers can run concurrently.
 712 * It would be nice to not have to use a spinlock to synchronize things,
 713 * but the one problem that rules this out is that 64bit updates are
 714 * not atomic on all platforms. Things would be a lot simpler if
 715 * we had atomic64 or maybe cmpxchg64 everywhere.
 716 *
 717 * Reconnecting complicates this picture just slightly. When we
 718 * reconnect, we may be seeing duplicate packets. The peer
 719 * is retransmitting them, because it hasn't seen an ACK for
 720 * them. It is important that we ACK these.
 721 *
 722 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 723 * this flag set *MUST* be acknowledged immediately.
 724 */
 725
 726/*
 727 * When we get here, we're called from the recv queue handler.
 728 * Check whether we ought to transmit an ACK.
 729 */
 730void rds_ib_attempt_ack(struct rds_ib_connection *ic)
 731{
 732	unsigned int adv_credits;
 733
 734	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
 735		return;
 736
 737	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
 738		rds_ib_stats_inc(s_ib_ack_send_delayed);
 739		return;
 740	}
 741
 742	/* Can we get a send credit? */
 743	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
 744		rds_ib_stats_inc(s_ib_tx_throttle);
 745		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 746		return;
 747	}
 748
 749	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 750	rds_ib_send_ack(ic, adv_credits);
 751}
 752
 753/*
 754 * We get here from the send completion handler, when the
 755 * adapter tells us the ACK frame was sent.
 756 */
 757void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
 758{
 759	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 760	rds_ib_attempt_ack(ic);
 761}
 762
 763/*
 764 * This is called by the regular xmit code when it wants to piggyback
 765 * an ACK on an outgoing frame.
 766 */
 767u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
 768{
 769	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
 770		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
 771	return rds_ib_get_ack(ic);
 772}
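
For context, the piggyback above is the second of the three delivery methods described earlier: the transmit path stamps the returned sequence number into the header of an outgoing data frame. A rough sketch of that call site, assuming it sits in the ib_send.c transmit code (surrounding logic elided, exact form may differ):

	/* fold the latest received sequence number into a data frame's header */
	rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
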
 773
 774/*
 775 * It's kind of lame that we're copying from the posted receive pages into
 776 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 777 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 778 * hopefully we won't need to invest that complexity in making it more
 779 * efficient.  By copying we can share a simpler core with TCP which has to
 780 * copy.
 781 */
 782static void rds_ib_cong_recv(struct rds_connection *conn,
 783			      struct rds_ib_incoming *ibinc)
 784{
 785	struct rds_cong_map *map;
 786	unsigned int map_off;
 787	unsigned int map_page;
 788	struct rds_page_frag *frag;
 789	unsigned long frag_off;
 790	unsigned long to_copy;
 791	unsigned long copied;
 792	__le64 uncongested = 0;
 793	void *addr;
 794
 795	/* catch completely corrupt packets */
 796	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
 797		return;
 798
 799	map = conn->c_fcong;
 800	map_page = 0;
 801	map_off = 0;
 802
 803	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 804	frag_off = 0;
 805
 806	copied = 0;
 807
 808	while (copied < RDS_CONG_MAP_BYTES) {
 809		__le64 *src, *dst;
 810		unsigned int k;
 811
 812		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
 813		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
 814
 815		addr = kmap_atomic(sg_page(&frag->f_sg));
 816
 817		src = addr + frag->f_sg.offset + frag_off;
 818		dst = (void *)map->m_page_addrs[map_page] + map_off;
 819		for (k = 0; k < to_copy; k += 8) {
  820			/* Record ports that became uncongested, i.e.
  821			 * bits that changed from 1 (congested) to 0. */
 822			uncongested |= ~(*src) & *dst;
 823			*dst++ = *src++;
 824		}
 825		kunmap_atomic(addr);
 826
 827		copied += to_copy;
 828
 829		map_off += to_copy;
 830		if (map_off == PAGE_SIZE) {
 831			map_off = 0;
 832			map_page++;
 833		}
 834
 835		frag_off += to_copy;
 836		if (frag_off == RDS_FRAG_SIZE) {
 837			frag = list_entry(frag->f_item.next,
 838					  struct rds_page_frag, f_item);
 839			frag_off = 0;
 840		}
 841	}
 842
 843	/* the congestion map is in little endian order */
 844	rds_cong_map_updated(map, le64_to_cpu(uncongested));
 845}
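
A tiny worked example of the bit arithmetic in the copy loop above, using made-up map words (a set bit in the congestion map marks a congested port):

	__le64 old_word = cpu_to_le64(0xC);	/* ports 2 and 3 congested     */
	__le64 new_word = cpu_to_le64(0x4);	/* only port 2 still congested */
	__le64 woke_up  = ~new_word & old_word;	/* 0x8: port 3 is uncongested  */
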
 846
 847static void rds_ib_process_recv(struct rds_connection *conn,
 848				struct rds_ib_recv_work *recv, u32 data_len,
 849				struct rds_ib_ack_state *state)
 850{
 851	struct rds_ib_connection *ic = conn->c_transport_data;
 852	struct rds_ib_incoming *ibinc = ic->i_ibinc;
 853	struct rds_header *ihdr, *hdr;
 854	dma_addr_t dma_addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
 855
 856	/* XXX shut down the connection if port 0,0 are seen? */
 857
 858	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
 859		 data_len);
 860
 861	if (data_len < sizeof(struct rds_header)) {
 862		rds_ib_conn_error(conn, "incoming message "
 863		       "from %pI6c didn't include a "
 864		       "header, disconnecting and "
 865		       "reconnecting\n",
 866		       &conn->c_faddr);
 867		return;
 868	}
 869	data_len -= sizeof(struct rds_header);
 870
 871	ihdr = ic->i_recv_hdrs[recv - ic->i_recvs];
 872
 873	ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, dma_addr,
 874				   sizeof(*ihdr), DMA_FROM_DEVICE);
 875	/* Validate the checksum. */
 876	if (!rds_message_verify_checksum(ihdr)) {
 877		rds_ib_conn_error(conn, "incoming message "
 878		       "from %pI6c has corrupted header - "
 879		       "forcing a reconnect\n",
 880		       &conn->c_faddr);
 881		rds_stats_inc(s_recv_drop_bad_checksum);
 882		goto done;
 883	}
 884
 885	/* Process the ACK sequence which comes with every packet */
 886	state->ack_recv = be64_to_cpu(ihdr->h_ack);
 887	state->ack_recv_valid = 1;
 888
 889	/* Process the credits update if there was one */
 890	if (ihdr->h_credit)
 891		rds_ib_send_add_credits(conn, ihdr->h_credit);
 892
 893	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
  894		/* This is an ACK-only packet. It gets special
  895		 * treatment here because historically, ACKs were
  896		 * rather special beasts.
 897		 */
 898		rds_ib_stats_inc(s_ib_ack_received);
 899
 900		/*
 901		 * Usually the frags make their way on to incs and are then freed as
 902		 * the inc is freed.  We don't go that route, so we have to drop the
 903		 * page ref ourselves.  We can't just leave the page on the recv
 904		 * because that confuses the dma mapping of pages and each recv's use
 905		 * of a partial page.
 906		 *
 907		 * FIXME: Fold this into the code path below.
 908		 */
 909		rds_ib_frag_free(ic, recv->r_frag);
 910		recv->r_frag = NULL;
 911		goto done;
 912	}
 913
 914	/*
 915	 * If we don't already have an inc on the connection then this
  916	 * fragment has a header and starts a message.  Copy its header
  917	 * into the inc and save the inc so we can hang upcoming fragments
 918	 * off its list.
 919	 */
 920	if (!ibinc) {
 921		ibinc = recv->r_ibinc;
 922		recv->r_ibinc = NULL;
 923		ic->i_ibinc = ibinc;
 924
 925		hdr = &ibinc->ii_inc.i_hdr;
 926		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
 927				local_clock();
 928		memcpy(hdr, ihdr, sizeof(*hdr));
 929		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
 930		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
 931				local_clock();
 932
 933		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
 934			 ic->i_recv_data_rem, hdr->h_flags);
 935	} else {
 936		hdr = &ibinc->ii_inc.i_hdr;
 937		/* We can't just use memcmp here; fragments of a
 938		 * single message may carry different ACKs */
 939		if (hdr->h_sequence != ihdr->h_sequence ||
 940		    hdr->h_len != ihdr->h_len ||
 941		    hdr->h_sport != ihdr->h_sport ||
 942		    hdr->h_dport != ihdr->h_dport) {
 943			rds_ib_conn_error(conn,
 944				"fragment header mismatch; forcing reconnect\n");
 945			goto done;
 946		}
 947	}
 948
 949	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
 950	recv->r_frag = NULL;
 951
 952	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
 953		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
 954	else {
 955		ic->i_recv_data_rem = 0;
 956		ic->i_ibinc = NULL;
 957
 958		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) {
 959			rds_ib_cong_recv(conn, ibinc);
 960		} else {
 961			rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr,
 962					  &ibinc->ii_inc, GFP_ATOMIC);
 963			state->ack_next = be64_to_cpu(hdr->h_sequence);
 964			state->ack_next_valid = 1;
 965		}
 966
 967		/* Evaluate the ACK_REQUIRED flag *after* we received
 968		 * the complete frame, and after bumping the next_rx
 969		 * sequence. */
 970		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
 971			rds_stats_inc(s_recv_ack_required);
 972			state->ack_required = 1;
 973		}
 974
 975		rds_inc_put(&ibinc->ii_inc);
 976	}
 977done:
 978	ib_dma_sync_single_for_device(ic->rds_ibdev->dev, dma_addr,
 979				      sizeof(*ihdr), DMA_FROM_DEVICE);
 980}
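
A short worked example of the reassembly bookkeeping above, assuming the usual 4 KiB RDS_FRAG_SIZE (the numbers are illustrative only):

	u32 len    = 10000;				/* hdr->h_len of the message */
	u32 nfrags = DIV_ROUND_UP(len, RDS_FRAG_SIZE);	/* 3 fragments on the wire   */

	/* i_recv_data_rem counts down 10000 -> 5904 -> 1808 -> 0; the inc is
	 * handed to rds_recv_incoming() only when the final fragment arrives. */
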
 981
 982void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
 983			     struct ib_wc *wc,
 984			     struct rds_ib_ack_state *state)
 985{
 986	struct rds_connection *conn = ic->conn;
 987	struct rds_ib_recv_work *recv;
 988
 989	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
 990		 (unsigned long long)wc->wr_id, wc->status,
 991		 ib_wc_status_msg(wc->status), wc->byte_len,
 992		 be32_to_cpu(wc->ex.imm_data));
 993
 994	rds_ib_stats_inc(s_ib_rx_cq_event);
 995	recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
 996	ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
 997			DMA_FROM_DEVICE);
 998
 999	/* Also process recvs in connecting state because it is possible
1000	 * to get a recv completion _before_ the rdmacm ESTABLISHED
1001	 * event is processed.
1002	 */
1003	if (wc->status == IB_WC_SUCCESS) {
1004		rds_ib_process_recv(conn, recv, wc->byte_len, state);
1005	} else {
1006		/* We expect errors as the qp is drained during shutdown */
1007		if (rds_conn_up(conn) || rds_conn_connecting(conn))
1008			rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
1009					  &conn->c_laddr, &conn->c_faddr,
1010					  conn->c_tos, wc->status,
1011					  ib_wc_status_msg(wc->status),
1012					  wc->vendor_err);
1013	}
1014
1015	/* rds_ib_process_recv() doesn't always consume the frag, and
1016	 * we might not have called it at all if the wc didn't indicate
1017	 * success. We already unmapped the frag's pages, though, and
1018	 * the following rds_ib_ring_free() call tells the refill path
1019	 * that it will not find an allocated frag here. Make sure we
1020	 * keep that promise by freeing a frag that's still on the ring.
1021	 */
1022	if (recv->r_frag) {
1023		rds_ib_frag_free(ic, recv->r_frag);
1024		recv->r_frag = NULL;
1025	}
1026	rds_ib_ring_free(&ic->i_recv_ring, 1);
1027
 1028	/* If we ever end up with a completely empty receive ring, we're
1029	 * in deep trouble, as the sender will definitely see RNR
1030	 * timeouts. */
1031	if (rds_ib_ring_empty(&ic->i_recv_ring))
1032		rds_ib_stats_inc(s_ib_rx_ring_empty);
1033
1034	if (rds_ib_ring_low(&ic->i_recv_ring)) {
1035		rds_ib_recv_refill(conn, 0, GFP_NOWAIT | __GFP_NOWARN);
1036		rds_ib_stats_inc(s_ib_rx_refill_from_cq);
1037	}
1038}
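
For reference, the handler above is driven from the receive completion path in ib_cm.c; a condensed sketch of that loop (the real code batches work completions and may differ in detail):

	struct ib_wc wc;
	struct rds_ib_ack_state state = { };

	while (ib_poll_cq(cq, 1, &wc) > 0)
		rds_ib_recv_cqe_handler(ic, &wc, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	rds_ib_attempt_ack(ic);
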
1039
1040int rds_ib_recv_path(struct rds_conn_path *cp)
1041{
1042	struct rds_connection *conn = cp->cp_conn;
1043	struct rds_ib_connection *ic = conn->c_transport_data;
1044
1045	rdsdebug("conn %p\n", conn);
1046	if (rds_conn_up(conn)) {
1047		rds_ib_attempt_ack(ic);
1048		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
1049		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
1050	}
1051
1052	return 0;
1053}
1054
1055int rds_ib_recv_init(void)
1056{
1057	struct sysinfo si;
1058	int ret = -ENOMEM;
1059
 1060	/* Default to roughly a third of all available RAM for recv memory */
1061	si_meminfo(&si);
1062	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
1063
1064	rds_ib_incoming_slab =
1065		kmem_cache_create_usercopy("rds_ib_incoming",
1066					   sizeof(struct rds_ib_incoming),
1067					   0, SLAB_HWCACHE_ALIGN,
1068					   offsetof(struct rds_ib_incoming,
1069						    ii_inc.i_usercopy),
1070					   sizeof(struct rds_inc_usercopy),
1071					   NULL);
1072	if (!rds_ib_incoming_slab)
1073		goto out;
1074
1075	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
1076					sizeof(struct rds_page_frag),
1077					0, SLAB_HWCACHE_ALIGN, NULL);
1078	if (!rds_ib_frag_slab) {
1079		kmem_cache_destroy(rds_ib_incoming_slab);
1080		rds_ib_incoming_slab = NULL;
1081	} else
1082		ret = 0;
1083out:
1084	return ret;
1085}
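
As a quick sanity check on the sizing above, with illustrative numbers (assuming 4 KiB pages and the usual 4 KiB RDS_FRAG_SIZE):

	unsigned long totalram  = 2097152;	/* 8 GiB of RAM, in pages */
	unsigned long max_frags = totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
						/* = 699050 fragments, i.e. roughly
						 * one third of RAM for recv buffers */
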
1086
1087void rds_ib_recv_exit(void)
1088{
1089	WARN_ON(atomic_read(&rds_ib_allocation));
1090
1091	kmem_cache_destroy(rds_ib_incoming_slab);
1092	kmem_cache_destroy(rds_ib_frag_slab);
1093}