v4.10.11
   1/*
   2 * Copyright (c) 2006 Oracle.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/slab.h>
  35#include <linux/pci.h>
  36#include <linux/dma-mapping.h>
  37#include <rdma/rdma_cm.h>
  38
  39#include "rds_single_path.h"
  40#include "rds.h"
  41#include "ib.h"
  42
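/* Slab caches for the incoming-message and fragment bookkeeping structs,
 * plus a global count of incoming allocations that is bounded by
 * rds_ib_sysctl_max_recv_allocation (see rds_ib_refill_one_inc()). */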
  43static struct kmem_cache *rds_ib_incoming_slab;
  44static struct kmem_cache *rds_ib_frag_slab;
  45static atomic_t	rds_ib_allocation = ATOMIC_INIT(0);
  46
  47void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
  48{
  49	struct rds_ib_recv_work *recv;
  50	u32 i;
  51
  52	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
  53		struct ib_sge *sge;
  54
  55		recv->r_ibinc = NULL;
  56		recv->r_frag = NULL;
  57
  58		recv->r_wr.next = NULL;
  59		recv->r_wr.wr_id = i;
  60		recv->r_wr.sg_list = recv->r_sge;
  61		recv->r_wr.num_sge = RDS_IB_RECV_SGE;
  62
  63		sge = &recv->r_sge[0];
  64		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
  65		sge->length = sizeof(struct rds_header);
  66		sge->lkey = ic->i_pd->local_dma_lkey;
  67
  68		sge = &recv->r_sge[1];
  69		sge->addr = 0;
  70		sge->length = RDS_FRAG_SIZE;
  71		sge->lkey = ic->i_pd->local_dma_lkey;
  72	}
  73}
  74
  75/*
  76 * The entire 'from' list, including the from element itself, is put on
  77 * to the tail of the 'to' list.
  78 */
  79static void list_splice_entire_tail(struct list_head *from,
  80				    struct list_head *to)
  81{
  82	struct list_head *from_last = from->prev;
  83
  84	list_splice_tail(from_last, to);
  85	list_add_tail(from_last, to);
  86}
  87
  88static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
  89{
  90	struct list_head *tmp;
  91
  92	tmp = xchg(&cache->xfer, NULL);
  93	if (tmp) {
  94		if (cache->ready)
  95			list_splice_entire_tail(tmp, cache->ready);
  96		else
  97			cache->ready = tmp;
  98	}
  99}
 100
 101static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
 102{
 103	struct rds_ib_cache_head *head;
 104	int cpu;
 105
 106	cache->percpu = alloc_percpu(struct rds_ib_cache_head);
 107	if (!cache->percpu)
 108	       return -ENOMEM;
 109
 110	for_each_possible_cpu(cpu) {
 111		head = per_cpu_ptr(cache->percpu, cpu);
 112		head->first = NULL;
 113		head->count = 0;
 114	}
 115	cache->xfer = NULL;
 116	cache->ready = NULL;
 117
 118	return 0;
 119}
 120
 121int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
 122{
 123	int ret;
 124
 125	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
 126	if (!ret) {
 127		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
 128		if (ret)
 129			free_percpu(ic->i_cache_incs.percpu);
 130	}
 131
 132	return ret;
 133}
 134
 135static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
 136					  struct list_head *caller_list)
 137{
 138	struct rds_ib_cache_head *head;
 139	int cpu;
 140
 141	for_each_possible_cpu(cpu) {
 142		head = per_cpu_ptr(cache->percpu, cpu);
 143		if (head->first) {
 144			list_splice_entire_tail(head->first, caller_list);
 145			head->first = NULL;
 146		}
 147	}
 148
 149	if (cache->ready) {
 150		list_splice_entire_tail(cache->ready, caller_list);
 151		cache->ready = NULL;
 152	}
 153}
 154
 155void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
 156{
 157	struct rds_ib_incoming *inc;
 158	struct rds_ib_incoming *inc_tmp;
 159	struct rds_page_frag *frag;
 160	struct rds_page_frag *frag_tmp;
 161	LIST_HEAD(list);
 162
 163	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
 164	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
 165	free_percpu(ic->i_cache_incs.percpu);
 166
 167	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
 168		list_del(&inc->ii_cache_entry);
 169		WARN_ON(!list_empty(&inc->ii_frags));
 170		kmem_cache_free(rds_ib_incoming_slab, inc);
 171	}
 172
 173	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
 174	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
 175	free_percpu(ic->i_cache_frags.percpu);
 176
 177	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
 178		list_del(&frag->f_cache_entry);
 179		WARN_ON(!list_empty(&frag->f_item));
 180		kmem_cache_free(rds_ib_frag_slab, frag);
 181	}
 182}
 183
 184/* fwd decl */
 185static void rds_ib_recv_cache_put(struct list_head *new_item,
 186				  struct rds_ib_refill_cache *cache);
 187static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
 188
 189
 190/* Recycle frag and attached recv buffer f_sg */
 191static void rds_ib_frag_free(struct rds_ib_connection *ic,
 192			     struct rds_page_frag *frag)
 193{
 194	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
 195
 196	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
 197}
 198
 199/* Recycle inc after freeing attached frags */
 200void rds_ib_inc_free(struct rds_incoming *inc)
 201{
 202	struct rds_ib_incoming *ibinc;
 203	struct rds_page_frag *frag;
 204	struct rds_page_frag *pos;
 205	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
 206
 207	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 208
 209	/* Free attached frags */
 210	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
 211		list_del_init(&frag->f_item);
 212		rds_ib_frag_free(ic, frag);
 213	}
 214	BUG_ON(!list_empty(&ibinc->ii_frags));
 215
 216	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
 217	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
 218}
 219
 220static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
 221				  struct rds_ib_recv_work *recv)
 222{
 223	if (recv->r_ibinc) {
 224		rds_inc_put(&recv->r_ibinc->ii_inc);
 225		recv->r_ibinc = NULL;
 226	}
 227	if (recv->r_frag) {
 228		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
 229		rds_ib_frag_free(ic, recv->r_frag);
 230		recv->r_frag = NULL;
 231	}
 232}
 233
 234void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
 235{
 236	u32 i;
 237
 238	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
 239		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
 240}
 241
 242static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
 243						     gfp_t slab_mask)
 244{
 245	struct rds_ib_incoming *ibinc;
 246	struct list_head *cache_item;
 247	int avail_allocs;
 248
 249	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
 250	if (cache_item) {
 251		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
 252	} else {
 253		avail_allocs = atomic_add_unless(&rds_ib_allocation,
 254						 1, rds_ib_sysctl_max_recv_allocation);
 255		if (!avail_allocs) {
 256			rds_ib_stats_inc(s_ib_rx_alloc_limit);
 257			return NULL;
 258		}
 259		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
 260		if (!ibinc) {
 261			atomic_dec(&rds_ib_allocation);
 262			return NULL;
 263		}
 264	}
 265	INIT_LIST_HEAD(&ibinc->ii_frags);
 266	rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
 267
 268	return ibinc;
 269}
 270
 271static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
 272						    gfp_t slab_mask, gfp_t page_mask)
 273{
 274	struct rds_page_frag *frag;
 275	struct list_head *cache_item;
 276	int ret;
 277
 278	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
 279	if (cache_item) {
 280		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
 281	} else {
 282		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
 283		if (!frag)
 284			return NULL;
 285
 286		sg_init_table(&frag->f_sg, 1);
 287		ret = rds_page_remainder_alloc(&frag->f_sg,
 288					       RDS_FRAG_SIZE, page_mask);
 289		if (ret) {
 290			kmem_cache_free(rds_ib_frag_slab, frag);
 291			return NULL;
 292		}
 293	}
 294
 295	INIT_LIST_HEAD(&frag->f_item);
 296
 297	return frag;
 298}
 299
 300static int rds_ib_recv_refill_one(struct rds_connection *conn,
 301				  struct rds_ib_recv_work *recv, gfp_t gfp)
 302{
 303	struct rds_ib_connection *ic = conn->c_transport_data;
 304	struct ib_sge *sge;
 305	int ret = -ENOMEM;
 306	gfp_t slab_mask = GFP_NOWAIT;
 307	gfp_t page_mask = GFP_NOWAIT;
 308
 309	if (gfp & __GFP_DIRECT_RECLAIM) {
 310		slab_mask = GFP_KERNEL;
 311		page_mask = GFP_HIGHUSER;
 312	}
 313
 314	if (!ic->i_cache_incs.ready)
 315		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
 316	if (!ic->i_cache_frags.ready)
 317		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
 318
 319	/*
 320	 * ibinc was taken from recv if recv contained the start of a message.
 321	 * recvs that were continuations will still have this allocated.
 322	 */
 323	if (!recv->r_ibinc) {
 324		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
 325		if (!recv->r_ibinc)
 326			goto out;
 327	}
 328
 329	WARN_ON(recv->r_frag); /* leak! */
 330	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
 331	if (!recv->r_frag)
 332		goto out;
 333
 334	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
 335			    1, DMA_FROM_DEVICE);
 336	WARN_ON(ret != 1);
 337
 338	sge = &recv->r_sge[0];
 339	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
 340	sge->length = sizeof(struct rds_header);
 341
 342	sge = &recv->r_sge[1];
 343	sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
 344	sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
 345
 346	ret = 0;
 347out:
 348	return ret;
 349}
 350
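/* The RDS_RECV_REFILL bit on c_flags acts as a trylock around the refill
 * pass: whoever sets it first posts the buffers, everyone else backs off,
 * and release_refill() wakes any waiters on c_waitq. */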
 351static int acquire_refill(struct rds_connection *conn)
 352{
 353	return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
 354}
 355
 356static void release_refill(struct rds_connection *conn)
 357{
 358	clear_bit(RDS_RECV_REFILL, &conn->c_flags);
 359
 360	/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 361	 * hot path and finding waiters is very rare.  We don't want to walk
 362	 * the system-wide hashed waitqueue buckets in the fast path only to
 363	 * almost never find waiters.
 364	 */
 365	if (waitqueue_active(&conn->c_waitq))
 366		wake_up_all(&conn->c_waitq);
 367}
 368
 369/*
 370 * This tries to allocate and post unused work requests after making sure that
 371 * they have all the allocations they need to queue received fragments into
 372 * sockets.
 373 *
  374 * Nothing is returned; a failed post releases its ring slot for a later retry.
 375 */
 376void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
 377{
 378	struct rds_ib_connection *ic = conn->c_transport_data;
 379	struct rds_ib_recv_work *recv;
 380	struct ib_recv_wr *failed_wr;
 381	unsigned int posted = 0;
 382	int ret = 0;
 383	bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
 384	u32 pos;
 385
 386	/* the goal here is to just make sure that someone, somewhere
 387	 * is posting buffers.  If we can't get the refill lock,
 388	 * let them do their thing
 389	 */
 390	if (!acquire_refill(conn))
 391		return;
 392
 393	while ((prefill || rds_conn_up(conn)) &&
 394	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 395		if (pos >= ic->i_recv_ring.w_nr) {
 396			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
 397					pos);
 398			break;
 399		}
 400
 401		recv = &ic->i_recvs[pos];
 402		ret = rds_ib_recv_refill_one(conn, recv, gfp);
 403		if (ret) {
 404			break;
 405		}
 406
 407		/* XXX when can this fail? */
 408		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
 409		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
 410			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
 411			 (long) ib_sg_dma_address(
 412				ic->i_cm_id->device,
 413				&recv->r_frag->f_sg),
 414			ret);
 415		if (ret) {
 416			rds_ib_conn_error(conn, "recv post on "
 417			       "%pI4 returned %d, disconnecting and "
 418			       "reconnecting\n", &conn->c_faddr,
 419			       ret);
 420			break;
 421		}
 422
 423		posted++;
 424	}
 425
 426	/* We're doing flow control - update the window. */
 427	if (ic->i_flowctl && posted)
 428		rds_ib_advertise_credits(conn, posted);
 429
 430	if (ret)
 431		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
 432
 433	release_refill(conn);
 434
 435	/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
 436	 * in this case the ring being low is going to lead to more interrupts
 437	 * and we can safely let the softirq code take care of it unless the
 438	 * ring is completely empty.
 439	 *
 440	 * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
 441	 * we might have raced with the softirq code while we had the refill
 442	 * lock held.  Use rds_ib_ring_low() instead of ring_empty to decide
 443	 * if we should requeue.
 444	 */
 445	if (rds_conn_up(conn) &&
 446	    ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
 447	    rds_ib_ring_empty(&ic->i_recv_ring))) {
 448		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
 449	}
 450}
 451
 452/*
 453 * We want to recycle several types of recv allocations, like incs and frags.
 454 * To use this, the *_free() function passes in the ptr to a list_head within
 455 * the recyclee, as well as the cache to put it on.
 456 *
 457 * First, we put the memory on a percpu list. When this reaches a certain size,
 458 * We move it to an intermediate non-percpu list in a lockless manner, with some
  459 * xchg/cmpxchg wizardry.
 460 *
 461 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 462 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
  463 * list_empty() will return true even when one element is actually present.
 464 */
 465static void rds_ib_recv_cache_put(struct list_head *new_item,
 466				 struct rds_ib_refill_cache *cache)
 467{
 468	unsigned long flags;
 469	struct list_head *old, *chpfirst;
 470
 471	local_irq_save(flags);
 472
 473	chpfirst = __this_cpu_read(cache->percpu->first);
 474	if (!chpfirst)
 475		INIT_LIST_HEAD(new_item);
 476	else /* put on front */
 477		list_add_tail(new_item, chpfirst);
 478
 479	__this_cpu_write(cache->percpu->first, new_item);
 480	__this_cpu_inc(cache->percpu->count);
 481
 482	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
 483		goto end;
 484
 485	/*
 486	 * Return our per-cpu first list to the cache's xfer by atomically
 487	 * grabbing the current xfer list, appending it to our per-cpu list,
 488	 * and then atomically returning that entire list back to the
 489	 * cache's xfer list as long as it's still empty.
 490	 */
 491	do {
 492		old = xchg(&cache->xfer, NULL);
 493		if (old)
 494			list_splice_entire_tail(old, chpfirst);
 495		old = cmpxchg(&cache->xfer, NULL, chpfirst);
 496	} while (old);
 497
 498
 499	__this_cpu_write(cache->percpu->first, NULL);
 500	__this_cpu_write(cache->percpu->count, 0);
 501end:
 502	local_irq_restore(flags);
 503}
 504
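/* Pop one entry off the cache's ready list.  The anchor is a bare pointer
 * rather than a list_head, so an empty cache is NULL and a single remaining
 * entry is a list_head that is "empty" on itself (see the comment above). */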
 505static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
 506{
 507	struct list_head *head = cache->ready;
 508
 509	if (head) {
 510		if (!list_empty(head)) {
 511			cache->ready = head->next;
 512			list_del_init(head);
 513		} else
 514			cache->ready = NULL;
 515	}
 516
 517	return head;
 518}
 519
 520int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 521{
 522	struct rds_ib_incoming *ibinc;
 523	struct rds_page_frag *frag;
 524	unsigned long to_copy;
 525	unsigned long frag_off = 0;
 526	int copied = 0;
 527	int ret;
 528	u32 len;
 529
 530	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 531	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 532	len = be32_to_cpu(inc->i_hdr.h_len);
 533
 534	while (iov_iter_count(to) && copied < len) {
 535		if (frag_off == RDS_FRAG_SIZE) {
 536			frag = list_entry(frag->f_item.next,
 537					  struct rds_page_frag, f_item);
 538			frag_off = 0;
 539		}
 540		to_copy = min_t(unsigned long, iov_iter_count(to),
 541				RDS_FRAG_SIZE - frag_off);
 542		to_copy = min_t(unsigned long, to_copy, len - copied);
 543
 544		/* XXX needs + offset for multiple recvs per page */
 545		rds_stats_add(s_copy_to_user, to_copy);
 546		ret = copy_page_to_iter(sg_page(&frag->f_sg),
 547					frag->f_sg.offset + frag_off,
 548					to_copy,
 549					to);
 550		if (ret != to_copy)
 551			return -EFAULT;
 552
 553		frag_off += to_copy;
 554		copied += to_copy;
 555	}
 556
 557	return copied;
 558}
 559
 560/* ic starts out kzalloc()ed */
 561void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
 562{
 563	struct ib_send_wr *wr = &ic->i_ack_wr;
 564	struct ib_sge *sge = &ic->i_ack_sge;
 565
 566	sge->addr = ic->i_ack_dma;
 567	sge->length = sizeof(struct rds_header);
 568	sge->lkey = ic->i_pd->local_dma_lkey;
 569
 570	wr->sg_list = sge;
 571	wr->num_sge = 1;
 572	wr->opcode = IB_WR_SEND;
 573	wr->wr_id = RDS_IB_ACK_WR_ID;
 574	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 575}
 576
 577/*
 578 * You'd think that with reliable IB connections you wouldn't need to ack
 579 * messages that have been received.  The problem is that IB hardware generates
 580 * an ack message before it has DMAed the message into memory.  This creates a
 581 * potential message loss if the HCA is disabled for any reason between when it
 582 * sends the ack and before the message is DMAed and processed.  This is only a
 583 * potential issue if another HCA is available for fail-over.
 584 *
 585 * When the remote host receives our ack they'll free the sent message from
 586 * their send queue.  To decrease the latency of this we always send an ack
 587 * immediately after we've received messages.
 588 *
 589 * For simplicity, we only have one ack in flight at a time.  This puts
 590 * pressure on senders to have deep enough send queues to absorb the latency of
 591 * a single ack frame being in flight.  This might not be good enough.
 592 *
  593 * This is implemented by having a long-lived send_wr and sge which point to a
 594 * statically allocated ack frame.  This ack wr does not fall under the ring
 595 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 596 * room for it beyond the ring size.  Send completion notices its special
 597 * wr_id and avoids working with the ring in that case.
 598 */
 599#ifndef KERNEL_HAS_ATOMIC64
 600void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 601{
 602	unsigned long flags;
 603
 604	spin_lock_irqsave(&ic->i_ack_lock, flags);
 605	ic->i_ack_next = seq;
 606	if (ack_required)
 607		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 608	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
 609}
 610
 611static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 612{
 613	unsigned long flags;
 614	u64 seq;
 615
 616	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 617
 618	spin_lock_irqsave(&ic->i_ack_lock, flags);
 619	seq = ic->i_ack_next;
 620	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
 621
 622	return seq;
 623}
 624#else
 625void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 626{
 627	atomic64_set(&ic->i_ack_next, seq);
 628	if (ack_required) {
 629		smp_mb__before_atomic();
 630		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 631	}
 632}
 633
 634static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 635{
 636	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 637	smp_mb__after_atomic();
 638
 639	return atomic64_read(&ic->i_ack_next);
 640}
 641#endif
 642
 643
 644static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
 645{
 646	struct rds_header *hdr = ic->i_ack;
 647	struct ib_send_wr *failed_wr;
 648	u64 seq;
 649	int ret;
 650
 651	seq = rds_ib_get_ack(ic);
 652
 653	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
 654	rds_message_populate_header(hdr, 0, 0, 0);
 655	hdr->h_ack = cpu_to_be64(seq);
 656	hdr->h_credit = adv_credits;
 657	rds_message_make_checksum(hdr);
 658	ic->i_ack_queued = jiffies;
 659
 660	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
 661	if (unlikely(ret)) {
 662		/* Failed to send. Release the WR, and
 663		 * force another ACK.
 664		 */
 665		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 666		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 667
 668		rds_ib_stats_inc(s_ib_ack_send_failure);
 669
 670		rds_ib_conn_error(ic->conn, "sending ack failed\n");
 671	} else
 672		rds_ib_stats_inc(s_ib_ack_sent);
 673}
 674
 675/*
 676 * There are 3 ways of getting acknowledgements to the peer:
 677 *  1.	We call rds_ib_attempt_ack from the recv completion handler
 678 *	to send an ACK-only frame.
 679 *	However, there can be only one such frame in the send queue
 680 *	at any time, so we may have to postpone it.
 681 *  2.	When another (data) packet is transmitted while there's
 682 *	an ACK in the queue, we piggyback the ACK sequence number
 683 *	on the data packet.
 684 *  3.	If the ACK WR is done sending, we get called from the
 685 *	send queue completion handler, and check whether there's
 686 *	another ACK pending (postponed because the WR was on the
 687 *	queue). If so, we transmit it.
 688 *
 689 * We maintain 2 variables:
 690 *  -	i_ack_flags, which keeps track of whether the ACK WR
 691 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 692 *  -	i_ack_next, which is the last sequence number we received
 693 *
 694 * Potentially, send queue and receive queue handlers can run concurrently.
 695 * It would be nice to not have to use a spinlock to synchronize things,
 696 * but the one problem that rules this out is that 64bit updates are
 697 * not atomic on all platforms. Things would be a lot simpler if
 698 * we had atomic64 or maybe cmpxchg64 everywhere.
 699 *
 700 * Reconnecting complicates this picture just slightly. When we
 701 * reconnect, we may be seeing duplicate packets. The peer
 702 * is retransmitting them, because it hasn't seen an ACK for
 703 * them. It is important that we ACK these.
 704 *
 705 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 706 * this flag set *MUST* be acknowledged immediately.
 707 */
 708
 709/*
 710 * When we get here, we're called from the recv queue handler.
 711 * Check whether we ought to transmit an ACK.
 712 */
 713void rds_ib_attempt_ack(struct rds_ib_connection *ic)
 714{
 715	unsigned int adv_credits;
 716
 717	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
 718		return;
 719
 720	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
 721		rds_ib_stats_inc(s_ib_ack_send_delayed);
 722		return;
 723	}
 724
 725	/* Can we get a send credit? */
 726	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
 727		rds_ib_stats_inc(s_ib_tx_throttle);
 728		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 729		return;
 730	}
 731
 732	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 733	rds_ib_send_ack(ic, adv_credits);
 734}
 735
 736/*
 737 * We get here from the send completion handler, when the
 738 * adapter tells us the ACK frame was sent.
 739 */
 740void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
 741{
 742	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 743	rds_ib_attempt_ack(ic);
 744}
 745
 746/*
 747 * This is called by the regular xmit code when it wants to piggyback
 748 * an ACK on an outgoing frame.
 749 */
 750u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
 751{
 752	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
 753		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
 754	return rds_ib_get_ack(ic);
 755}
 756
 757/*
 758 * It's kind of lame that we're copying from the posted receive pages into
 759 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 760 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 761 * hopefully we won't need to invest that complexity in making it more
 762 * efficient.  By copying we can share a simpler core with TCP which has to
 763 * copy.
 764 */
 765static void rds_ib_cong_recv(struct rds_connection *conn,
 766			      struct rds_ib_incoming *ibinc)
 767{
 768	struct rds_cong_map *map;
 769	unsigned int map_off;
 770	unsigned int map_page;
 771	struct rds_page_frag *frag;
 772	unsigned long frag_off;
 773	unsigned long to_copy;
 774	unsigned long copied;
 775	uint64_t uncongested = 0;
 776	void *addr;
 777
 778	/* catch completely corrupt packets */
 779	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
 780		return;
 781
 782	map = conn->c_fcong;
 783	map_page = 0;
 784	map_off = 0;
 785
 786	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 787	frag_off = 0;
 788
 789	copied = 0;
 790
 791	while (copied < RDS_CONG_MAP_BYTES) {
 792		uint64_t *src, *dst;
 793		unsigned int k;
 794
 795		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
 796		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
 797
 798		addr = kmap_atomic(sg_page(&frag->f_sg));
 799
 800		src = addr + frag->f_sg.offset + frag_off;
 801		dst = (void *)map->m_page_addrs[map_page] + map_off;
 802		for (k = 0; k < to_copy; k += 8) {
 803			/* Record ports that became uncongested, ie
 804			 * bits that changed from 0 to 1. */
 805			uncongested |= ~(*src) & *dst;
 806			*dst++ = *src++;
 807		}
 808		kunmap_atomic(addr);
 809
 810		copied += to_copy;
 811
 812		map_off += to_copy;
 813		if (map_off == PAGE_SIZE) {
 814			map_off = 0;
 815			map_page++;
 816		}
 817
 818		frag_off += to_copy;
 819		if (frag_off == RDS_FRAG_SIZE) {
 820			frag = list_entry(frag->f_item.next,
 821					  struct rds_page_frag, f_item);
 822			frag_off = 0;
 823		}
 824	}
 825
 826	/* the congestion map is in little endian order */
 827	uncongested = le64_to_cpu(uncongested);
 828
 829	rds_cong_map_updated(map, uncongested);
 830}
 831
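/* Take one completed receive apart: validate the header, fold in the
 * piggybacked ACK and credit update, then either treat it as an ACK-only
 * frame, attach the fragment to the in-progress rds_ib_incoming, or hand
 * the completed message (or congestion map) up the stack. */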
 832static void rds_ib_process_recv(struct rds_connection *conn,
 833				struct rds_ib_recv_work *recv, u32 data_len,
 834				struct rds_ib_ack_state *state)
 835{
 836	struct rds_ib_connection *ic = conn->c_transport_data;
 837	struct rds_ib_incoming *ibinc = ic->i_ibinc;
 838	struct rds_header *ihdr, *hdr;
 839
 840	/* XXX shut down the connection if port 0,0 are seen? */
 841
 842	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
 843		 data_len);
 844
 845	if (data_len < sizeof(struct rds_header)) {
 846		rds_ib_conn_error(conn, "incoming message "
 847		       "from %pI4 didn't include a "
 848		       "header, disconnecting and "
 849		       "reconnecting\n",
 850		       &conn->c_faddr);
 851		return;
 852	}
 853	data_len -= sizeof(struct rds_header);
 854
 855	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
 856
 857	/* Validate the checksum. */
 858	if (!rds_message_verify_checksum(ihdr)) {
 859		rds_ib_conn_error(conn, "incoming message "
 860		       "from %pI4 has corrupted header - "
 861		       "forcing a reconnect\n",
 862		       &conn->c_faddr);
 863		rds_stats_inc(s_recv_drop_bad_checksum);
 864		return;
 865	}
 866
 867	/* Process the ACK sequence which comes with every packet */
 868	state->ack_recv = be64_to_cpu(ihdr->h_ack);
 869	state->ack_recv_valid = 1;
 870
 871	/* Process the credits update if there was one */
 872	if (ihdr->h_credit)
 873		rds_ib_send_add_credits(conn, ihdr->h_credit);
 874
 875	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
 876		/* This is an ACK-only packet. The fact that it gets
 877		 * special treatment here is that historically, ACKs
 878		 * were rather special beasts.
 879		 */
 880		rds_ib_stats_inc(s_ib_ack_received);
 881
 882		/*
 883		 * Usually the frags make their way on to incs and are then freed as
 884		 * the inc is freed.  We don't go that route, so we have to drop the
 885		 * page ref ourselves.  We can't just leave the page on the recv
 886		 * because that confuses the dma mapping of pages and each recv's use
 887		 * of a partial page.
 888		 *
 889		 * FIXME: Fold this into the code path below.
 890		 */
 891		rds_ib_frag_free(ic, recv->r_frag);
 892		recv->r_frag = NULL;
 893		return;
 894	}
 895
 896	/*
 897	 * If we don't already have an inc on the connection then this
  898	 * fragment has a header and starts a message.  Copy its header
 899	 * into the inc and save the inc so we can hang upcoming fragments
 900	 * off its list.
 901	 */
 902	if (!ibinc) {
 903		ibinc = recv->r_ibinc;
 904		recv->r_ibinc = NULL;
 905		ic->i_ibinc = ibinc;
 906
 907		hdr = &ibinc->ii_inc.i_hdr;
 908		memcpy(hdr, ihdr, sizeof(*hdr));
 909		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
 910
 911		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
 912			 ic->i_recv_data_rem, hdr->h_flags);
 913	} else {
 914		hdr = &ibinc->ii_inc.i_hdr;
 915		/* We can't just use memcmp here; fragments of a
 916		 * single message may carry different ACKs */
 917		if (hdr->h_sequence != ihdr->h_sequence ||
 918		    hdr->h_len != ihdr->h_len ||
 919		    hdr->h_sport != ihdr->h_sport ||
 920		    hdr->h_dport != ihdr->h_dport) {
 921			rds_ib_conn_error(conn,
 922				"fragment header mismatch; forcing reconnect\n");
 923			return;
 924		}
 925	}
 926
 927	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
 928	recv->r_frag = NULL;
 929
 930	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
 931		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
 932	else {
 933		ic->i_recv_data_rem = 0;
 934		ic->i_ibinc = NULL;
 935
 936		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
 937			rds_ib_cong_recv(conn, ibinc);
 938		else {
 939			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
 940					  &ibinc->ii_inc, GFP_ATOMIC);
 941			state->ack_next = be64_to_cpu(hdr->h_sequence);
 942			state->ack_next_valid = 1;
 943		}
 944
 945		/* Evaluate the ACK_REQUIRED flag *after* we received
 946		 * the complete frame, and after bumping the next_rx
 947		 * sequence. */
 948		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
 949			rds_stats_inc(s_recv_ack_required);
 950			state->ack_required = 1;
 951		}
 952
 953		rds_inc_put(&ibinc->ii_inc);
 954	}
 955}
 956
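/* Called for each receive work completion: unmap the fragment, feed a
 * successful completion to rds_ib_process_recv(), release the ring slot,
 * and kick a refill if the ring is running low. */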
 957void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
 958			     struct ib_wc *wc,
 959			     struct rds_ib_ack_state *state)
 960{
 961	struct rds_connection *conn = ic->conn;
 962	struct rds_ib_recv_work *recv;
 963
 964	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
 965		 (unsigned long long)wc->wr_id, wc->status,
 966		 ib_wc_status_msg(wc->status), wc->byte_len,
 967		 be32_to_cpu(wc->ex.imm_data));
 968
 969	rds_ib_stats_inc(s_ib_rx_cq_event);
 970	recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
 971	ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
 972			DMA_FROM_DEVICE);
 973
 974	/* Also process recvs in connecting state because it is possible
 975	 * to get a recv completion _before_ the rdmacm ESTABLISHED
 976	 * event is processed.
 977	 */
 978	if (wc->status == IB_WC_SUCCESS) {
 979		rds_ib_process_recv(conn, recv, wc->byte_len, state);
 980	} else {
 981		/* We expect errors as the qp is drained during shutdown */
 982		if (rds_conn_up(conn) || rds_conn_connecting(conn))
 983			rds_ib_conn_error(conn, "recv completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
 984					  &conn->c_faddr,
 985					  wc->status,
 986					  ib_wc_status_msg(wc->status));
 987	}
 988
 989	/* rds_ib_process_recv() doesn't always consume the frag, and
 990	 * we might not have called it at all if the wc didn't indicate
 991	 * success. We already unmapped the frag's pages, though, and
 992	 * the following rds_ib_ring_free() call tells the refill path
 993	 * that it will not find an allocated frag here. Make sure we
 994	 * keep that promise by freeing a frag that's still on the ring.
 995	 */
 996	if (recv->r_frag) {
 997		rds_ib_frag_free(ic, recv->r_frag);
 998		recv->r_frag = NULL;
 999	}
1000	rds_ib_ring_free(&ic->i_recv_ring, 1);
1001
1002	/* If we ever end up with a really empty receive ring, we're
1003	 * in deep trouble, as the sender will definitely see RNR
1004	 * timeouts. */
1005	if (rds_ib_ring_empty(&ic->i_recv_ring))
1006		rds_ib_stats_inc(s_ib_rx_ring_empty);
1007
1008	if (rds_ib_ring_low(&ic->i_recv_ring))
1009		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
1010}
1011
1012int rds_ib_recv_path(struct rds_conn_path *cp)
1013{
1014	struct rds_connection *conn = cp->cp_conn;
1015	struct rds_ib_connection *ic = conn->c_transport_data;
1016	int ret = 0;
1017
1018	rdsdebug("conn %p\n", conn);
1019	if (rds_conn_up(conn)) {
1020		rds_ib_attempt_ack(ic);
1021		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
1022	}
1023
1024	return ret;
1025}
1026
1027int rds_ib_recv_init(void)
1028{
1029	struct sysinfo si;
1030	int ret = -ENOMEM;
1031
1032	/* Default to 30% of all available RAM for recv memory */
1033	si_meminfo(&si);
1034	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
1035
1036	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
1037					sizeof(struct rds_ib_incoming),
1038					0, SLAB_HWCACHE_ALIGN, NULL);
1039	if (!rds_ib_incoming_slab)
1040		goto out;
1041
1042	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
1043					sizeof(struct rds_page_frag),
1044					0, SLAB_HWCACHE_ALIGN, NULL);
1045	if (!rds_ib_frag_slab) {
1046		kmem_cache_destroy(rds_ib_incoming_slab);
1047		rds_ib_incoming_slab = NULL;
1048	} else
1049		ret = 0;
1050out:
1051	return ret;
1052}
1053
1054void rds_ib_recv_exit(void)
1055{
1056	kmem_cache_destroy(rds_ib_incoming_slab);
1057	kmem_cache_destroy(rds_ib_frag_slab);
1058}
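
Aside: the per-CPU refill cache above hands full batches to the shared 'xfer' slot with an xchg followed by a cmpxchg rather than a lock, and the consumer (rds_ib_cache_xfer_to_ready()) empties the slot with a single xchg. A minimal stand-alone sketch of that hand-off pattern, using C11 atomics and made-up names (node, xfer, publish_batch, take_batch) instead of the kernel's list and atomic helpers:

#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *next;		/* singly linked, NULL-terminated batch */
};

static _Atomic(struct node *) xfer;	/* shared "ready for consumer" slot */

/* Producer: publish the batch batch_head..batch_tail (batch_tail->next == NULL). */
static void publish_batch(struct node *batch_head, struct node *batch_tail)
{
	struct node *old, *expected;

	do {
		/* Grab anything already pending and append it behind our own
		 * batch, then walk batch_tail out to the new end. */
		old = atomic_exchange(&xfer, NULL);
		if (old) {
			batch_tail->next = old;
			while (batch_tail->next)
				batch_tail = batch_tail->next;
		}
		/* Publish only if the slot is still empty; if another CPU
		 * refilled it meanwhile, loop and absorb that batch too. */
		expected = NULL;
	} while (!atomic_compare_exchange_strong(&xfer, &expected, batch_head));
}

/* Consumer: take the whole pending batch, as rds_ib_cache_xfer_to_ready() does. */
static struct node *take_batch(void)
{
	return atomic_exchange(&xfer, NULL);
}

Because the producer only installs its batch into an empty slot and otherwise absorbs whatever it finds there first, batches are never overwritten, which is why rds_ib_recv_cache_put() can loop on cmpxchg() without losing entries.
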
v6.2
   1/*
   2 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/slab.h>
  35#include <linux/pci.h>
  36#include <linux/dma-mapping.h>
  37#include <rdma/rdma_cm.h>
  38
  39#include "rds_single_path.h"
  40#include "rds.h"
  41#include "ib.h"
  42
  43static struct kmem_cache *rds_ib_incoming_slab;
  44static struct kmem_cache *rds_ib_frag_slab;
  45static atomic_t	rds_ib_allocation = ATOMIC_INIT(0);
  46
  47void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
  48{
  49	struct rds_ib_recv_work *recv;
  50	u32 i;
  51
  52	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
  53		struct ib_sge *sge;
  54
  55		recv->r_ibinc = NULL;
  56		recv->r_frag = NULL;
  57
  58		recv->r_wr.next = NULL;
  59		recv->r_wr.wr_id = i;
  60		recv->r_wr.sg_list = recv->r_sge;
  61		recv->r_wr.num_sge = RDS_IB_RECV_SGE;
  62
  63		sge = &recv->r_sge[0];
  64		sge->addr = ic->i_recv_hdrs_dma[i];
  65		sge->length = sizeof(struct rds_header);
  66		sge->lkey = ic->i_pd->local_dma_lkey;
  67
  68		sge = &recv->r_sge[1];
  69		sge->addr = 0;
  70		sge->length = RDS_FRAG_SIZE;
  71		sge->lkey = ic->i_pd->local_dma_lkey;
  72	}
  73}
  74
  75/*
  76 * The entire 'from' list, including the from element itself, is put on
  77 * to the tail of the 'to' list.
  78 */
  79static void list_splice_entire_tail(struct list_head *from,
  80				    struct list_head *to)
  81{
  82	struct list_head *from_last = from->prev;
  83
  84	list_splice_tail(from_last, to);
  85	list_add_tail(from_last, to);
  86}
  87
  88static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
  89{
  90	struct list_head *tmp;
  91
  92	tmp = xchg(&cache->xfer, NULL);
  93	if (tmp) {
  94		if (cache->ready)
  95			list_splice_entire_tail(tmp, cache->ready);
  96		else
  97			cache->ready = tmp;
  98	}
  99}
 100
 101static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
 102{
 103	struct rds_ib_cache_head *head;
 104	int cpu;
 105
 106	cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
 107	if (!cache->percpu)
 108	       return -ENOMEM;
 109
 110	for_each_possible_cpu(cpu) {
 111		head = per_cpu_ptr(cache->percpu, cpu);
 112		head->first = NULL;
 113		head->count = 0;
 114	}
 115	cache->xfer = NULL;
 116	cache->ready = NULL;
 117
 118	return 0;
 119}
 120
 121int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
 122{
 123	int ret;
 124
 125	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
 126	if (!ret) {
 127		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
 128		if (ret)
 129			free_percpu(ic->i_cache_incs.percpu);
 130	}
 131
 132	return ret;
 133}
 134
 135static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
 136					  struct list_head *caller_list)
 137{
 138	struct rds_ib_cache_head *head;
 139	int cpu;
 140
 141	for_each_possible_cpu(cpu) {
 142		head = per_cpu_ptr(cache->percpu, cpu);
 143		if (head->first) {
 144			list_splice_entire_tail(head->first, caller_list);
 145			head->first = NULL;
 146		}
 147	}
 148
 149	if (cache->ready) {
 150		list_splice_entire_tail(cache->ready, caller_list);
 151		cache->ready = NULL;
 152	}
 153}
 154
 155void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
 156{
 157	struct rds_ib_incoming *inc;
 158	struct rds_ib_incoming *inc_tmp;
 159	struct rds_page_frag *frag;
 160	struct rds_page_frag *frag_tmp;
 161	LIST_HEAD(list);
 162
 163	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
 164	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
 165	free_percpu(ic->i_cache_incs.percpu);
 166
 167	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
 168		list_del(&inc->ii_cache_entry);
 169		WARN_ON(!list_empty(&inc->ii_frags));
 170		kmem_cache_free(rds_ib_incoming_slab, inc);
 171		atomic_dec(&rds_ib_allocation);
 172	}
 173
 174	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
 175	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
 176	free_percpu(ic->i_cache_frags.percpu);
 177
 178	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
 179		list_del(&frag->f_cache_entry);
 180		WARN_ON(!list_empty(&frag->f_item));
 181		kmem_cache_free(rds_ib_frag_slab, frag);
 182	}
 183}
 184
 185/* fwd decl */
 186static void rds_ib_recv_cache_put(struct list_head *new_item,
 187				  struct rds_ib_refill_cache *cache);
 188static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
 189
 190
 191/* Recycle frag and attached recv buffer f_sg */
 192static void rds_ib_frag_free(struct rds_ib_connection *ic,
 193			     struct rds_page_frag *frag)
 194{
 195	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
 196
 197	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
 198	atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
 199	rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
 200}
 201
 202/* Recycle inc after freeing attached frags */
 203void rds_ib_inc_free(struct rds_incoming *inc)
 204{
 205	struct rds_ib_incoming *ibinc;
 206	struct rds_page_frag *frag;
 207	struct rds_page_frag *pos;
 208	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
 209
 210	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 211
 212	/* Free attached frags */
 213	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
 214		list_del_init(&frag->f_item);
 215		rds_ib_frag_free(ic, frag);
 216	}
 217	BUG_ON(!list_empty(&ibinc->ii_frags));
 218
 219	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
 220	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
 221}
 222
 223static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
 224				  struct rds_ib_recv_work *recv)
 225{
 226	if (recv->r_ibinc) {
 227		rds_inc_put(&recv->r_ibinc->ii_inc);
 228		recv->r_ibinc = NULL;
 229	}
 230	if (recv->r_frag) {
 231		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
 232		rds_ib_frag_free(ic, recv->r_frag);
 233		recv->r_frag = NULL;
 234	}
 235}
 236
 237void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
 238{
 239	u32 i;
 240
 241	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
 242		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
 243}
 244
 245static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
 246						     gfp_t slab_mask)
 247{
 248	struct rds_ib_incoming *ibinc;
 249	struct list_head *cache_item;
 250	int avail_allocs;
 251
 252	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
 253	if (cache_item) {
 254		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
 255	} else {
 256		avail_allocs = atomic_add_unless(&rds_ib_allocation,
 257						 1, rds_ib_sysctl_max_recv_allocation);
 258		if (!avail_allocs) {
 259			rds_ib_stats_inc(s_ib_rx_alloc_limit);
 260			return NULL;
 261		}
 262		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
 263		if (!ibinc) {
 264			atomic_dec(&rds_ib_allocation);
 265			return NULL;
 266		}
 267		rds_ib_stats_inc(s_ib_rx_total_incs);
 268	}
 269	INIT_LIST_HEAD(&ibinc->ii_frags);
 270	rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);
 271
 272	return ibinc;
 273}
 274
 275static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
 276						    gfp_t slab_mask, gfp_t page_mask)
 277{
 278	struct rds_page_frag *frag;
 279	struct list_head *cache_item;
 280	int ret;
 281
 282	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
 283	if (cache_item) {
 284		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
 285		atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
 286		rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
 287	} else {
 288		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
 289		if (!frag)
 290			return NULL;
 291
 292		sg_init_table(&frag->f_sg, 1);
 293		ret = rds_page_remainder_alloc(&frag->f_sg,
 294					       RDS_FRAG_SIZE, page_mask);
 295		if (ret) {
 296			kmem_cache_free(rds_ib_frag_slab, frag);
 297			return NULL;
 298		}
 299		rds_ib_stats_inc(s_ib_rx_total_frags);
 300	}
 301
 302	INIT_LIST_HEAD(&frag->f_item);
 303
 304	return frag;
 305}
 306
 307static int rds_ib_recv_refill_one(struct rds_connection *conn,
 308				  struct rds_ib_recv_work *recv, gfp_t gfp)
 309{
 310	struct rds_ib_connection *ic = conn->c_transport_data;
 311	struct ib_sge *sge;
 312	int ret = -ENOMEM;
 313	gfp_t slab_mask = gfp;
 314	gfp_t page_mask = gfp;
 315
 316	if (gfp & __GFP_DIRECT_RECLAIM) {
 317		slab_mask = GFP_KERNEL;
 318		page_mask = GFP_HIGHUSER;
 319	}
 320
 321	if (!ic->i_cache_incs.ready)
 322		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
 323	if (!ic->i_cache_frags.ready)
 324		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
 325
 326	/*
 327	 * ibinc was taken from recv if recv contained the start of a message.
 328	 * recvs that were continuations will still have this allocated.
 329	 */
 330	if (!recv->r_ibinc) {
 331		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
 332		if (!recv->r_ibinc)
 333			goto out;
 334	}
 335
 336	WARN_ON(recv->r_frag); /* leak! */
 337	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
 338	if (!recv->r_frag)
 339		goto out;
 340
 341	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
 342			    1, DMA_FROM_DEVICE);
 343	WARN_ON(ret != 1);
 344
 345	sge = &recv->r_sge[0];
 346	sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
 347	sge->length = sizeof(struct rds_header);
 348
 349	sge = &recv->r_sge[1];
 350	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
 351	sge->length = sg_dma_len(&recv->r_frag->f_sg);
 352
 353	ret = 0;
 354out:
 355	return ret;
 356}
 357
 358static int acquire_refill(struct rds_connection *conn)
 359{
 360	return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
 361}
 362
 363static void release_refill(struct rds_connection *conn)
 364{
 365	clear_bit(RDS_RECV_REFILL, &conn->c_flags);
 366	smp_mb__after_atomic();
 367
 368	/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 369	 * hot path and finding waiters is very rare.  We don't want to walk
 370	 * the system-wide hashed waitqueue buckets in the fast path only to
 371	 * almost never find waiters.
 372	 */
 373	if (waitqueue_active(&conn->c_waitq))
 374		wake_up_all(&conn->c_waitq);
 375}
 376
 377/*
 378 * This tries to allocate and post unused work requests after making sure that
 379 * they have all the allocations they need to queue received fragments into
 380 * sockets.
 381 */
 382void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
 383{
 384	struct rds_ib_connection *ic = conn->c_transport_data;
 385	struct rds_ib_recv_work *recv;
 386	unsigned int posted = 0;
 387	int ret = 0;
 388	bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
 389	bool must_wake = false;
 390	u32 pos;
 391
 392	/* the goal here is to just make sure that someone, somewhere
 393	 * is posting buffers.  If we can't get the refill lock,
 394	 * let them do their thing
 395	 */
 396	if (!acquire_refill(conn))
 397		return;
 398
 399	while ((prefill || rds_conn_up(conn)) &&
 400	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 401		if (pos >= ic->i_recv_ring.w_nr) {
 402			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
 403					pos);
 404			break;
 405		}
 406
 407		recv = &ic->i_recvs[pos];
 408		ret = rds_ib_recv_refill_one(conn, recv, gfp);
 409		if (ret) {
 410			must_wake = true;
 411			break;
 412		}
 413
 414		rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
 415			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
 416			 (long)sg_dma_address(&recv->r_frag->f_sg));
 417
 418		/* XXX when can this fail? */
 419		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
 420		if (ret) {
 421			rds_ib_conn_error(conn, "recv post on "
 422			       "%pI6c returned %d, disconnecting and "
 423			       "reconnecting\n", &conn->c_faddr,
 424			       ret);
 425			break;
 426		}
 427
 428		posted++;
 429
 430		if ((posted > 128 && need_resched()) || posted > 8192) {
 431			must_wake = true;
 432			break;
 433		}
 434	}
 435
 436	/* We're doing flow control - update the window. */
 437	if (ic->i_flowctl && posted)
 438		rds_ib_advertise_credits(conn, posted);
 439
 440	if (ret)
 441		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
 442
 443	release_refill(conn);
 444
 445	/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
 446	 * in this case the ring being low is going to lead to more interrupts
 447	 * and we can safely let the softirq code take care of it unless the
 448	 * ring is completely empty.
 449	 *
 450	 * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
 451	 * we might have raced with the softirq code while we had the refill
 452	 * lock held.  Use rds_ib_ring_low() instead of ring_empty to decide
 453	 * if we should requeue.
 454	 */
 455	if (rds_conn_up(conn) &&
 456	    (must_wake ||
 457	    (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
 458	    rds_ib_ring_empty(&ic->i_recv_ring))) {
 459		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
 460	}
 461	if (can_wait)
 462		cond_resched();
 463}
 464
 465/*
 466 * We want to recycle several types of recv allocations, like incs and frags.
 467 * To use this, the *_free() function passes in the ptr to a list_head within
 468 * the recyclee, as well as the cache to put it on.
 469 *
 470 * First, we put the memory on a percpu list. When this reaches a certain size,
 471 * We move it to an intermediate non-percpu list in a lockless manner, with some
  472 * xchg/cmpxchg wizardry.
 473 *
 474 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 475 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
  476 * list_empty() will return true even when one element is actually present.
 477 */
 478static void rds_ib_recv_cache_put(struct list_head *new_item,
 479				 struct rds_ib_refill_cache *cache)
 480{
 481	unsigned long flags;
 482	struct list_head *old, *chpfirst;
 483
 484	local_irq_save(flags);
 485
 486	chpfirst = __this_cpu_read(cache->percpu->first);
 487	if (!chpfirst)
 488		INIT_LIST_HEAD(new_item);
 489	else /* put on front */
 490		list_add_tail(new_item, chpfirst);
 491
 492	__this_cpu_write(cache->percpu->first, new_item);
 493	__this_cpu_inc(cache->percpu->count);
 494
 495	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
 496		goto end;
 497
 498	/*
 499	 * Return our per-cpu first list to the cache's xfer by atomically
 500	 * grabbing the current xfer list, appending it to our per-cpu list,
 501	 * and then atomically returning that entire list back to the
 502	 * cache's xfer list as long as it's still empty.
 503	 */
 504	do {
 505		old = xchg(&cache->xfer, NULL);
 506		if (old)
 507			list_splice_entire_tail(old, chpfirst);
 508		old = cmpxchg(&cache->xfer, NULL, chpfirst);
 509	} while (old);
 510
 511
 512	__this_cpu_write(cache->percpu->first, NULL);
 513	__this_cpu_write(cache->percpu->count, 0);
 514end:
 515	local_irq_restore(flags);
 516}
 517
 518static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
 519{
 520	struct list_head *head = cache->ready;
 521
 522	if (head) {
 523		if (!list_empty(head)) {
 524			cache->ready = head->next;
 525			list_del_init(head);
 526		} else
 527			cache->ready = NULL;
 528	}
 529
 530	return head;
 531}
 532
 533int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 534{
 535	struct rds_ib_incoming *ibinc;
 536	struct rds_page_frag *frag;
 537	unsigned long to_copy;
 538	unsigned long frag_off = 0;
 539	int copied = 0;
 540	int ret;
 541	u32 len;
 542
 543	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 544	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 545	len = be32_to_cpu(inc->i_hdr.h_len);
 546
 547	while (iov_iter_count(to) && copied < len) {
 548		if (frag_off == RDS_FRAG_SIZE) {
 549			frag = list_entry(frag->f_item.next,
 550					  struct rds_page_frag, f_item);
 551			frag_off = 0;
 552		}
 553		to_copy = min_t(unsigned long, iov_iter_count(to),
 554				RDS_FRAG_SIZE - frag_off);
 555		to_copy = min_t(unsigned long, to_copy, len - copied);
 556
 557		/* XXX needs + offset for multiple recvs per page */
 558		rds_stats_add(s_copy_to_user, to_copy);
 559		ret = copy_page_to_iter(sg_page(&frag->f_sg),
 560					frag->f_sg.offset + frag_off,
 561					to_copy,
 562					to);
 563		if (ret != to_copy)
 564			return -EFAULT;
 565
 566		frag_off += to_copy;
 567		copied += to_copy;
 568	}
 569
 570	return copied;
 571}
 572
 573/* ic starts out kzalloc()ed */
 574void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
 575{
 576	struct ib_send_wr *wr = &ic->i_ack_wr;
 577	struct ib_sge *sge = &ic->i_ack_sge;
 578
 579	sge->addr = ic->i_ack_dma;
 580	sge->length = sizeof(struct rds_header);
 581	sge->lkey = ic->i_pd->local_dma_lkey;
 582
 583	wr->sg_list = sge;
 584	wr->num_sge = 1;
 585	wr->opcode = IB_WR_SEND;
 586	wr->wr_id = RDS_IB_ACK_WR_ID;
 587	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 588}
 589
 590/*
 591 * You'd think that with reliable IB connections you wouldn't need to ack
 592 * messages that have been received.  The problem is that IB hardware generates
 593 * an ack message before it has DMAed the message into memory.  This creates a
 594 * potential message loss if the HCA is disabled for any reason between when it
 595 * sends the ack and before the message is DMAed and processed.  This is only a
 596 * potential issue if another HCA is available for fail-over.
 597 *
 598 * When the remote host receives our ack they'll free the sent message from
 599 * their send queue.  To decrease the latency of this we always send an ack
 600 * immediately after we've received messages.
 601 *
 602 * For simplicity, we only have one ack in flight at a time.  This puts
 603 * pressure on senders to have deep enough send queues to absorb the latency of
 604 * a single ack frame being in flight.  This might not be good enough.
 605 *
  606 * This is implemented by having a long-lived send_wr and sge which point to a
 607 * statically allocated ack frame.  This ack wr does not fall under the ring
 608 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 609 * room for it beyond the ring size.  Send completion notices its special
 610 * wr_id and avoids working with the ring in that case.
 611 */
 612#ifndef KERNEL_HAS_ATOMIC64
 613void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 614{
 615	unsigned long flags;
 616
 617	spin_lock_irqsave(&ic->i_ack_lock, flags);
 618	ic->i_ack_next = seq;
 619	if (ack_required)
 620		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 621	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
 622}
 623
 624static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 625{
 626	unsigned long flags;
 627	u64 seq;
 628
 629	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 630
 631	spin_lock_irqsave(&ic->i_ack_lock, flags);
 632	seq = ic->i_ack_next;
 633	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
 634
 635	return seq;
 636}
 637#else
 638void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 639{
 640	atomic64_set(&ic->i_ack_next, seq);
 641	if (ack_required) {
 642		smp_mb__before_atomic();
 643		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 644	}
 645}
 646
 647static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 648{
 649	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 650	smp_mb__after_atomic();
 651
 652	return atomic64_read(&ic->i_ack_next);
 653}
 654#endif
 655
 656
 657static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
 658{
 659	struct rds_header *hdr = ic->i_ack;
 660	u64 seq;
 661	int ret;
 662
 663	seq = rds_ib_get_ack(ic);
 664
 665	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
 666
 667	ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, ic->i_ack_dma,
 668				   sizeof(*hdr), DMA_TO_DEVICE);
 669	rds_message_populate_header(hdr, 0, 0, 0);
 670	hdr->h_ack = cpu_to_be64(seq);
 671	hdr->h_credit = adv_credits;
 672	rds_message_make_checksum(hdr);
 673	ib_dma_sync_single_for_device(ic->rds_ibdev->dev, ic->i_ack_dma,
 674				      sizeof(*hdr), DMA_TO_DEVICE);
 675
 676	ic->i_ack_queued = jiffies;
 677
 678	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
 679	if (unlikely(ret)) {
 680		/* Failed to send. Release the WR, and
 681		 * force another ACK.
 682		 */
 683		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 684		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 685
 686		rds_ib_stats_inc(s_ib_ack_send_failure);
 687
 688		rds_ib_conn_error(ic->conn, "sending ack failed\n");
 689	} else
 690		rds_ib_stats_inc(s_ib_ack_sent);
 691}
 692
 693/*
 694 * There are 3 ways of getting acknowledgements to the peer:
 695 *  1.	We call rds_ib_attempt_ack from the recv completion handler
 696 *	to send an ACK-only frame.
 697 *	However, there can be only one such frame in the send queue
 698 *	at any time, so we may have to postpone it.
 699 *  2.	When another (data) packet is transmitted while there's
 700 *	an ACK in the queue, we piggyback the ACK sequence number
 701 *	on the data packet.
 702 *  3.	If the ACK WR is done sending, we get called from the
 703 *	send queue completion handler, and check whether there's
 704 *	another ACK pending (postponed because the WR was on the
 705 *	queue). If so, we transmit it.
 706 *
 707 * We maintain 2 variables:
 708 *  -	i_ack_flags, which keeps track of whether the ACK WR
 709 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 710 *  -	i_ack_next, which is the last sequence number we received
 711 *
 712 * Potentially, send queue and receive queue handlers can run concurrently.
 713 * It would be nice to not have to use a spinlock to synchronize things,
 714 * but the one problem that rules this out is that 64bit updates are
 715 * not atomic on all platforms. Things would be a lot simpler if
 716 * we had atomic64 or maybe cmpxchg64 everywhere.
 717 *
 718 * Reconnecting complicates this picture just slightly. When we
 719 * reconnect, we may be seeing duplicate packets. The peer
 720 * is retransmitting them, because it hasn't seen an ACK for
 721 * them. It is important that we ACK these.
 722 *
 723 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 724 * this flag set *MUST* be acknowledged immediately.
 725 */
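/*
 * A rough map from those three paths to the functions below (a sketch
 * only; the exact interleaving depends on the completion handlers):
 *
 *  1. recv completion -> rds_ib_attempt_ack(): sends an ACK-only frame via
 *     rds_ib_send_ack() unless IB_ACK_IN_FLIGHT is already set.
 *  2. data transmit   -> rds_ib_piggyb_ack(): returns the sequence number
 *     to piggyback and clears IB_ACK_REQUESTED.
 *  3. send completion of the ACK WR -> rds_ib_ack_send_complete(): clears
 *     IB_ACK_IN_FLIGHT and retries rds_ib_attempt_ack().
 */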
 726
 727/*
 728 * When we get here, we're called from the recv queue handler.
 729 * Check whether we ought to transmit an ACK.
 730 */
 731void rds_ib_attempt_ack(struct rds_ib_connection *ic)
 732{
 733	unsigned int adv_credits;
 734
 735	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
 736		return;
 737
 738	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
 739		rds_ib_stats_inc(s_ib_ack_send_delayed);
 740		return;
 741	}
 742
 743	/* Can we get a send credit? */
 744	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
 745		rds_ib_stats_inc(s_ib_tx_throttle);
 746		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 747		return;
 748	}
 749
 750	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 751	rds_ib_send_ack(ic, adv_credits);
 752}
 753
 754/*
 755 * We get here from the send completion handler, when the
 756 * adapter tells us the ACK frame was sent.
 757 */
 758void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
 759{
 760	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 761	rds_ib_attempt_ack(ic);
 762}
 763
 764/*
 765 * This is called by the regular xmit code when it wants to piggyback
 766 * an ACK on an outgoing frame.
 767 */
 768u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
 769{
 770	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
 771		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
 772	return rds_ib_get_ack(ic);
 773}
 774
 775/*
 776 * It's kind of lame that we're copying from the posted receive pages into
 777 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 778 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 779 * hopefully we won't need to invest that complexity in making it more
 780 * efficient.  By copying we can share a simpler core with TCP which has to
 781 * copy.
 782 */
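/*
 * For a sense of scale (assuming the usual one-bit-per-port layout,
 * i.e. RDS_CONG_MAP_BYTES == 65536/8 == 8KB, and 4KB pages and frags):
 * each bitmap update below copies only a couple of fragments' worth of
 * data, which supports the "rare event, keep it simple" trade-off above.
 */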
 783static void rds_ib_cong_recv(struct rds_connection *conn,
 784			      struct rds_ib_incoming *ibinc)
 785{
 786	struct rds_cong_map *map;
 787	unsigned int map_off;
 788	unsigned int map_page;
 789	struct rds_page_frag *frag;
 790	unsigned long frag_off;
 791	unsigned long to_copy;
 792	unsigned long copied;
 793	__le64 uncongested = 0;
 794	void *addr;
 795
 796	/* catch completely corrupt packets */
 797	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
 798		return;
 799
 800	map = conn->c_fcong;
 801	map_page = 0;
 802	map_off = 0;
 803
 804	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 805	frag_off = 0;
 806
 807	copied = 0;
 808
 809	while (copied < RDS_CONG_MAP_BYTES) {
 810		__le64 *src, *dst;
 811		unsigned int k;
 812
 813		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
 814		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
 815
 816		addr = kmap_atomic(sg_page(&frag->f_sg));
 817
 818		src = addr + frag->f_sg.offset + frag_off;
 819		dst = (void *)map->m_page_addrs[map_page] + map_off;
 820		for (k = 0; k < to_copy; k += 8) {
 821			/* Record ports that became uncongested, i.e.
 822			 * bits that changed from 0 to 1. */
 823			uncongested |= ~(*src) & *dst;
 824			*dst++ = *src++;
 825		}
 826		kunmap_atomic(addr);
 827
 828		copied += to_copy;
 829
 830		map_off += to_copy;
 831		if (map_off == PAGE_SIZE) {
 832			map_off = 0;
 833			map_page++;
 834		}
 835
 836		frag_off += to_copy;
 837		if (frag_off == RDS_FRAG_SIZE) {
 838			frag = list_entry(frag->f_item.next,
 839					  struct rds_page_frag, f_item);
 840			frag_off = 0;
 841		}
 842	}
 843
 844	/* the congestion map is in little endian order */
 845	rds_cong_map_updated(map, le64_to_cpu(uncongested));
 846}
 847
 848static void rds_ib_process_recv(struct rds_connection *conn,
 849				struct rds_ib_recv_work *recv, u32 data_len,
 850				struct rds_ib_ack_state *state)
 851{
 852	struct rds_ib_connection *ic = conn->c_transport_data;
 853	struct rds_ib_incoming *ibinc = ic->i_ibinc;
 854	struct rds_header *ihdr, *hdr;
 855	dma_addr_t dma_addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
 856
 857	/* XXX shut down the connection if port 0,0 are seen? */
 858
 859	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
 860		 data_len);
 861
 862	if (data_len < sizeof(struct rds_header)) {
 863		rds_ib_conn_error(conn, "incoming message "
 864		       "from %pI6c didn't include a "
 865		       "header, disconnecting and "
 866		       "reconnecting\n",
 867		       &conn->c_faddr);
 868		return;
 869	}
 870	data_len -= sizeof(struct rds_header);
 871
 872	ihdr = ic->i_recv_hdrs[recv - ic->i_recvs];
 873
 874	ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, dma_addr,
 875				   sizeof(*ihdr), DMA_FROM_DEVICE);
 876	/* Validate the checksum. */
 877	if (!rds_message_verify_checksum(ihdr)) {
 878		rds_ib_conn_error(conn, "incoming message "
 879		       "from %pI6c has corrupted header - "
 880		       "forcing a reconnect\n",
 881		       &conn->c_faddr);
 882		rds_stats_inc(s_recv_drop_bad_checksum);
 883		goto done;
 884	}
 885
 886	/* Process the ACK sequence which comes with every packet */
 887	state->ack_recv = be64_to_cpu(ihdr->h_ack);
 888	state->ack_recv_valid = 1;
 889
 890	/* Process the credits update if there was one */
 891	if (ihdr->h_credit)
 892		rds_ib_send_add_credits(conn, ihdr->h_credit);
 893
 894	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
 895		/* This is an ACK-only packet. It gets special treatment
 896		 * here because, historically, ACKs were rather special
 897		 * beasts.
 898		 */
 899		rds_ib_stats_inc(s_ib_ack_received);
 900
 901		/*
 902		 * Usually the frags make their way on to incs and are then freed as
 903		 * the inc is freed.  We don't go that route, so we have to drop the
 904		 * page ref ourselves.  We can't just leave the page on the recv
 905		 * because that confuses the dma mapping of pages and each recv's use
 906		 * of a partial page.
 907		 *
 908		 * FIXME: Fold this into the code path below.
 909		 */
 910		rds_ib_frag_free(ic, recv->r_frag);
 911		recv->r_frag = NULL;
 912		goto done;
 913	}
 914
 915	/*
 916	 * If we don't already have an inc on the connection then this
 917	 * fragment has a header and starts a message; copy its header
 918	 * into the inc and save the inc so we can hang upcoming fragments
 919	 * off its list.
 920	 */
 921	if (!ibinc) {
 922		ibinc = recv->r_ibinc;
 923		recv->r_ibinc = NULL;
 924		ic->i_ibinc = ibinc;
 925
 926		hdr = &ibinc->ii_inc.i_hdr;
 927		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
 928				local_clock();
 929		memcpy(hdr, ihdr, sizeof(*hdr));
 930		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
 931		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
 932				local_clock();
 933
 934		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
 935			 ic->i_recv_data_rem, hdr->h_flags);
 936	} else {
 937		hdr = &ibinc->ii_inc.i_hdr;
 938		/* We can't just use memcmp here; fragments of a
 939		 * single message may carry different ACKs */
 940		if (hdr->h_sequence != ihdr->h_sequence ||
 941		    hdr->h_len != ihdr->h_len ||
 942		    hdr->h_sport != ihdr->h_sport ||
 943		    hdr->h_dport != ihdr->h_dport) {
 944			rds_ib_conn_error(conn,
 945				"fragment header mismatch; forcing reconnect\n");
 946			goto done;
 947		}
 948	}
 949
 950	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
 951	recv->r_frag = NULL;
 952
 953	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
 954		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
 955	else {
 956		ic->i_recv_data_rem = 0;
 957		ic->i_ibinc = NULL;
 958
 959		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) {
 960			rds_ib_cong_recv(conn, ibinc);
 961		} else {
 962			rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr,
 963					  &ibinc->ii_inc, GFP_ATOMIC);
 964			state->ack_next = be64_to_cpu(hdr->h_sequence);
 965			state->ack_next_valid = 1;
 966		}
 967
 968		/* Evaluate the ACK_REQUIRED flag *after* we received
 969		 * the complete frame, and after bumping the next_rx
 970		 * sequence. */
 971		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
 972			rds_stats_inc(s_recv_ack_required);
 973			state->ack_required = 1;
 974		}
 975
 976		rds_inc_put(&ibinc->ii_inc);
 977	}
 978done:
 979	ib_dma_sync_single_for_device(ic->rds_ibdev->dev, dma_addr,
 980				      sizeof(*ihdr), DMA_FROM_DEVICE);
 981}
 982
 983void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
 984			     struct ib_wc *wc,
 985			     struct rds_ib_ack_state *state)
 986{
 987	struct rds_connection *conn = ic->conn;
 988	struct rds_ib_recv_work *recv;
 989
 990	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
 991		 (unsigned long long)wc->wr_id, wc->status,
 992		 ib_wc_status_msg(wc->status), wc->byte_len,
 993		 be32_to_cpu(wc->ex.imm_data));
 994
 995	rds_ib_stats_inc(s_ib_rx_cq_event);
 996	recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
 997	ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
 998			DMA_FROM_DEVICE);
 999
1000	/* Also process recvs in connecting state because it is possible
1001	 * to get a recv completion _before_ the rdmacm ESTABLISHED
1002	 * event is processed.
1003	 */
1004	if (wc->status == IB_WC_SUCCESS) {
1005		rds_ib_process_recv(conn, recv, wc->byte_len, state);
1006	} else {
1007		/* We expect errors as the qp is drained during shutdown */
1008		if (rds_conn_up(conn) || rds_conn_connecting(conn))
1009			rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
1010					  &conn->c_laddr, &conn->c_faddr,
1011					  conn->c_tos, wc->status,
1012					  ib_wc_status_msg(wc->status),
1013					  wc->vendor_err);
1014	}
1015
1016	/* rds_ib_process_recv() doesn't always consume the frag, and
1017	 * we might not have called it at all if the wc didn't indicate
1018	 * success. We already unmapped the frag's pages, though, and
1019	 * the following rds_ib_ring_free() call tells the refill path
1020	 * that it will not find an allocated frag here. Make sure we
1021	 * keep that promise by freeing a frag that's still on the ring.
1022	 */
1023	if (recv->r_frag) {
1024		rds_ib_frag_free(ic, recv->r_frag);
1025		recv->r_frag = NULL;
1026	}
1027	rds_ib_ring_free(&ic->i_recv_ring, 1);
1028
1029	/* If we ever end up with a completely empty receive ring, we're
1030	 * in deep trouble, as the sender will definitely see RNR
1031	 * timeouts. */
1032	if (rds_ib_ring_empty(&ic->i_recv_ring))
1033		rds_ib_stats_inc(s_ib_rx_ring_empty);
1034
1035	if (rds_ib_ring_low(&ic->i_recv_ring)) {
1036		rds_ib_recv_refill(conn, 0, GFP_NOWAIT | __GFP_NOWARN);
1037		rds_ib_stats_inc(s_ib_rx_refill_from_cq);
1038	}
1039}
1040
1041int rds_ib_recv_path(struct rds_conn_path *cp)
1042{
1043	struct rds_connection *conn = cp->cp_conn;
1044	struct rds_ib_connection *ic = conn->c_transport_data;
1045
1046	rdsdebug("conn %p\n", conn);
1047	if (rds_conn_up(conn)) {
1048		rds_ib_attempt_ack(ic);
1049		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
1050		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
1051	}
1052
1053	return 0;
1054}
1055
1056int rds_ib_recv_init(void)
1057{
1058	struct sysinfo si;
1059	int ret = -ENOMEM;
1060
1061	/* Default to roughly one third of all available RAM for recv memory */
1062	si_meminfo(&si);
1063	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
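	/* For example (assuming 4KB pages and RDS_FRAG_SIZE == PAGE_SIZE):
	 * a machine with 12GB of RAM ends up with a cap of roughly one
	 * million fragments, i.e. about 4GB of receive memory.
	 */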
1064
1065	rds_ib_incoming_slab =
1066		kmem_cache_create_usercopy("rds_ib_incoming",
1067					   sizeof(struct rds_ib_incoming),
1068					   0, SLAB_HWCACHE_ALIGN,
1069					   offsetof(struct rds_ib_incoming,
1070						    ii_inc.i_usercopy),
1071					   sizeof(struct rds_inc_usercopy),
1072					   NULL);
1073	if (!rds_ib_incoming_slab)
1074		goto out;
1075
1076	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
1077					sizeof(struct rds_page_frag),
1078					0, SLAB_HWCACHE_ALIGN, NULL);
1079	if (!rds_ib_frag_slab) {
1080		kmem_cache_destroy(rds_ib_incoming_slab);
1081		rds_ib_incoming_slab = NULL;
1082	} else
1083		ret = 0;
1084out:
1085	return ret;
1086}
1087
1088void rds_ib_recv_exit(void)
1089{
1090	WARN_ON(atomic_read(&rds_ib_allocation));
1091
1092	kmem_cache_destroy(rds_ib_incoming_slab);
1093	kmem_cache_destroy(rds_ib_frag_slab);
1094}