   1/*
   2 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
   3 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
   5 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   6 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
   7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   8 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
   9 *
  10 * This software is available to you under a choice of one of two
  11 * licenses.  You may choose to be licensed under the terms of the GNU
  12 * General Public License (GPL) Version 2, available from the file
  13 * COPYING in the main directory of this source tree, or the
  14 * OpenIB.org BSD license below:
  15 *
  16 *     Redistribution and use in source and binary forms, with or
  17 *     without modification, are permitted provided that the following
  18 *     conditions are met:
  19 *
  20 *      - Redistributions of source code must retain the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer.
  23 *
  24 *      - Redistributions in binary form must reproduce the above
  25 *        copyright notice, this list of conditions and the following
  26 *        disclaimer in the documentation and/or other materials
  27 *        provided with the distribution.
  28 *
  29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36 * SOFTWARE.
  37 */
  38
  39#include <linux/errno.h>
  40#include <linux/err.h>
  41#include <linux/export.h>
  42#include <linux/string.h>
  43#include <linux/slab.h>
  44#include <linux/in.h>
  45#include <linux/in6.h>
  46#include <net/addrconf.h>
  47#include <linux/security.h>
  48
  49#include <rdma/ib_verbs.h>
  50#include <rdma/ib_cache.h>
  51#include <rdma/ib_addr.h>
  52#include <rdma/rw.h>
  53#include <rdma/lag.h>
  54
  55#include "core_priv.h"
  56#include <trace/events/rdma_core.h>
  57
  58static int ib_resolve_eth_dmac(struct ib_device *device,
  59			       struct rdma_ah_attr *ah_attr);
  60
  61static const char * const ib_events[] = {
  62	[IB_EVENT_CQ_ERR]		= "CQ error",
  63	[IB_EVENT_QP_FATAL]		= "QP fatal error",
  64	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
  65	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
  66	[IB_EVENT_COMM_EST]		= "communication established",
  67	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
  68	[IB_EVENT_PATH_MIG]		= "path migration successful",
  69	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
  70	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
  71	[IB_EVENT_PORT_ACTIVE]		= "port active",
  72	[IB_EVENT_PORT_ERR]		= "port error",
  73	[IB_EVENT_LID_CHANGE]		= "LID change",
  74	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
  75	[IB_EVENT_SM_CHANGE]		= "SM change",
  76	[IB_EVENT_SRQ_ERR]		= "SRQ error",
  77	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
  78	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
  79	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
  80	[IB_EVENT_GID_CHANGE]		= "GID changed",
  81};
  82
  83const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
  84{
  85	size_t index = event;
  86
  87	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
  88			ib_events[index] : "unrecognized event";
  89}
  90EXPORT_SYMBOL(ib_event_msg);
  91
  92static const char * const wc_statuses[] = {
  93	[IB_WC_SUCCESS]			= "success",
  94	[IB_WC_LOC_LEN_ERR]		= "local length error",
  95	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
  96	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
  97	[IB_WC_LOC_PROT_ERR]		= "local protection error",
  98	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
  99	[IB_WC_MW_BIND_ERR]		= "memory bind operation error",
 100	[IB_WC_BAD_RESP_ERR]		= "bad response error",
 101	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
 102	[IB_WC_REM_INV_REQ_ERR]		= "remote invalid request error",
 103	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
 104	[IB_WC_REM_OP_ERR]		= "remote operation error",
 105	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
 106	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
 107	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
 108	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
 109	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
 110	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
 111	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
 112	[IB_WC_FATAL_ERR]		= "fatal error",
 113	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
 114	[IB_WC_GENERAL_ERR]		= "general error",
 115};
 116
 117const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
 118{
 119	size_t index = status;
 120
 121	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
 122			wc_statuses[index] : "unrecognized status";
 123}
 124EXPORT_SYMBOL(ib_wc_status_msg);
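
/*
 * Example (illustrative sketch, not part of this file): a ULP turning the
 * raw enums into log-friendly strings. my_handle_wc()/my_event_handler()
 * are hypothetical helpers; only ib_wc_status_msg()/ib_event_msg() and the
 * ib_wc/ib_event fields used below come from <rdma/ib_verbs.h>.
 *
 *	static void my_handle_wc(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		if (wc->status != IB_WC_SUCCESS)
 *			pr_err("WR %llu failed: %s (%d)\n",
 *			       (unsigned long long)wc->wr_id,
 *			       ib_wc_status_msg(wc->status), wc->status);
 *	}
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("%s: async event: %s\n",
 *			dev_name(&event->device->dev),
 *			ib_event_msg(event->event));
 *	}
 */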
 125
 126__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
 127{
 128	switch (rate) {
 129	case IB_RATE_2_5_GBPS: return   1;
 130	case IB_RATE_5_GBPS:   return   2;
 131	case IB_RATE_10_GBPS:  return   4;
 132	case IB_RATE_20_GBPS:  return   8;
 133	case IB_RATE_30_GBPS:  return  12;
 134	case IB_RATE_40_GBPS:  return  16;
 135	case IB_RATE_60_GBPS:  return  24;
 136	case IB_RATE_80_GBPS:  return  32;
 137	case IB_RATE_120_GBPS: return  48;
 138	case IB_RATE_14_GBPS:  return   6;
 139	case IB_RATE_56_GBPS:  return  22;
 140	case IB_RATE_112_GBPS: return  45;
 141	case IB_RATE_168_GBPS: return  67;
 142	case IB_RATE_25_GBPS:  return  10;
 143	case IB_RATE_100_GBPS: return  40;
 144	case IB_RATE_200_GBPS: return  80;
 145	case IB_RATE_300_GBPS: return 120;
 146	case IB_RATE_28_GBPS:  return  11;
 147	case IB_RATE_50_GBPS:  return  20;
 148	case IB_RATE_400_GBPS: return 160;
 149	case IB_RATE_600_GBPS: return 240;
 150	case IB_RATE_800_GBPS: return 320;
 151	default:	       return  -1;
 152	}
 153}
 154EXPORT_SYMBOL(ib_rate_to_mult);
 155
 156__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
 157{
 158	switch (mult) {
 159	case 1:   return IB_RATE_2_5_GBPS;
 160	case 2:   return IB_RATE_5_GBPS;
 161	case 4:   return IB_RATE_10_GBPS;
 162	case 8:   return IB_RATE_20_GBPS;
 163	case 12:  return IB_RATE_30_GBPS;
 164	case 16:  return IB_RATE_40_GBPS;
 165	case 24:  return IB_RATE_60_GBPS;
 166	case 32:  return IB_RATE_80_GBPS;
 167	case 48:  return IB_RATE_120_GBPS;
 168	case 6:   return IB_RATE_14_GBPS;
 169	case 22:  return IB_RATE_56_GBPS;
 170	case 45:  return IB_RATE_112_GBPS;
 171	case 67:  return IB_RATE_168_GBPS;
 172	case 10:  return IB_RATE_25_GBPS;
 173	case 40:  return IB_RATE_100_GBPS;
 174	case 80:  return IB_RATE_200_GBPS;
 175	case 120: return IB_RATE_300_GBPS;
 176	case 11:  return IB_RATE_28_GBPS;
 177	case 20:  return IB_RATE_50_GBPS;
 178	case 160: return IB_RATE_400_GBPS;
 179	case 240: return IB_RATE_600_GBPS;
 180	case 320: return IB_RATE_800_GBPS;
 181	default:  return IB_RATE_PORT_CURRENT;
 182	}
 183}
 184EXPORT_SYMBOL(mult_to_ib_rate);
 185
 186__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
 187{
 188	switch (rate) {
 189	case IB_RATE_2_5_GBPS: return 2500;
 190	case IB_RATE_5_GBPS:   return 5000;
 191	case IB_RATE_10_GBPS:  return 10000;
 192	case IB_RATE_20_GBPS:  return 20000;
 193	case IB_RATE_30_GBPS:  return 30000;
 194	case IB_RATE_40_GBPS:  return 40000;
 195	case IB_RATE_60_GBPS:  return 60000;
 196	case IB_RATE_80_GBPS:  return 80000;
 197	case IB_RATE_120_GBPS: return 120000;
 198	case IB_RATE_14_GBPS:  return 14062;
 199	case IB_RATE_56_GBPS:  return 56250;
 200	case IB_RATE_112_GBPS: return 112500;
 201	case IB_RATE_168_GBPS: return 168750;
 202	case IB_RATE_25_GBPS:  return 25781;
 203	case IB_RATE_100_GBPS: return 103125;
 204	case IB_RATE_200_GBPS: return 206250;
 205	case IB_RATE_300_GBPS: return 309375;
 206	case IB_RATE_28_GBPS:  return 28125;
 207	case IB_RATE_50_GBPS:  return 53125;
 208	case IB_RATE_400_GBPS: return 425000;
 209	case IB_RATE_600_GBPS: return 637500;
 210	case IB_RATE_800_GBPS: return 850000;
 211	default:	       return -1;
 212	}
 213}
 214EXPORT_SYMBOL(ib_rate_to_mbps);
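
/*
 * Example (illustrative sketch): converting between the ib_rate encoding,
 * the 2.5 Gb/s multiplier used by the verbs interface, and Mb/s for logging.
 * A negative return value means the rate is not recognized.
 *
 *	enum ib_rate rate = IB_RATE_100_GBPS;
 *	int mult = ib_rate_to_mult(rate);	// 40
 *	int mbps = ib_rate_to_mbps(rate);	// 103125 (signalling rate)
 *
 *	if (mult > 0 && mult_to_ib_rate(mult) == rate)
 *		pr_debug("rate enum %d is about %d Mb/s\n", rate, mbps);
 */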
 215
 216__attribute_const__ enum rdma_transport_type
 217rdma_node_get_transport(unsigned int node_type)
 218{
 219
 220	if (node_type == RDMA_NODE_USNIC)
 221		return RDMA_TRANSPORT_USNIC;
 222	if (node_type == RDMA_NODE_USNIC_UDP)
 223		return RDMA_TRANSPORT_USNIC_UDP;
 224	if (node_type == RDMA_NODE_RNIC)
 225		return RDMA_TRANSPORT_IWARP;
 226	if (node_type == RDMA_NODE_UNSPECIFIED)
 227		return RDMA_TRANSPORT_UNSPECIFIED;
 228
 229	return RDMA_TRANSPORT_IB;
 230}
 231EXPORT_SYMBOL(rdma_node_get_transport);
 232
 233enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
 234					      u32 port_num)
 235{
 236	enum rdma_transport_type lt;
 237	if (device->ops.get_link_layer)
 238		return device->ops.get_link_layer(device, port_num);
 239
 240	lt = rdma_node_get_transport(device->node_type);
 241	if (lt == RDMA_TRANSPORT_IB)
 242		return IB_LINK_LAYER_INFINIBAND;
 243
 244	return IB_LINK_LAYER_ETHERNET;
 245}
 246EXPORT_SYMBOL(rdma_port_get_link_layer);
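
/*
 * Example (illustrative sketch): a caller branching on the port's link
 * layer, e.g. to decide whether an address handle needs a GRH. device and
 * port_num are assumed to be supplied by the caller.
 *
 *	bool is_roce = rdma_port_get_link_layer(device, port_num) ==
 *		       IB_LINK_LAYER_ETHERNET;
 */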
 247
 248/* Protection domains */
 249
 250/**
 251 * __ib_alloc_pd - Allocates an unused protection domain.
 252 * @device: The device on which to allocate the protection domain.
 253 * @flags: protection domain flags
 254 * @caller: caller's build-time module name
 255 *
 256 * A protection domain object provides an association between QPs, shared
 257 * receive queues, address handles, memory regions, and memory windows.
 258 *
 259 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 260 * memory operations.
 261 */
 262struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 263		const char *caller)
 264{
 265	struct ib_pd *pd;
 266	int mr_access_flags = 0;
 267	int ret;
 268
 269	pd = rdma_zalloc_drv_obj(device, ib_pd);
 270	if (!pd)
 271		return ERR_PTR(-ENOMEM);
 272
 273	pd->device = device;
 274	pd->flags = flags;
 275
 276	rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
 277	rdma_restrack_set_name(&pd->res, caller);
 278
 279	ret = device->ops.alloc_pd(pd, NULL);
 280	if (ret) {
 281		rdma_restrack_put(&pd->res);
 282		kfree(pd);
 283		return ERR_PTR(ret);
 284	}
 285	rdma_restrack_add(&pd->res);
 286
 287	if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)
 288		pd->local_dma_lkey = device->local_dma_lkey;
 289	else
 290		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
 291
 292	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
 293		pr_warn("%s: enabling unsafe global rkey\n", caller);
 294		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
 295	}
 296
 297	if (mr_access_flags) {
 298		struct ib_mr *mr;
 299
 300		mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
 301		if (IS_ERR(mr)) {
 302			ib_dealloc_pd(pd);
 303			return ERR_CAST(mr);
 304		}
 305
 306		mr->device	= pd->device;
 307		mr->pd		= pd;
 308		mr->type        = IB_MR_TYPE_DMA;
 309		mr->uobject	= NULL;
 310		mr->need_inval	= false;
 311
 312		pd->__internal_mr = mr;
 313
 314		if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY))
 315			pd->local_dma_lkey = pd->__internal_mr->lkey;
 316
 317		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
 318			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
 319	}
 320
 321	return pd;
 322}
 323EXPORT_SYMBOL(__ib_alloc_pd);
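
/*
 * Example (illustrative sketch): a kernel ULP normally allocates a PD via
 * the ib_alloc_pd() wrapper, which passes KBUILD_MODNAME as @caller, and
 * releases it with ib_dealloc_pd().
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	// pd->local_dma_lkey can now be used as the lkey for local SGEs
 *
 *	ib_dealloc_pd(pd);
 */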
 324
 325/**
 326 * ib_dealloc_pd_user - Deallocates a protection domain.
 327 * @pd: The protection domain to deallocate.
 328 * @udata: Valid user data or NULL for kernel object
 329 *
 330 * It is an error to call this function while any resources in the pd still
 331 * exist.  The caller is responsible to synchronously destroy them and
 332 * guarantee no new allocations will happen.
 333 */
 334int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
 335{
 336	int ret;
 337
 338	if (pd->__internal_mr) {
 339		ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
 340		WARN_ON(ret);
 341		pd->__internal_mr = NULL;
 342	}
 343
 344	ret = pd->device->ops.dealloc_pd(pd, udata);
 345	if (ret)
 346		return ret;
 347
 348	rdma_restrack_del(&pd->res);
 349	kfree(pd);
 350	return ret;
 351}
 352EXPORT_SYMBOL(ib_dealloc_pd_user);
 353
 354/* Address handles */
 355
 356/**
 357 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
  358 * @dest:       Pointer to destination ah_attr. Contents of the destination
  359 *              pointer are assumed to be invalid and the attributes are overwritten.
 360 * @src:        Pointer to source ah_attr.
 361 */
 362void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
 363		       const struct rdma_ah_attr *src)
 364{
 365	*dest = *src;
 366	if (dest->grh.sgid_attr)
 367		rdma_hold_gid_attr(dest->grh.sgid_attr);
 368}
 369EXPORT_SYMBOL(rdma_copy_ah_attr);
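
/*
 * Example (illustrative sketch): pairing rdma_copy_ah_attr() with
 * rdma_destroy_ah_attr() so the SGID reference taken by the copy is
 * released. cached_attr is a hypothetical, already valid attribute.
 *
 *	struct rdma_ah_attr tmp;
 *
 *	rdma_copy_ah_attr(&tmp, &cached_attr);
 *	// ... use tmp, e.g. pass it to rdma_create_ah() ...
 *	rdma_destroy_ah_attr(&tmp);
 */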
 370
 371/**
 372 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
 373 * @old:        Pointer to existing ah_attr which needs to be replaced.
 374 *              old is assumed to be valid or zero'd
 375 * @new:        Pointer to the new ah_attr.
 376 *
  377 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
  378 * the old ah_attr is valid; after that it copies the new attribute and holds
  379 * a reference to its SGID attribute.
 380 */
 381void rdma_replace_ah_attr(struct rdma_ah_attr *old,
 382			  const struct rdma_ah_attr *new)
 383{
 384	rdma_destroy_ah_attr(old);
 385	*old = *new;
 386	if (old->grh.sgid_attr)
 387		rdma_hold_gid_attr(old->grh.sgid_attr);
 388}
 389EXPORT_SYMBOL(rdma_replace_ah_attr);
 390
 391/**
 392 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
 393 * @dest:       Pointer to destination ah_attr to copy to.
 394 *              dest is assumed to be valid or zero'd
 395 * @src:        Pointer to the new ah_attr.
 396 *
 397 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 398 * if it is valid. This also transfers ownership of internal references from
 399 * src to dest, making src invalid in the process. No new reference of the src
 400 * ah_attr is taken.
 401 */
 402void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
 403{
 404	rdma_destroy_ah_attr(dest);
 405	*dest = *src;
 406	src->grh.sgid_attr = NULL;
 407}
 408EXPORT_SYMBOL(rdma_move_ah_attr);
 409
 410/*
 411 * Validate that the rdma_ah_attr is valid for the device before passing it
 412 * off to the driver.
 413 */
 414static int rdma_check_ah_attr(struct ib_device *device,
 415			      struct rdma_ah_attr *ah_attr)
 416{
 417	if (!rdma_is_port_valid(device, ah_attr->port_num))
 418		return -EINVAL;
 419
 420	if ((rdma_is_grh_required(device, ah_attr->port_num) ||
 421	     ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
 422	    !(ah_attr->ah_flags & IB_AH_GRH))
 423		return -EINVAL;
 424
 425	if (ah_attr->grh.sgid_attr) {
 426		/*
 427		 * Make sure the passed sgid_attr is consistent with the
 428		 * parameters
 429		 */
 430		if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
 431		    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
 432			return -EINVAL;
 433	}
 434	return 0;
 435}
 436
 437/*
 438 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
 439 * On success the caller is responsible to call rdma_unfill_sgid_attr().
 440 */
 441static int rdma_fill_sgid_attr(struct ib_device *device,
 442			       struct rdma_ah_attr *ah_attr,
 443			       const struct ib_gid_attr **old_sgid_attr)
 444{
 445	const struct ib_gid_attr *sgid_attr;
 446	struct ib_global_route *grh;
 447	int ret;
 448
 449	*old_sgid_attr = ah_attr->grh.sgid_attr;
 450
 451	ret = rdma_check_ah_attr(device, ah_attr);
 452	if (ret)
 453		return ret;
 454
 455	if (!(ah_attr->ah_flags & IB_AH_GRH))
 456		return 0;
 457
 458	grh = rdma_ah_retrieve_grh(ah_attr);
 459	if (grh->sgid_attr)
 460		return 0;
 461
 462	sgid_attr =
 463		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
 464	if (IS_ERR(sgid_attr))
 465		return PTR_ERR(sgid_attr);
 466
  467	/* Move ownership of the kref into the ah_attr */
 468	grh->sgid_attr = sgid_attr;
 469	return 0;
 470}
 471
 472static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
 473				  const struct ib_gid_attr *old_sgid_attr)
 474{
 475	/*
 476	 * Fill didn't change anything, the caller retains ownership of
 477	 * whatever it passed
 478	 */
 479	if (ah_attr->grh.sgid_attr == old_sgid_attr)
 480		return;
 481
 482	/*
  483	 * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the caller
 484	 * doesn't see any change in the rdma_ah_attr. If we get here
 485	 * old_sgid_attr is NULL.
 486	 */
 487	rdma_destroy_ah_attr(ah_attr);
 488}
 489
 490static const struct ib_gid_attr *
 491rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
 492		      const struct ib_gid_attr *old_attr)
 493{
 494	if (old_attr)
 495		rdma_put_gid_attr(old_attr);
 496	if (ah_attr->ah_flags & IB_AH_GRH) {
 497		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
 498		return ah_attr->grh.sgid_attr;
 499	}
 500	return NULL;
 501}
 502
 503static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
 504				     struct rdma_ah_attr *ah_attr,
 505				     u32 flags,
 506				     struct ib_udata *udata,
 507				     struct net_device *xmit_slave)
 508{
 509	struct rdma_ah_init_attr init_attr = {};
 510	struct ib_device *device = pd->device;
 511	struct ib_ah *ah;
 512	int ret;
 513
 514	might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
 515
 516	if (!udata && !device->ops.create_ah)
 517		return ERR_PTR(-EOPNOTSUPP);
 518
 519	ah = rdma_zalloc_drv_obj_gfp(
 520		device, ib_ah,
 521		(flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
 522	if (!ah)
 523		return ERR_PTR(-ENOMEM);
 524
 525	ah->device = device;
 526	ah->pd = pd;
 527	ah->type = ah_attr->type;
 528	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
 529	init_attr.ah_attr = ah_attr;
 530	init_attr.flags = flags;
 531	init_attr.xmit_slave = xmit_slave;
 532
 533	if (udata)
 534		ret = device->ops.create_user_ah(ah, &init_attr, udata);
 535	else
 536		ret = device->ops.create_ah(ah, &init_attr, NULL);
 537	if (ret) {
 538		if (ah->sgid_attr)
 539			rdma_put_gid_attr(ah->sgid_attr);
 540		kfree(ah);
 541		return ERR_PTR(ret);
 542	}
 543
 544	atomic_inc(&pd->usecnt);
 545	return ah;
 546}
 547
 548/**
 549 * rdma_create_ah - Creates an address handle for the
 550 * given address vector.
 551 * @pd: The protection domain associated with the address handle.
 552 * @ah_attr: The attributes of the address vector.
 553 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 554 *
  555 * It returns a newly allocated address handle on success and an ERR_PTR on error.
 556 * The address handle is used to reference a local or global destination
 557 * in all UD QP post sends.
 558 */
 559struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
 560			     u32 flags)
 561{
 562	const struct ib_gid_attr *old_sgid_attr;
 563	struct net_device *slave;
 564	struct ib_ah *ah;
 565	int ret;
 566
 567	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
 568	if (ret)
 569		return ERR_PTR(ret);
 570	slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
 571					   (flags & RDMA_CREATE_AH_SLEEPABLE) ?
 572					   GFP_KERNEL : GFP_ATOMIC);
 573	if (IS_ERR(slave)) {
 574		rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 575		return (void *)slave;
 576	}
 577	ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
 578	rdma_lag_put_ah_roce_slave(slave);
 579	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 580	return ah;
 581}
 582EXPORT_SYMBOL(rdma_create_ah);
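
/*
 * Example (illustrative sketch): building a LID-routed (InfiniBand) address
 * handle for a UD destination. dlid, sl and port_num are assumed to come
 * from path resolution (e.g. an SA path record query).
 *
 *	struct rdma_ah_attr ah_attr = {};
 *	struct ib_ah *ah;
 *
 *	ah_attr.type = rdma_ah_find_type(pd->device, port_num);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *	rdma_ah_set_dlid(&ah_attr, dlid);
 *	rdma_ah_set_sl(&ah_attr, sl);
 *
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	// ... reference the AH in UD send WRs ...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */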
 583
 584/**
 585 * rdma_create_user_ah - Creates an address handle for the
 586 * given address vector.
  587 * It resolves the destination mac address for an ah attribute of RoCE type.
 588 * @pd: The protection domain associated with the address handle.
 589 * @ah_attr: The attributes of the address vector.
  590 * @udata: pointer to user's input output buffer information needed by
  591 *         the provider driver.
 592 *
  593 * It returns a newly allocated address handle on success and an ERR_PTR on error.
 594 * The address handle is used to reference a local or global destination
 595 * in all UD QP post sends.
 596 */
 597struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
 598				  struct rdma_ah_attr *ah_attr,
 599				  struct ib_udata *udata)
 600{
 601	const struct ib_gid_attr *old_sgid_attr;
 602	struct ib_ah *ah;
 603	int err;
 604
 605	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
 606	if (err)
 607		return ERR_PTR(err);
 608
 609	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
 610		err = ib_resolve_eth_dmac(pd->device, ah_attr);
 611		if (err) {
 612			ah = ERR_PTR(err);
 613			goto out;
 614		}
 615	}
 616
 617	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
 618			     udata, NULL);
 619
 620out:
 621	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 622	return ah;
 623}
 624EXPORT_SYMBOL(rdma_create_user_ah);
 625
 626int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
 627{
 628	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
 629	struct iphdr ip4h_checked;
 630	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
 631
 632	/* If it's IPv6, the version must be 6, otherwise, the first
 633	 * 20 bytes (before the IPv4 header) are garbled.
 634	 */
 635	if (ip6h->version != 6)
 636		return (ip4h->version == 4) ? 4 : 0;
 637	/* version may be 6 or 4 because the first 20 bytes could be garbled */
 638
 639	/* RoCE v2 requires no options, thus header length
 640	 * must be 5 words
 641	 */
 642	if (ip4h->ihl != 5)
 643		return 6;
 644
 645	/* Verify checksum.
 646	 * We can't write on scattered buffers so we need to copy to
 647	 * temp buffer.
 648	 */
 649	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
 650	ip4h_checked.check = 0;
 651	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
 652	/* if IPv4 header checksum is OK, believe it */
 653	if (ip4h->check == ip4h_checked.check)
 654		return 4;
 655	return 6;
 656}
 657EXPORT_SYMBOL(ib_get_rdma_header_version);
 658
 659static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
 660						     u32 port_num,
 661						     const struct ib_grh *grh)
 662{
 663	int grh_version;
 664
 665	if (rdma_protocol_ib(device, port_num))
 666		return RDMA_NETWORK_IB;
 667
 668	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
 669
 670	if (grh_version == 4)
 671		return RDMA_NETWORK_IPV4;
 672
 673	if (grh->next_hdr == IPPROTO_UDP)
 674		return RDMA_NETWORK_IPV6;
 675
 676	return RDMA_NETWORK_ROCE_V1;
 677}
 678
 679struct find_gid_index_context {
 680	u16 vlan_id;
 681	enum ib_gid_type gid_type;
 682};
 683
 684static bool find_gid_index(const union ib_gid *gid,
 685			   const struct ib_gid_attr *gid_attr,
 686			   void *context)
 687{
 688	struct find_gid_index_context *ctx = context;
 689	u16 vlan_id = 0xffff;
 690	int ret;
 691
 692	if (ctx->gid_type != gid_attr->gid_type)
 693		return false;
 694
 695	ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
 696	if (ret)
 697		return false;
 698
 699	return ctx->vlan_id == vlan_id;
 700}
 701
 702static const struct ib_gid_attr *
 703get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
 704		       u16 vlan_id, const union ib_gid *sgid,
 705		       enum ib_gid_type gid_type)
 706{
 707	struct find_gid_index_context context = {.vlan_id = vlan_id,
 708						 .gid_type = gid_type};
 709
 710	return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
 711				       &context);
 712}
 713
 714int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
 715			      enum rdma_network_type net_type,
 716			      union ib_gid *sgid, union ib_gid *dgid)
 717{
 718	struct sockaddr_in  src_in;
 719	struct sockaddr_in  dst_in;
 720	__be32 src_saddr, dst_saddr;
 721
 722	if (!sgid || !dgid)
 723		return -EINVAL;
 724
 725	if (net_type == RDMA_NETWORK_IPV4) {
 726		memcpy(&src_in.sin_addr.s_addr,
 727		       &hdr->roce4grh.saddr, 4);
 728		memcpy(&dst_in.sin_addr.s_addr,
 729		       &hdr->roce4grh.daddr, 4);
 730		src_saddr = src_in.sin_addr.s_addr;
 731		dst_saddr = dst_in.sin_addr.s_addr;
 732		ipv6_addr_set_v4mapped(src_saddr,
 733				       (struct in6_addr *)sgid);
 734		ipv6_addr_set_v4mapped(dst_saddr,
 735				       (struct in6_addr *)dgid);
 736		return 0;
  737	} else if (net_type == RDMA_NETWORK_IPV6 || net_type == RDMA_NETWORK_IB ||
  738		   net_type == RDMA_NETWORK_ROCE_V1) {
 739		*dgid = hdr->ibgrh.dgid;
 740		*sgid = hdr->ibgrh.sgid;
 741		return 0;
 742	} else {
 743		return -EINVAL;
 744	}
 745}
 746EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
 747
 748/* Resolve destination mac address and hop limit for unicast destination
 749 * GID entry, considering the source GID entry as well.
  750 * The ah_attribute must have a valid port_num and sgid_index.
 751 */
 752static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
 753				       struct rdma_ah_attr *ah_attr)
 754{
 755	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
 756	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
 757	int hop_limit = 0xff;
 758	int ret = 0;
 759
 760	/* If destination is link local and source GID is RoCEv1,
 761	 * IP stack is not used.
 762	 */
 763	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
 764	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
 765		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
 766				ah_attr->roce.dmac);
 767		return ret;
 768	}
 769
 770	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
 771					   ah_attr->roce.dmac,
 772					   sgid_attr, &hop_limit);
 773
 774	grh->hop_limit = hop_limit;
 775	return ret;
 776}
 777
 778/*
 779 * This function initializes address handle attributes from the incoming packet.
  780 * The incoming packet's dgid is the GID of the receiver node on which this
  781 * code is executing, and its sgid is the GID of the sender.
  782 *
  783 * When resolving the destination's mac address, the arrived dgid is used as
  784 * the sgid and the sgid is used as the dgid, because the sgid holds the
  785 * destination GID to respond to.
 786 *
 787 * On success the caller is responsible to call rdma_destroy_ah_attr on the
 788 * attr.
 789 */
 790int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
 791			    const struct ib_wc *wc, const struct ib_grh *grh,
 792			    struct rdma_ah_attr *ah_attr)
 793{
 794	u32 flow_class;
 795	int ret;
 796	enum rdma_network_type net_type = RDMA_NETWORK_IB;
 797	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
 798	const struct ib_gid_attr *sgid_attr;
 799	int hoplimit = 0xff;
 800	union ib_gid dgid;
 801	union ib_gid sgid;
 802
 803	might_sleep();
 804
 805	memset(ah_attr, 0, sizeof *ah_attr);
 806	ah_attr->type = rdma_ah_find_type(device, port_num);
 807	if (rdma_cap_eth_ah(device, port_num)) {
 808		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
 809			net_type = wc->network_hdr_type;
 810		else
 811			net_type = ib_get_net_type_by_grh(device, port_num, grh);
 812		gid_type = ib_network_to_gid_type(net_type);
 813	}
 814	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
 815					&sgid, &dgid);
 816	if (ret)
 817		return ret;
 818
 819	rdma_ah_set_sl(ah_attr, wc->sl);
 820	rdma_ah_set_port_num(ah_attr, port_num);
 821
 822	if (rdma_protocol_roce(device, port_num)) {
 823		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
 824				wc->vlan_id : 0xffff;
 825
 826		if (!(wc->wc_flags & IB_WC_GRH))
 827			return -EPROTOTYPE;
 828
 829		sgid_attr = get_sgid_attr_from_eth(device, port_num,
 830						   vlan_id, &dgid,
 831						   gid_type);
 832		if (IS_ERR(sgid_attr))
 833			return PTR_ERR(sgid_attr);
 834
 835		flow_class = be32_to_cpu(grh->version_tclass_flow);
 836		rdma_move_grh_sgid_attr(ah_attr,
 837					&sgid,
 838					flow_class & 0xFFFFF,
 839					hoplimit,
 840					(flow_class >> 20) & 0xFF,
 841					sgid_attr);
 842
 843		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
 844		if (ret)
 845			rdma_destroy_ah_attr(ah_attr);
 846
 847		return ret;
 848	} else {
 849		rdma_ah_set_dlid(ah_attr, wc->slid);
 850		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
 851
 852		if ((wc->wc_flags & IB_WC_GRH) == 0)
 853			return 0;
 854
 855		if (dgid.global.interface_id !=
 856					cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
 857			sgid_attr = rdma_find_gid_by_port(
 858				device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
 859		} else
 860			sgid_attr = rdma_get_gid_attr(device, port_num, 0);
 861
 862		if (IS_ERR(sgid_attr))
 863			return PTR_ERR(sgid_attr);
 864		flow_class = be32_to_cpu(grh->version_tclass_flow);
 865		rdma_move_grh_sgid_attr(ah_attr,
 866					&sgid,
 867					flow_class & 0xFFFFF,
 868					hoplimit,
 869					(flow_class >> 20) & 0xFF,
 870					sgid_attr);
 871
 872		return 0;
 873	}
 874}
 875EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
 876
 877/**
 878 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
 879 * of the reference
 880 *
 881 * @attr:	Pointer to AH attribute structure
 882 * @dgid:	Destination GID
 883 * @flow_label:	Flow label
 884 * @hop_limit:	Hop limit
 885 * @traffic_class: traffic class
 886 * @sgid_attr:	Pointer to SGID attribute
 887 *
 888 * This takes ownership of the sgid_attr reference. The caller must ensure
 889 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
 890 * calling this function.
 891 */
 892void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
 893			     u32 flow_label, u8 hop_limit, u8 traffic_class,
 894			     const struct ib_gid_attr *sgid_attr)
 895{
 896	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
 897			traffic_class);
 898	attr->grh.sgid_attr = sgid_attr;
 899}
 900EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
 901
 902/**
 903 * rdma_destroy_ah_attr - Release reference to SGID attribute of
 904 * ah attribute.
 905 * @ah_attr: Pointer to ah attribute
 906 *
 907 * Release reference to the SGID attribute of the ah attribute if it is
 908 * non NULL. It is safe to call this multiple times, and safe to call it on
 909 * a zero initialized ah_attr.
 910 */
 911void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
 912{
 913	if (ah_attr->grh.sgid_attr) {
 914		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
 915		ah_attr->grh.sgid_attr = NULL;
 916	}
 917}
 918EXPORT_SYMBOL(rdma_destroy_ah_attr);
 919
 920struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
 921				   const struct ib_grh *grh, u32 port_num)
 922{
 923	struct rdma_ah_attr ah_attr;
 924	struct ib_ah *ah;
 925	int ret;
 926
 927	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
 928	if (ret)
 929		return ERR_PTR(ret);
 930
 931	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 932
 933	rdma_destroy_ah_attr(&ah_attr);
 934	return ah;
 935}
 936EXPORT_SYMBOL(ib_create_ah_from_wc);
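
/*
 * Example (illustrative sketch): a UD responder creating an AH that
 * addresses the sender of a received packet, using the work completion and
 * the GRH that the HCA scatters into the first 40 bytes of the receive
 * buffer (valid when wc->wc_flags has IB_WC_GRH). recv_buf is a
 * hypothetical receive-buffer pointer.
 *
 *	struct ib_grh *grh = (struct ib_grh *)recv_buf;
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	// ... post the reply on a UD QP using this AH ...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */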
 937
 938int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
 939{
 940	const struct ib_gid_attr *old_sgid_attr;
 941	int ret;
 942
 943	if (ah->type != ah_attr->type)
 944		return -EINVAL;
 945
 946	ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
 947	if (ret)
 948		return ret;
 949
 950	ret = ah->device->ops.modify_ah ?
 951		ah->device->ops.modify_ah(ah, ah_attr) :
 952		-EOPNOTSUPP;
 953
 954	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
 955	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 956	return ret;
 957}
 958EXPORT_SYMBOL(rdma_modify_ah);
 959
 960int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
 961{
 962	ah_attr->grh.sgid_attr = NULL;
 963
 964	return ah->device->ops.query_ah ?
 965		ah->device->ops.query_ah(ah, ah_attr) :
 966		-EOPNOTSUPP;
 967}
 968EXPORT_SYMBOL(rdma_query_ah);
 969
 970int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
 971{
 972	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
 973	struct ib_pd *pd;
 974	int ret;
 975
 976	might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
 977
 978	pd = ah->pd;
 979
 980	ret = ah->device->ops.destroy_ah(ah, flags);
 981	if (ret)
 982		return ret;
 983
 984	atomic_dec(&pd->usecnt);
 985	if (sgid_attr)
 986		rdma_put_gid_attr(sgid_attr);
 987
 988	kfree(ah);
 989	return ret;
 990}
 991EXPORT_SYMBOL(rdma_destroy_ah_user);
 992
 993/* Shared receive queues */
 994
 995/**
 996 * ib_create_srq_user - Creates a SRQ associated with the specified protection
 997 *   domain.
 998 * @pd: The protection domain associated with the SRQ.
 999 * @srq_init_attr: A list of initial attributes required to create the
1000 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1001 *   the actual capabilities of the created SRQ.
1002 * @uobject: uobject pointer if this is not a kernel SRQ
1003 * @udata: udata pointer if this is not a kernel SRQ
1004 *
 1005 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1006 * requested size of the SRQ, and set to the actual values allocated
1007 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1008 * will always be at least as large as the requested values.
1009 */
1010struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
1011				  struct ib_srq_init_attr *srq_init_attr,
1012				  struct ib_usrq_object *uobject,
1013				  struct ib_udata *udata)
1014{
1015	struct ib_srq *srq;
1016	int ret;
1017
1018	srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
1019	if (!srq)
1020		return ERR_PTR(-ENOMEM);
1021
1022	srq->device = pd->device;
1023	srq->pd = pd;
1024	srq->event_handler = srq_init_attr->event_handler;
1025	srq->srq_context = srq_init_attr->srq_context;
1026	srq->srq_type = srq_init_attr->srq_type;
1027	srq->uobject = uobject;
1028
1029	if (ib_srq_has_cq(srq->srq_type)) {
1030		srq->ext.cq = srq_init_attr->ext.cq;
1031		atomic_inc(&srq->ext.cq->usecnt);
1032	}
1033	if (srq->srq_type == IB_SRQT_XRC) {
1034		srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
1035		if (srq->ext.xrc.xrcd)
1036			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
1037	}
1038	atomic_inc(&pd->usecnt);
1039
1040	rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
1041	rdma_restrack_parent_name(&srq->res, &pd->res);
1042
1043	ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
1044	if (ret) {
1045		rdma_restrack_put(&srq->res);
1046		atomic_dec(&pd->usecnt);
1047		if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1048			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1049		if (ib_srq_has_cq(srq->srq_type))
1050			atomic_dec(&srq->ext.cq->usecnt);
1051		kfree(srq);
1052		return ERR_PTR(ret);
1053	}
1054
1055	rdma_restrack_add(&srq->res);
1056
1057	return srq;
1058}
1059EXPORT_SYMBOL(ib_create_srq_user);
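
/*
 * Example (illustrative sketch): kernel users normally call the
 * ib_create_srq() wrapper, which passes NULL uobject/udata to this function.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr		= { .max_wr = 256, .max_sge = 1 },
 *		.srq_type	= IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	// srq_attr.attr now holds the max_wr/max_sge actually granted
 *	ib_destroy_srq(srq);
 */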
1060
1061int ib_modify_srq(struct ib_srq *srq,
1062		  struct ib_srq_attr *srq_attr,
1063		  enum ib_srq_attr_mask srq_attr_mask)
1064{
1065	return srq->device->ops.modify_srq ?
1066		srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
1067					    NULL) : -EOPNOTSUPP;
1068}
1069EXPORT_SYMBOL(ib_modify_srq);
1070
1071int ib_query_srq(struct ib_srq *srq,
1072		 struct ib_srq_attr *srq_attr)
1073{
1074	return srq->device->ops.query_srq ?
1075		srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1076}
1077EXPORT_SYMBOL(ib_query_srq);
1078
1079int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1080{
1081	int ret;
1082
1083	if (atomic_read(&srq->usecnt))
1084		return -EBUSY;
1085
1086	ret = srq->device->ops.destroy_srq(srq, udata);
1087	if (ret)
1088		return ret;
1089
1090	atomic_dec(&srq->pd->usecnt);
1091	if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1092		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1093	if (ib_srq_has_cq(srq->srq_type))
1094		atomic_dec(&srq->ext.cq->usecnt);
1095	rdma_restrack_del(&srq->res);
1096	kfree(srq);
1097
1098	return ret;
1099}
1100EXPORT_SYMBOL(ib_destroy_srq_user);
1101
1102/* Queue pairs */
1103
1104static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
1105{
1106	struct ib_qp *qp = context;
1107	unsigned long flags;
1108
1109	spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
1110	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1111		if (event->element.qp->event_handler)
1112			event->element.qp->event_handler(event, event->element.qp->qp_context);
1113	spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
1114}
1115
1116static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
1117				  void (*event_handler)(struct ib_event *, void *),
1118				  void *qp_context)
1119{
1120	struct ib_qp *qp;
1121	unsigned long flags;
1122	int err;
1123
1124	qp = kzalloc(sizeof *qp, GFP_KERNEL);
1125	if (!qp)
1126		return ERR_PTR(-ENOMEM);
1127
1128	qp->real_qp = real_qp;
1129	err = ib_open_shared_qp_security(qp, real_qp->device);
1130	if (err) {
1131		kfree(qp);
1132		return ERR_PTR(err);
1133	}
1134
1135	qp->real_qp = real_qp;
1136	atomic_inc(&real_qp->usecnt);
1137	qp->device = real_qp->device;
1138	qp->event_handler = event_handler;
1139	qp->qp_context = qp_context;
1140	qp->qp_num = real_qp->qp_num;
1141	qp->qp_type = real_qp->qp_type;
1142
1143	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1144	list_add(&qp->open_list, &real_qp->open_list);
1145	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1146
1147	return qp;
1148}
1149
1150struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1151			 struct ib_qp_open_attr *qp_open_attr)
1152{
1153	struct ib_qp *qp, *real_qp;
1154
1155	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
1156		return ERR_PTR(-EINVAL);
1157
1158	down_read(&xrcd->tgt_qps_rwsem);
1159	real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
1160	if (!real_qp) {
1161		up_read(&xrcd->tgt_qps_rwsem);
1162		return ERR_PTR(-EINVAL);
1163	}
1164	qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
1165			  qp_open_attr->qp_context);
1166	up_read(&xrcd->tgt_qps_rwsem);
1167	return qp;
1168}
1169EXPORT_SYMBOL(ib_open_qp);
1170
1171static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1172					struct ib_qp_init_attr *qp_init_attr)
1173{
1174	struct ib_qp *real_qp = qp;
1175	int err;
1176
1177	qp->event_handler = __ib_shared_qp_event_handler;
1178	qp->qp_context = qp;
1179	qp->pd = NULL;
1180	qp->send_cq = qp->recv_cq = NULL;
1181	qp->srq = NULL;
1182	qp->xrcd = qp_init_attr->xrcd;
1183	atomic_inc(&qp_init_attr->xrcd->usecnt);
1184	INIT_LIST_HEAD(&qp->open_list);
1185
1186	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1187			  qp_init_attr->qp_context);
1188	if (IS_ERR(qp))
1189		return qp;
1190
1191	err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
1192			      real_qp, GFP_KERNEL));
1193	if (err) {
1194		ib_close_qp(qp);
1195		return ERR_PTR(err);
1196	}
1197	return qp;
1198}
1199
1200static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
1201			       struct ib_qp_init_attr *attr,
1202			       struct ib_udata *udata,
1203			       struct ib_uqp_object *uobj, const char *caller)
1204{
1205	struct ib_udata dummy = {};
1206	struct ib_qp *qp;
1207	int ret;
1208
1209	if (!dev->ops.create_qp)
1210		return ERR_PTR(-EOPNOTSUPP);
1211
1212	qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
1213	if (!qp)
1214		return ERR_PTR(-ENOMEM);
1215
1216	qp->device = dev;
1217	qp->pd = pd;
1218	qp->uobject = uobj;
1219	qp->real_qp = qp;
1220
1221	qp->qp_type = attr->qp_type;
1222	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
1223	qp->srq = attr->srq;
1224	qp->event_handler = attr->event_handler;
1225	qp->port = attr->port_num;
1226	qp->qp_context = attr->qp_context;
1227
1228	spin_lock_init(&qp->mr_lock);
1229	INIT_LIST_HEAD(&qp->rdma_mrs);
1230	INIT_LIST_HEAD(&qp->sig_mrs);
1231
1232	qp->send_cq = attr->send_cq;
1233	qp->recv_cq = attr->recv_cq;
1234
1235	rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
1236	WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
1237	rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
1238	ret = dev->ops.create_qp(qp, attr, udata);
1239	if (ret)
1240		goto err_create;
1241
1242	/*
1243	 * TODO: The mlx4 internally overwrites send_cq and recv_cq.
1244	 * Unfortunately, it is not an easy task to fix that driver.
1245	 */
1246	qp->send_cq = attr->send_cq;
1247	qp->recv_cq = attr->recv_cq;
1248
1249	ret = ib_create_qp_security(qp, dev);
1250	if (ret)
1251		goto err_security;
1252
1253	rdma_restrack_add(&qp->res);
1254	return qp;
1255
1256err_security:
1257	qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL);
1258err_create:
1259	rdma_restrack_put(&qp->res);
1260	kfree(qp);
1261	return ERR_PTR(ret);
1262
1263}
1264
1265/**
1266 * ib_create_qp_user - Creates a QP associated with the specified protection
1267 *   domain.
1268 * @dev: IB device
1269 * @pd: The protection domain associated with the QP.
1270 * @attr: A list of initial attributes required to create the
1271 *   QP.  If QP creation succeeds, then the attributes are updated to
1272 *   the actual capabilities of the created QP.
1273 * @udata: User data
 1274 * @uobj: uverbs object
1275 * @caller: caller's build-time module name
1276 */
1277struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
1278				struct ib_qp_init_attr *attr,
1279				struct ib_udata *udata,
1280				struct ib_uqp_object *uobj, const char *caller)
1281{
1282	struct ib_qp *qp, *xrc_qp;
1283
1284	if (attr->qp_type == IB_QPT_XRC_TGT)
1285		qp = create_qp(dev, pd, attr, NULL, NULL, caller);
1286	else
1287		qp = create_qp(dev, pd, attr, udata, uobj, NULL);
1288	if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
1289		return qp;
1290
1291	xrc_qp = create_xrc_qp_user(qp, attr);
1292	if (IS_ERR(xrc_qp)) {
1293		ib_destroy_qp(qp);
1294		return xrc_qp;
1295	}
1296
1297	xrc_qp->uobject = uobj;
1298	return xrc_qp;
1299}
1300EXPORT_SYMBOL(ib_create_qp_user);
1301
1302void ib_qp_usecnt_inc(struct ib_qp *qp)
1303{
1304	if (qp->pd)
1305		atomic_inc(&qp->pd->usecnt);
1306	if (qp->send_cq)
1307		atomic_inc(&qp->send_cq->usecnt);
1308	if (qp->recv_cq)
1309		atomic_inc(&qp->recv_cq->usecnt);
1310	if (qp->srq)
1311		atomic_inc(&qp->srq->usecnt);
1312	if (qp->rwq_ind_tbl)
1313		atomic_inc(&qp->rwq_ind_tbl->usecnt);
1314}
1315EXPORT_SYMBOL(ib_qp_usecnt_inc);
1316
1317void ib_qp_usecnt_dec(struct ib_qp *qp)
1318{
1319	if (qp->rwq_ind_tbl)
1320		atomic_dec(&qp->rwq_ind_tbl->usecnt);
1321	if (qp->srq)
1322		atomic_dec(&qp->srq->usecnt);
1323	if (qp->recv_cq)
1324		atomic_dec(&qp->recv_cq->usecnt);
1325	if (qp->send_cq)
1326		atomic_dec(&qp->send_cq->usecnt);
1327	if (qp->pd)
1328		atomic_dec(&qp->pd->usecnt);
1329}
1330EXPORT_SYMBOL(ib_qp_usecnt_dec);
1331
1332struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
1333				  struct ib_qp_init_attr *qp_init_attr,
1334				  const char *caller)
1335{
1336	struct ib_device *device = pd->device;
1337	struct ib_qp *qp;
1338	int ret;
1339
1340	/*
 1341	 * If the caller is using the RDMA API, calculate the resources
1342	 * needed for the RDMA READ/WRITE operations.
1343	 *
1344	 * Note that these callers need to pass in a port number.
1345	 */
1346	if (qp_init_attr->cap.max_rdma_ctxs)
1347		rdma_rw_init_qp(device, qp_init_attr);
1348
1349	qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
1350	if (IS_ERR(qp))
1351		return qp;
1352
1353	ib_qp_usecnt_inc(qp);
1354
1355	if (qp_init_attr->cap.max_rdma_ctxs) {
1356		ret = rdma_rw_init_mrs(qp, qp_init_attr);
1357		if (ret)
1358			goto err;
1359	}
1360
1361	/*
1362	 * Note: all hw drivers guarantee that max_send_sge is lower than
1363	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
1364	 * max_send_sge <= max_sge_rd.
1365	 */
1366	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1367	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1368				 device->attrs.max_sge_rd);
1369	if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
1370		qp->integrity_en = true;
1371
1372	return qp;
1373
1374err:
1375	ib_destroy_qp(qp);
1376	return ERR_PTR(ret);
1377
1378}
1379EXPORT_SYMBOL(ib_create_qp_kernel);
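
/*
 * Example (illustrative sketch): a kernel ULP creating an RC QP via the
 * ib_create_qp() wrapper, which supplies KBUILD_MODNAME as @caller. pd,
 * send_cq and recv_cq are assumed to have been allocated already.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.cap		= {
 *			.max_send_wr	= 64,
 *			.max_recv_wr	= 64,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	// ... transition the QP through INIT/RTR/RTS with ib_modify_qp() ...
 *	ib_destroy_qp(qp);
 */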
1380
1381static const struct {
1382	int			valid;
1383	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
1384	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
1385} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
1386	[IB_QPS_RESET] = {
1387		[IB_QPS_RESET] = { .valid = 1 },
1388		[IB_QPS_INIT]  = {
1389			.valid = 1,
1390			.req_param = {
1391				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1392						IB_QP_PORT			|
1393						IB_QP_QKEY),
1394				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
1395				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
1396						IB_QP_PORT			|
1397						IB_QP_ACCESS_FLAGS),
1398				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
1399						IB_QP_PORT			|
1400						IB_QP_ACCESS_FLAGS),
1401				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1402						IB_QP_PORT			|
1403						IB_QP_ACCESS_FLAGS),
1404				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1405						IB_QP_PORT			|
1406						IB_QP_ACCESS_FLAGS),
1407				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1408						IB_QP_QKEY),
1409				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1410						IB_QP_QKEY),
1411			}
1412		},
1413	},
1414	[IB_QPS_INIT]  = {
1415		[IB_QPS_RESET] = { .valid = 1 },
1416		[IB_QPS_ERR] =   { .valid = 1 },
1417		[IB_QPS_INIT]  = {
1418			.valid = 1,
1419			.opt_param = {
1420				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1421						IB_QP_PORT			|
1422						IB_QP_QKEY),
1423				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
1424						IB_QP_PORT			|
1425						IB_QP_ACCESS_FLAGS),
1426				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
1427						IB_QP_PORT			|
1428						IB_QP_ACCESS_FLAGS),
1429				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1430						IB_QP_PORT			|
1431						IB_QP_ACCESS_FLAGS),
1432				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1433						IB_QP_PORT			|
1434						IB_QP_ACCESS_FLAGS),
1435				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1436						IB_QP_QKEY),
1437				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1438						IB_QP_QKEY),
1439			}
1440		},
1441		[IB_QPS_RTR]   = {
1442			.valid = 1,
1443			.req_param = {
1444				[IB_QPT_UC]  = (IB_QP_AV			|
1445						IB_QP_PATH_MTU			|
1446						IB_QP_DEST_QPN			|
1447						IB_QP_RQ_PSN),
1448				[IB_QPT_RC]  = (IB_QP_AV			|
1449						IB_QP_PATH_MTU			|
1450						IB_QP_DEST_QPN			|
1451						IB_QP_RQ_PSN			|
1452						IB_QP_MAX_DEST_RD_ATOMIC	|
1453						IB_QP_MIN_RNR_TIMER),
1454				[IB_QPT_XRC_INI] = (IB_QP_AV			|
1455						IB_QP_PATH_MTU			|
1456						IB_QP_DEST_QPN			|
1457						IB_QP_RQ_PSN),
1458				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
1459						IB_QP_PATH_MTU			|
1460						IB_QP_DEST_QPN			|
1461						IB_QP_RQ_PSN			|
1462						IB_QP_MAX_DEST_RD_ATOMIC	|
1463						IB_QP_MIN_RNR_TIMER),
1464			},
1465			.opt_param = {
1466				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1467						 IB_QP_QKEY),
1468				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
1469						 IB_QP_ACCESS_FLAGS		|
1470						 IB_QP_PKEY_INDEX),
1471				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
1472						 IB_QP_ACCESS_FLAGS		|
1473						 IB_QP_PKEY_INDEX),
1474				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
1475						 IB_QP_ACCESS_FLAGS		|
1476						 IB_QP_PKEY_INDEX),
1477				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
1478						 IB_QP_ACCESS_FLAGS		|
1479						 IB_QP_PKEY_INDEX),
1480				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1481						 IB_QP_QKEY),
1482				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1483						 IB_QP_QKEY),
1484			 },
1485		},
1486	},
1487	[IB_QPS_RTR]   = {
1488		[IB_QPS_RESET] = { .valid = 1 },
1489		[IB_QPS_ERR] =   { .valid = 1 },
1490		[IB_QPS_RTS]   = {
1491			.valid = 1,
1492			.req_param = {
1493				[IB_QPT_UD]  = IB_QP_SQ_PSN,
1494				[IB_QPT_UC]  = IB_QP_SQ_PSN,
1495				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
1496						IB_QP_RETRY_CNT			|
1497						IB_QP_RNR_RETRY			|
1498						IB_QP_SQ_PSN			|
1499						IB_QP_MAX_QP_RD_ATOMIC),
1500				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
1501						IB_QP_RETRY_CNT			|
1502						IB_QP_RNR_RETRY			|
1503						IB_QP_SQ_PSN			|
1504						IB_QP_MAX_QP_RD_ATOMIC),
1505				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
1506						IB_QP_SQ_PSN),
1507				[IB_QPT_SMI] = IB_QP_SQ_PSN,
1508				[IB_QPT_GSI] = IB_QP_SQ_PSN,
1509			},
1510			.opt_param = {
1511				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
1512						 IB_QP_QKEY),
1513				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
1514						 IB_QP_ALT_PATH			|
1515						 IB_QP_ACCESS_FLAGS		|
1516						 IB_QP_PATH_MIG_STATE),
1517				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
1518						 IB_QP_ALT_PATH			|
1519						 IB_QP_ACCESS_FLAGS		|
1520						 IB_QP_MIN_RNR_TIMER		|
1521						 IB_QP_PATH_MIG_STATE),
1522				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1523						 IB_QP_ALT_PATH			|
1524						 IB_QP_ACCESS_FLAGS		|
1525						 IB_QP_PATH_MIG_STATE),
1526				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1527						 IB_QP_ALT_PATH			|
1528						 IB_QP_ACCESS_FLAGS		|
1529						 IB_QP_MIN_RNR_TIMER		|
1530						 IB_QP_PATH_MIG_STATE),
1531				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
1532						 IB_QP_QKEY),
1533				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
1534						 IB_QP_QKEY),
1535				 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1536			 }
1537		}
1538	},
1539	[IB_QPS_RTS]   = {
1540		[IB_QPS_RESET] = { .valid = 1 },
1541		[IB_QPS_ERR] =   { .valid = 1 },
1542		[IB_QPS_RTS]   = {
1543			.valid = 1,
1544			.opt_param = {
1545				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1546						IB_QP_QKEY),
1547				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1548						IB_QP_ACCESS_FLAGS		|
1549						IB_QP_ALT_PATH			|
1550						IB_QP_PATH_MIG_STATE),
1551				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
1552						IB_QP_ACCESS_FLAGS		|
1553						IB_QP_ALT_PATH			|
1554						IB_QP_PATH_MIG_STATE		|
1555						IB_QP_MIN_RNR_TIMER),
1556				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1557						IB_QP_ACCESS_FLAGS		|
1558						IB_QP_ALT_PATH			|
1559						IB_QP_PATH_MIG_STATE),
1560				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1561						IB_QP_ACCESS_FLAGS		|
1562						IB_QP_ALT_PATH			|
1563						IB_QP_PATH_MIG_STATE		|
1564						IB_QP_MIN_RNR_TIMER),
1565				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1566						IB_QP_QKEY),
1567				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1568						IB_QP_QKEY),
1569				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1570			}
1571		},
1572		[IB_QPS_SQD]   = {
1573			.valid = 1,
1574			.opt_param = {
1575				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1576				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1577				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1578				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1579				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1580				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1581				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1582			}
1583		},
1584	},
1585	[IB_QPS_SQD]   = {
1586		[IB_QPS_RESET] = { .valid = 1 },
1587		[IB_QPS_ERR] =   { .valid = 1 },
1588		[IB_QPS_RTS]   = {
1589			.valid = 1,
1590			.opt_param = {
1591				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1592						IB_QP_QKEY),
1593				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1594						IB_QP_ALT_PATH			|
1595						IB_QP_ACCESS_FLAGS		|
1596						IB_QP_PATH_MIG_STATE),
1597				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
1598						IB_QP_ALT_PATH			|
1599						IB_QP_ACCESS_FLAGS		|
1600						IB_QP_MIN_RNR_TIMER		|
1601						IB_QP_PATH_MIG_STATE),
1602				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1603						IB_QP_ALT_PATH			|
1604						IB_QP_ACCESS_FLAGS		|
1605						IB_QP_PATH_MIG_STATE),
1606				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1607						IB_QP_ALT_PATH			|
1608						IB_QP_ACCESS_FLAGS		|
1609						IB_QP_MIN_RNR_TIMER		|
1610						IB_QP_PATH_MIG_STATE),
1611				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1612						IB_QP_QKEY),
1613				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1614						IB_QP_QKEY),
1615			}
1616		},
1617		[IB_QPS_SQD]   = {
1618			.valid = 1,
1619			.opt_param = {
1620				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1621						IB_QP_QKEY),
1622				[IB_QPT_UC]  = (IB_QP_AV			|
1623						IB_QP_ALT_PATH			|
1624						IB_QP_ACCESS_FLAGS		|
1625						IB_QP_PKEY_INDEX		|
1626						IB_QP_PATH_MIG_STATE),
1627				[IB_QPT_RC]  = (IB_QP_PORT			|
1628						IB_QP_AV			|
1629						IB_QP_TIMEOUT			|
1630						IB_QP_RETRY_CNT			|
1631						IB_QP_RNR_RETRY			|
1632						IB_QP_MAX_QP_RD_ATOMIC		|
1633						IB_QP_MAX_DEST_RD_ATOMIC	|
1634						IB_QP_ALT_PATH			|
1635						IB_QP_ACCESS_FLAGS		|
1636						IB_QP_PKEY_INDEX		|
1637						IB_QP_MIN_RNR_TIMER		|
1638						IB_QP_PATH_MIG_STATE),
1639				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
1640						IB_QP_AV			|
1641						IB_QP_TIMEOUT			|
1642						IB_QP_RETRY_CNT			|
1643						IB_QP_RNR_RETRY			|
1644						IB_QP_MAX_QP_RD_ATOMIC		|
1645						IB_QP_ALT_PATH			|
1646						IB_QP_ACCESS_FLAGS		|
1647						IB_QP_PKEY_INDEX		|
1648						IB_QP_PATH_MIG_STATE),
1649				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
1650						IB_QP_AV			|
1651						IB_QP_TIMEOUT			|
1652						IB_QP_MAX_DEST_RD_ATOMIC	|
1653						IB_QP_ALT_PATH			|
1654						IB_QP_ACCESS_FLAGS		|
1655						IB_QP_PKEY_INDEX		|
1656						IB_QP_MIN_RNR_TIMER		|
1657						IB_QP_PATH_MIG_STATE),
1658				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1659						IB_QP_QKEY),
1660				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1661						IB_QP_QKEY),
1662			}
1663		}
1664	},
1665	[IB_QPS_SQE]   = {
1666		[IB_QPS_RESET] = { .valid = 1 },
1667		[IB_QPS_ERR] =   { .valid = 1 },
1668		[IB_QPS_RTS]   = {
1669			.valid = 1,
1670			.opt_param = {
1671				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1672						IB_QP_QKEY),
1673				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1674						IB_QP_ACCESS_FLAGS),
1675				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1676						IB_QP_QKEY),
1677				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1678						IB_QP_QKEY),
1679			}
1680		}
1681	},
1682	[IB_QPS_ERR] = {
1683		[IB_QPS_RESET] = { .valid = 1 },
1684		[IB_QPS_ERR] =   { .valid = 1 }
1685	}
1686};
1687
1688bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1689			enum ib_qp_type type, enum ib_qp_attr_mask mask)
1690{
1691	enum ib_qp_attr_mask req_param, opt_param;
1692
1693	if (mask & IB_QP_CUR_STATE  &&
1694	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1695	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1696		return false;
1697
1698	if (!qp_state_table[cur_state][next_state].valid)
1699		return false;
1700
1701	req_param = qp_state_table[cur_state][next_state].req_param[type];
1702	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1703
1704	if ((mask & req_param) != req_param)
1705		return false;
1706
1707	if (mask & ~(req_param | opt_param | IB_QP_STATE))
1708		return false;
1709
1710	return true;
1711}
1712EXPORT_SYMBOL(ib_modify_qp_is_ok);
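
/*
 * Example (illustrative sketch): a driver's modify_qp implementation can use
 * ib_modify_qp_is_ok() to reject transitions the table above forbids before
 * touching hardware. cur_state and new_state are assumed to have been
 * derived from the QP and from attr/attr_mask by the caller.
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */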
1713
1714/**
1715 * ib_resolve_eth_dmac - Resolve destination mac address
1716 * @device:		Device to consider
1717 * @ah_attr:		address handle attribute which describes the
1718 *			source and destination parameters
 1719 * ib_resolve_eth_dmac() resolves the destination mac address and L3 hop limit.
 1720 * It returns 0 on success or an appropriate error code. It initializes the
 1721 * necessary ah_attr fields when the call is successful.
1722 */
1723static int ib_resolve_eth_dmac(struct ib_device *device,
1724			       struct rdma_ah_attr *ah_attr)
1725{
1726	int ret = 0;
1727
1728	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1729		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1730			__be32 addr = 0;
1731
1732			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1733			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1734		} else {
1735			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1736					(char *)ah_attr->roce.dmac);
1737		}
1738	} else {
1739		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1740	}
1741	return ret;
1742}
1743
1744static bool is_qp_type_connected(const struct ib_qp *qp)
1745{
1746	return (qp->qp_type == IB_QPT_UC ||
1747		qp->qp_type == IB_QPT_RC ||
1748		qp->qp_type == IB_QPT_XRC_INI ||
1749		qp->qp_type == IB_QPT_XRC_TGT);
1750}
1751
1752/*
1753 * IB core internal function to perform QP attributes modification.
1754 */
1755static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1756			 int attr_mask, struct ib_udata *udata)
1757{
1758	u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1759	const struct ib_gid_attr *old_sgid_attr_av;
1760	const struct ib_gid_attr *old_sgid_attr_alt_av;
1761	int ret;
1762
1763	attr->xmit_slave = NULL;
1764	if (attr_mask & IB_QP_AV) {
1765		ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1766					  &old_sgid_attr_av);
1767		if (ret)
1768			return ret;
1769
1770		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1771		    is_qp_type_connected(qp)) {
1772			struct net_device *slave;
1773
1774			/*
1775			 * If the user provided the qp_attr then we have to
 1776			 * resolve it. Kernel users have to provide already
1777			 * resolved rdma_ah_attr's.
1778			 */
1779			if (udata) {
1780				ret = ib_resolve_eth_dmac(qp->device,
1781							  &attr->ah_attr);
1782				if (ret)
1783					goto out_av;
1784			}
1785			slave = rdma_lag_get_ah_roce_slave(qp->device,
1786							   &attr->ah_attr,
1787							   GFP_KERNEL);
1788			if (IS_ERR(slave)) {
1789				ret = PTR_ERR(slave);
1790				goto out_av;
1791			}
1792			attr->xmit_slave = slave;
1793		}
1794	}
1795	if (attr_mask & IB_QP_ALT_PATH) {
1796		/*
1797		 * FIXME: This does not track the migration state, so if the
1798		 * user loads a new alternate path after the HW has migrated
1799		 * from primary->alternate we will keep the wrong
1800		 * references. This is OK for IB because the reference
1801		 * counting does not serve any functional purpose.
1802		 */
1803		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1804					  &old_sgid_attr_alt_av);
1805		if (ret)
1806			goto out_av;
1807
1808		/*
1809		 * Today the core code can only handle alternate paths and APM
1810		 * for IB. Ban them in RoCE mode.
1811		 */
1812		if (!(rdma_protocol_ib(qp->device,
1813				       attr->alt_ah_attr.port_num) &&
1814		      rdma_protocol_ib(qp->device, port))) {
1815			ret = -EINVAL;
1816			goto out;
1817		}
1818	}
1819
1820	if (rdma_ib_or_roce(qp->device, port)) {
1821		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1822			dev_warn(&qp->device->dev,
1823				 "%s rq_psn overflow, masking to 24 bits\n",
1824				 __func__);
1825			attr->rq_psn &= 0xffffff;
1826		}
1827
1828		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1829			dev_warn(&qp->device->dev,
1830				 "%s sq_psn overflow, masking to 24 bits\n",
1831				 __func__);
1832			attr->sq_psn &= 0xffffff;
1833		}
1834	}
1835
1836	/*
1837	 * Bind this qp to a counter automatically based on the rdma counter
1838	 * rules. This is only done in RST2INIT when a port is specified.
1839	 */
1840	if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1841	    ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
1842		rdma_counter_bind_qp_auto(qp, attr->port_num);
1843
1844	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1845	if (ret)
1846		goto out;
1847
1848	if (attr_mask & IB_QP_PORT)
1849		qp->port = attr->port_num;
1850	if (attr_mask & IB_QP_AV)
1851		qp->av_sgid_attr =
1852			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1853	if (attr_mask & IB_QP_ALT_PATH)
1854		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1855			&attr->alt_ah_attr, qp->alt_path_sgid_attr);
1856
1857out:
1858	if (attr_mask & IB_QP_ALT_PATH)
1859		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
1860out_av:
1861	if (attr_mask & IB_QP_AV) {
1862		rdma_lag_put_ah_roce_slave(attr->xmit_slave);
1863		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1864	}
1865	return ret;
1866}
1867
1868/**
1869 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1870 * @ib_qp: The QP to modify.
1871 * @attr: On input, specifies the QP attributes to modify.  On output,
1872 *   the current values of selected QP attributes are returned.
1873 * @attr_mask: A bit-mask used to specify which attributes of the QP
1874 *   are being modified.
1875 * @udata: pointer to user's input/output buffer information needed by
1876 *   the provider driver.
1877 * It returns 0 on success and returns appropriate error code on error.
1878 */
1879int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1880			    int attr_mask, struct ib_udata *udata)
1881{
1882	return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1883}
1884EXPORT_SYMBOL(ib_modify_qp_with_udata);
1885
1886static void ib_get_width_and_speed(u32 netdev_speed, u32 lanes,
1887				   u16 *speed, u8 *width)
1888{
1889	if (!lanes) {
1890		if (netdev_speed <= SPEED_1000) {
1891			*width = IB_WIDTH_1X;
1892			*speed = IB_SPEED_SDR;
1893		} else if (netdev_speed <= SPEED_10000) {
1894			*width = IB_WIDTH_1X;
1895			*speed = IB_SPEED_FDR10;
1896		} else if (netdev_speed <= SPEED_20000) {
1897			*width = IB_WIDTH_4X;
1898			*speed = IB_SPEED_DDR;
1899		} else if (netdev_speed <= SPEED_25000) {
1900			*width = IB_WIDTH_1X;
1901			*speed = IB_SPEED_EDR;
1902		} else if (netdev_speed <= SPEED_40000) {
1903			*width = IB_WIDTH_4X;
1904			*speed = IB_SPEED_FDR10;
1905		} else if (netdev_speed <= SPEED_50000) {
1906			*width = IB_WIDTH_2X;
1907			*speed = IB_SPEED_EDR;
1908		} else if (netdev_speed <= SPEED_100000) {
1909			*width = IB_WIDTH_4X;
1910			*speed = IB_SPEED_EDR;
1911		} else if (netdev_speed <= SPEED_200000) {
1912			*width = IB_WIDTH_4X;
1913			*speed = IB_SPEED_HDR;
1914		} else {
1915			*width = IB_WIDTH_4X;
1916			*speed = IB_SPEED_NDR;
1917		}
1918
1919		return;
1920	}
1921
1922	switch (lanes) {
1923	case 1:
1924		*width = IB_WIDTH_1X;
1925		break;
1926	case 2:
1927		*width = IB_WIDTH_2X;
1928		break;
1929	case 4:
1930		*width = IB_WIDTH_4X;
1931		break;
1932	case 8:
1933		*width = IB_WIDTH_8X;
1934		break;
1935	case 12:
1936		*width = IB_WIDTH_12X;
1937		break;
1938	default:
1939		*width = IB_WIDTH_1X;
1940	}
1941
1942	switch (netdev_speed / lanes) {
1943	case SPEED_2500:
1944		*speed = IB_SPEED_SDR;
1945		break;
1946	case SPEED_5000:
1947		*speed = IB_SPEED_DDR;
1948		break;
1949	case SPEED_10000:
1950		*speed = IB_SPEED_FDR10;
1951		break;
1952	case SPEED_14000:
1953		*speed = IB_SPEED_FDR;
1954		break;
1955	case SPEED_25000:
1956		*speed = IB_SPEED_EDR;
1957		break;
1958	case SPEED_50000:
1959		*speed = IB_SPEED_HDR;
1960		break;
1961	case SPEED_100000:
1962		*speed = IB_SPEED_NDR;
1963		break;
1964	default:
1965		*speed = IB_SPEED_SDR;
1966	}
1967}
1968
1969int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
1970{
1971	int rc;
1972	u32 netdev_speed;
1973	struct net_device *netdev;
1974	struct ethtool_link_ksettings lksettings = {};
1975
1976	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1977		return -EINVAL;
1978
1979	netdev = ib_device_get_netdev(dev, port_num);
1980	if (!netdev)
1981		return -ENODEV;
1982
1983	rtnl_lock();
1984	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1985	rtnl_unlock();
1986
1987	dev_put(netdev);
1988
1989	if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
1990		netdev_speed = lksettings.base.speed;
1991	} else {
1992		netdev_speed = SPEED_1000;
1993		if (rc)
1994			pr_warn("%s speed is unknown, defaulting to %u\n",
1995				netdev->name, netdev_speed);
1996	}
1997
1998	ib_get_width_and_speed(netdev_speed, lksettings.lanes,
1999			       speed, width);
2000
2001	return 0;
2002}
2003EXPORT_SYMBOL(ib_get_eth_speed);
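
/*
 * Illustrative usage sketch added by the editor: RoCE providers commonly
 * call ib_get_eth_speed() from their query_port handler to derive the IB
 * speed/width pair from the underlying netdev. The handler below is
 * hypothetical and assumes struct ib_port_attr's active_speed/active_width
 * fields.
 */
static inline int example_query_port_speed(struct ib_device *ibdev, u32 port,
					   struct ib_port_attr *attr)
{
	return ib_get_eth_speed(ibdev, port, &attr->active_speed,
				&attr->active_width);
}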
2004
2005int ib_modify_qp(struct ib_qp *qp,
2006		 struct ib_qp_attr *qp_attr,
2007		 int qp_attr_mask)
2008{
2009	return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
2010}
2011EXPORT_SYMBOL(ib_modify_qp);
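
/*
 * Illustrative usage sketch added by the editor: a kernel ULP moving a
 * freshly created RC QP from RESET to INIT. The pkey index, port and
 * access flags are placeholder values.
 */
static inline int example_qp_reset_to_init(struct ib_qp *qp, u32 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
			    IB_QP_ACCESS_FLAGS);
}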
2012
2013int ib_query_qp(struct ib_qp *qp,
2014		struct ib_qp_attr *qp_attr,
2015		int qp_attr_mask,
2016		struct ib_qp_init_attr *qp_init_attr)
2017{
2018	qp_attr->ah_attr.grh.sgid_attr = NULL;
2019	qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
2020
2021	return qp->device->ops.query_qp ?
2022		qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
2023					 qp_init_attr) : -EOPNOTSUPP;
2024}
2025EXPORT_SYMBOL(ib_query_qp);
2026
2027int ib_close_qp(struct ib_qp *qp)
2028{
2029	struct ib_qp *real_qp;
2030	unsigned long flags;
2031
2032	real_qp = qp->real_qp;
2033	if (real_qp == qp)
2034		return -EINVAL;
2035
2036	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
2037	list_del(&qp->open_list);
2038	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
2039
2040	atomic_dec(&real_qp->usecnt);
2041	if (qp->qp_sec)
2042		ib_close_shared_qp_security(qp->qp_sec);
2043	kfree(qp);
2044
2045	return 0;
2046}
2047EXPORT_SYMBOL(ib_close_qp);
2048
2049static int __ib_destroy_shared_qp(struct ib_qp *qp)
2050{
2051	struct ib_xrcd *xrcd;
2052	struct ib_qp *real_qp;
2053	int ret;
2054
2055	real_qp = qp->real_qp;
2056	xrcd = real_qp->xrcd;
2057	down_write(&xrcd->tgt_qps_rwsem);
2058	ib_close_qp(qp);
2059	if (atomic_read(&real_qp->usecnt) == 0)
2060		xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
2061	else
2062		real_qp = NULL;
2063	up_write(&xrcd->tgt_qps_rwsem);
2064
2065	if (real_qp) {
2066		ret = ib_destroy_qp(real_qp);
2067		if (!ret)
2068			atomic_dec(&xrcd->usecnt);
2069	}
2070
2071	return 0;
2072}
2073
2074int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
2075{
2076	const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
2077	const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
2078	struct ib_qp_security *sec;
2079	int ret;
2080
2081	WARN_ON_ONCE(qp->mrs_used > 0);
2082
2083	if (atomic_read(&qp->usecnt))
2084		return -EBUSY;
2085
2086	if (qp->real_qp != qp)
2087		return __ib_destroy_shared_qp(qp);
2088
2089	sec  = qp->qp_sec;
2090	if (sec)
2091		ib_destroy_qp_security_begin(sec);
2092
2093	if (!qp->uobject)
2094		rdma_rw_cleanup_mrs(qp);
2095
2096	rdma_counter_unbind_qp(qp, true);
2097	ret = qp->device->ops.destroy_qp(qp, udata);
2098	if (ret) {
2099		if (sec)
2100			ib_destroy_qp_security_abort(sec);
2101		return ret;
2102	}
2103
2104	if (alt_path_sgid_attr)
2105		rdma_put_gid_attr(alt_path_sgid_attr);
2106	if (av_sgid_attr)
2107		rdma_put_gid_attr(av_sgid_attr);
2108
2109	ib_qp_usecnt_dec(qp);
2110	if (sec)
2111		ib_destroy_qp_security_end(sec);
2112
2113	rdma_restrack_del(&qp->res);
2114	kfree(qp);
2115	return ret;
2116}
2117EXPORT_SYMBOL(ib_destroy_qp_user);
2118
2119/* Completion queues */
2120
2121struct ib_cq *__ib_create_cq(struct ib_device *device,
2122			     ib_comp_handler comp_handler,
2123			     void (*event_handler)(struct ib_event *, void *),
2124			     void *cq_context,
2125			     const struct ib_cq_init_attr *cq_attr,
2126			     const char *caller)
2127{
2128	struct ib_cq *cq;
2129	int ret;
2130
2131	cq = rdma_zalloc_drv_obj(device, ib_cq);
2132	if (!cq)
2133		return ERR_PTR(-ENOMEM);
2134
2135	cq->device = device;
2136	cq->uobject = NULL;
2137	cq->comp_handler = comp_handler;
2138	cq->event_handler = event_handler;
2139	cq->cq_context = cq_context;
2140	atomic_set(&cq->usecnt, 0);
2141
2142	rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
2143	rdma_restrack_set_name(&cq->res, caller);
2144
2145	ret = device->ops.create_cq(cq, cq_attr, NULL);
2146	if (ret) {
2147		rdma_restrack_put(&cq->res);
2148		kfree(cq);
2149		return ERR_PTR(ret);
2150	}
2151
2152	rdma_restrack_add(&cq->res);
2153	return cq;
2154}
2155EXPORT_SYMBOL(__ib_create_cq);
2156
2157int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2158{
2159	if (cq->shared)
2160		return -EOPNOTSUPP;
2161
2162	return cq->device->ops.modify_cq ?
2163		cq->device->ops.modify_cq(cq, cq_count,
2164					  cq_period) : -EOPNOTSUPP;
2165}
2166EXPORT_SYMBOL(rdma_set_cq_moderation);
2167
2168int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
2169{
2170	int ret;
2171
2172	if (WARN_ON_ONCE(cq->shared))
2173		return -EOPNOTSUPP;
2174
2175	if (atomic_read(&cq->usecnt))
2176		return -EBUSY;
2177
2178	ret = cq->device->ops.destroy_cq(cq, udata);
2179	if (ret)
2180		return ret;
2181
2182	rdma_restrack_del(&cq->res);
2183	kfree(cq);
2184	return ret;
2185}
2186EXPORT_SYMBOL(ib_destroy_cq_user);
2187
2188int ib_resize_cq(struct ib_cq *cq, int cqe)
2189{
2190	if (cq->shared)
2191		return -EOPNOTSUPP;
2192
2193	return cq->device->ops.resize_cq ?
2194		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
2195}
2196EXPORT_SYMBOL(ib_resize_cq);
2197
2198/* Memory regions */
2199
2200struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2201			     u64 virt_addr, int access_flags)
2202{
2203	struct ib_mr *mr;
2204
2205	if (access_flags & IB_ACCESS_ON_DEMAND) {
2206		if (!(pd->device->attrs.kernel_cap_flags &
2207		      IBK_ON_DEMAND_PAGING)) {
2208			pr_debug("ODP support not available\n");
2209			return ERR_PTR(-EINVAL);
2210		}
2211	}
2212
2213	mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
2214					 access_flags, NULL);
2215
2216	if (IS_ERR(mr))
2217		return mr;
2218
2219	mr->device = pd->device;
2220	mr->type = IB_MR_TYPE_USER;
2221	mr->pd = pd;
2222	mr->dm = NULL;
2223	atomic_inc(&pd->usecnt);
2224	mr->iova =  virt_addr;
2225	mr->length = length;
2226
2227	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2228	rdma_restrack_parent_name(&mr->res, &pd->res);
2229	rdma_restrack_add(&mr->res);
2230
2231	return mr;
2232}
2233EXPORT_SYMBOL(ib_reg_user_mr);
2234
2235int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
2236		 u32 flags, struct ib_sge *sg_list, u32 num_sge)
2237{
2238	if (!pd->device->ops.advise_mr)
2239		return -EOPNOTSUPP;
2240
2241	if (!num_sge)
2242		return 0;
2243
2244	return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
2245					 NULL);
2246}
2247EXPORT_SYMBOL(ib_advise_mr);
2248
2249int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
2250{
2251	struct ib_pd *pd = mr->pd;
2252	struct ib_dm *dm = mr->dm;
2253	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
2254	int ret;
2255
2256	trace_mr_dereg(mr);
2257	rdma_restrack_del(&mr->res);
2258	ret = mr->device->ops.dereg_mr(mr, udata);
2259	if (!ret) {
2260		atomic_dec(&pd->usecnt);
2261		if (dm)
2262			atomic_dec(&dm->usecnt);
2263		kfree(sig_attrs);
2264	}
2265
2266	return ret;
2267}
2268EXPORT_SYMBOL(ib_dereg_mr_user);
2269
2270/**
2271 * ib_alloc_mr() - Allocates a memory region
2272 * @pd:            protection domain associated with the region
2273 * @mr_type:       memory region type
2274 * @max_num_sg:    maximum sg entries available for registration.
2275 *
2276 * Notes:
2277 * Memory registration page/sg lists must not exceed max_num_sg.
2278 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
2279 * max_num_sg * used_page_size.
2280 *
2281 */
2282struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2283			  u32 max_num_sg)
2284{
2285	struct ib_mr *mr;
2286
2287	if (!pd->device->ops.alloc_mr) {
2288		mr = ERR_PTR(-EOPNOTSUPP);
2289		goto out;
2290	}
2291
2292	if (mr_type == IB_MR_TYPE_INTEGRITY) {
2293		WARN_ON_ONCE(1);
2294		mr = ERR_PTR(-EINVAL);
2295		goto out;
2296	}
2297
2298	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
2299	if (IS_ERR(mr))
2300		goto out;
2301
2302	mr->device = pd->device;
2303	mr->pd = pd;
2304	mr->dm = NULL;
2305	mr->uobject = NULL;
2306	atomic_inc(&pd->usecnt);
2307	mr->need_inval = false;
2308	mr->type = mr_type;
2309	mr->sig_attrs = NULL;
2310
2311	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2312	rdma_restrack_parent_name(&mr->res, &pd->res);
2313	rdma_restrack_add(&mr->res);
2314out:
2315	trace_mr_alloc(pd, mr_type, max_num_sg, mr);
2316	return mr;
2317}
2318EXPORT_SYMBOL(ib_alloc_mr);
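
/*
 * Illustrative usage sketch added by the editor: allocating a
 * fast-registration MR that can later map up to 16 SG entries with
 * ib_map_mr_sg(); it is released again with ib_dereg_mr().
 */
static inline struct ib_mr *example_alloc_fr_mr(struct ib_pd *pd)
{
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
}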
2319
2320/**
2321 * ib_alloc_mr_integrity() - Allocates an integrity memory region
2322 * @pd:                      protection domain associated with the region
2323 * @max_num_data_sg:         maximum data sg entries available for registration
2324 * @max_num_meta_sg:         maximum metadata sg entries available for
2325 *                           registration
2326 *
2327 * Notes:
2328 * Memory registration page/sg lists must not exceed max_num_data_sg,
2329 * and the integrity page/sg lists must not exceed max_num_meta_sg.
2330 *
2331 */
2332struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
2333				    u32 max_num_data_sg,
2334				    u32 max_num_meta_sg)
2335{
2336	struct ib_mr *mr;
2337	struct ib_sig_attrs *sig_attrs;
2338
2339	if (!pd->device->ops.alloc_mr_integrity ||
2340	    !pd->device->ops.map_mr_sg_pi) {
2341		mr = ERR_PTR(-EOPNOTSUPP);
2342		goto out;
2343	}
2344
2345	if (!max_num_meta_sg) {
2346		mr = ERR_PTR(-EINVAL);
2347		goto out;
2348	}
2349
2350	sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
2351	if (!sig_attrs) {
2352		mr = ERR_PTR(-ENOMEM);
2353		goto out;
2354	}
2355
2356	mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
2357						max_num_meta_sg);
2358	if (IS_ERR(mr)) {
2359		kfree(sig_attrs);
2360		goto out;
2361	}
2362
2363	mr->device = pd->device;
2364	mr->pd = pd;
2365	mr->dm = NULL;
2366	mr->uobject = NULL;
2367	atomic_inc(&pd->usecnt);
2368	mr->need_inval = false;
2369	mr->type = IB_MR_TYPE_INTEGRITY;
2370	mr->sig_attrs = sig_attrs;
2371
2372	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2373	rdma_restrack_parent_name(&mr->res, &pd->res);
2374	rdma_restrack_add(&mr->res);
2375out:
2376	trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
2377	return mr;
2378}
2379EXPORT_SYMBOL(ib_alloc_mr_integrity);
2380
2381/* Multicast groups */
2382
2383static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2384{
2385	struct ib_qp_init_attr init_attr = {};
2386	struct ib_qp_attr attr = {};
2387	int num_eth_ports = 0;
2388	unsigned int port;
2389
2390	/* If QP state >= init, it is assigned to a port and we can check this
2391	 * port only.
2392	 */
2393	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2394		if (attr.qp_state >= IB_QPS_INIT) {
2395			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2396			    IB_LINK_LAYER_INFINIBAND)
2397				return true;
2398			goto lid_check;
2399		}
2400	}
2401
2402	/* Can't get a quick answer, iterate over all ports */
2403	rdma_for_each_port(qp->device, port)
2404		if (rdma_port_get_link_layer(qp->device, port) !=
2405		    IB_LINK_LAYER_INFINIBAND)
2406			num_eth_ports++;
2407
2408	/* If we have at least one Ethernet port, the RoCE annex declares that
2409	 * multicast LID should be ignored. We can't tell at this step if the
2410	 * QP belongs to an IB or Ethernet port.
2411	 */
2412	if (num_eth_ports)
2413		return true;
2414
2415	/* If all the ports are IB, we can check according to IB spec. */
2416lid_check:
2417	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
2418		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
2419}
2420
2421int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2422{
2423	int ret;
2424
2425	if (!qp->device->ops.attach_mcast)
2426		return -EOPNOTSUPP;
2427
2428	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2429	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2430		return -EINVAL;
2431
2432	ret = qp->device->ops.attach_mcast(qp, gid, lid);
2433	if (!ret)
2434		atomic_inc(&qp->usecnt);
2435	return ret;
2436}
2437EXPORT_SYMBOL(ib_attach_mcast);
2438
2439int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2440{
2441	int ret;
2442
2443	if (!qp->device->ops.detach_mcast)
2444		return -EOPNOTSUPP;
2445
2446	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2447	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2448		return -EINVAL;
2449
2450	ret = qp->device->ops.detach_mcast(qp, gid, lid);
2451	if (!ret)
2452		atomic_dec(&qp->usecnt);
2453	return ret;
2454}
2455EXPORT_SYMBOL(ib_detach_mcast);
2456
2457/**
2458 * ib_alloc_xrcd_user - Allocates an XRC domain.
2459 * @device: The device on which to allocate the XRC domain.
2460 * @inode: inode to connect XRCD
2461 * @udata: Valid user data or NULL for kernel object
2462 */
2463struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
2464				   struct inode *inode, struct ib_udata *udata)
2465{
2466	struct ib_xrcd *xrcd;
2467	int ret;
2468
2469	if (!device->ops.alloc_xrcd)
2470		return ERR_PTR(-EOPNOTSUPP);
2471
2472	xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
2473	if (!xrcd)
2474		return ERR_PTR(-ENOMEM);
2475
2476	xrcd->device = device;
2477	xrcd->inode = inode;
2478	atomic_set(&xrcd->usecnt, 0);
2479	init_rwsem(&xrcd->tgt_qps_rwsem);
2480	xa_init(&xrcd->tgt_qps);
2481
2482	ret = device->ops.alloc_xrcd(xrcd, udata);
2483	if (ret)
2484		goto err;
2485	return xrcd;
2486err:
2487	kfree(xrcd);
2488	return ERR_PTR(ret);
2489}
2490EXPORT_SYMBOL(ib_alloc_xrcd_user);
2491
2492/**
2493 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
2494 * @xrcd: The XRC domain to deallocate.
2495 * @udata: Valid user data or NULL for kernel object
2496 */
2497int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
2498{
2499	int ret;
2500
2501	if (atomic_read(&xrcd->usecnt))
2502		return -EBUSY;
2503
2504	WARN_ON(!xa_empty(&xrcd->tgt_qps));
2505	ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
2506	if (ret)
2507		return ret;
2508	kfree(xrcd);
2509	return ret;
2510}
2511EXPORT_SYMBOL(ib_dealloc_xrcd_user);
2512
2513/**
2514 * ib_create_wq - Creates a WQ associated with the specified protection
2515 * domain.
2516 * @pd: The protection domain associated with the WQ.
2517 * @wq_attr: A list of initial attributes required to create the
2518 * WQ. If WQ creation succeeds, then the attributes are updated to
2519 * the actual capabilities of the created WQ.
2520 *
2521 * wq_attr->max_wr and wq_attr->max_sge determine
2522 * the requested size of the WQ, and are set to the actual values allocated
2523 * on return.
2524 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
2525 * at least as large as the requested values.
2526 */
2527struct ib_wq *ib_create_wq(struct ib_pd *pd,
2528			   struct ib_wq_init_attr *wq_attr)
2529{
2530	struct ib_wq *wq;
2531
2532	if (!pd->device->ops.create_wq)
2533		return ERR_PTR(-EOPNOTSUPP);
2534
2535	wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2536	if (!IS_ERR(wq)) {
2537		wq->event_handler = wq_attr->event_handler;
2538		wq->wq_context = wq_attr->wq_context;
2539		wq->wq_type = wq_attr->wq_type;
2540		wq->cq = wq_attr->cq;
2541		wq->device = pd->device;
2542		wq->pd = pd;
2543		wq->uobject = NULL;
2544		atomic_inc(&pd->usecnt);
2545		atomic_inc(&wq_attr->cq->usecnt);
2546		atomic_set(&wq->usecnt, 0);
2547	}
2548	return wq;
2549}
2550EXPORT_SYMBOL(ib_create_wq);
2551
2552/**
2553 * ib_destroy_wq_user - Destroys the specified user WQ.
2554 * @wq: The WQ to destroy.
2555 * @udata: Valid user data
2556 */
2557int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
2558{
2559	struct ib_cq *cq = wq->cq;
2560	struct ib_pd *pd = wq->pd;
2561	int ret;
2562
2563	if (atomic_read(&wq->usecnt))
2564		return -EBUSY;
2565
2566	ret = wq->device->ops.destroy_wq(wq, udata);
2567	if (ret)
2568		return ret;
2569
2570	atomic_dec(&pd->usecnt);
2571	atomic_dec(&cq->usecnt);
2572	return ret;
2573}
2574EXPORT_SYMBOL(ib_destroy_wq_user);
2575
2576int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2577		       struct ib_mr_status *mr_status)
2578{
2579	if (!mr->device->ops.check_mr_status)
2580		return -EOPNOTSUPP;
2581
2582	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
2583}
2584EXPORT_SYMBOL(ib_check_mr_status);
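
/*
 * Illustrative usage sketch added by the editor: after an integrity
 * (signature) operation completes, a ULP can ask whether a protection
 * information error was detected. Detailed sig_err decoding is elided.
 */
static inline int example_check_sig_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	return (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) ? -EIO : 0;
}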
2585
2586int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
2587			 int state)
2588{
2589	if (!device->ops.set_vf_link_state)
2590		return -EOPNOTSUPP;
2591
2592	return device->ops.set_vf_link_state(device, vf, port, state);
2593}
2594EXPORT_SYMBOL(ib_set_vf_link_state);
2595
2596int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
2597		     struct ifla_vf_info *info)
2598{
2599	if (!device->ops.get_vf_config)
2600		return -EOPNOTSUPP;
2601
2602	return device->ops.get_vf_config(device, vf, port, info);
2603}
2604EXPORT_SYMBOL(ib_get_vf_config);
2605
2606int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
2607		    struct ifla_vf_stats *stats)
2608{
2609	if (!device->ops.get_vf_stats)
2610		return -EOPNOTSUPP;
2611
2612	return device->ops.get_vf_stats(device, vf, port, stats);
2613}
2614EXPORT_SYMBOL(ib_get_vf_stats);
2615
2616int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
2617		   int type)
2618{
2619	if (!device->ops.set_vf_guid)
2620		return -EOPNOTSUPP;
2621
2622	return device->ops.set_vf_guid(device, vf, port, guid, type);
2623}
2624EXPORT_SYMBOL(ib_set_vf_guid);
2625
2626int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
2627		   struct ifla_vf_guid *node_guid,
2628		   struct ifla_vf_guid *port_guid)
2629{
2630	if (!device->ops.get_vf_guid)
2631		return -EOPNOTSUPP;
2632
2633	return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
2634}
2635EXPORT_SYMBOL(ib_get_vf_guid);
2636/**
2637 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
2638 *     information) and set an appropriate memory region for registration.
2639 * @mr:             memory region
2640 * @data_sg:        dma mapped scatterlist for data
2641 * @data_sg_nents:  number of entries in data_sg
2642 * @data_sg_offset: offset in bytes into data_sg
2643 * @meta_sg:        dma mapped scatterlist for metadata
2644 * @meta_sg_nents:  number of entries in meta_sg
2645 * @meta_sg_offset: offset in bytes into meta_sg
2646 * @page_size:      page vector desired page size
2647 *
2648 * Constraints:
2649 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
2650 *
2651 * Return: 0 on success.
2652 *
2653 * After this completes successfully, the memory region
2654 * is ready for registration.
2655 */
2656int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
2657		    int data_sg_nents, unsigned int *data_sg_offset,
2658		    struct scatterlist *meta_sg, int meta_sg_nents,
2659		    unsigned int *meta_sg_offset, unsigned int page_size)
2660{
2661	if (unlikely(!mr->device->ops.map_mr_sg_pi ||
2662		     WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
2663		return -EOPNOTSUPP;
2664
2665	mr->page_size = page_size;
2666
2667	return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
2668					    data_sg_offset, meta_sg,
2669					    meta_sg_nents, meta_sg_offset);
2670}
2671EXPORT_SYMBOL(ib_map_mr_sg_pi);
2672
2673/**
2674 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2675 *     and set it on the memory region.
2676 * @mr:            memory region
2677 * @sg:            dma mapped scatterlist
2678 * @sg_nents:      number of entries in sg
2679 * @sg_offset:     offset in bytes into sg
2680 * @page_size:     page vector desired page size
2681 *
2682 * Constraints:
2683 *
2684 * - The first sg element is allowed to have an offset.
2685 * - Each sg element must either be aligned to page_size or virtually
2686 *   contiguous to the previous element. In case an sg element has a
2687 *   non-contiguous offset, the mapping prefix will not include it.
2688 * - The last sg element is allowed to have length less than page_size.
2689 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
2690 *   then only max_num_sg entries will be mapped.
2691 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2692 *   constraints holds and the page_size argument is ignored.
2693 *
2694 * Returns the number of sg elements that were mapped to the memory region.
2695 *
2696 * After this completes successfully, the memory region
2697 * is ready for registration.
2698 */
2699int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2700		 unsigned int *sg_offset, unsigned int page_size)
2701{
2702	if (unlikely(!mr->device->ops.map_mr_sg))
2703		return -EOPNOTSUPP;
2704
2705	mr->page_size = page_size;
2706
2707	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
2708}
2709EXPORT_SYMBOL(ib_map_mr_sg);
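
/*
 * Illustrative usage sketch added by the editor: the usual fast-registration
 * flow for a kernel ULP - pack a DMA-mapped SG list into the MR, then post
 * an IB_WR_REG_MR work request on the QP. Completion handling and error
 * unwinding are elided; the helper name is hypothetical.
 */
static inline int example_fast_register(struct ib_qp *qp, struct ib_mr *mr,
					struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n <= 0)
		return n < 0 ? n : -EINVAL;

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr	 = mr;
	reg_wr.key	 = mr->rkey;
	reg_wr.access	 = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}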
2710
2711/**
2712 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2713 *     to a page vector
2714 * @mr:            memory region
2715 * @sgl:           dma mapped scatterlist
2716 * @sg_nents:      number of entries in sg
2717 * @sg_offset_p:   ==== =======================================================
2718 *                 IN   start offset in bytes into sg
2719 *                 OUT  offset in bytes for element n of the sg of the first
2720 *                      byte that has not been processed where n is the return
2721 *                      value of this function.
2722 *                 ==== =======================================================
2723 * @set_page:      driver page assignment function pointer
2724 *
2725 * Core service helper for drivers to convert the largest
2726 * prefix of given sg list to a page vector. The sg list
2728 * prefix converted is the prefix that meets the requirements
2728 * of ib_map_mr_sg.
2729 *
2730 * Returns the number of sg elements that were assigned to
2731 * a page vector.
2732 */
2733int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
2734		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2735{
2736	struct scatterlist *sg;
2737	u64 last_end_dma_addr = 0;
2738	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2739	unsigned int last_page_off = 0;
2740	u64 page_mask = ~((u64)mr->page_size - 1);
2741	int i, ret;
2742
2743	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2744		return -EINVAL;
2745
2746	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2747	mr->length = 0;
2748
2749	for_each_sg(sgl, sg, sg_nents, i) {
2750		u64 dma_addr = sg_dma_address(sg) + sg_offset;
2751		u64 prev_addr = dma_addr;
2752		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2753		u64 end_dma_addr = dma_addr + dma_len;
2754		u64 page_addr = dma_addr & page_mask;
2755
2756		/*
2757		 * For the second and later elements, check whether either the
2758		 * end of element i-1 or the start of element i is not aligned
2759		 * on a page boundary.
2760		 */
2761		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2762			/* Stop mapping if there is a gap. */
2763			if (last_end_dma_addr != dma_addr)
2764				break;
2765
2766			/*
2767			 * Coalesce this element with the last. If it is small
2768			 * enough just update mr->length. Otherwise start
2769			 * mapping from the next page.
2770			 */
2771			goto next_page;
2772		}
2773
2774		do {
2775			ret = set_page(mr, page_addr);
2776			if (unlikely(ret < 0)) {
2777				sg_offset = prev_addr - sg_dma_address(sg);
2778				mr->length += prev_addr - dma_addr;
2779				if (sg_offset_p)
2780					*sg_offset_p = sg_offset;
2781				return i || sg_offset ? i : ret;
2782			}
2783			prev_addr = page_addr;
2784next_page:
2785			page_addr += mr->page_size;
2786		} while (page_addr < end_dma_addr);
2787
2788		mr->length += dma_len;
2789		last_end_dma_addr = end_dma_addr;
2790		last_page_off = end_dma_addr & ~page_mask;
2791
2792		sg_offset = 0;
2793	}
2794
2795	if (sg_offset_p)
2796		*sg_offset_p = 0;
2797	return i;
2798}
2799EXPORT_SYMBOL(ib_sg_to_pages);
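
/*
 * Illustrative usage sketch added by the editor: how a provider driver
 * typically implements its map_mr_sg hook on top of ib_sg_to_pages().
 * "example_mr" and its fields are hypothetical.
 */
struct example_mr {
	struct ib_mr	ibmr;
	u64		*pages;
	int		npages;
	int		max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

static inline int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
				    int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}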
2800
2801struct ib_drain_cqe {
2802	struct ib_cqe cqe;
2803	struct completion done;
2804};
2805
2806static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2807{
2808	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2809						cqe);
2810
2811	complete(&cqe->done);
2812}
2813
2814/*
2815 * Post a WR and block until its completion is reaped for the SQ.
2816 */
2817static void __ib_drain_sq(struct ib_qp *qp)
2818{
2819	struct ib_cq *cq = qp->send_cq;
2820	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2821	struct ib_drain_cqe sdrain;
2822	struct ib_rdma_wr swr = {
2823		.wr = {
2824			.next = NULL,
2825			{ .wr_cqe	= &sdrain.cqe, },
2826			.opcode	= IB_WR_RDMA_WRITE,
2827		},
2828	};
2829	int ret;
2830
2831	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2832	if (ret) {
2833		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2834		return;
2835	}
2836
2837	sdrain.cqe.done = ib_drain_qp_done;
2838	init_completion(&sdrain.done);
2839
2840	ret = ib_post_send(qp, &swr.wr, NULL);
2841	if (ret) {
2842		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2843		return;
2844	}
2845
2846	if (cq->poll_ctx == IB_POLL_DIRECT)
2847		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2848			ib_process_cq_direct(cq, -1);
2849	else
2850		wait_for_completion(&sdrain.done);
2851}
2852
2853/*
2854 * Post a WR and block until its completion is reaped for the RQ.
2855 */
2856static void __ib_drain_rq(struct ib_qp *qp)
2857{
2858	struct ib_cq *cq = qp->recv_cq;
2859	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2860	struct ib_drain_cqe rdrain;
2861	struct ib_recv_wr rwr = {};
2862	int ret;
2863
2864	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2865	if (ret) {
2866		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2867		return;
2868	}
2869
2870	rwr.wr_cqe = &rdrain.cqe;
2871	rdrain.cqe.done = ib_drain_qp_done;
2872	init_completion(&rdrain.done);
2873
2874	ret = ib_post_recv(qp, &rwr, NULL);
2875	if (ret) {
2876		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2877		return;
2878	}
2879
2880	if (cq->poll_ctx == IB_POLL_DIRECT)
2881		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2882			ib_process_cq_direct(cq, -1);
2883	else
2884		wait_for_completion(&rdrain.done);
2885}
2886
2887/**
2888 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2889 *		   application.
2890 * @qp:            queue pair to drain
2891 *
2892 * If the device has a provider-specific drain function, then
2893 * call that.  Otherwise call the generic drain function
2894 * __ib_drain_sq().
2895 *
2896 * The caller must:
2897 *
2898 * ensure there is room in the CQ and SQ for the drain work request and
2899 * completion.
2900 *
2901 * allocate the CQ using ib_alloc_cq().
2902 *
2903 * ensure that there are no other contexts that are posting WRs concurrently.
2904 * Otherwise the drain is not guaranteed.
2905 */
2906void ib_drain_sq(struct ib_qp *qp)
2907{
2908	if (qp->device->ops.drain_sq)
2909		qp->device->ops.drain_sq(qp);
2910	else
2911		__ib_drain_sq(qp);
2912	trace_cq_drain_complete(qp->send_cq);
2913}
2914EXPORT_SYMBOL(ib_drain_sq);
2915
2916/**
2917 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2918 *		   application.
2919 * @qp:            queue pair to drain
2920 *
2921 * If the device has a provider-specific drain function, then
2922 * call that.  Otherwise call the generic drain function
2923 * __ib_drain_rq().
2924 *
2925 * The caller must:
2926 *
2927 * ensure there is room in the CQ and RQ for the drain work request and
2928 * completion.
2929 *
2930 * allocate the CQ using ib_alloc_cq().
2931 *
2932 * ensure that there are no other contexts that are posting WRs concurrently.
2933 * Otherwise the drain is not guaranteed.
2934 */
2935void ib_drain_rq(struct ib_qp *qp)
2936{
2937	if (qp->device->ops.drain_rq)
2938		qp->device->ops.drain_rq(qp);
2939	else
2940		__ib_drain_rq(qp);
2941	trace_cq_drain_complete(qp->recv_cq);
2942}
2943EXPORT_SYMBOL(ib_drain_rq);
2944
2945/**
2946 * ib_drain_qp() - Block until all CQEs have been consumed by the
2947 *		   application on both the RQ and SQ.
2948 * @qp:            queue pair to drain
2949 *
2950 * The caller must:
2951 *
2952 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2953 * and completions.
2954 *
2955 * allocate the CQs using ib_alloc_cq().
2956 *
2957 * ensure that there are no other contexts that are posting WRs concurrently.
2958 * Otherwise the drain is not guaranteed.
2959 */
2960void ib_drain_qp(struct ib_qp *qp)
2961{
2962	ib_drain_sq(qp);
2963	if (!qp->srq)
2964		ib_drain_rq(qp);
2965}
2966EXPORT_SYMBOL(ib_drain_qp);
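
/*
 * Illustrative usage sketch added by the editor: the usual teardown order
 * for a ULP-owned QP whose CQs were allocated with ib_alloc_cq() - flush
 * both queues, reap every outstanding completion, then destroy the QP.
 */
static inline void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* moves the QP to error and waits for all CQEs */
	ib_destroy_qp(qp);
}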
2967
2968struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
2969				     enum rdma_netdev_t type, const char *name,
2970				     unsigned char name_assign_type,
2971				     void (*setup)(struct net_device *))
2972{
2973	struct rdma_netdev_alloc_params params;
2974	struct net_device *netdev;
2975	int rc;
2976
2977	if (!device->ops.rdma_netdev_get_params)
2978		return ERR_PTR(-EOPNOTSUPP);
2979
2980	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2981						&params);
2982	if (rc)
2983		return ERR_PTR(rc);
2984
2985	netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
2986				  setup, params.txqs, params.rxqs);
2987	if (!netdev)
2988		return ERR_PTR(-ENOMEM);
2989
2990	return netdev;
2991}
2992EXPORT_SYMBOL(rdma_alloc_netdev);
2993
2994int rdma_init_netdev(struct ib_device *device, u32 port_num,
2995		     enum rdma_netdev_t type, const char *name,
2996		     unsigned char name_assign_type,
2997		     void (*setup)(struct net_device *),
2998		     struct net_device *netdev)
2999{
3000	struct rdma_netdev_alloc_params params;
3001	int rc;
3002
3003	if (!device->ops.rdma_netdev_get_params)
3004		return -EOPNOTSUPP;
3005
3006	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
3007						&params);
3008	if (rc)
3009		return rc;
3010
3011	return params.initialize_rdma_netdev(device, port_num,
3012					     netdev, params.param);
3013}
3014EXPORT_SYMBOL(rdma_init_netdev);
3015
3016void __rdma_block_iter_start(struct ib_block_iter *biter,
3017			     struct scatterlist *sglist, unsigned int nents,
3018			     unsigned long pgsz)
3019{
3020	memset(biter, 0, sizeof(struct ib_block_iter));
3021	biter->__sg = sglist;
3022	biter->__sg_nents = nents;
3023
3024	/* Driver provides best block size to use */
3025	biter->__pg_bit = __fls(pgsz);
3026}
3027EXPORT_SYMBOL(__rdma_block_iter_start);
3028
3029bool __rdma_block_iter_next(struct ib_block_iter *biter)
3030{
3031	unsigned int block_offset;
3032	unsigned int sg_delta;
3033
3034	if (!biter->__sg_nents || !biter->__sg)
3035		return false;
3036
3037	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
3038	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
3039	sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
3040
3041	if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
3042		biter->__sg_advance += sg_delta;
3043	} else {
3044		biter->__sg_advance = 0;
3045		biter->__sg = sg_next(biter->__sg);
3046		biter->__sg_nents--;
3047	}
3048
3049	return true;
3050}
3051EXPORT_SYMBOL(__rdma_block_iter_next);
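
/*
 * Illustrative usage sketch added by the editor: drivers normally consume
 * the block iterator through the rdma_for_each_block() helper to walk an
 * SG list in device-page-sized chunks; here each block's DMA address is
 * only printed.
 */
static inline void example_walk_dma_blocks(struct scatterlist *sgl,
					   unsigned int nents,
					   unsigned long pgsz)
{
	struct ib_block_iter biter;

	rdma_for_each_block(sgl, &biter, nents, pgsz) {
		dma_addr_t addr = rdma_block_iter_dma_address(&biter);

		pr_debug("block at %pad\n", &addr);
	}
}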
3052
3053/**
3054 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
3055 *   for the drivers.
3056 * @descs: array of static descriptors
3057 * @num_counters: number of elements in array
3058 * @lifespan: milliseconds between updates
3059 */
3060struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
3061	const struct rdma_stat_desc *descs, int num_counters,
3062	unsigned long lifespan)
3063{
3064	struct rdma_hw_stats *stats;
3065
3066	stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL);
3067	if (!stats)
3068		return NULL;
3069
3070	stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters),
3071				     sizeof(*stats->is_disabled), GFP_KERNEL);
3072	if (!stats->is_disabled)
3073		goto err;
3074
3075	stats->descs = descs;
3076	stats->num_counters = num_counters;
3077	stats->lifespan = msecs_to_jiffies(lifespan);
3078	mutex_init(&stats->lock);
3079
3080	return stats;
3081
3082err:
3083	kfree(stats);
3084	return NULL;
3085}
3086EXPORT_SYMBOL(rdma_alloc_hw_stats_struct);
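
/*
 * Illustrative usage sketch added by the editor: a driver wiring its
 * alloc_hw_port_stats hook to this helper. The descriptor names and the
 * hook itself are placeholders for whatever the driver exposes.
 */
static const struct rdma_stat_desc example_port_stat_descs[] = {
	[0] = { .name = "rx_packets" },
	[1] = { .name = "tx_packets" },
};

static inline struct rdma_hw_stats *
example_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
{
	return rdma_alloc_hw_stats_struct(example_port_stat_descs,
					  ARRAY_SIZE(example_port_stat_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}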
3087
3088/**
3089 * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
3090 * @stats: statistics to release
3091 */
3092void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats)
3093{
3094	if (!stats)
3095		return;
3096
3097	kfree(stats->is_disabled);
3098	kfree(stats);
3099}
3100EXPORT_SYMBOL(rdma_free_hw_stats_struct);
v6.2
   1/*
   2 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
   3 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
   5 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   6 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
   7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   8 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
   9 *
  10 * This software is available to you under a choice of one of two
  11 * licenses.  You may choose to be licensed under the terms of the GNU
  12 * General Public License (GPL) Version 2, available from the file
  13 * COPYING in the main directory of this source tree, or the
  14 * OpenIB.org BSD license below:
  15 *
  16 *     Redistribution and use in source and binary forms, with or
  17 *     without modification, are permitted provided that the following
  18 *     conditions are met:
  19 *
  20 *      - Redistributions of source code must retain the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer.
  23 *
  24 *      - Redistributions in binary form must reproduce the above
  25 *        copyright notice, this list of conditions and the following
  26 *        disclaimer in the documentation and/or other materials
  27 *        provided with the distribution.
  28 *
  29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36 * SOFTWARE.
  37 */
  38
  39#include <linux/errno.h>
  40#include <linux/err.h>
  41#include <linux/export.h>
  42#include <linux/string.h>
  43#include <linux/slab.h>
  44#include <linux/in.h>
  45#include <linux/in6.h>
  46#include <net/addrconf.h>
  47#include <linux/security.h>
  48
  49#include <rdma/ib_verbs.h>
  50#include <rdma/ib_cache.h>
  51#include <rdma/ib_addr.h>
  52#include <rdma/rw.h>
  53#include <rdma/lag.h>
  54
  55#include "core_priv.h"
  56#include <trace/events/rdma_core.h>
  57
  58static int ib_resolve_eth_dmac(struct ib_device *device,
  59			       struct rdma_ah_attr *ah_attr);
  60
  61static const char * const ib_events[] = {
  62	[IB_EVENT_CQ_ERR]		= "CQ error",
  63	[IB_EVENT_QP_FATAL]		= "QP fatal error",
  64	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
  65	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
  66	[IB_EVENT_COMM_EST]		= "communication established",
  67	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
  68	[IB_EVENT_PATH_MIG]		= "path migration successful",
  69	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
  70	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
  71	[IB_EVENT_PORT_ACTIVE]		= "port active",
  72	[IB_EVENT_PORT_ERR]		= "port error",
  73	[IB_EVENT_LID_CHANGE]		= "LID change",
  74	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
  75	[IB_EVENT_SM_CHANGE]		= "SM change",
  76	[IB_EVENT_SRQ_ERR]		= "SRQ error",
  77	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
  78	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
  79	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
  80	[IB_EVENT_GID_CHANGE]		= "GID changed",
  81};
  82
  83const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
  84{
  85	size_t index = event;
  86
  87	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
  88			ib_events[index] : "unrecognized event";
  89}
  90EXPORT_SYMBOL(ib_event_msg);
  91
  92static const char * const wc_statuses[] = {
  93	[IB_WC_SUCCESS]			= "success",
  94	[IB_WC_LOC_LEN_ERR]		= "local length error",
  95	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
  96	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
  97	[IB_WC_LOC_PROT_ERR]		= "local protection error",
  98	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
  99	[IB_WC_MW_BIND_ERR]		= "memory bind operation error",
 100	[IB_WC_BAD_RESP_ERR]		= "bad response error",
 101	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
 102	[IB_WC_REM_INV_REQ_ERR]		= "remote invalid request error",
 103	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
 104	[IB_WC_REM_OP_ERR]		= "remote operation error",
 105	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
 106	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
 107	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
 108	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
 109	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
 110	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
 111	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
 112	[IB_WC_FATAL_ERR]		= "fatal error",
 113	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
 114	[IB_WC_GENERAL_ERR]		= "general error",
 115};
 116
 117const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
 118{
 119	size_t index = status;
 120
 121	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
 122			wc_statuses[index] : "unrecognized status";
 123}
 124EXPORT_SYMBOL(ib_wc_status_msg);
 125
 126__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
 127{
 128	switch (rate) {
 129	case IB_RATE_2_5_GBPS: return   1;
 130	case IB_RATE_5_GBPS:   return   2;
 131	case IB_RATE_10_GBPS:  return   4;
 132	case IB_RATE_20_GBPS:  return   8;
 133	case IB_RATE_30_GBPS:  return  12;
 134	case IB_RATE_40_GBPS:  return  16;
 135	case IB_RATE_60_GBPS:  return  24;
 136	case IB_RATE_80_GBPS:  return  32;
 137	case IB_RATE_120_GBPS: return  48;
 138	case IB_RATE_14_GBPS:  return   6;
 139	case IB_RATE_56_GBPS:  return  22;
 140	case IB_RATE_112_GBPS: return  45;
 141	case IB_RATE_168_GBPS: return  67;
 142	case IB_RATE_25_GBPS:  return  10;
 143	case IB_RATE_100_GBPS: return  40;
 144	case IB_RATE_200_GBPS: return  80;
 145	case IB_RATE_300_GBPS: return 120;
 146	case IB_RATE_28_GBPS:  return  11;
 147	case IB_RATE_50_GBPS:  return  20;
 148	case IB_RATE_400_GBPS: return 160;
 149	case IB_RATE_600_GBPS: return 240;
 
 150	default:	       return  -1;
 151	}
 152}
 153EXPORT_SYMBOL(ib_rate_to_mult);
 154
 155__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
 156{
 157	switch (mult) {
 158	case 1:   return IB_RATE_2_5_GBPS;
 159	case 2:   return IB_RATE_5_GBPS;
 160	case 4:   return IB_RATE_10_GBPS;
 161	case 8:   return IB_RATE_20_GBPS;
 162	case 12:  return IB_RATE_30_GBPS;
 163	case 16:  return IB_RATE_40_GBPS;
 164	case 24:  return IB_RATE_60_GBPS;
 165	case 32:  return IB_RATE_80_GBPS;
 166	case 48:  return IB_RATE_120_GBPS;
 167	case 6:   return IB_RATE_14_GBPS;
 168	case 22:  return IB_RATE_56_GBPS;
 169	case 45:  return IB_RATE_112_GBPS;
 170	case 67:  return IB_RATE_168_GBPS;
 171	case 10:  return IB_RATE_25_GBPS;
 172	case 40:  return IB_RATE_100_GBPS;
 173	case 80:  return IB_RATE_200_GBPS;
 174	case 120: return IB_RATE_300_GBPS;
 175	case 11:  return IB_RATE_28_GBPS;
 176	case 20:  return IB_RATE_50_GBPS;
 177	case 160: return IB_RATE_400_GBPS;
 178	case 240: return IB_RATE_600_GBPS;
 
 179	default:  return IB_RATE_PORT_CURRENT;
 180	}
 181}
 182EXPORT_SYMBOL(mult_to_ib_rate);
 183
 184__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
 185{
 186	switch (rate) {
 187	case IB_RATE_2_5_GBPS: return 2500;
 188	case IB_RATE_5_GBPS:   return 5000;
 189	case IB_RATE_10_GBPS:  return 10000;
 190	case IB_RATE_20_GBPS:  return 20000;
 191	case IB_RATE_30_GBPS:  return 30000;
 192	case IB_RATE_40_GBPS:  return 40000;
 193	case IB_RATE_60_GBPS:  return 60000;
 194	case IB_RATE_80_GBPS:  return 80000;
 195	case IB_RATE_120_GBPS: return 120000;
 196	case IB_RATE_14_GBPS:  return 14062;
 197	case IB_RATE_56_GBPS:  return 56250;
 198	case IB_RATE_112_GBPS: return 112500;
 199	case IB_RATE_168_GBPS: return 168750;
 200	case IB_RATE_25_GBPS:  return 25781;
 201	case IB_RATE_100_GBPS: return 103125;
 202	case IB_RATE_200_GBPS: return 206250;
 203	case IB_RATE_300_GBPS: return 309375;
 204	case IB_RATE_28_GBPS:  return 28125;
 205	case IB_RATE_50_GBPS:  return 53125;
 206	case IB_RATE_400_GBPS: return 425000;
 207	case IB_RATE_600_GBPS: return 637500;
 
 208	default:	       return -1;
 209	}
 210}
 211EXPORT_SYMBOL(ib_rate_to_mbps);
 212
 213__attribute_const__ enum rdma_transport_type
 214rdma_node_get_transport(unsigned int node_type)
 215{
 216
 217	if (node_type == RDMA_NODE_USNIC)
 218		return RDMA_TRANSPORT_USNIC;
 219	if (node_type == RDMA_NODE_USNIC_UDP)
 220		return RDMA_TRANSPORT_USNIC_UDP;
 221	if (node_type == RDMA_NODE_RNIC)
 222		return RDMA_TRANSPORT_IWARP;
 223	if (node_type == RDMA_NODE_UNSPECIFIED)
 224		return RDMA_TRANSPORT_UNSPECIFIED;
 225
 226	return RDMA_TRANSPORT_IB;
 227}
 228EXPORT_SYMBOL(rdma_node_get_transport);
 229
 230enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
 231					      u32 port_num)
 232{
 233	enum rdma_transport_type lt;
 234	if (device->ops.get_link_layer)
 235		return device->ops.get_link_layer(device, port_num);
 236
 237	lt = rdma_node_get_transport(device->node_type);
 238	if (lt == RDMA_TRANSPORT_IB)
 239		return IB_LINK_LAYER_INFINIBAND;
 240
 241	return IB_LINK_LAYER_ETHERNET;
 242}
 243EXPORT_SYMBOL(rdma_port_get_link_layer);
 244
 245/* Protection domains */
 246
 247/**
 248 * __ib_alloc_pd - Allocates an unused protection domain.
 249 * @device: The device on which to allocate the protection domain.
 250 * @flags: protection domain flags
 251 * @caller: caller's build-time module name
 252 *
 253 * A protection domain object provides an association between QPs, shared
 254 * receive queues, address handles, memory regions, and memory windows.
 255 *
 256 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 257 * memory operations.
 258 */
 259struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 260		const char *caller)
 261{
 262	struct ib_pd *pd;
 263	int mr_access_flags = 0;
 264	int ret;
 265
 266	pd = rdma_zalloc_drv_obj(device, ib_pd);
 267	if (!pd)
 268		return ERR_PTR(-ENOMEM);
 269
 270	pd->device = device;
 271	pd->flags = flags;
 272
 273	rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
 274	rdma_restrack_set_name(&pd->res, caller);
 275
 276	ret = device->ops.alloc_pd(pd, NULL);
 277	if (ret) {
 278		rdma_restrack_put(&pd->res);
 279		kfree(pd);
 280		return ERR_PTR(ret);
 281	}
 282	rdma_restrack_add(&pd->res);
 283
 284	if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)
 285		pd->local_dma_lkey = device->local_dma_lkey;
 286	else
 287		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
 288
 289	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
 290		pr_warn("%s: enabling unsafe global rkey\n", caller);
 291		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
 292	}
 293
 294	if (mr_access_flags) {
 295		struct ib_mr *mr;
 296
 297		mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
 298		if (IS_ERR(mr)) {
 299			ib_dealloc_pd(pd);
 300			return ERR_CAST(mr);
 301		}
 302
 303		mr->device	= pd->device;
 304		mr->pd		= pd;
 305		mr->type        = IB_MR_TYPE_DMA;
 306		mr->uobject	= NULL;
 307		mr->need_inval	= false;
 308
 309		pd->__internal_mr = mr;
 310
 311		if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY))
 312			pd->local_dma_lkey = pd->__internal_mr->lkey;
 313
 314		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
 315			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
 316	}
 317
 318	return pd;
 319}
 320EXPORT_SYMBOL(__ib_alloc_pd);
 321
 322/**
 323 * ib_dealloc_pd_user - Deallocates a protection domain.
 324 * @pd: The protection domain to deallocate.
 325 * @udata: Valid user data or NULL for kernel object
 326 *
 327 * It is an error to call this function while any resources in the pd still
 328 * exist.  The caller is responsible to synchronously destroy them and
 329 * guarantee no new allocations will happen.
 330 */
 331int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
 332{
 333	int ret;
 334
 335	if (pd->__internal_mr) {
 336		ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
 337		WARN_ON(ret);
 338		pd->__internal_mr = NULL;
 339	}
 340
 341	ret = pd->device->ops.dealloc_pd(pd, udata);
 342	if (ret)
 343		return ret;
 344
 345	rdma_restrack_del(&pd->res);
 346	kfree(pd);
 347	return ret;
 348}
 349EXPORT_SYMBOL(ib_dealloc_pd_user);
 350
 351/* Address handles */
 352
 353/**
 354 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
 355 * @dest:       Pointer to destination ah_attr. Contents of the destination
 356 *              pointer is assumed to be invalid and attribute are overwritten.
 357 * @src:        Pointer to source ah_attr.
 358 */
 359void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
 360		       const struct rdma_ah_attr *src)
 361{
 362	*dest = *src;
 363	if (dest->grh.sgid_attr)
 364		rdma_hold_gid_attr(dest->grh.sgid_attr);
 365}
 366EXPORT_SYMBOL(rdma_copy_ah_attr);
 367
 368/**
 369 * rdma_replace_ah_attr - Replace valid ah_attr with new new one.
 370 * @old:        Pointer to existing ah_attr which needs to be replaced.
 371 *              old is assumed to be valid or zero'd
 372 * @new:        Pointer to the new ah_attr.
 373 *
 374 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
 375 * old the ah_attr is valid; after that it copies the new attribute and holds
 376 * the reference to the replaced ah_attr.
 377 */
 378void rdma_replace_ah_attr(struct rdma_ah_attr *old,
 379			  const struct rdma_ah_attr *new)
 380{
 381	rdma_destroy_ah_attr(old);
 382	*old = *new;
 383	if (old->grh.sgid_attr)
 384		rdma_hold_gid_attr(old->grh.sgid_attr);
 385}
 386EXPORT_SYMBOL(rdma_replace_ah_attr);
 387
 388/**
 389 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
 390 * @dest:       Pointer to destination ah_attr to copy to.
 391 *              dest is assumed to be valid or zero'd
 392 * @src:        Pointer to the new ah_attr.
 393 *
 394 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 395 * if it is valid. This also transfers ownership of internal references from
 396 * src to dest, making src invalid in the process. No new reference of the src
 397 * ah_attr is taken.
 398 */
 399void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
 400{
 401	rdma_destroy_ah_attr(dest);
 402	*dest = *src;
 403	src->grh.sgid_attr = NULL;
 404}
 405EXPORT_SYMBOL(rdma_move_ah_attr);
 406
 407/*
 408 * Validate that the rdma_ah_attr is valid for the device before passing it
 409 * off to the driver.
 410 */
 411static int rdma_check_ah_attr(struct ib_device *device,
 412			      struct rdma_ah_attr *ah_attr)
 413{
 414	if (!rdma_is_port_valid(device, ah_attr->port_num))
 415		return -EINVAL;
 416
 417	if ((rdma_is_grh_required(device, ah_attr->port_num) ||
 418	     ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
 419	    !(ah_attr->ah_flags & IB_AH_GRH))
 420		return -EINVAL;
 421
 422	if (ah_attr->grh.sgid_attr) {
 423		/*
 424		 * Make sure the passed sgid_attr is consistent with the
 425		 * parameters
 426		 */
 427		if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
 428		    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
 429			return -EINVAL;
 430	}
 431	return 0;
 432}
 433
 434/*
 435 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
 436 * On success the caller is responsible to call rdma_unfill_sgid_attr().
 437 */
 438static int rdma_fill_sgid_attr(struct ib_device *device,
 439			       struct rdma_ah_attr *ah_attr,
 440			       const struct ib_gid_attr **old_sgid_attr)
 441{
 442	const struct ib_gid_attr *sgid_attr;
 443	struct ib_global_route *grh;
 444	int ret;
 445
 446	*old_sgid_attr = ah_attr->grh.sgid_attr;
 447
 448	ret = rdma_check_ah_attr(device, ah_attr);
 449	if (ret)
 450		return ret;
 451
 452	if (!(ah_attr->ah_flags & IB_AH_GRH))
 453		return 0;
 454
 455	grh = rdma_ah_retrieve_grh(ah_attr);
 456	if (grh->sgid_attr)
 457		return 0;
 458
 459	sgid_attr =
 460		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
 461	if (IS_ERR(sgid_attr))
 462		return PTR_ERR(sgid_attr);
 463
 464	/* Move ownerhip of the kref into the ah_attr */
 465	grh->sgid_attr = sgid_attr;
 466	return 0;
 467}
 468
 469static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
 470				  const struct ib_gid_attr *old_sgid_attr)
 471{
 472	/*
 473	 * Fill didn't change anything, the caller retains ownership of
 474	 * whatever it passed
 475	 */
 476	if (ah_attr->grh.sgid_attr == old_sgid_attr)
 477		return;
 478
 479	/*
 480	 * Otherwise, we need to undo what rdma_fill_sgid_attr so the caller
 481	 * doesn't see any change in the rdma_ah_attr. If we get here
 482	 * old_sgid_attr is NULL.
 483	 */
 484	rdma_destroy_ah_attr(ah_attr);
 485}
 486
 487static const struct ib_gid_attr *
 488rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
 489		      const struct ib_gid_attr *old_attr)
 490{
 491	if (old_attr)
 492		rdma_put_gid_attr(old_attr);
 493	if (ah_attr->ah_flags & IB_AH_GRH) {
 494		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
 495		return ah_attr->grh.sgid_attr;
 496	}
 497	return NULL;
 498}
 499
 500static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
 501				     struct rdma_ah_attr *ah_attr,
 502				     u32 flags,
 503				     struct ib_udata *udata,
 504				     struct net_device *xmit_slave)
 505{
 506	struct rdma_ah_init_attr init_attr = {};
 507	struct ib_device *device = pd->device;
 508	struct ib_ah *ah;
 509	int ret;
 510
 511	might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
 512
 513	if (!udata && !device->ops.create_ah)
 514		return ERR_PTR(-EOPNOTSUPP);
 515
 516	ah = rdma_zalloc_drv_obj_gfp(
 517		device, ib_ah,
 518		(flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
 519	if (!ah)
 520		return ERR_PTR(-ENOMEM);
 521
 522	ah->device = device;
 523	ah->pd = pd;
 524	ah->type = ah_attr->type;
 525	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
 526	init_attr.ah_attr = ah_attr;
 527	init_attr.flags = flags;
 528	init_attr.xmit_slave = xmit_slave;
 529
 530	if (udata)
 531		ret = device->ops.create_user_ah(ah, &init_attr, udata);
 532	else
 533		ret = device->ops.create_ah(ah, &init_attr, NULL);
 534	if (ret) {
 535		kfree(ah);
 536		return ERR_PTR(ret);
 537	}
 538
 539	atomic_inc(&pd->usecnt);
 540	return ah;
 541}
 542
 543/**
 544 * rdma_create_ah - Creates an address handle for the
 545 * given address vector.
 546 * @pd: The protection domain associated with the address handle.
 547 * @ah_attr: The attributes of the address vector.
 548 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 549 *
  550 * It returns a valid address handle on success and an ERR_PTR on failure.
 551 * The address handle is used to reference a local or global destination
 552 * in all UD QP post sends.
 553 */
 554struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
 555			     u32 flags)
 556{
 557	const struct ib_gid_attr *old_sgid_attr;
 558	struct net_device *slave;
 559	struct ib_ah *ah;
 560	int ret;
 561
 562	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
 563	if (ret)
 564		return ERR_PTR(ret);
 565	slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
 566					   (flags & RDMA_CREATE_AH_SLEEPABLE) ?
 567					   GFP_KERNEL : GFP_ATOMIC);
 568	if (IS_ERR(slave)) {
 569		rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 570		return (void *)slave;
 571	}
 572	ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
 573	rdma_lag_put_ah_roce_slave(slave);
 574	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 575	return ah;
 576}
 577EXPORT_SYMBOL(rdma_create_ah);
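
     /*
      * Usage sketch for kernel callers (illustrative only; pd, port_num and
      * dlid are assumed to come from the caller's context). For RoCE ports
      * the GRH fields must also be set, since rdma_check_ah_attr() rejects
      * RoCE attributes without IB_AH_GRH.
      *
      *	struct rdma_ah_attr attr = {};
      *	struct ib_ah *ah;
      *
      *	attr.type = rdma_ah_find_type(pd->device, port_num);
      *	rdma_ah_set_port_num(&attr, port_num);
      *	rdma_ah_set_sl(&attr, 0);
      *	rdma_ah_set_dlid(&attr, dlid);
      *
      *	ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
      *	if (IS_ERR(ah))
      *		return PTR_ERR(ah);
      *	...
      *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
      */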
 578
 579/**
 580 * rdma_create_user_ah - Creates an address handle for the
 581 * given address vector.
  582 * It resolves the destination mac address for an ah attribute of RoCE type.
  583 * @pd: The protection domain associated with the address handle.
  584 * @ah_attr: The attributes of the address vector.
  585 * @udata: pointer to user's input/output buffer information needed by
  586 *         the provider driver.
  587 *
  588 * It returns a valid address handle on success and an ERR_PTR on failure.
 589 * The address handle is used to reference a local or global destination
 590 * in all UD QP post sends.
 591 */
 592struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
 593				  struct rdma_ah_attr *ah_attr,
 594				  struct ib_udata *udata)
 595{
 596	const struct ib_gid_attr *old_sgid_attr;
 597	struct ib_ah *ah;
 598	int err;
 599
 600	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
 601	if (err)
 602		return ERR_PTR(err);
 603
 604	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
 605		err = ib_resolve_eth_dmac(pd->device, ah_attr);
 606		if (err) {
 607			ah = ERR_PTR(err);
 608			goto out;
 609		}
 610	}
 611
 612	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
 613			     udata, NULL);
 614
 615out:
 616	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 617	return ah;
 618}
 619EXPORT_SYMBOL(rdma_create_user_ah);
 620
 621int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
 622{
 623	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
 624	struct iphdr ip4h_checked;
 625	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
 626
 627	/* If it's IPv6, the version must be 6, otherwise, the first
 628	 * 20 bytes (before the IPv4 header) are garbled.
 629	 */
 630	if (ip6h->version != 6)
 631		return (ip4h->version == 4) ? 4 : 0;
  632	/* version reads 6, but the first 20 bytes may be garbage from a v4 packet */
 633
 634	/* RoCE v2 requires no options, thus header length
 635	 * must be 5 words
 636	 */
 637	if (ip4h->ihl != 5)
 638		return 6;
 639
 640	/* Verify checksum.
 641	 * We can't write on scattered buffers so we need to copy to
 642	 * temp buffer.
 643	 */
 644	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
 645	ip4h_checked.check = 0;
 646	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
 647	/* if IPv4 header checksum is OK, believe it */
 648	if (ip4h->check == ip4h_checked.check)
 649		return 4;
 650	return 6;
 651}
 652EXPORT_SYMBOL(ib_get_rdma_header_version);
 653
 654static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
 655						     u32 port_num,
 656						     const struct ib_grh *grh)
 657{
 658	int grh_version;
 659
 660	if (rdma_protocol_ib(device, port_num))
 661		return RDMA_NETWORK_IB;
 662
 663	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
 664
 665	if (grh_version == 4)
 666		return RDMA_NETWORK_IPV4;
 667
 668	if (grh->next_hdr == IPPROTO_UDP)
 669		return RDMA_NETWORK_IPV6;
 670
 671	return RDMA_NETWORK_ROCE_V1;
 672}
 673
 674struct find_gid_index_context {
 675	u16 vlan_id;
 676	enum ib_gid_type gid_type;
 677};
 678
 679static bool find_gid_index(const union ib_gid *gid,
 680			   const struct ib_gid_attr *gid_attr,
 681			   void *context)
 682{
 683	struct find_gid_index_context *ctx = context;
 684	u16 vlan_id = 0xffff;
 685	int ret;
 686
 687	if (ctx->gid_type != gid_attr->gid_type)
 688		return false;
 689
 690	ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
 691	if (ret)
 692		return false;
 693
 694	return ctx->vlan_id == vlan_id;
 695}
 696
 697static const struct ib_gid_attr *
 698get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
 699		       u16 vlan_id, const union ib_gid *sgid,
 700		       enum ib_gid_type gid_type)
 701{
 702	struct find_gid_index_context context = {.vlan_id = vlan_id,
 703						 .gid_type = gid_type};
 704
 705	return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
 706				       &context);
 707}
 708
 709int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
 710			      enum rdma_network_type net_type,
 711			      union ib_gid *sgid, union ib_gid *dgid)
 712{
 713	struct sockaddr_in  src_in;
 714	struct sockaddr_in  dst_in;
 715	__be32 src_saddr, dst_saddr;
 716
 717	if (!sgid || !dgid)
 718		return -EINVAL;
 719
 720	if (net_type == RDMA_NETWORK_IPV4) {
 721		memcpy(&src_in.sin_addr.s_addr,
 722		       &hdr->roce4grh.saddr, 4);
 723		memcpy(&dst_in.sin_addr.s_addr,
 724		       &hdr->roce4grh.daddr, 4);
 725		src_saddr = src_in.sin_addr.s_addr;
 726		dst_saddr = dst_in.sin_addr.s_addr;
 727		ipv6_addr_set_v4mapped(src_saddr,
 728				       (struct in6_addr *)sgid);
 729		ipv6_addr_set_v4mapped(dst_saddr,
 730				       (struct in6_addr *)dgid);
 731		return 0;
  732	} else if (net_type == RDMA_NETWORK_IPV6 || net_type == RDMA_NETWORK_IB ||
  733		   net_type == RDMA_NETWORK_ROCE_V1) {
 734		*dgid = hdr->ibgrh.dgid;
 735		*sgid = hdr->ibgrh.sgid;
 736		return 0;
 737	} else {
 738		return -EINVAL;
 739	}
 740}
 741EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
 742
  743/* Resolve the destination mac address and hop limit for a unicast
  744 * destination GID entry, considering the source GID entry as well.
  745 * The ah_attr must have a valid port_num and sgid_index.
 746 */
 747static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
 748				       struct rdma_ah_attr *ah_attr)
 749{
 750	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
 751	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
 752	int hop_limit = 0xff;
 753	int ret = 0;
 754
 755	/* If destination is link local and source GID is RoCEv1,
 756	 * IP stack is not used.
 757	 */
 758	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
 759	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
 760		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
 761				ah_attr->roce.dmac);
 762		return ret;
 763	}
 764
 765	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
 766					   ah_attr->roce.dmac,
 767					   sgid_attr, &hop_limit);
 768
 769	grh->hop_limit = hop_limit;
 770	return ret;
 771}
 772
 773/*
 774 * This function initializes address handle attributes from the incoming packet.
  775 * The incoming packet has the dgid of the receiver node on which this code
  776 * is executing, and its sgid contains the GID of the sender.
  777 *
  778 * When resolving the destination mac address, the arrived dgid is used
  779 * as the sgid and the sgid is used as the dgid, because the sgid holds the
  780 * destination's GID, i.e. the peer to respond to.
  781 *
  782 * On success the caller is responsible for calling rdma_destroy_ah_attr() on
  783 * the attr.
 784 */
 785int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
 786			    const struct ib_wc *wc, const struct ib_grh *grh,
 787			    struct rdma_ah_attr *ah_attr)
 788{
 789	u32 flow_class;
 790	int ret;
 791	enum rdma_network_type net_type = RDMA_NETWORK_IB;
 792	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
 793	const struct ib_gid_attr *sgid_attr;
 794	int hoplimit = 0xff;
 795	union ib_gid dgid;
 796	union ib_gid sgid;
 797
 798	might_sleep();
 799
 800	memset(ah_attr, 0, sizeof *ah_attr);
 801	ah_attr->type = rdma_ah_find_type(device, port_num);
 802	if (rdma_cap_eth_ah(device, port_num)) {
 803		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
 804			net_type = wc->network_hdr_type;
 805		else
 806			net_type = ib_get_net_type_by_grh(device, port_num, grh);
 807		gid_type = ib_network_to_gid_type(net_type);
 808	}
 809	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
 810					&sgid, &dgid);
 811	if (ret)
 812		return ret;
 813
 814	rdma_ah_set_sl(ah_attr, wc->sl);
 815	rdma_ah_set_port_num(ah_attr, port_num);
 816
 817	if (rdma_protocol_roce(device, port_num)) {
 818		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
 819				wc->vlan_id : 0xffff;
 820
 821		if (!(wc->wc_flags & IB_WC_GRH))
 822			return -EPROTOTYPE;
 823
 824		sgid_attr = get_sgid_attr_from_eth(device, port_num,
 825						   vlan_id, &dgid,
 826						   gid_type);
 827		if (IS_ERR(sgid_attr))
 828			return PTR_ERR(sgid_attr);
 829
 830		flow_class = be32_to_cpu(grh->version_tclass_flow);
 831		rdma_move_grh_sgid_attr(ah_attr,
 832					&sgid,
 833					flow_class & 0xFFFFF,
 834					hoplimit,
 835					(flow_class >> 20) & 0xFF,
 836					sgid_attr);
 837
 838		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
 839		if (ret)
 840			rdma_destroy_ah_attr(ah_attr);
 841
 842		return ret;
 843	} else {
 844		rdma_ah_set_dlid(ah_attr, wc->slid);
 845		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
 846
 847		if ((wc->wc_flags & IB_WC_GRH) == 0)
 848			return 0;
 849
 850		if (dgid.global.interface_id !=
 851					cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
 852			sgid_attr = rdma_find_gid_by_port(
 853				device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
 854		} else
 855			sgid_attr = rdma_get_gid_attr(device, port_num, 0);
 856
 857		if (IS_ERR(sgid_attr))
 858			return PTR_ERR(sgid_attr);
 859		flow_class = be32_to_cpu(grh->version_tclass_flow);
 860		rdma_move_grh_sgid_attr(ah_attr,
 861					&sgid,
 862					flow_class & 0xFFFFF,
 863					hoplimit,
 864					(flow_class >> 20) & 0xFF,
 865					sgid_attr);
 866
 867		return 0;
 868	}
 869}
 870EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
 871
 872/**
 873 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
 874 * of the reference
 875 *
 876 * @attr:	Pointer to AH attribute structure
 877 * @dgid:	Destination GID
 878 * @flow_label:	Flow label
 879 * @hop_limit:	Hop limit
 880 * @traffic_class: traffic class
 881 * @sgid_attr:	Pointer to SGID attribute
 882 *
 883 * This takes ownership of the sgid_attr reference. The caller must ensure
 884 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
 885 * calling this function.
 886 */
 887void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
 888			     u32 flow_label, u8 hop_limit, u8 traffic_class,
 889			     const struct ib_gid_attr *sgid_attr)
 890{
 891	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
 892			traffic_class);
 893	attr->grh.sgid_attr = sgid_attr;
 894}
 895EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
 896
 897/**
 898 * rdma_destroy_ah_attr - Release reference to SGID attribute of
 899 * ah attribute.
 900 * @ah_attr: Pointer to ah attribute
 901 *
 902 * Release reference to the SGID attribute of the ah attribute if it is
 903 * non NULL. It is safe to call this multiple times, and safe to call it on
 904 * a zero initialized ah_attr.
 905 */
 906void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
 907{
 908	if (ah_attr->grh.sgid_attr) {
 909		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
 910		ah_attr->grh.sgid_attr = NULL;
 911	}
 912}
 913EXPORT_SYMBOL(rdma_destroy_ah_attr);
 914
 915struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
 916				   const struct ib_grh *grh, u32 port_num)
 917{
 918	struct rdma_ah_attr ah_attr;
 919	struct ib_ah *ah;
 920	int ret;
 921
 922	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
 923	if (ret)
 924		return ERR_PTR(ret);
 925
 926	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 927
 928	rdma_destroy_ah_attr(&ah_attr);
 929	return ah;
 930}
 931EXPORT_SYMBOL(ib_create_ah_from_wc);
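
     /*
      * Responder-side sketch (illustrative; qp, pd, port_num and the
      * completed wc come from the caller, and for a UD receive the GRH
      * occupies the first 40 bytes of the receive buffer): build an AH
      * addressing the original sender, post the reply, then drop the AH.
      *
      *	struct ib_ah *ah;
      *
      *	ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
      *	if (IS_ERR(ah))
      *		return PTR_ERR(ah);
      *	... post a UD send with ud.ah = ah and ud.remote_qpn = wc.src_qp ...
      *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
      */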
 932
 933int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
 934{
 935	const struct ib_gid_attr *old_sgid_attr;
 936	int ret;
 937
 938	if (ah->type != ah_attr->type)
 939		return -EINVAL;
 940
 941	ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
 942	if (ret)
 943		return ret;
 944
 945	ret = ah->device->ops.modify_ah ?
 946		ah->device->ops.modify_ah(ah, ah_attr) :
 947		-EOPNOTSUPP;
 948
 949	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
 950	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 951	return ret;
 952}
 953EXPORT_SYMBOL(rdma_modify_ah);
 954
 955int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
 956{
 957	ah_attr->grh.sgid_attr = NULL;
 958
 959	return ah->device->ops.query_ah ?
 960		ah->device->ops.query_ah(ah, ah_attr) :
 961		-EOPNOTSUPP;
 962}
 963EXPORT_SYMBOL(rdma_query_ah);
 964
 965int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
 966{
 967	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
 968	struct ib_pd *pd;
 969	int ret;
 970
 971	might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
 972
 973	pd = ah->pd;
 974
 975	ret = ah->device->ops.destroy_ah(ah, flags);
 976	if (ret)
 977		return ret;
 978
 979	atomic_dec(&pd->usecnt);
 980	if (sgid_attr)
 981		rdma_put_gid_attr(sgid_attr);
 982
 983	kfree(ah);
 984	return ret;
 985}
 986EXPORT_SYMBOL(rdma_destroy_ah_user);
 987
 988/* Shared receive queues */
 989
 990/**
 991 * ib_create_srq_user - Creates a SRQ associated with the specified protection
 992 *   domain.
 993 * @pd: The protection domain associated with the SRQ.
 994 * @srq_init_attr: A list of initial attributes required to create the
 995 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 996 *   the actual capabilities of the created SRQ.
 997 * @uobject: uobject pointer if this is not a kernel SRQ
 998 * @udata: udata pointer if this is not a kernel SRQ
 999 *
 1000 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1001 * requested size of the SRQ, and set to the actual values allocated
1002 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1003 * will always be at least as large as the requested values.
1004 */
1005struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
1006				  struct ib_srq_init_attr *srq_init_attr,
1007				  struct ib_usrq_object *uobject,
1008				  struct ib_udata *udata)
1009{
1010	struct ib_srq *srq;
1011	int ret;
1012
1013	srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
1014	if (!srq)
1015		return ERR_PTR(-ENOMEM);
1016
1017	srq->device = pd->device;
1018	srq->pd = pd;
1019	srq->event_handler = srq_init_attr->event_handler;
1020	srq->srq_context = srq_init_attr->srq_context;
1021	srq->srq_type = srq_init_attr->srq_type;
1022	srq->uobject = uobject;
1023
1024	if (ib_srq_has_cq(srq->srq_type)) {
1025		srq->ext.cq = srq_init_attr->ext.cq;
1026		atomic_inc(&srq->ext.cq->usecnt);
1027	}
1028	if (srq->srq_type == IB_SRQT_XRC) {
1029		srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
1030		if (srq->ext.xrc.xrcd)
1031			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
1032	}
1033	atomic_inc(&pd->usecnt);
1034
1035	rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
1036	rdma_restrack_parent_name(&srq->res, &pd->res);
1037
1038	ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
1039	if (ret) {
1040		rdma_restrack_put(&srq->res);
1041		atomic_dec(&pd->usecnt);
1042		if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1043			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1044		if (ib_srq_has_cq(srq->srq_type))
1045			atomic_dec(&srq->ext.cq->usecnt);
1046		kfree(srq);
1047		return ERR_PTR(ret);
1048	}
1049
1050	rdma_restrack_add(&srq->res);
1051
1052	return srq;
1053}
1054EXPORT_SYMBOL(ib_create_srq_user);
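
     /*
      * Kernel consumers normally go through the ib_create_srq() wrapper in
      * ib_verbs.h, which passes NULL uobject/udata (sketch; pd and the sizes
      * are illustrative):
      *
      *	struct ib_srq_init_attr srq_attr = {
      *		.srq_type = IB_SRQT_BASIC,
      *		.attr = { .max_wr = 256, .max_sge = 1 },
      *	};
      *	struct ib_srq *srq;
      *
      *	srq = ib_create_srq(pd, &srq_attr);
      *	if (IS_ERR(srq))
      *		return PTR_ERR(srq);
      *	...
      *	ib_destroy_srq(srq);
      */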
1055
1056int ib_modify_srq(struct ib_srq *srq,
1057		  struct ib_srq_attr *srq_attr,
1058		  enum ib_srq_attr_mask srq_attr_mask)
1059{
1060	return srq->device->ops.modify_srq ?
1061		srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
1062					    NULL) : -EOPNOTSUPP;
1063}
1064EXPORT_SYMBOL(ib_modify_srq);
1065
1066int ib_query_srq(struct ib_srq *srq,
1067		 struct ib_srq_attr *srq_attr)
1068{
1069	return srq->device->ops.query_srq ?
1070		srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1071}
1072EXPORT_SYMBOL(ib_query_srq);
1073
1074int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1075{
1076	int ret;
1077
1078	if (atomic_read(&srq->usecnt))
1079		return -EBUSY;
1080
1081	ret = srq->device->ops.destroy_srq(srq, udata);
1082	if (ret)
1083		return ret;
1084
1085	atomic_dec(&srq->pd->usecnt);
1086	if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1087		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1088	if (ib_srq_has_cq(srq->srq_type))
1089		atomic_dec(&srq->ext.cq->usecnt);
1090	rdma_restrack_del(&srq->res);
1091	kfree(srq);
1092
1093	return ret;
1094}
1095EXPORT_SYMBOL(ib_destroy_srq_user);
1096
1097/* Queue pairs */
1098
1099static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
1100{
1101	struct ib_qp *qp = context;
1102	unsigned long flags;
1103
1104	spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
1105	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1106		if (event->element.qp->event_handler)
1107			event->element.qp->event_handler(event, event->element.qp->qp_context);
1108	spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
1109}
1110
1111static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
1112				  void (*event_handler)(struct ib_event *, void *),
1113				  void *qp_context)
1114{
1115	struct ib_qp *qp;
1116	unsigned long flags;
1117	int err;
1118
1119	qp = kzalloc(sizeof *qp, GFP_KERNEL);
1120	if (!qp)
1121		return ERR_PTR(-ENOMEM);
1122
1123	qp->real_qp = real_qp;
1124	err = ib_open_shared_qp_security(qp, real_qp->device);
1125	if (err) {
1126		kfree(qp);
1127		return ERR_PTR(err);
1128	}
1129
1130	qp->real_qp = real_qp;
1131	atomic_inc(&real_qp->usecnt);
1132	qp->device = real_qp->device;
1133	qp->event_handler = event_handler;
1134	qp->qp_context = qp_context;
1135	qp->qp_num = real_qp->qp_num;
1136	qp->qp_type = real_qp->qp_type;
1137
1138	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1139	list_add(&qp->open_list, &real_qp->open_list);
1140	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1141
1142	return qp;
1143}
1144
1145struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1146			 struct ib_qp_open_attr *qp_open_attr)
1147{
1148	struct ib_qp *qp, *real_qp;
1149
1150	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
1151		return ERR_PTR(-EINVAL);
1152
1153	down_read(&xrcd->tgt_qps_rwsem);
1154	real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
1155	if (!real_qp) {
1156		up_read(&xrcd->tgt_qps_rwsem);
1157		return ERR_PTR(-EINVAL);
1158	}
1159	qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
1160			  qp_open_attr->qp_context);
1161	up_read(&xrcd->tgt_qps_rwsem);
1162	return qp;
1163}
1164EXPORT_SYMBOL(ib_open_qp);
1165
1166static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1167					struct ib_qp_init_attr *qp_init_attr)
1168{
1169	struct ib_qp *real_qp = qp;
1170	int err;
1171
1172	qp->event_handler = __ib_shared_qp_event_handler;
1173	qp->qp_context = qp;
1174	qp->pd = NULL;
1175	qp->send_cq = qp->recv_cq = NULL;
1176	qp->srq = NULL;
1177	qp->xrcd = qp_init_attr->xrcd;
1178	atomic_inc(&qp_init_attr->xrcd->usecnt);
1179	INIT_LIST_HEAD(&qp->open_list);
1180
1181	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1182			  qp_init_attr->qp_context);
1183	if (IS_ERR(qp))
1184		return qp;
1185
1186	err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
1187			      real_qp, GFP_KERNEL));
1188	if (err) {
1189		ib_close_qp(qp);
1190		return ERR_PTR(err);
1191	}
1192	return qp;
1193}
1194
1195static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
1196			       struct ib_qp_init_attr *attr,
1197			       struct ib_udata *udata,
1198			       struct ib_uqp_object *uobj, const char *caller)
1199{
1200	struct ib_udata dummy = {};
1201	struct ib_qp *qp;
1202	int ret;
1203
1204	if (!dev->ops.create_qp)
1205		return ERR_PTR(-EOPNOTSUPP);
1206
1207	qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
1208	if (!qp)
1209		return ERR_PTR(-ENOMEM);
1210
1211	qp->device = dev;
1212	qp->pd = pd;
1213	qp->uobject = uobj;
1214	qp->real_qp = qp;
1215
1216	qp->qp_type = attr->qp_type;
1217	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
1218	qp->srq = attr->srq;
1219	qp->event_handler = attr->event_handler;
1220	qp->port = attr->port_num;
1221	qp->qp_context = attr->qp_context;
1222
1223	spin_lock_init(&qp->mr_lock);
1224	INIT_LIST_HEAD(&qp->rdma_mrs);
1225	INIT_LIST_HEAD(&qp->sig_mrs);
1226
1227	qp->send_cq = attr->send_cq;
1228	qp->recv_cq = attr->recv_cq;
1229
1230	rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
1231	WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
1232	rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
1233	ret = dev->ops.create_qp(qp, attr, udata);
1234	if (ret)
1235		goto err_create;
1236
1237	/*
1238	 * TODO: The mlx4 internally overwrites send_cq and recv_cq.
1239	 * Unfortunately, it is not an easy task to fix that driver.
1240	 */
1241	qp->send_cq = attr->send_cq;
1242	qp->recv_cq = attr->recv_cq;
1243
1244	ret = ib_create_qp_security(qp, dev);
1245	if (ret)
1246		goto err_security;
1247
1248	rdma_restrack_add(&qp->res);
1249	return qp;
1250
1251err_security:
1252	qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL);
1253err_create:
1254	rdma_restrack_put(&qp->res);
1255	kfree(qp);
1256	return ERR_PTR(ret);
1257
1258}
1259
1260/**
1261 * ib_create_qp_user - Creates a QP associated with the specified protection
1262 *   domain.
1263 * @dev: IB device
1264 * @pd: The protection domain associated with the QP.
1265 * @attr: A list of initial attributes required to create the
1266 *   QP.  If QP creation succeeds, then the attributes are updated to
1267 *   the actual capabilities of the created QP.
1268 * @udata: User data
 1269 * @uobj: uverbs object
1270 * @caller: caller's build-time module name
1271 */
1272struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
1273				struct ib_qp_init_attr *attr,
1274				struct ib_udata *udata,
1275				struct ib_uqp_object *uobj, const char *caller)
1276{
1277	struct ib_qp *qp, *xrc_qp;
1278
1279	if (attr->qp_type == IB_QPT_XRC_TGT)
1280		qp = create_qp(dev, pd, attr, NULL, NULL, caller);
1281	else
1282		qp = create_qp(dev, pd, attr, udata, uobj, NULL);
1283	if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
1284		return qp;
1285
1286	xrc_qp = create_xrc_qp_user(qp, attr);
1287	if (IS_ERR(xrc_qp)) {
1288		ib_destroy_qp(qp);
1289		return xrc_qp;
1290	}
1291
1292	xrc_qp->uobject = uobj;
1293	return xrc_qp;
1294}
1295EXPORT_SYMBOL(ib_create_qp_user);
1296
1297void ib_qp_usecnt_inc(struct ib_qp *qp)
1298{
1299	if (qp->pd)
1300		atomic_inc(&qp->pd->usecnt);
1301	if (qp->send_cq)
1302		atomic_inc(&qp->send_cq->usecnt);
1303	if (qp->recv_cq)
1304		atomic_inc(&qp->recv_cq->usecnt);
1305	if (qp->srq)
1306		atomic_inc(&qp->srq->usecnt);
1307	if (qp->rwq_ind_tbl)
1308		atomic_inc(&qp->rwq_ind_tbl->usecnt);
1309}
1310EXPORT_SYMBOL(ib_qp_usecnt_inc);
1311
1312void ib_qp_usecnt_dec(struct ib_qp *qp)
1313{
1314	if (qp->rwq_ind_tbl)
1315		atomic_dec(&qp->rwq_ind_tbl->usecnt);
1316	if (qp->srq)
1317		atomic_dec(&qp->srq->usecnt);
1318	if (qp->recv_cq)
1319		atomic_dec(&qp->recv_cq->usecnt);
1320	if (qp->send_cq)
1321		atomic_dec(&qp->send_cq->usecnt);
1322	if (qp->pd)
1323		atomic_dec(&qp->pd->usecnt);
1324}
1325EXPORT_SYMBOL(ib_qp_usecnt_dec);
1326
1327struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
1328				  struct ib_qp_init_attr *qp_init_attr,
1329				  const char *caller)
1330{
1331	struct ib_device *device = pd->device;
1332	struct ib_qp *qp;
1333	int ret;
1334
1335	/*
 1336	 * If the caller is using the RDMA API, calculate the resources
1337	 * needed for the RDMA READ/WRITE operations.
1338	 *
1339	 * Note that these callers need to pass in a port number.
1340	 */
1341	if (qp_init_attr->cap.max_rdma_ctxs)
1342		rdma_rw_init_qp(device, qp_init_attr);
1343
1344	qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
1345	if (IS_ERR(qp))
1346		return qp;
1347
1348	ib_qp_usecnt_inc(qp);
1349
1350	if (qp_init_attr->cap.max_rdma_ctxs) {
1351		ret = rdma_rw_init_mrs(qp, qp_init_attr);
1352		if (ret)
1353			goto err;
1354	}
1355
1356	/*
1357	 * Note: all hw drivers guarantee that max_send_sge is lower than
1358	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
1359	 * max_send_sge <= max_sge_rd.
1360	 */
1361	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1362	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1363				 device->attrs.max_sge_rd);
1364	if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
1365		qp->integrity_en = true;
1366
1367	return qp;
1368
1369err:
1370	ib_destroy_qp(qp);
1371	return ERR_PTR(ret);
1372
1373}
1374EXPORT_SYMBOL(ib_create_qp_kernel);
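
     /*
      * Kernel ULPs call this through the ib_create_qp() wrapper, which
      * supplies KBUILD_MODNAME as the restrack owner (sketch; pd, send_cq
      * and recv_cq are assumed to exist and the capacities are illustrative):
      *
      *	struct ib_qp_init_attr init_attr = {
      *		.qp_type = IB_QPT_RC,
      *		.send_cq = send_cq,
      *		.recv_cq = recv_cq,
      *		.sq_sig_type = IB_SIGNAL_REQ_WR,
      *		.cap = {
      *			.max_send_wr = 64,
      *			.max_recv_wr = 64,
      *			.max_send_sge = 1,
      *			.max_recv_sge = 1,
      *		},
      *	};
      *	struct ib_qp *qp;
      *
      *	qp = ib_create_qp(pd, &init_attr);
      *	if (IS_ERR(qp))
      *		return PTR_ERR(qp);
      *	...
      *	ib_destroy_qp(qp);
      */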
1375
1376static const struct {
1377	int			valid;
1378	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
1379	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
1380} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
1381	[IB_QPS_RESET] = {
1382		[IB_QPS_RESET] = { .valid = 1 },
1383		[IB_QPS_INIT]  = {
1384			.valid = 1,
1385			.req_param = {
1386				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1387						IB_QP_PORT			|
1388						IB_QP_QKEY),
1389				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
1390				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
1391						IB_QP_PORT			|
1392						IB_QP_ACCESS_FLAGS),
1393				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
1394						IB_QP_PORT			|
1395						IB_QP_ACCESS_FLAGS),
1396				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1397						IB_QP_PORT			|
1398						IB_QP_ACCESS_FLAGS),
1399				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1400						IB_QP_PORT			|
1401						IB_QP_ACCESS_FLAGS),
1402				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1403						IB_QP_QKEY),
1404				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1405						IB_QP_QKEY),
1406			}
1407		},
1408	},
1409	[IB_QPS_INIT]  = {
1410		[IB_QPS_RESET] = { .valid = 1 },
1411		[IB_QPS_ERR] =   { .valid = 1 },
1412		[IB_QPS_INIT]  = {
1413			.valid = 1,
1414			.opt_param = {
1415				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1416						IB_QP_PORT			|
1417						IB_QP_QKEY),
1418				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
1419						IB_QP_PORT			|
1420						IB_QP_ACCESS_FLAGS),
1421				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
1422						IB_QP_PORT			|
1423						IB_QP_ACCESS_FLAGS),
1424				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
1425						IB_QP_PORT			|
1426						IB_QP_ACCESS_FLAGS),
1427				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
1428						IB_QP_PORT			|
1429						IB_QP_ACCESS_FLAGS),
1430				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1431						IB_QP_QKEY),
1432				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1433						IB_QP_QKEY),
1434			}
1435		},
1436		[IB_QPS_RTR]   = {
1437			.valid = 1,
1438			.req_param = {
1439				[IB_QPT_UC]  = (IB_QP_AV			|
1440						IB_QP_PATH_MTU			|
1441						IB_QP_DEST_QPN			|
1442						IB_QP_RQ_PSN),
1443				[IB_QPT_RC]  = (IB_QP_AV			|
1444						IB_QP_PATH_MTU			|
1445						IB_QP_DEST_QPN			|
1446						IB_QP_RQ_PSN			|
1447						IB_QP_MAX_DEST_RD_ATOMIC	|
1448						IB_QP_MIN_RNR_TIMER),
1449				[IB_QPT_XRC_INI] = (IB_QP_AV			|
1450						IB_QP_PATH_MTU			|
1451						IB_QP_DEST_QPN			|
1452						IB_QP_RQ_PSN),
1453				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
1454						IB_QP_PATH_MTU			|
1455						IB_QP_DEST_QPN			|
1456						IB_QP_RQ_PSN			|
1457						IB_QP_MAX_DEST_RD_ATOMIC	|
1458						IB_QP_MIN_RNR_TIMER),
1459			},
1460			.opt_param = {
1461				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1462						 IB_QP_QKEY),
1463				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
1464						 IB_QP_ACCESS_FLAGS		|
1465						 IB_QP_PKEY_INDEX),
1466				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
1467						 IB_QP_ACCESS_FLAGS		|
1468						 IB_QP_PKEY_INDEX),
1469				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
1470						 IB_QP_ACCESS_FLAGS		|
1471						 IB_QP_PKEY_INDEX),
1472				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
1473						 IB_QP_ACCESS_FLAGS		|
1474						 IB_QP_PKEY_INDEX),
1475				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1476						 IB_QP_QKEY),
1477				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1478						 IB_QP_QKEY),
1479			 },
1480		},
1481	},
1482	[IB_QPS_RTR]   = {
1483		[IB_QPS_RESET] = { .valid = 1 },
1484		[IB_QPS_ERR] =   { .valid = 1 },
1485		[IB_QPS_RTS]   = {
1486			.valid = 1,
1487			.req_param = {
1488				[IB_QPT_UD]  = IB_QP_SQ_PSN,
1489				[IB_QPT_UC]  = IB_QP_SQ_PSN,
1490				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
1491						IB_QP_RETRY_CNT			|
1492						IB_QP_RNR_RETRY			|
1493						IB_QP_SQ_PSN			|
1494						IB_QP_MAX_QP_RD_ATOMIC),
1495				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
1496						IB_QP_RETRY_CNT			|
1497						IB_QP_RNR_RETRY			|
1498						IB_QP_SQ_PSN			|
1499						IB_QP_MAX_QP_RD_ATOMIC),
1500				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
1501						IB_QP_SQ_PSN),
1502				[IB_QPT_SMI] = IB_QP_SQ_PSN,
1503				[IB_QPT_GSI] = IB_QP_SQ_PSN,
1504			},
1505			.opt_param = {
1506				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
1507						 IB_QP_QKEY),
1508				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
1509						 IB_QP_ALT_PATH			|
1510						 IB_QP_ACCESS_FLAGS		|
1511						 IB_QP_PATH_MIG_STATE),
1512				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
1513						 IB_QP_ALT_PATH			|
1514						 IB_QP_ACCESS_FLAGS		|
1515						 IB_QP_MIN_RNR_TIMER		|
1516						 IB_QP_PATH_MIG_STATE),
1517				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1518						 IB_QP_ALT_PATH			|
1519						 IB_QP_ACCESS_FLAGS		|
1520						 IB_QP_PATH_MIG_STATE),
1521				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1522						 IB_QP_ALT_PATH			|
1523						 IB_QP_ACCESS_FLAGS		|
1524						 IB_QP_MIN_RNR_TIMER		|
1525						 IB_QP_PATH_MIG_STATE),
1526				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
1527						 IB_QP_QKEY),
1528				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
1529						 IB_QP_QKEY),
1530				 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1531			 }
1532		}
1533	},
1534	[IB_QPS_RTS]   = {
1535		[IB_QPS_RESET] = { .valid = 1 },
1536		[IB_QPS_ERR] =   { .valid = 1 },
1537		[IB_QPS_RTS]   = {
1538			.valid = 1,
1539			.opt_param = {
1540				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1541						IB_QP_QKEY),
1542				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1543						IB_QP_ACCESS_FLAGS		|
1544						IB_QP_ALT_PATH			|
1545						IB_QP_PATH_MIG_STATE),
1546				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
1547						IB_QP_ACCESS_FLAGS		|
1548						IB_QP_ALT_PATH			|
1549						IB_QP_PATH_MIG_STATE		|
1550						IB_QP_MIN_RNR_TIMER),
1551				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1552						IB_QP_ACCESS_FLAGS		|
1553						IB_QP_ALT_PATH			|
1554						IB_QP_PATH_MIG_STATE),
1555				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1556						IB_QP_ACCESS_FLAGS		|
1557						IB_QP_ALT_PATH			|
1558						IB_QP_PATH_MIG_STATE		|
1559						IB_QP_MIN_RNR_TIMER),
1560				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1561						IB_QP_QKEY),
1562				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1563						IB_QP_QKEY),
1564				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1565			}
1566		},
1567		[IB_QPS_SQD]   = {
1568			.valid = 1,
1569			.opt_param = {
1570				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1571				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1572				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1573				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1574				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1575				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1576				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1577			}
1578		},
1579	},
1580	[IB_QPS_SQD]   = {
1581		[IB_QPS_RESET] = { .valid = 1 },
1582		[IB_QPS_ERR] =   { .valid = 1 },
1583		[IB_QPS_RTS]   = {
1584			.valid = 1,
1585			.opt_param = {
1586				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1587						IB_QP_QKEY),
1588				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1589						IB_QP_ALT_PATH			|
1590						IB_QP_ACCESS_FLAGS		|
1591						IB_QP_PATH_MIG_STATE),
1592				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
1593						IB_QP_ALT_PATH			|
1594						IB_QP_ACCESS_FLAGS		|
1595						IB_QP_MIN_RNR_TIMER		|
1596						IB_QP_PATH_MIG_STATE),
1597				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
1598						IB_QP_ALT_PATH			|
1599						IB_QP_ACCESS_FLAGS		|
1600						IB_QP_PATH_MIG_STATE),
1601				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
1602						IB_QP_ALT_PATH			|
1603						IB_QP_ACCESS_FLAGS		|
1604						IB_QP_MIN_RNR_TIMER		|
1605						IB_QP_PATH_MIG_STATE),
1606				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1607						IB_QP_QKEY),
1608				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1609						IB_QP_QKEY),
1610			}
1611		},
1612		[IB_QPS_SQD]   = {
1613			.valid = 1,
1614			.opt_param = {
1615				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
1616						IB_QP_QKEY),
1617				[IB_QPT_UC]  = (IB_QP_AV			|
1618						IB_QP_ALT_PATH			|
1619						IB_QP_ACCESS_FLAGS		|
1620						IB_QP_PKEY_INDEX		|
1621						IB_QP_PATH_MIG_STATE),
1622				[IB_QPT_RC]  = (IB_QP_PORT			|
1623						IB_QP_AV			|
1624						IB_QP_TIMEOUT			|
1625						IB_QP_RETRY_CNT			|
1626						IB_QP_RNR_RETRY			|
1627						IB_QP_MAX_QP_RD_ATOMIC		|
1628						IB_QP_MAX_DEST_RD_ATOMIC	|
1629						IB_QP_ALT_PATH			|
1630						IB_QP_ACCESS_FLAGS		|
1631						IB_QP_PKEY_INDEX		|
1632						IB_QP_MIN_RNR_TIMER		|
1633						IB_QP_PATH_MIG_STATE),
1634				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
1635						IB_QP_AV			|
1636						IB_QP_TIMEOUT			|
1637						IB_QP_RETRY_CNT			|
1638						IB_QP_RNR_RETRY			|
1639						IB_QP_MAX_QP_RD_ATOMIC		|
1640						IB_QP_ALT_PATH			|
1641						IB_QP_ACCESS_FLAGS		|
1642						IB_QP_PKEY_INDEX		|
1643						IB_QP_PATH_MIG_STATE),
1644				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
1645						IB_QP_AV			|
1646						IB_QP_TIMEOUT			|
1647						IB_QP_MAX_DEST_RD_ATOMIC	|
1648						IB_QP_ALT_PATH			|
1649						IB_QP_ACCESS_FLAGS		|
1650						IB_QP_PKEY_INDEX		|
1651						IB_QP_MIN_RNR_TIMER		|
1652						IB_QP_PATH_MIG_STATE),
1653				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
1654						IB_QP_QKEY),
1655				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
1656						IB_QP_QKEY),
1657			}
1658		}
1659	},
1660	[IB_QPS_SQE]   = {
1661		[IB_QPS_RESET] = { .valid = 1 },
1662		[IB_QPS_ERR] =   { .valid = 1 },
1663		[IB_QPS_RTS]   = {
1664			.valid = 1,
1665			.opt_param = {
1666				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
1667						IB_QP_QKEY),
1668				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
1669						IB_QP_ACCESS_FLAGS),
1670				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
1671						IB_QP_QKEY),
1672				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
1673						IB_QP_QKEY),
1674			}
1675		}
1676	},
1677	[IB_QPS_ERR] = {
1678		[IB_QPS_RESET] = { .valid = 1 },
1679		[IB_QPS_ERR] =   { .valid = 1 }
1680	}
1681};
1682
1683bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1684			enum ib_qp_type type, enum ib_qp_attr_mask mask)
1685{
1686	enum ib_qp_attr_mask req_param, opt_param;
1687
1688	if (mask & IB_QP_CUR_STATE  &&
1689	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1690	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1691		return false;
1692
1693	if (!qp_state_table[cur_state][next_state].valid)
1694		return false;
1695
1696	req_param = qp_state_table[cur_state][next_state].req_param[type];
1697	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1698
1699	if ((mask & req_param) != req_param)
1700		return false;
1701
1702	if (mask & ~(req_param | opt_param | IB_QP_STATE))
1703		return false;
1704
1705	return true;
1706}
1707EXPORT_SYMBOL(ib_modify_qp_is_ok);
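
     /*
      * Example check against qp_state_table above (sketch): for an RC QP
      * moving RESET -> INIT, the pkey index, port and access flags are all
      * required, so omitting any of them makes the transition invalid.
      *
      *	enum ib_qp_attr_mask mask = IB_QP_STATE | IB_QP_PKEY_INDEX |
      *				    IB_QP_PORT | IB_QP_ACCESS_FLAGS;
      *
      *	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC, mask))
      *		return -EINVAL;
      */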
1708
1709/**
1710 * ib_resolve_eth_dmac - Resolve destination mac address
1711 * @device:		Device to consider
1712 * @ah_attr:		address handle attribute which describes the
1713 *			source and destination parameters
 1714 * ib_resolve_eth_dmac() resolves the destination mac address and L3 hop limit.
 1715 * It returns 0 on success or an appropriate error code. It initializes the
 1716 * necessary ah_attr fields when the call is successful.
1717 */
1718static int ib_resolve_eth_dmac(struct ib_device *device,
1719			       struct rdma_ah_attr *ah_attr)
1720{
1721	int ret = 0;
1722
1723	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1724		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1725			__be32 addr = 0;
1726
1727			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1728			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1729		} else {
1730			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1731					(char *)ah_attr->roce.dmac);
1732		}
1733	} else {
1734		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1735	}
1736	return ret;
1737}
1738
1739static bool is_qp_type_connected(const struct ib_qp *qp)
1740{
1741	return (qp->qp_type == IB_QPT_UC ||
1742		qp->qp_type == IB_QPT_RC ||
1743		qp->qp_type == IB_QPT_XRC_INI ||
1744		qp->qp_type == IB_QPT_XRC_TGT);
1745}
1746
1747/*
1748 * IB core internal function to perform QP attributes modification.
1749 */
1750static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1751			 int attr_mask, struct ib_udata *udata)
1752{
1753	u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1754	const struct ib_gid_attr *old_sgid_attr_av;
1755	const struct ib_gid_attr *old_sgid_attr_alt_av;
1756	int ret;
1757
1758	attr->xmit_slave = NULL;
1759	if (attr_mask & IB_QP_AV) {
1760		ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1761					  &old_sgid_attr_av);
1762		if (ret)
1763			return ret;
1764
1765		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1766		    is_qp_type_connected(qp)) {
1767			struct net_device *slave;
1768
1769			/*
1770			 * If the user provided the qp_attr then we have to
 1771			 * resolve it. Kernel users have to provide already
1772			 * resolved rdma_ah_attr's.
1773			 */
1774			if (udata) {
1775				ret = ib_resolve_eth_dmac(qp->device,
1776							  &attr->ah_attr);
1777				if (ret)
1778					goto out_av;
1779			}
1780			slave = rdma_lag_get_ah_roce_slave(qp->device,
1781							   &attr->ah_attr,
1782							   GFP_KERNEL);
1783			if (IS_ERR(slave)) {
1784				ret = PTR_ERR(slave);
1785				goto out_av;
1786			}
1787			attr->xmit_slave = slave;
1788		}
1789	}
1790	if (attr_mask & IB_QP_ALT_PATH) {
1791		/*
1792		 * FIXME: This does not track the migration state, so if the
1793		 * user loads a new alternate path after the HW has migrated
1794		 * from primary->alternate we will keep the wrong
1795		 * references. This is OK for IB because the reference
1796		 * counting does not serve any functional purpose.
1797		 */
1798		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1799					  &old_sgid_attr_alt_av);
1800		if (ret)
1801			goto out_av;
1802
1803		/*
1804		 * Today the core code can only handle alternate paths and APM
 1805		 * for IB. Ban them in RoCE mode.
1806		 */
1807		if (!(rdma_protocol_ib(qp->device,
1808				       attr->alt_ah_attr.port_num) &&
1809		      rdma_protocol_ib(qp->device, port))) {
1810			ret = -EINVAL;
1811			goto out;
1812		}
1813	}
1814
1815	if (rdma_ib_or_roce(qp->device, port)) {
1816		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1817			dev_warn(&qp->device->dev,
1818				 "%s rq_psn overflow, masking to 24 bits\n",
1819				 __func__);
1820			attr->rq_psn &= 0xffffff;
1821		}
1822
1823		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1824			dev_warn(&qp->device->dev,
 1825				 "%s sq_psn overflow, masking to 24 bits\n",
1826				 __func__);
1827			attr->sq_psn &= 0xffffff;
1828		}
1829	}
1830
1831	/*
1832	 * Bind this qp to a counter automatically based on the rdma counter
 1833	 * rules. This is only done in RST2INIT when a port is specified.
1834	 */
1835	if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1836	    ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
1837		rdma_counter_bind_qp_auto(qp, attr->port_num);
1838
1839	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1840	if (ret)
1841		goto out;
1842
1843	if (attr_mask & IB_QP_PORT)
1844		qp->port = attr->port_num;
1845	if (attr_mask & IB_QP_AV)
1846		qp->av_sgid_attr =
1847			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1848	if (attr_mask & IB_QP_ALT_PATH)
1849		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1850			&attr->alt_ah_attr, qp->alt_path_sgid_attr);
1851
1852out:
1853	if (attr_mask & IB_QP_ALT_PATH)
1854		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
1855out_av:
1856	if (attr_mask & IB_QP_AV) {
1857		rdma_lag_put_ah_roce_slave(attr->xmit_slave);
1858		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1859	}
1860	return ret;
1861}
1862
1863/**
1864 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1865 * @ib_qp: The QP to modify.
1866 * @attr: On input, specifies the QP attributes to modify.  On output,
1867 *   the current values of selected QP attributes are returned.
1868 * @attr_mask: A bit-mask used to specify which attributes of the QP
1869 *   are being modified.
 1870 * @udata: pointer to user's input/output buffer information
 1871 *
1872 * It returns 0 on success and returns appropriate error code on error.
1873 */
1874int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1875			    int attr_mask, struct ib_udata *udata)
1876{
1877	return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1878}
1879EXPORT_SYMBOL(ib_modify_qp_with_udata);
1880
1881int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
1882{
1883	int rc;
1884	u32 netdev_speed;
1885	struct net_device *netdev;
1886	struct ethtool_link_ksettings lksettings;
1887
1888	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1889		return -EINVAL;
1890
1891	netdev = ib_device_get_netdev(dev, port_num);
1892	if (!netdev)
1893		return -ENODEV;
1894
1895	rtnl_lock();
1896	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1897	rtnl_unlock();
1898
1899	dev_put(netdev);
1900
1901	if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
1902		netdev_speed = lksettings.base.speed;
1903	} else {
1904		netdev_speed = SPEED_1000;
1905		pr_warn("%s speed is unknown, defaulting to %u\n", netdev->name,
1906			netdev_speed);
1907	}
1908
1909	if (netdev_speed <= SPEED_1000) {
1910		*width = IB_WIDTH_1X;
1911		*speed = IB_SPEED_SDR;
1912	} else if (netdev_speed <= SPEED_10000) {
1913		*width = IB_WIDTH_1X;
1914		*speed = IB_SPEED_FDR10;
1915	} else if (netdev_speed <= SPEED_20000) {
1916		*width = IB_WIDTH_4X;
1917		*speed = IB_SPEED_DDR;
1918	} else if (netdev_speed <= SPEED_25000) {
1919		*width = IB_WIDTH_1X;
1920		*speed = IB_SPEED_EDR;
1921	} else if (netdev_speed <= SPEED_40000) {
1922		*width = IB_WIDTH_4X;
1923		*speed = IB_SPEED_FDR10;
1924	} else {
1925		*width = IB_WIDTH_4X;
1926		*speed = IB_SPEED_EDR;
1927	}
1928
1929	return 0;
1930}
1931EXPORT_SYMBOL(ib_get_eth_speed);
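
     /*
      * A RoCE provider's query_port() implementation can fill in the active
      * speed and width from the underlying netdev like this (sketch; ibdev,
      * port_num and port_attr are the driver's own query_port() arguments):
      *
      *	ret = ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
      *			       &port_attr->active_width);
      *	if (ret)
      *		return ret;
      */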
1932
1933int ib_modify_qp(struct ib_qp *qp,
1934		 struct ib_qp_attr *qp_attr,
1935		 int qp_attr_mask)
1936{
1937	return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1938}
1939EXPORT_SYMBOL(ib_modify_qp);
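
     /*
      * Sketch of a kernel caller driving the first RC QP transition (values
      * are illustrative; the later INIT -> RTR -> RTS transitions also need
      * the path and remote QP information required by qp_state_table above):
      *
      *	struct ib_qp_attr attr = {
      *		.qp_state = IB_QPS_INIT,
      *		.pkey_index = 0,
      *		.port_num = 1,
      *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
      *				   IB_ACCESS_REMOTE_WRITE,
      *	};
      *	int ret;
      *
      *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
      *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
      *	if (ret)
      *		return ret;
      */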
1940
1941int ib_query_qp(struct ib_qp *qp,
1942		struct ib_qp_attr *qp_attr,
1943		int qp_attr_mask,
1944		struct ib_qp_init_attr *qp_init_attr)
1945{
1946	qp_attr->ah_attr.grh.sgid_attr = NULL;
1947	qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
1948
1949	return qp->device->ops.query_qp ?
1950		qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
1951					 qp_init_attr) : -EOPNOTSUPP;
1952}
1953EXPORT_SYMBOL(ib_query_qp);
1954
1955int ib_close_qp(struct ib_qp *qp)
1956{
1957	struct ib_qp *real_qp;
1958	unsigned long flags;
1959
1960	real_qp = qp->real_qp;
1961	if (real_qp == qp)
1962		return -EINVAL;
1963
1964	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1965	list_del(&qp->open_list);
1966	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1967
1968	atomic_dec(&real_qp->usecnt);
1969	if (qp->qp_sec)
1970		ib_close_shared_qp_security(qp->qp_sec);
1971	kfree(qp);
1972
1973	return 0;
1974}
1975EXPORT_SYMBOL(ib_close_qp);
1976
1977static int __ib_destroy_shared_qp(struct ib_qp *qp)
1978{
1979	struct ib_xrcd *xrcd;
1980	struct ib_qp *real_qp;
1981	int ret;
1982
1983	real_qp = qp->real_qp;
1984	xrcd = real_qp->xrcd;
1985	down_write(&xrcd->tgt_qps_rwsem);
1986	ib_close_qp(qp);
1987	if (atomic_read(&real_qp->usecnt) == 0)
1988		xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
1989	else
1990		real_qp = NULL;
1991	up_write(&xrcd->tgt_qps_rwsem);
1992
1993	if (real_qp) {
1994		ret = ib_destroy_qp(real_qp);
1995		if (!ret)
1996			atomic_dec(&xrcd->usecnt);
1997	}
1998
1999	return 0;
2000}
2001
2002int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
2003{
2004	const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
2005	const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
2006	struct ib_qp_security *sec;
2007	int ret;
2008
2009	WARN_ON_ONCE(qp->mrs_used > 0);
2010
2011	if (atomic_read(&qp->usecnt))
2012		return -EBUSY;
2013
2014	if (qp->real_qp != qp)
2015		return __ib_destroy_shared_qp(qp);
2016
2017	sec  = qp->qp_sec;
2018	if (sec)
2019		ib_destroy_qp_security_begin(sec);
2020
2021	if (!qp->uobject)
2022		rdma_rw_cleanup_mrs(qp);
2023
2024	rdma_counter_unbind_qp(qp, true);
2025	ret = qp->device->ops.destroy_qp(qp, udata);
2026	if (ret) {
2027		if (sec)
2028			ib_destroy_qp_security_abort(sec);
2029		return ret;
2030	}
2031
2032	if (alt_path_sgid_attr)
2033		rdma_put_gid_attr(alt_path_sgid_attr);
2034	if (av_sgid_attr)
2035		rdma_put_gid_attr(av_sgid_attr);
2036
2037	ib_qp_usecnt_dec(qp);
2038	if (sec)
2039		ib_destroy_qp_security_end(sec);
2040
2041	rdma_restrack_del(&qp->res);
2042	kfree(qp);
2043	return ret;
2044}
2045EXPORT_SYMBOL(ib_destroy_qp_user);
2046
2047/* Completion queues */
2048
2049struct ib_cq *__ib_create_cq(struct ib_device *device,
2050			     ib_comp_handler comp_handler,
2051			     void (*event_handler)(struct ib_event *, void *),
2052			     void *cq_context,
2053			     const struct ib_cq_init_attr *cq_attr,
2054			     const char *caller)
2055{
2056	struct ib_cq *cq;
2057	int ret;
2058
2059	cq = rdma_zalloc_drv_obj(device, ib_cq);
2060	if (!cq)
2061		return ERR_PTR(-ENOMEM);
2062
2063	cq->device = device;
2064	cq->uobject = NULL;
2065	cq->comp_handler = comp_handler;
2066	cq->event_handler = event_handler;
2067	cq->cq_context = cq_context;
2068	atomic_set(&cq->usecnt, 0);
2069
2070	rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
2071	rdma_restrack_set_name(&cq->res, caller);
2072
2073	ret = device->ops.create_cq(cq, cq_attr, NULL);
2074	if (ret) {
2075		rdma_restrack_put(&cq->res);
2076		kfree(cq);
2077		return ERR_PTR(ret);
2078	}
2079
2080	rdma_restrack_add(&cq->res);
2081	return cq;
2082}
2083EXPORT_SYMBOL(__ib_create_cq);
2084
2085int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2086{
2087	if (cq->shared)
2088		return -EOPNOTSUPP;
2089
2090	return cq->device->ops.modify_cq ?
2091		cq->device->ops.modify_cq(cq, cq_count,
2092					  cq_period) : -EOPNOTSUPP;
2093}
2094EXPORT_SYMBOL(rdma_set_cq_moderation);
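
     /*
      * Sketch: a consumer can trade completion latency for fewer interrupts
      * when the provider implements modify_cq (the count/period values are
      * illustrative; cq_period is in microseconds):
      *
      *	ret = rdma_set_cq_moderation(cq, 16, 64);
      *	if (ret && ret != -EOPNOTSUPP)
      *		return ret;
      */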
2095
2096int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
2097{
2098	int ret;
2099
2100	if (WARN_ON_ONCE(cq->shared))
2101		return -EOPNOTSUPP;
2102
2103	if (atomic_read(&cq->usecnt))
2104		return -EBUSY;
2105
2106	ret = cq->device->ops.destroy_cq(cq, udata);
2107	if (ret)
2108		return ret;
2109
2110	rdma_restrack_del(&cq->res);
2111	kfree(cq);
2112	return ret;
2113}
2114EXPORT_SYMBOL(ib_destroy_cq_user);
2115
2116int ib_resize_cq(struct ib_cq *cq, int cqe)
2117{
2118	if (cq->shared)
2119		return -EOPNOTSUPP;
2120
2121	return cq->device->ops.resize_cq ?
2122		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
2123}
2124EXPORT_SYMBOL(ib_resize_cq);
2125
2126/* Memory regions */
2127
2128struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2129			     u64 virt_addr, int access_flags)
2130{
2131	struct ib_mr *mr;
2132
2133	if (access_flags & IB_ACCESS_ON_DEMAND) {
2134		if (!(pd->device->attrs.kernel_cap_flags &
2135		      IBK_ON_DEMAND_PAGING)) {
2136			pr_debug("ODP support not available\n");
2137			return ERR_PTR(-EINVAL);
2138		}
2139	}
2140
2141	mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
2142					 access_flags, NULL);
2143
2144	if (IS_ERR(mr))
2145		return mr;
2146
2147	mr->device = pd->device;
2148	mr->type = IB_MR_TYPE_USER;
2149	mr->pd = pd;
2150	mr->dm = NULL;
2151	atomic_inc(&pd->usecnt);
 2152	mr->iova = virt_addr;
2153	mr->length = length;
2154
2155	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2156	rdma_restrack_parent_name(&mr->res, &pd->res);
2157	rdma_restrack_add(&mr->res);
2158
2159	return mr;
2160}
2161EXPORT_SYMBOL(ib_reg_user_mr);
2162
2163int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
2164		 u32 flags, struct ib_sge *sg_list, u32 num_sge)
2165{
2166	if (!pd->device->ops.advise_mr)
2167		return -EOPNOTSUPP;
2168
2169	if (!num_sge)
2170		return 0;
2171
2172	return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
2173					 NULL);
2174}
2175EXPORT_SYMBOL(ib_advise_mr);
2176
2177int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
2178{
2179	struct ib_pd *pd = mr->pd;
2180	struct ib_dm *dm = mr->dm;
2181	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
2182	int ret;
2183
2184	trace_mr_dereg(mr);
2185	rdma_restrack_del(&mr->res);
2186	ret = mr->device->ops.dereg_mr(mr, udata);
2187	if (!ret) {
2188		atomic_dec(&pd->usecnt);
2189		if (dm)
2190			atomic_dec(&dm->usecnt);
2191		kfree(sig_attrs);
2192	}
2193
2194	return ret;
2195}
2196EXPORT_SYMBOL(ib_dereg_mr_user);
2197
2198/**
2199 * ib_alloc_mr() - Allocates a memory region
2200 * @pd:            protection domain associated with the region
2201 * @mr_type:       memory region type
2202 * @max_num_sg:    maximum sg entries available for registration.
2203 *
2204 * Notes:
 2205 * Memory registration page/sg lists must not exceed max_num_sg.
2206 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
2207 * max_num_sg * used_page_size.
2208 *
2209 */
2210struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2211			  u32 max_num_sg)
2212{
2213	struct ib_mr *mr;
2214
2215	if (!pd->device->ops.alloc_mr) {
2216		mr = ERR_PTR(-EOPNOTSUPP);
2217		goto out;
2218	}
2219
2220	if (mr_type == IB_MR_TYPE_INTEGRITY) {
2221		WARN_ON_ONCE(1);
2222		mr = ERR_PTR(-EINVAL);
2223		goto out;
2224	}
2225
2226	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
2227	if (IS_ERR(mr))
2228		goto out;
2229
2230	mr->device = pd->device;
2231	mr->pd = pd;
2232	mr->dm = NULL;
2233	mr->uobject = NULL;
2234	atomic_inc(&pd->usecnt);
2235	mr->need_inval = false;
2236	mr->type = mr_type;
2237	mr->sig_attrs = NULL;
2238
2239	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2240	rdma_restrack_parent_name(&mr->res, &pd->res);
2241	rdma_restrack_add(&mr->res);
2242out:
2243	trace_mr_alloc(pd, mr_type, max_num_sg, mr);
2244	return mr;
2245}
2246EXPORT_SYMBOL(ib_alloc_mr);
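
     /*
      * Sketch of the usual fast-registration flow built on ib_alloc_mr()
      * (illustrative; pd, sgl and nents come from the caller, and the mapped
      * MR is later registered with an IB_WR_REG_MR work request):
      *
      *	struct ib_mr *mr;
      *	int n;
      *
      *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
      *	if (IS_ERR(mr))
      *		return PTR_ERR(mr);
      *
      *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
      *	if (n != nents) {
      *		ib_dereg_mr(mr);
      *		return n < 0 ? n : -EINVAL;
      *	}
      */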
2247
2248/**
2249 * ib_alloc_mr_integrity() - Allocates an integrity memory region
2250 * @pd:                      protection domain associated with the region
2251 * @max_num_data_sg:         maximum data sg entries available for registration
2252 * @max_num_meta_sg:         maximum metadata sg entries available for
2253 *                           registration
2254 *
2255 * Notes:
 2256 * Memory registration page/sg lists must not exceed max_num_data_sg,
2257 * also the integrity page/sg lists must not exceed max_num_meta_sg.
2258 *
2259 */
2260struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
2261				    u32 max_num_data_sg,
2262				    u32 max_num_meta_sg)
2263{
2264	struct ib_mr *mr;
2265	struct ib_sig_attrs *sig_attrs;
2266
2267	if (!pd->device->ops.alloc_mr_integrity ||
2268	    !pd->device->ops.map_mr_sg_pi) {
2269		mr = ERR_PTR(-EOPNOTSUPP);
2270		goto out;
2271	}
2272
2273	if (!max_num_meta_sg) {
2274		mr = ERR_PTR(-EINVAL);
2275		goto out;
2276	}
2277
2278	sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
2279	if (!sig_attrs) {
2280		mr = ERR_PTR(-ENOMEM);
2281		goto out;
2282	}
2283
2284	mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
2285						max_num_meta_sg);
2286	if (IS_ERR(mr)) {
2287		kfree(sig_attrs);
2288		goto out;
2289	}
2290
2291	mr->device = pd->device;
2292	mr->pd = pd;
2293	mr->dm = NULL;
2294	mr->uobject = NULL;
2295	atomic_inc(&pd->usecnt);
2296	mr->need_inval = false;
2297	mr->type = IB_MR_TYPE_INTEGRITY;
2298	mr->sig_attrs = sig_attrs;
2299
2300	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2301	rdma_restrack_parent_name(&mr->res, &pd->res);
2302	rdma_restrack_add(&mr->res);
2303out:
2304	trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
2305	return mr;
2306}
2307EXPORT_SYMBOL(ib_alloc_mr_integrity);
2308
2309/* Multicast groups */
2310
2311static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2312{
2313	struct ib_qp_init_attr init_attr = {};
2314	struct ib_qp_attr attr = {};
2315	int num_eth_ports = 0;
2316	unsigned int port;
2317
2318	/* If QP state >= init, it is assigned to a port and we can check this
2319	 * port only.
2320	 */
2321	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2322		if (attr.qp_state >= IB_QPS_INIT) {
2323			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2324			    IB_LINK_LAYER_INFINIBAND)
2325				return true;
2326			goto lid_check;
2327		}
2328	}
2329
2330	/* Can't get a quick answer, iterate over all ports */
2331	rdma_for_each_port(qp->device, port)
2332		if (rdma_port_get_link_layer(qp->device, port) !=
2333		    IB_LINK_LAYER_INFINIBAND)
2334			num_eth_ports++;
2335
 2336	/* If we have at least one Ethernet port, the RoCE annex declares that
2337	 * multicast LID should be ignored. We can't tell at this step if the
2338	 * QP belongs to an IB or Ethernet port.
2339	 */
2340	if (num_eth_ports)
2341		return true;
2342
2343	/* If all the ports are IB, we can check according to IB spec. */
2344lid_check:
2345	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
2346		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
2347}
2348
2349int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2350{
2351	int ret;
2352
2353	if (!qp->device->ops.attach_mcast)
2354		return -EOPNOTSUPP;
2355
2356	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2357	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2358		return -EINVAL;
2359
2360	ret = qp->device->ops.attach_mcast(qp, gid, lid);
2361	if (!ret)
2362		atomic_inc(&qp->usecnt);
2363	return ret;
2364}
2365EXPORT_SYMBOL(ib_attach_mcast);
2366
2367int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2368{
2369	int ret;
2370
2371	if (!qp->device->ops.detach_mcast)
2372		return -EOPNOTSUPP;
2373
2374	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2375	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2376		return -EINVAL;
2377
2378	ret = qp->device->ops.detach_mcast(qp, gid, lid);
2379	if (!ret)
2380		atomic_dec(&qp->usecnt);
2381	return ret;
2382}
2383EXPORT_SYMBOL(ib_detach_mcast);
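
     /*
      * Sketch of UD multicast membership (mgid and mlid would normally come
      * from a subnet administrator join, e.g. via ib_sa_join_multicast() or
      * the rdma_cm):
      *
      *	ret = ib_attach_mcast(qp, &mgid, mlid);
      *	if (ret)
      *		return ret;
      *	...
      *	ib_detach_mcast(qp, &mgid, mlid);
      */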
2384
2385/**
2386 * ib_alloc_xrcd_user - Allocates an XRC domain.
2387 * @device: The device on which to allocate the XRC domain.
2388 * @inode: inode to connect XRCD
2389 * @udata: Valid user data or NULL for kernel object
2390 */
2391struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
2392				   struct inode *inode, struct ib_udata *udata)
2393{
2394	struct ib_xrcd *xrcd;
2395	int ret;
2396
2397	if (!device->ops.alloc_xrcd)
2398		return ERR_PTR(-EOPNOTSUPP);
2399
2400	xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
2401	if (!xrcd)
2402		return ERR_PTR(-ENOMEM);
2403
2404	xrcd->device = device;
2405	xrcd->inode = inode;
2406	atomic_set(&xrcd->usecnt, 0);
2407	init_rwsem(&xrcd->tgt_qps_rwsem);
2408	xa_init(&xrcd->tgt_qps);
2409
2410	ret = device->ops.alloc_xrcd(xrcd, udata);
2411	if (ret)
2412		goto err;
2413	return xrcd;
2414err:
2415	kfree(xrcd);
2416	return ERR_PTR(ret);
2417}
2418EXPORT_SYMBOL(ib_alloc_xrcd_user);
2419
2420/**
2421 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
2422 * @xrcd: The XRC domain to deallocate.
2423 * @udata: Valid user data or NULL for kernel object
2424 */
2425int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
2426{
2427	int ret;
2428
2429	if (atomic_read(&xrcd->usecnt))
2430		return -EBUSY;
2431
2432	WARN_ON(!xa_empty(&xrcd->tgt_qps));
2433	ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
2434	if (ret)
2435		return ret;
2436	kfree(xrcd);
2437	return ret;
2438}
2439EXPORT_SYMBOL(ib_dealloc_xrcd_user);
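/*
 * Illustrative sketch, not part of this file: kernel-side allocation and
 * release of an XRC domain.  A NULL inode and NULL udata denote a kernel
 * owned object, as described above; the helper name is hypothetical.
 */
#if 0
static int example_xrcd_lifetime(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	xrcd = ib_alloc_xrcd_user(device, NULL, NULL);
	if (IS_ERR(xrcd))
		return PTR_ERR(xrcd);

	/* ... create XRC INI/TGT QPs that reference xrcd ... */

	return ib_dealloc_xrcd_user(xrcd, NULL);
}
#endif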
2440
2441/**
2442 * ib_create_wq - Creates a WQ associated with the specified protection
2443 * domain.
2444 * @pd: The protection domain associated with the WQ.
2445 * @wq_attr: A list of initial attributes required to create the
2446 * WQ. If WQ creation succeeds, then the attributes are updated to
2447 * the actual capabilities of the created WQ.
2448 *
2449 * wq_attr->max_wr and wq_attr->max_sge determine
2450	 * the requested size of the WQ, and are set to the actual values allocated
2451 * on return.
2452 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
2453 * at least as large as the requested values.
2454 */
2455struct ib_wq *ib_create_wq(struct ib_pd *pd,
2456			   struct ib_wq_init_attr *wq_attr)
2457{
2458	struct ib_wq *wq;
2459
2460	if (!pd->device->ops.create_wq)
2461		return ERR_PTR(-EOPNOTSUPP);
2462
2463	wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2464	if (!IS_ERR(wq)) {
2465		wq->event_handler = wq_attr->event_handler;
2466		wq->wq_context = wq_attr->wq_context;
2467		wq->wq_type = wq_attr->wq_type;
2468		wq->cq = wq_attr->cq;
2469		wq->device = pd->device;
2470		wq->pd = pd;
2471		wq->uobject = NULL;
2472		atomic_inc(&pd->usecnt);
2473		atomic_inc(&wq_attr->cq->usecnt);
2474		atomic_set(&wq->usecnt, 0);
2475	}
2476	return wq;
2477}
2478EXPORT_SYMBOL(ib_create_wq);
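/*
 * Illustrative sketch, not part of this file: creating a receive work queue
 * and reading back the capabilities actually granted.  The CQ, depth and SGE
 * count are placeholder values; the helper name is hypothetical.
 */
#if 0
static struct ib_wq *example_create_rq(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr wq_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 128,	/* requested WQ depth */
		.max_sge = 2,	/* requested SGEs per WR */
		.cq	 = cq,
	};
	struct ib_wq *wq;

	wq = ib_create_wq(pd, &wq_attr);
	if (IS_ERR(wq))
		return wq;

	/* On success, max_wr and max_sge now hold the allocated values. */
	pr_debug("WQ depth %u, sge %u\n", wq_attr.max_wr, wq_attr.max_sge);
	return wq;
}
#endif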
2479
2480/**
2481 * ib_destroy_wq_user - Destroys the specified user WQ.
2482 * @wq: The WQ to destroy.
2483 * @udata: Valid user data
2484 */
2485int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
2486{
2487	struct ib_cq *cq = wq->cq;
2488	struct ib_pd *pd = wq->pd;
2489	int ret;
2490
2491	if (atomic_read(&wq->usecnt))
2492		return -EBUSY;
2493
2494	ret = wq->device->ops.destroy_wq(wq, udata);
2495	if (ret)
2496		return ret;
2497
2498	atomic_dec(&pd->usecnt);
2499	atomic_dec(&cq->usecnt);
2500	return ret;
2501}
2502EXPORT_SYMBOL(ib_destroy_wq_user);
2503
2504int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2505		       struct ib_mr_status *mr_status)
2506{
2507	if (!mr->device->ops.check_mr_status)
2508		return -EOPNOTSUPP;
2509
2510	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
2511}
2512EXPORT_SYMBOL(ib_check_mr_status);
2513
2514int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
2515			 int state)
2516{
2517	if (!device->ops.set_vf_link_state)
2518		return -EOPNOTSUPP;
2519
2520	return device->ops.set_vf_link_state(device, vf, port, state);
2521}
2522EXPORT_SYMBOL(ib_set_vf_link_state);
2523
2524int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
2525		     struct ifla_vf_info *info)
2526{
2527	if (!device->ops.get_vf_config)
2528		return -EOPNOTSUPP;
2529
2530	return device->ops.get_vf_config(device, vf, port, info);
2531}
2532EXPORT_SYMBOL(ib_get_vf_config);
2533
2534int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
2535		    struct ifla_vf_stats *stats)
2536{
2537	if (!device->ops.get_vf_stats)
2538		return -EOPNOTSUPP;
2539
2540	return device->ops.get_vf_stats(device, vf, port, stats);
2541}
2542EXPORT_SYMBOL(ib_get_vf_stats);
2543
2544int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
2545		   int type)
2546{
2547	if (!device->ops.set_vf_guid)
2548		return -EOPNOTSUPP;
2549
2550	return device->ops.set_vf_guid(device, vf, port, guid, type);
2551}
2552EXPORT_SYMBOL(ib_set_vf_guid);
2553
2554int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
2555		   struct ifla_vf_guid *node_guid,
2556		   struct ifla_vf_guid *port_guid)
2557{
2558	if (!device->ops.get_vf_guid)
2559		return -EOPNOTSUPP;
2560
2561	return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
2562}
2563EXPORT_SYMBOL(ib_get_vf_guid);
2564/**
2565 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
2566 *     information) and set an appropriate memory region for registration.
2567 * @mr:             memory region
2568 * @data_sg:        dma mapped scatterlist for data
2569 * @data_sg_nents:  number of entries in data_sg
2570 * @data_sg_offset: offset in bytes into data_sg
2571 * @meta_sg:        dma mapped scatterlist for metadata
2572 * @meta_sg_nents:  number of entries in meta_sg
2573 * @meta_sg_offset: offset in bytes into meta_sg
2574 * @page_size:      page vector desired page size
2575 *
2576 * Constraints:
2577 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
2578 *
2579 * Return: 0 on success.
2580 *
2581	 * After this completes successfully, the memory region
2582 * is ready for registration.
2583 */
2584int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
2585		    int data_sg_nents, unsigned int *data_sg_offset,
2586		    struct scatterlist *meta_sg, int meta_sg_nents,
2587		    unsigned int *meta_sg_offset, unsigned int page_size)
2588{
2589	if (unlikely(!mr->device->ops.map_mr_sg_pi ||
2590		     WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
2591		return -EOPNOTSUPP;
2592
2593	mr->page_size = page_size;
2594
2595	return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
2596					    data_sg_offset, meta_sg,
2597					    meta_sg_nents, meta_sg_offset);
2598}
2599EXPORT_SYMBOL(ib_map_mr_sg_pi);
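/*
 * Illustrative sketch, not part of this file: mapping separate data and
 * metadata scatterlists onto an MR allocated with IB_MR_TYPE_INTEGRITY.
 * Both lists are assumed to be DMA mapped already; the helper name is
 * hypothetical.
 */
#if 0
static int example_map_pi_mr(struct ib_mr *pi_mr,
			     struct scatterlist *data_sg, int data_nents,
			     struct scatterlist *meta_sg, int meta_nents)
{
	int ret;

	ret = ib_map_mr_sg_pi(pi_mr, data_sg, data_nents, NULL,
			      meta_sg, meta_nents, NULL, PAGE_SIZE);
	if (ret < 0)
		return ret;

	/* pi_mr is now ready to be registered via IB_WR_REG_MR_INTEGRITY. */
	return 0;
}
#endif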
2600
2601/**
2602 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2603	 *     and set the mapping in the memory region.
2604 * @mr:            memory region
2605 * @sg:            dma mapped scatterlist
2606 * @sg_nents:      number of entries in sg
2607 * @sg_offset:     offset in bytes into sg
2608 * @page_size:     page vector desired page size
2609 *
2610 * Constraints:
2611 *
2612 * - The first sg element is allowed to have an offset.
2613 * - Each sg element must either be aligned to page_size or virtually
2614 *   contiguous to the previous element. In case an sg element has a
2615 *   non-contiguous offset, the mapping prefix will not include it.
2616 * - The last sg element is allowed to have length less than page_size.
2617	 * - If the total byte length of sg_nents exceeds the MR's max_num_sg * page_size,
2618	 *   then only max_num_sg entries will be mapped.
2619 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2620 *   constraints holds and the page_size argument is ignored.
2621 *
2622 * Returns the number of sg elements that were mapped to the memory region.
2623 *
2624	 * After this completes successfully, the memory region
2625 * is ready for registration.
2626 */
2627int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2628		 unsigned int *sg_offset, unsigned int page_size)
2629{
2630	if (unlikely(!mr->device->ops.map_mr_sg))
2631		return -EOPNOTSUPP;
2632
2633	mr->page_size = page_size;
2634
2635	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
2636}
2637EXPORT_SYMBOL(ib_map_mr_sg);
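/*
 * Illustrative sketch, not part of this file: the usual fast-registration
 * flow - map a DMA-mapped scatterlist onto an MR and post an IB_WR_REG_MR
 * work request.  Key handling and completion handling are trimmed; the
 * helper name is hypothetical.
 */
#if 0
static int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
			    struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;	/* mapping fell short */

	/* Bump the key so stale remote references cannot hit the new mapping. */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr	 = mr;
	reg_wr.key	 = mr->rkey;
	reg_wr.access	 = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}
#endif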
2638
2639/**
2640 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2641 *     to a page vector
2642 * @mr:            memory region
2643 * @sgl:           dma mapped scatterlist
2644 * @sg_nents:      number of entries in sg
2645 * @sg_offset_p:   ==== =======================================================
2646 *                 IN   start offset in bytes into sg
2647 *                 OUT  offset in bytes for element n of the sg of the first
2648 *                      byte that has not been processed where n is the return
2649 *                      value of this function.
2650 *                 ==== =======================================================
2651 * @set_page:      driver page assignment function pointer
2652 *
2653 * Core service helper for drivers to convert the largest
2654	 * prefix of the given sg list to a page vector. The sg list
2655	 * prefix converted is the prefix that meets the requirements
2656 * of ib_map_mr_sg.
2657 *
2658 * Returns the number of sg elements that were assigned to
2659 * a page vector.
2660 */
2661int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
2662		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2663{
2664	struct scatterlist *sg;
2665	u64 last_end_dma_addr = 0;
2666	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2667	unsigned int last_page_off = 0;
2668	u64 page_mask = ~((u64)mr->page_size - 1);
2669	int i, ret;
2670
2671	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2672		return -EINVAL;
2673
2674	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2675	mr->length = 0;
2676
2677	for_each_sg(sgl, sg, sg_nents, i) {
2678		u64 dma_addr = sg_dma_address(sg) + sg_offset;
2679		u64 prev_addr = dma_addr;
2680		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2681		u64 end_dma_addr = dma_addr + dma_len;
2682		u64 page_addr = dma_addr & page_mask;
2683
2684		/*
2685		 * For the second and later elements, check whether either the
2686		 * end of element i-1 or the start of element i is not aligned
2687		 * on a page boundary.
2688		 */
2689		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2690			/* Stop mapping if there is a gap. */
2691			if (last_end_dma_addr != dma_addr)
2692				break;
2693
2694			/*
2695			 * Coalesce this element with the last. If it is small
2696			 * enough just update mr->length. Otherwise start
2697			 * mapping from the next page.
2698			 */
2699			goto next_page;
2700		}
2701
2702		do {
2703			ret = set_page(mr, page_addr);
2704			if (unlikely(ret < 0)) {
2705				sg_offset = prev_addr - sg_dma_address(sg);
2706				mr->length += prev_addr - dma_addr;
2707				if (sg_offset_p)
2708					*sg_offset_p = sg_offset;
2709				return i || sg_offset ? i : ret;
2710			}
2711			prev_addr = page_addr;
2712next_page:
2713			page_addr += mr->page_size;
2714		} while (page_addr < end_dma_addr);
2715
2716		mr->length += dma_len;
2717		last_end_dma_addr = end_dma_addr;
2718		last_page_off = end_dma_addr & ~page_mask;
2719
2720		sg_offset = 0;
2721	}
2722
2723	if (sg_offset_p)
2724		*sg_offset_p = 0;
2725	return i;
2726}
2727EXPORT_SYMBOL(ib_sg_to_pages);
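/*
 * Illustrative sketch, not part of this file: how a driver typically builds
 * its map_mr_sg hook on top of ib_sg_to_pages().  The "example_mr" structure,
 * its fields and both function names are hypothetical.
 */
#if 0
struct example_mr {
	struct ib_mr	ibmr;
	__le64		*pages;		/* page list handed to HW */
	int		npages;
	int		max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	if (emr->npages == emr->max_pages)
		return -ENOMEM;

	emr->pages[emr->npages++] = cpu_to_le64(addr);
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	emr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}
#endif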
2728
2729struct ib_drain_cqe {
2730	struct ib_cqe cqe;
2731	struct completion done;
2732};
2733
2734static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2735{
2736	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2737						cqe);
2738
2739	complete(&cqe->done);
2740}
2741
2742/*
2743 * Post a WR and block until its completion is reaped for the SQ.
2744 */
2745static void __ib_drain_sq(struct ib_qp *qp)
2746{
2747	struct ib_cq *cq = qp->send_cq;
2748	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2749	struct ib_drain_cqe sdrain;
2750	struct ib_rdma_wr swr = {
2751		.wr = {
2752			.next = NULL,
2753			{ .wr_cqe	= &sdrain.cqe, },
2754			.opcode	= IB_WR_RDMA_WRITE,
2755		},
2756	};
2757	int ret;
2758
2759	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2760	if (ret) {
2761		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2762		return;
2763	}
2764
2765	sdrain.cqe.done = ib_drain_qp_done;
2766	init_completion(&sdrain.done);
2767
2768	ret = ib_post_send(qp, &swr.wr, NULL);
2769	if (ret) {
2770		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2771		return;
2772	}
2773
2774	if (cq->poll_ctx == IB_POLL_DIRECT)
2775		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2776			ib_process_cq_direct(cq, -1);
2777	else
2778		wait_for_completion(&sdrain.done);
2779}
2780
2781/*
2782 * Post a WR and block until its completion is reaped for the RQ.
2783 */
2784static void __ib_drain_rq(struct ib_qp *qp)
2785{
2786	struct ib_cq *cq = qp->recv_cq;
2787	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2788	struct ib_drain_cqe rdrain;
2789	struct ib_recv_wr rwr = {};
2790	int ret;
2791
2792	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2793	if (ret) {
2794		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2795		return;
2796	}
2797
2798	rwr.wr_cqe = &rdrain.cqe;
2799	rdrain.cqe.done = ib_drain_qp_done;
2800	init_completion(&rdrain.done);
2801
2802	ret = ib_post_recv(qp, &rwr, NULL);
2803	if (ret) {
2804		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2805		return;
2806	}
2807
2808	if (cq->poll_ctx == IB_POLL_DIRECT)
2809		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2810			ib_process_cq_direct(cq, -1);
2811	else
2812		wait_for_completion(&rdrain.done);
2813}
2814
2815/**
2816 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2817 *		   application.
2818 * @qp:            queue pair to drain
2819 *
2820 * If the device has a provider-specific drain function, then
2821 * call that.  Otherwise call the generic drain function
2822 * __ib_drain_sq().
2823 *
2824 * The caller must:
2825 *
2826 * ensure there is room in the CQ and SQ for the drain work request and
2827 * completion.
2828 *
2829 * allocate the CQ using ib_alloc_cq().
2830 *
2831 * ensure that there are no other contexts that are posting WRs concurrently.
2832 * Otherwise the drain is not guaranteed.
2833 */
2834void ib_drain_sq(struct ib_qp *qp)
2835{
2836	if (qp->device->ops.drain_sq)
2837		qp->device->ops.drain_sq(qp);
2838	else
2839		__ib_drain_sq(qp);
2840	trace_cq_drain_complete(qp->send_cq);
2841}
2842EXPORT_SYMBOL(ib_drain_sq);
2843
2844/**
2845 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2846 *		   application.
2847 * @qp:            queue pair to drain
2848 *
2849 * If the device has a provider-specific drain function, then
2850 * call that.  Otherwise call the generic drain function
2851 * __ib_drain_rq().
2852 *
2853 * The caller must:
2854 *
2855 * ensure there is room in the CQ and RQ for the drain work request and
2856 * completion.
2857 *
2858 * allocate the CQ using ib_alloc_cq().
2859 *
2860 * ensure that there are no other contexts that are posting WRs concurrently.
2861 * Otherwise the drain is not guaranteed.
2862 */
2863void ib_drain_rq(struct ib_qp *qp)
2864{
2865	if (qp->device->ops.drain_rq)
2866		qp->device->ops.drain_rq(qp);
2867	else
2868		__ib_drain_rq(qp);
2869	trace_cq_drain_complete(qp->recv_cq);
2870}
2871EXPORT_SYMBOL(ib_drain_rq);
2872
2873/**
2874 * ib_drain_qp() - Block until all CQEs have been consumed by the
2875 *		   application on both the RQ and SQ.
2876 * @qp:            queue pair to drain
2877 *
2878 * The caller must:
2879 *
2880 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2881 * and completions.
2882 *
2883 * allocate the CQs using ib_alloc_cq().
2884 *
2885 * ensure that there are no other contexts that are posting WRs concurrently.
2886 * Otherwise the drain is not guaranteed.
2887 */
2888void ib_drain_qp(struct ib_qp *qp)
2889{
2890	ib_drain_sq(qp);
2891	if (!qp->srq)
2892		ib_drain_rq(qp);
2893}
2894EXPORT_SYMBOL(ib_drain_qp);
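/*
 * Illustrative sketch, not part of this file: typical ULP teardown order.
 * The QP is drained before it is destroyed so that no completion handler
 * runs against freed state.  The CQs are assumed to have been allocated with
 * ib_alloc_cq(), as required above; the helper name is hypothetical.
 */
#if 0
static void example_teardown(struct ib_qp *qp)
{
	/* The caller must have stopped posting new WRs at this point. */
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}
#endif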
2895
2896struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
2897				     enum rdma_netdev_t type, const char *name,
2898				     unsigned char name_assign_type,
2899				     void (*setup)(struct net_device *))
2900{
2901	struct rdma_netdev_alloc_params params;
2902	struct net_device *netdev;
2903	int rc;
2904
2905	if (!device->ops.rdma_netdev_get_params)
2906		return ERR_PTR(-EOPNOTSUPP);
2907
2908	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2909						&params);
2910	if (rc)
2911		return ERR_PTR(rc);
2912
2913	netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
2914				  setup, params.txqs, params.rxqs);
2915	if (!netdev)
2916		return ERR_PTR(-ENOMEM);
2917
2918	return netdev;
2919}
2920EXPORT_SYMBOL(rdma_alloc_netdev);
2921
2922int rdma_init_netdev(struct ib_device *device, u32 port_num,
2923		     enum rdma_netdev_t type, const char *name,
2924		     unsigned char name_assign_type,
2925		     void (*setup)(struct net_device *),
2926		     struct net_device *netdev)
2927{
2928	struct rdma_netdev_alloc_params params;
2929	int rc;
2930
2931	if (!device->ops.rdma_netdev_get_params)
2932		return -EOPNOTSUPP;
2933
2934	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2935						&params);
2936	if (rc)
2937		return rc;
2938
2939	return params.initialize_rdma_netdev(device, port_num,
2940					     netdev, params.param);
2941}
2942EXPORT_SYMBOL(rdma_init_netdev);
2943
2944void __rdma_block_iter_start(struct ib_block_iter *biter,
2945			     struct scatterlist *sglist, unsigned int nents,
2946			     unsigned long pgsz)
2947{
2948	memset(biter, 0, sizeof(struct ib_block_iter));
2949	biter->__sg = sglist;
2950	biter->__sg_nents = nents;
2951
2952	/* Driver provides best block size to use */
2953	biter->__pg_bit = __fls(pgsz);
2954}
2955EXPORT_SYMBOL(__rdma_block_iter_start);
2956
2957bool __rdma_block_iter_next(struct ib_block_iter *biter)
2958{
2959	unsigned int block_offset;
2960	unsigned int sg_delta;
2961
2962	if (!biter->__sg_nents || !biter->__sg)
2963		return false;
2964
2965	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
2966	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
2967	sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
2968
2969	if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
2970		biter->__sg_advance += sg_delta;
2971	} else {
2972		biter->__sg_advance = 0;
2973		biter->__sg = sg_next(biter->__sg);
2974		biter->__sg_nents--;
2975	}
2976
2977	return true;
2978}
2979EXPORT_SYMBOL(__rdma_block_iter_next);
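/*
 * Illustrative sketch, not part of this file: these two helpers back the
 * rdma_for_each_block() iterator, which drivers use to walk a DMA-mapped
 * scatterlist in aligned, fixed-size blocks while building HW page lists.
 * The function name and block size are arbitrary for the example.
 */
#if 0
static void example_walk_blocks(struct scatterlist *sgl, unsigned int nents)
{
	struct ib_block_iter biter;

	rdma_for_each_block(sgl, &biter, nents, PAGE_SIZE) {
		dma_addr_t dma = rdma_block_iter_dma_address(&biter);

		pr_debug("block at %pad\n", &dma);
	}
}
#endif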
2980
2981/**
2982	 * rdma_alloc_hw_stats_struct - Helper function to allocate a dynamic
2983	 *   rdma_hw_stats structure for drivers.
2984 * @descs: array of static descriptors
2985 * @num_counters: number of elements in array
2986 * @lifespan: milliseconds between updates
2987 */
2988struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
2989	const struct rdma_stat_desc *descs, int num_counters,
2990	unsigned long lifespan)
2991{
2992	struct rdma_hw_stats *stats;
2993
2994	stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL);
2995	if (!stats)
2996		return NULL;
2997
2998	stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters),
2999				     sizeof(*stats->is_disabled), GFP_KERNEL);
3000	if (!stats->is_disabled)
3001		goto err;
3002
3003	stats->descs = descs;
3004	stats->num_counters = num_counters;
3005	stats->lifespan = msecs_to_jiffies(lifespan);
3006	mutex_init(&stats->lock);
3007
3008	return stats;
3009
3010err:
3011	kfree(stats);
3012	return NULL;
3013}
3014EXPORT_SYMBOL(rdma_alloc_hw_stats_struct);
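/*
 * Illustrative sketch, not part of this file: a driver building its per-port
 * hw_stats structure from a static descriptor table in an alloc_hw_port_stats
 * hook.  The counter names, table and function names are hypothetical.
 */
#if 0
static const struct rdma_stat_desc example_descs[] = {
	{ .name = "rx_packets" },
	{ .name = "tx_packets" },
};

static struct rdma_hw_stats *
example_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
{
	/* Refresh counters from HW at most once every 1000 ms. */
	return rdma_alloc_hw_stats_struct(example_descs,
					  ARRAY_SIZE(example_descs), 1000);
}
#endif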
3015
3016/**
3017 * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
3018 * @stats: statistics to release
3019 */
3020void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats)
3021{
3022	if (!stats)
3023		return;
3024
3025	kfree(stats->is_disabled);
3026	kfree(stats);
3027}
3028EXPORT_SYMBOL(rdma_free_hw_stats_struct);