   1/*
   2 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
   3 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
   5 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   6 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
   7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   8 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   9 *
  10 * This software is available to you under a choice of one of two
  11 * licenses.  You may choose to be licensed under the terms of the GNU
  12 * General Public License (GPL) Version 2, available from the file
  13 * COPYING in the main directory of this source tree, or the
  14 * OpenIB.org BSD license below:
  15 *
  16 *     Redistribution and use in source and binary forms, with or
  17 *     without modification, are permitted provided that the following
  18 *     conditions are met:
  19 *
  20 *      - Redistributions of source code must retain the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer.
  23 *
  24 *      - Redistributions in binary form must reproduce the above
  25 *        copyright notice, this list of conditions and the following
  26 *        disclaimer in the documentation and/or other materials
  27 *        provided with the distribution.
  28 *
  29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36 * SOFTWARE.
  37 */
  38
  39#if !defined(IB_VERBS_H)
  40#define IB_VERBS_H
  41
  42#include <linux/types.h>
  43#include <linux/device.h>
  44#include <linux/mm.h>
  45#include <linux/dma-mapping.h>
  46#include <linux/kref.h>
  47#include <linux/list.h>
  48#include <linux/rwsem.h>
  49#include <linux/scatterlist.h>
  50#include <linux/workqueue.h>
  51
  52#include <linux/atomic.h>
  53#include <asm/uaccess.h>
  54
  55extern struct workqueue_struct *ib_wq;
  56
  57union ib_gid {
  58	u8	raw[16];
  59	struct {
  60		__be64	subnet_prefix;
  61		__be64	interface_id;
  62	} global;
  63};
  64
  65enum rdma_node_type {
  66	/* IB values map to NodeInfo:NodeType. */
  67	RDMA_NODE_IB_CA 	= 1,
  68	RDMA_NODE_IB_SWITCH,
  69	RDMA_NODE_IB_ROUTER,
  70	RDMA_NODE_RNIC
  71};
  72
  73enum rdma_transport_type {
  74	RDMA_TRANSPORT_IB,
  75	RDMA_TRANSPORT_IWARP
  76};
  77
  78enum rdma_transport_type
  79rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
  80
  81enum rdma_link_layer {
  82	IB_LINK_LAYER_UNSPECIFIED,
  83	IB_LINK_LAYER_INFINIBAND,
  84	IB_LINK_LAYER_ETHERNET,
  85};
  86
  87enum ib_device_cap_flags {
  88	IB_DEVICE_RESIZE_MAX_WR		= 1,
  89	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
  90	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
  91	IB_DEVICE_RAW_MULTI		= (1<<3),
  92	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
  93	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
  94	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
  95	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
  96	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
  97	IB_DEVICE_INIT_TYPE		= (1<<9),
  98	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
  99	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
 100	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
 101	IB_DEVICE_SRQ_RESIZE		= (1<<13),
 102	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
 103	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
 104	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
 105	IB_DEVICE_MEM_WINDOW		= (1<<17),
 106	/*
  107	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
 108	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
 109	 * messages and can verify the validity of checksum for
 110	 * incoming messages.  Setting this flag implies that the
 111	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
 112	 */
 113	IB_DEVICE_UD_IP_CSUM		= (1<<18),
 114	IB_DEVICE_UD_TSO		= (1<<19),
 115	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
 116	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
 117};
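/*
 * Illustrative sketch (editorial addition, not part of ib_verbs.h): per the
 * IB_DEVICE_UD_IP_CSUM comment above, a ULP would probe these capability bits
 * with ib_query_device(), declared further down in this file.  "device" is
 * assumed to be a valid struct ib_device *, e.g. from an ib_client add()
 * callback:
 *
 *	struct ib_device_attr attr;
 *
 *	if (!ib_query_device(device, &attr) &&
 *	    (attr.device_cap_flags & IB_DEVICE_UD_IP_CSUM))
 *		;	/* HCA can offload UD send/receive checksums */
 */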
 118
 119enum ib_atomic_cap {
 120	IB_ATOMIC_NONE,
 121	IB_ATOMIC_HCA,
 122	IB_ATOMIC_GLOB
 123};
 124
 125struct ib_device_attr {
 126	u64			fw_ver;
 127	__be64			sys_image_guid;
 128	u64			max_mr_size;
 129	u64			page_size_cap;
 130	u32			vendor_id;
 131	u32			vendor_part_id;
 132	u32			hw_ver;
 133	int			max_qp;
 134	int			max_qp_wr;
 135	int			device_cap_flags;
 136	int			max_sge;
 137	int			max_sge_rd;
 138	int			max_cq;
 139	int			max_cqe;
 140	int			max_mr;
 141	int			max_pd;
 142	int			max_qp_rd_atom;
 143	int			max_ee_rd_atom;
 144	int			max_res_rd_atom;
 145	int			max_qp_init_rd_atom;
 146	int			max_ee_init_rd_atom;
 147	enum ib_atomic_cap	atomic_cap;
 148	enum ib_atomic_cap	masked_atomic_cap;
 149	int			max_ee;
 150	int			max_rdd;
 151	int			max_mw;
 152	int			max_raw_ipv6_qp;
 153	int			max_raw_ethy_qp;
 154	int			max_mcast_grp;
 155	int			max_mcast_qp_attach;
 156	int			max_total_mcast_qp_attach;
 157	int			max_ah;
 158	int			max_fmr;
 159	int			max_map_per_fmr;
 160	int			max_srq;
 161	int			max_srq_wr;
 162	int			max_srq_sge;
 163	unsigned int		max_fast_reg_page_list_len;
 164	u16			max_pkeys;
 165	u8			local_ca_ack_delay;
 166};
 167
 168enum ib_mtu {
 169	IB_MTU_256  = 1,
 170	IB_MTU_512  = 2,
 171	IB_MTU_1024 = 3,
 172	IB_MTU_2048 = 4,
 173	IB_MTU_4096 = 5
 174};
 175
 176static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
 177{
 178	switch (mtu) {
 179	case IB_MTU_256:  return  256;
 180	case IB_MTU_512:  return  512;
 181	case IB_MTU_1024: return 1024;
 182	case IB_MTU_2048: return 2048;
 183	case IB_MTU_4096: return 4096;
 184	default: 	  return -1;
 185	}
 186}
 187
 188enum ib_port_state {
 189	IB_PORT_NOP		= 0,
 190	IB_PORT_DOWN		= 1,
 191	IB_PORT_INIT		= 2,
 192	IB_PORT_ARMED		= 3,
 193	IB_PORT_ACTIVE		= 4,
 194	IB_PORT_ACTIVE_DEFER	= 5
 195};
 196
 197enum ib_port_cap_flags {
 198	IB_PORT_SM				= 1 <<  1,
 199	IB_PORT_NOTICE_SUP			= 1 <<  2,
 200	IB_PORT_TRAP_SUP			= 1 <<  3,
 201	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
 202	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
 203	IB_PORT_SL_MAP_SUP			= 1 <<  6,
 204	IB_PORT_MKEY_NVRAM			= 1 <<  7,
 205	IB_PORT_PKEY_NVRAM			= 1 <<  8,
 206	IB_PORT_LED_INFO_SUP			= 1 <<  9,
 207	IB_PORT_SM_DISABLED			= 1 << 10,
 208	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
 209	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
 210	IB_PORT_CM_SUP				= 1 << 16,
 211	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
 212	IB_PORT_REINIT_SUP			= 1 << 18,
 213	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
 214	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
 215	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
 216	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
 217	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
 218	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
 219	IB_PORT_CLIENT_REG_SUP			= 1 << 25
 220};
 221
 222enum ib_port_width {
 223	IB_WIDTH_1X	= 1,
 224	IB_WIDTH_4X	= 2,
 225	IB_WIDTH_8X	= 4,
 226	IB_WIDTH_12X	= 8
 227};
 228
 229static inline int ib_width_enum_to_int(enum ib_port_width width)
 230{
 231	switch (width) {
 232	case IB_WIDTH_1X:  return  1;
 233	case IB_WIDTH_4X:  return  4;
 234	case IB_WIDTH_8X:  return  8;
 235	case IB_WIDTH_12X: return 12;
 236	default: 	  return -1;
 237	}
 238}
 239
 240struct ib_protocol_stats {
 241	/* TBD... */
 242};
 243
 244struct iw_protocol_stats {
 245	u64	ipInReceives;
 246	u64	ipInHdrErrors;
 247	u64	ipInTooBigErrors;
 248	u64	ipInNoRoutes;
 249	u64	ipInAddrErrors;
 250	u64	ipInUnknownProtos;
 251	u64	ipInTruncatedPkts;
 252	u64	ipInDiscards;
 253	u64	ipInDelivers;
 254	u64	ipOutForwDatagrams;
 255	u64	ipOutRequests;
 256	u64	ipOutDiscards;
 257	u64	ipOutNoRoutes;
 258	u64	ipReasmTimeout;
 259	u64	ipReasmReqds;
 260	u64	ipReasmOKs;
 261	u64	ipReasmFails;
 262	u64	ipFragOKs;
 263	u64	ipFragFails;
 264	u64	ipFragCreates;
 265	u64	ipInMcastPkts;
 266	u64	ipOutMcastPkts;
 267	u64	ipInBcastPkts;
 268	u64	ipOutBcastPkts;
 269
 270	u64	tcpRtoAlgorithm;
 271	u64	tcpRtoMin;
 272	u64	tcpRtoMax;
 273	u64	tcpMaxConn;
 274	u64	tcpActiveOpens;
 275	u64	tcpPassiveOpens;
 276	u64	tcpAttemptFails;
 277	u64	tcpEstabResets;
 278	u64	tcpCurrEstab;
 279	u64	tcpInSegs;
 280	u64	tcpOutSegs;
 281	u64	tcpRetransSegs;
 282	u64	tcpInErrs;
 283	u64	tcpOutRsts;
 284};
 285
 286union rdma_protocol_stats {
 287	struct ib_protocol_stats	ib;
 288	struct iw_protocol_stats	iw;
 289};
 290
 291struct ib_port_attr {
 292	enum ib_port_state	state;
 293	enum ib_mtu		max_mtu;
 294	enum ib_mtu		active_mtu;
 295	int			gid_tbl_len;
 296	u32			port_cap_flags;
 297	u32			max_msg_sz;
 298	u32			bad_pkey_cntr;
 299	u32			qkey_viol_cntr;
 300	u16			pkey_tbl_len;
 301	u16			lid;
 302	u16			sm_lid;
 303	u8			lmc;
 304	u8			max_vl_num;
 305	u8			sm_sl;
 306	u8			subnet_timeout;
 307	u8			init_type_reply;
 308	u8			active_width;
 309	u8			active_speed;
 310	u8                      phys_state;
 311};
 312
 313enum ib_device_modify_flags {
 314	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
 315	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
 316};
 317
 318struct ib_device_modify {
 319	u64	sys_image_guid;
 320	char	node_desc[64];
 321};
 322
 323enum ib_port_modify_flags {
 324	IB_PORT_SHUTDOWN		= 1,
 325	IB_PORT_INIT_TYPE		= (1<<2),
 326	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
 327};
 328
 329struct ib_port_modify {
 330	u32	set_port_cap_mask;
 331	u32	clr_port_cap_mask;
 332	u8	init_type;
 333};
 334
 335enum ib_event_type {
 336	IB_EVENT_CQ_ERR,
 337	IB_EVENT_QP_FATAL,
 338	IB_EVENT_QP_REQ_ERR,
 339	IB_EVENT_QP_ACCESS_ERR,
 340	IB_EVENT_COMM_EST,
 341	IB_EVENT_SQ_DRAINED,
 342	IB_EVENT_PATH_MIG,
 343	IB_EVENT_PATH_MIG_ERR,
 344	IB_EVENT_DEVICE_FATAL,
 345	IB_EVENT_PORT_ACTIVE,
 346	IB_EVENT_PORT_ERR,
 347	IB_EVENT_LID_CHANGE,
 348	IB_EVENT_PKEY_CHANGE,
 349	IB_EVENT_SM_CHANGE,
 350	IB_EVENT_SRQ_ERR,
 351	IB_EVENT_SRQ_LIMIT_REACHED,
 352	IB_EVENT_QP_LAST_WQE_REACHED,
 353	IB_EVENT_CLIENT_REREGISTER,
 354	IB_EVENT_GID_CHANGE,
 355};
 356
 357struct ib_event {
 358	struct ib_device	*device;
 359	union {
 360		struct ib_cq	*cq;
 361		struct ib_qp	*qp;
 362		struct ib_srq	*srq;
 363		u8		port_num;
 364	} element;
 365	enum ib_event_type	event;
 366};
 367
 368struct ib_event_handler {
 369	struct ib_device *device;
 370	void            (*handler)(struct ib_event_handler *, struct ib_event *);
 371	struct list_head  list;
 372};
 373
 374#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
 375	do {							\
 376		(_ptr)->device  = _device;			\
 377		(_ptr)->handler = _handler;			\
 378		INIT_LIST_HEAD(&(_ptr)->list);			\
 379	} while (0)
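/*
 * Illustrative sketch (editorial addition): registering an asynchronous event
 * handler with INIT_IB_EVENT_HANDLER() and ib_register_event_handler()
 * (declared later in this file).  The handler must stay allocated while it is
 * registered, so it normally lives in a long-lived per-device structure
 * ("priv" and "my_event_handler" are hypothetical):
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %d is active\n",
 *				event->element.port_num);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device, my_event_handler);
 *	ib_register_event_handler(&priv->event_handler);
 */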
 380
 381struct ib_global_route {
 382	union ib_gid	dgid;
 383	u32		flow_label;
 384	u8		sgid_index;
 385	u8		hop_limit;
 386	u8		traffic_class;
 387};
 388
 389struct ib_grh {
 390	__be32		version_tclass_flow;
 391	__be16		paylen;
 392	u8		next_hdr;
 393	u8		hop_limit;
 394	union ib_gid	sgid;
 395	union ib_gid	dgid;
 396};
 397
 398enum {
 399	IB_MULTICAST_QPN = 0xffffff
 400};
 401
 402#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
 403
 404enum ib_ah_flags {
 405	IB_AH_GRH	= 1
 406};
 407
 408enum ib_rate {
 409	IB_RATE_PORT_CURRENT = 0,
 410	IB_RATE_2_5_GBPS = 2,
 411	IB_RATE_5_GBPS   = 5,
 412	IB_RATE_10_GBPS  = 3,
 413	IB_RATE_20_GBPS  = 6,
 414	IB_RATE_30_GBPS  = 4,
 415	IB_RATE_40_GBPS  = 7,
 416	IB_RATE_60_GBPS  = 8,
 417	IB_RATE_80_GBPS  = 9,
 418	IB_RATE_120_GBPS = 10
 419};
 420
 421/**
 422 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 423 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 424 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 425 * @rate: rate to convert.
 426 */
 427int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
 428
 429/**
 430 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 431 * enum.
 432 * @mult: multiple to convert.
 433 */
 434enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
 435
 436struct ib_ah_attr {
 437	struct ib_global_route	grh;
 438	u16			dlid;
 439	u8			sl;
 440	u8			src_path_bits;
 441	u8			static_rate;
 442	u8			ah_flags;
 443	u8			port_num;
 444};
 445
 446enum ib_wc_status {
 447	IB_WC_SUCCESS,
 448	IB_WC_LOC_LEN_ERR,
 449	IB_WC_LOC_QP_OP_ERR,
 450	IB_WC_LOC_EEC_OP_ERR,
 451	IB_WC_LOC_PROT_ERR,
 452	IB_WC_WR_FLUSH_ERR,
 453	IB_WC_MW_BIND_ERR,
 454	IB_WC_BAD_RESP_ERR,
 455	IB_WC_LOC_ACCESS_ERR,
 456	IB_WC_REM_INV_REQ_ERR,
 457	IB_WC_REM_ACCESS_ERR,
 458	IB_WC_REM_OP_ERR,
 459	IB_WC_RETRY_EXC_ERR,
 460	IB_WC_RNR_RETRY_EXC_ERR,
 461	IB_WC_LOC_RDD_VIOL_ERR,
 462	IB_WC_REM_INV_RD_REQ_ERR,
 463	IB_WC_REM_ABORT_ERR,
 464	IB_WC_INV_EECN_ERR,
 465	IB_WC_INV_EEC_STATE_ERR,
 466	IB_WC_FATAL_ERR,
 467	IB_WC_RESP_TIMEOUT_ERR,
 468	IB_WC_GENERAL_ERR
 469};
 470
 471enum ib_wc_opcode {
 472	IB_WC_SEND,
 473	IB_WC_RDMA_WRITE,
 474	IB_WC_RDMA_READ,
 475	IB_WC_COMP_SWAP,
 476	IB_WC_FETCH_ADD,
 477	IB_WC_BIND_MW,
 478	IB_WC_LSO,
 479	IB_WC_LOCAL_INV,
 480	IB_WC_FAST_REG_MR,
 481	IB_WC_MASKED_COMP_SWAP,
 482	IB_WC_MASKED_FETCH_ADD,
 483/*
 484 * Set value of IB_WC_RECV so consumers can test if a completion is a
 485 * receive by testing (opcode & IB_WC_RECV).
 486 */
 487	IB_WC_RECV			= 1 << 7,
 488	IB_WC_RECV_RDMA_WITH_IMM
 489};
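/*
 * Illustrative sketch (editorial addition): as the comment above describes,
 * a consumer can tell receive completions from send-side completions by
 * testing the IB_WC_RECV bit instead of comparing against every opcode:
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		;	/* completion of a posted receive */
 *	else
 *		;	/* send, RDMA, atomic or registration completion */
 */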
 490
 491enum ib_wc_flags {
 492	IB_WC_GRH		= 1,
 493	IB_WC_WITH_IMM		= (1<<1),
 494	IB_WC_WITH_INVALIDATE	= (1<<2),
 495};
 496
 497struct ib_wc {
 498	u64			wr_id;
 499	enum ib_wc_status	status;
 500	enum ib_wc_opcode	opcode;
 501	u32			vendor_err;
 502	u32			byte_len;
 503	struct ib_qp	       *qp;
 504	union {
 505		__be32		imm_data;
 506		u32		invalidate_rkey;
 507	} ex;
 508	u32			src_qp;
 509	int			wc_flags;
 510	u16			pkey_index;
 511	u16			slid;
 512	u8			sl;
 513	u8			dlid_path_bits;
 514	u8			port_num;	/* valid only for DR SMPs on switches */
 515	int			csum_ok;
 516};
 517
 518enum ib_cq_notify_flags {
 519	IB_CQ_SOLICITED			= 1 << 0,
 520	IB_CQ_NEXT_COMP			= 1 << 1,
 521	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
 522	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
 523};
 524
 525enum ib_srq_attr_mask {
 526	IB_SRQ_MAX_WR	= 1 << 0,
 527	IB_SRQ_LIMIT	= 1 << 1,
 528};
 529
 530struct ib_srq_attr {
 531	u32	max_wr;
 532	u32	max_sge;
 533	u32	srq_limit;
 534};
 535
 536struct ib_srq_init_attr {
 537	void		      (*event_handler)(struct ib_event *, void *);
 538	void		       *srq_context;
 539	struct ib_srq_attr	attr;
 540};
 541
 542struct ib_qp_cap {
 543	u32	max_send_wr;
 544	u32	max_recv_wr;
 545	u32	max_send_sge;
 546	u32	max_recv_sge;
 547	u32	max_inline_data;
 548};
 549
 550enum ib_sig_type {
 551	IB_SIGNAL_ALL_WR,
 552	IB_SIGNAL_REQ_WR
 553};
 554
 555enum ib_qp_type {
 556	/*
 557	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
 558	 * here (and in that order) since the MAD layer uses them as
 559	 * indices into a 2-entry table.
 560	 */
 561	IB_QPT_SMI,
 562	IB_QPT_GSI,
 563
 564	IB_QPT_RC,
 565	IB_QPT_UC,
 566	IB_QPT_UD,
 567	IB_QPT_RAW_IPV6,
 568	IB_QPT_RAW_ETHERTYPE
 569};
 570
 571enum ib_qp_create_flags {
 572	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
 573	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
 574};
 575
 576struct ib_qp_init_attr {
 577	void                  (*event_handler)(struct ib_event *, void *);
 578	void		       *qp_context;
 579	struct ib_cq	       *send_cq;
 580	struct ib_cq	       *recv_cq;
 581	struct ib_srq	       *srq;
 582	struct ib_qp_cap	cap;
 583	enum ib_sig_type	sq_sig_type;
 584	enum ib_qp_type		qp_type;
 585	enum ib_qp_create_flags	create_flags;
 586	u8			port_num; /* special QP types only */
 587};
 588
 589enum ib_rnr_timeout {
 590	IB_RNR_TIMER_655_36 =  0,
 591	IB_RNR_TIMER_000_01 =  1,
 592	IB_RNR_TIMER_000_02 =  2,
 593	IB_RNR_TIMER_000_03 =  3,
 594	IB_RNR_TIMER_000_04 =  4,
 595	IB_RNR_TIMER_000_06 =  5,
 596	IB_RNR_TIMER_000_08 =  6,
 597	IB_RNR_TIMER_000_12 =  7,
 598	IB_RNR_TIMER_000_16 =  8,
 599	IB_RNR_TIMER_000_24 =  9,
 600	IB_RNR_TIMER_000_32 = 10,
 601	IB_RNR_TIMER_000_48 = 11,
 602	IB_RNR_TIMER_000_64 = 12,
 603	IB_RNR_TIMER_000_96 = 13,
 604	IB_RNR_TIMER_001_28 = 14,
 605	IB_RNR_TIMER_001_92 = 15,
 606	IB_RNR_TIMER_002_56 = 16,
 607	IB_RNR_TIMER_003_84 = 17,
 608	IB_RNR_TIMER_005_12 = 18,
 609	IB_RNR_TIMER_007_68 = 19,
 610	IB_RNR_TIMER_010_24 = 20,
 611	IB_RNR_TIMER_015_36 = 21,
 612	IB_RNR_TIMER_020_48 = 22,
 613	IB_RNR_TIMER_030_72 = 23,
 614	IB_RNR_TIMER_040_96 = 24,
 615	IB_RNR_TIMER_061_44 = 25,
 616	IB_RNR_TIMER_081_92 = 26,
 617	IB_RNR_TIMER_122_88 = 27,
 618	IB_RNR_TIMER_163_84 = 28,
 619	IB_RNR_TIMER_245_76 = 29,
 620	IB_RNR_TIMER_327_68 = 30,
 621	IB_RNR_TIMER_491_52 = 31
 622};
 623
 624enum ib_qp_attr_mask {
 625	IB_QP_STATE			= 1,
 626	IB_QP_CUR_STATE			= (1<<1),
 627	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
 628	IB_QP_ACCESS_FLAGS		= (1<<3),
 629	IB_QP_PKEY_INDEX		= (1<<4),
 630	IB_QP_PORT			= (1<<5),
 631	IB_QP_QKEY			= (1<<6),
 632	IB_QP_AV			= (1<<7),
 633	IB_QP_PATH_MTU			= (1<<8),
 634	IB_QP_TIMEOUT			= (1<<9),
 635	IB_QP_RETRY_CNT			= (1<<10),
 636	IB_QP_RNR_RETRY			= (1<<11),
 637	IB_QP_RQ_PSN			= (1<<12),
 638	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
 639	IB_QP_ALT_PATH			= (1<<14),
 640	IB_QP_MIN_RNR_TIMER		= (1<<15),
 641	IB_QP_SQ_PSN			= (1<<16),
 642	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
 643	IB_QP_PATH_MIG_STATE		= (1<<18),
 644	IB_QP_CAP			= (1<<19),
 645	IB_QP_DEST_QPN			= (1<<20)
 646};
 647
 648enum ib_qp_state {
 649	IB_QPS_RESET,
 650	IB_QPS_INIT,
 651	IB_QPS_RTR,
 652	IB_QPS_RTS,
 653	IB_QPS_SQD,
 654	IB_QPS_SQE,
 655	IB_QPS_ERR
 656};
 657
 658enum ib_mig_state {
 659	IB_MIG_MIGRATED,
 660	IB_MIG_REARM,
 661	IB_MIG_ARMED
 662};
 663
 664struct ib_qp_attr {
 665	enum ib_qp_state	qp_state;
 666	enum ib_qp_state	cur_qp_state;
 667	enum ib_mtu		path_mtu;
 668	enum ib_mig_state	path_mig_state;
 669	u32			qkey;
 670	u32			rq_psn;
 671	u32			sq_psn;
 672	u32			dest_qp_num;
 673	int			qp_access_flags;
 674	struct ib_qp_cap	cap;
 675	struct ib_ah_attr	ah_attr;
 676	struct ib_ah_attr	alt_ah_attr;
 677	u16			pkey_index;
 678	u16			alt_pkey_index;
 679	u8			en_sqd_async_notify;
 680	u8			sq_draining;
 681	u8			max_rd_atomic;
 682	u8			max_dest_rd_atomic;
 683	u8			min_rnr_timer;
 684	u8			port_num;
 685	u8			timeout;
 686	u8			retry_cnt;
 687	u8			rnr_retry;
 688	u8			alt_port_num;
 689	u8			alt_timeout;
 690};
 691
 692enum ib_wr_opcode {
 693	IB_WR_RDMA_WRITE,
 694	IB_WR_RDMA_WRITE_WITH_IMM,
 695	IB_WR_SEND,
 696	IB_WR_SEND_WITH_IMM,
 697	IB_WR_RDMA_READ,
 698	IB_WR_ATOMIC_CMP_AND_SWP,
 699	IB_WR_ATOMIC_FETCH_AND_ADD,
 700	IB_WR_LSO,
 701	IB_WR_SEND_WITH_INV,
 702	IB_WR_RDMA_READ_WITH_INV,
 703	IB_WR_LOCAL_INV,
 704	IB_WR_FAST_REG_MR,
 705	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
 706	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
 707};
 708
 709enum ib_send_flags {
 710	IB_SEND_FENCE		= 1,
 711	IB_SEND_SIGNALED	= (1<<1),
 712	IB_SEND_SOLICITED	= (1<<2),
 713	IB_SEND_INLINE		= (1<<3),
 714	IB_SEND_IP_CSUM		= (1<<4)
 715};
 716
 717struct ib_sge {
 718	u64	addr;
 719	u32	length;
 720	u32	lkey;
 721};
 722
 723struct ib_fast_reg_page_list {
 724	struct ib_device       *device;
 725	u64		       *page_list;
 726	unsigned int		max_page_list_len;
 727};
 728
 729struct ib_send_wr {
 730	struct ib_send_wr      *next;
 731	u64			wr_id;
 732	struct ib_sge	       *sg_list;
 733	int			num_sge;
 734	enum ib_wr_opcode	opcode;
 735	int			send_flags;
 736	union {
 737		__be32		imm_data;
 738		u32		invalidate_rkey;
 739	} ex;
 740	union {
 741		struct {
 742			u64	remote_addr;
 743			u32	rkey;
 744		} rdma;
 745		struct {
 746			u64	remote_addr;
 747			u64	compare_add;
 748			u64	swap;
 749			u64	compare_add_mask;
 750			u64	swap_mask;
 751			u32	rkey;
 752		} atomic;
 753		struct {
 754			struct ib_ah *ah;
 755			void   *header;
 756			int     hlen;
 757			int     mss;
 758			u32	remote_qpn;
 759			u32	remote_qkey;
 760			u16	pkey_index; /* valid for GSI only */
 761			u8	port_num;   /* valid for DR SMPs on switch only */
 762		} ud;
 763		struct {
 764			u64				iova_start;
 765			struct ib_fast_reg_page_list   *page_list;
 766			unsigned int			page_shift;
 767			unsigned int			page_list_len;
 768			u32				length;
 769			int				access_flags;
 770			u32				rkey;
 771		} fast_reg;
 772	} wr;
 773};
 774
 775struct ib_recv_wr {
 776	struct ib_recv_wr      *next;
 777	u64			wr_id;
 778	struct ib_sge	       *sg_list;
 779	int			num_sge;
 780};
 781
 782enum ib_access_flags {
 783	IB_ACCESS_LOCAL_WRITE	= 1,
 784	IB_ACCESS_REMOTE_WRITE	= (1<<1),
 785	IB_ACCESS_REMOTE_READ	= (1<<2),
 786	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
 787	IB_ACCESS_MW_BIND	= (1<<4)
 788};
 789
 790struct ib_phys_buf {
 791	u64      addr;
 792	u64      size;
 793};
 794
 795struct ib_mr_attr {
 796	struct ib_pd	*pd;
 797	u64		device_virt_addr;
 798	u64		size;
 799	int		mr_access_flags;
 800	u32		lkey;
 801	u32		rkey;
 802};
 803
 804enum ib_mr_rereg_flags {
 805	IB_MR_REREG_TRANS	= 1,
 806	IB_MR_REREG_PD		= (1<<1),
 807	IB_MR_REREG_ACCESS	= (1<<2)
 808};
 809
 810struct ib_mw_bind {
 811	struct ib_mr   *mr;
 812	u64		wr_id;
 813	u64		addr;
 814	u32		length;
 815	int		send_flags;
 816	int		mw_access_flags;
 817};
 818
 819struct ib_fmr_attr {
 820	int	max_pages;
 821	int	max_maps;
 822	u8	page_shift;
 823};
 824
 825struct ib_ucontext {
 826	struct ib_device       *device;
 827	struct list_head	pd_list;
 828	struct list_head	mr_list;
 829	struct list_head	mw_list;
 830	struct list_head	cq_list;
 831	struct list_head	qp_list;
 832	struct list_head	srq_list;
 833	struct list_head	ah_list;
 834	int			closing;
 835};
 836
 837struct ib_uobject {
 838	u64			user_handle;	/* handle given to us by userspace */
 839	struct ib_ucontext     *context;	/* associated user context */
 840	void		       *object;		/* containing object */
 841	struct list_head	list;		/* link to context's list */
 842	int			id;		/* index into kernel idr */
 843	struct kref		ref;
 844	struct rw_semaphore	mutex;		/* protects .live */
 845	int			live;
 846};
 847
 848struct ib_udata {
 849	void __user *inbuf;
 850	void __user *outbuf;
 851	size_t       inlen;
 852	size_t       outlen;
 853};
 854
 855struct ib_pd {
 856	struct ib_device       *device;
 857	struct ib_uobject      *uobject;
 858	atomic_t          	usecnt; /* count all resources */
 859};
 860
 861struct ib_ah {
 862	struct ib_device	*device;
 863	struct ib_pd		*pd;
 864	struct ib_uobject	*uobject;
 865};
 866
 867typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
 868
 869struct ib_cq {
 870	struct ib_device       *device;
 871	struct ib_uobject      *uobject;
 872	ib_comp_handler   	comp_handler;
 873	void                  (*event_handler)(struct ib_event *, void *);
 874	void                   *cq_context;
 875	int               	cqe;
 876	atomic_t          	usecnt; /* count number of work queues */
 877};
 878
 879struct ib_srq {
 880	struct ib_device       *device;
 881	struct ib_pd	       *pd;
 882	struct ib_uobject      *uobject;
 883	void		      (*event_handler)(struct ib_event *, void *);
 884	void		       *srq_context;
 885	atomic_t		usecnt;
 886};
 887
 888struct ib_qp {
 889	struct ib_device       *device;
 890	struct ib_pd	       *pd;
 891	struct ib_cq	       *send_cq;
 892	struct ib_cq	       *recv_cq;
 893	struct ib_srq	       *srq;
 894	struct ib_uobject      *uobject;
 895	void                  (*event_handler)(struct ib_event *, void *);
 896	void		       *qp_context;
 897	u32			qp_num;
 898	enum ib_qp_type		qp_type;
 899};
 900
 901struct ib_mr {
 902	struct ib_device  *device;
 903	struct ib_pd	  *pd;
 904	struct ib_uobject *uobject;
 905	u32		   lkey;
 906	u32		   rkey;
 907	atomic_t	   usecnt; /* count number of MWs */
 908};
 909
 910struct ib_mw {
 911	struct ib_device	*device;
 912	struct ib_pd		*pd;
 913	struct ib_uobject	*uobject;
 914	u32			rkey;
 915};
 916
 917struct ib_fmr {
 918	struct ib_device	*device;
 919	struct ib_pd		*pd;
 920	struct list_head	list;
 921	u32			lkey;
 922	u32			rkey;
 923};
 924
 925struct ib_mad;
 926struct ib_grh;
 927
 928enum ib_process_mad_flags {
 929	IB_MAD_IGNORE_MKEY	= 1,
 930	IB_MAD_IGNORE_BKEY	= 2,
 931	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
 932};
 933
 934enum ib_mad_result {
 935	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
 936	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
 937	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
 938	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
 939};
 940
 941#define IB_DEVICE_NAME_MAX 64
 942
 943struct ib_cache {
 944	rwlock_t                lock;
 945	struct ib_event_handler event_handler;
 946	struct ib_pkey_cache  **pkey_cache;
 947	struct ib_gid_cache   **gid_cache;
 948	u8                     *lmc_cache;
 949};
 950
 951struct ib_dma_mapping_ops {
 952	int		(*mapping_error)(struct ib_device *dev,
 953					 u64 dma_addr);
 954	u64		(*map_single)(struct ib_device *dev,
 955				      void *ptr, size_t size,
 956				      enum dma_data_direction direction);
 957	void		(*unmap_single)(struct ib_device *dev,
 958					u64 addr, size_t size,
 959					enum dma_data_direction direction);
 960	u64		(*map_page)(struct ib_device *dev,
 961				    struct page *page, unsigned long offset,
 962				    size_t size,
 963				    enum dma_data_direction direction);
 964	void		(*unmap_page)(struct ib_device *dev,
 965				      u64 addr, size_t size,
 966				      enum dma_data_direction direction);
 967	int		(*map_sg)(struct ib_device *dev,
 968				  struct scatterlist *sg, int nents,
 969				  enum dma_data_direction direction);
 970	void		(*unmap_sg)(struct ib_device *dev,
 971				    struct scatterlist *sg, int nents,
 972				    enum dma_data_direction direction);
 973	u64		(*dma_address)(struct ib_device *dev,
 974				       struct scatterlist *sg);
 975	unsigned int	(*dma_len)(struct ib_device *dev,
 976				   struct scatterlist *sg);
 977	void		(*sync_single_for_cpu)(struct ib_device *dev,
 978					       u64 dma_handle,
 979					       size_t size,
 980					       enum dma_data_direction dir);
 981	void		(*sync_single_for_device)(struct ib_device *dev,
 982						  u64 dma_handle,
 983						  size_t size,
 984						  enum dma_data_direction dir);
 985	void		*(*alloc_coherent)(struct ib_device *dev,
 986					   size_t size,
 987					   u64 *dma_handle,
 988					   gfp_t flag);
 989	void		(*free_coherent)(struct ib_device *dev,
 990					 size_t size, void *cpu_addr,
 991					 u64 dma_handle);
 992};
 993
 994struct iw_cm_verbs;
 995
 996struct ib_device {
 997	struct device                *dma_device;
 998
 999	char                          name[IB_DEVICE_NAME_MAX];
1000
1001	struct list_head              event_handler_list;
1002	spinlock_t                    event_handler_lock;
1003
1004	spinlock_t                    client_data_lock;
1005	struct list_head              core_list;
1006	struct list_head              client_data_list;
1007
1008	struct ib_cache               cache;
1009	int                          *pkey_tbl_len;
1010	int                          *gid_tbl_len;
1011
1012	int			      num_comp_vectors;
1013
1014	struct iw_cm_verbs	     *iwcm;
1015
1016	int		           (*get_protocol_stats)(struct ib_device *device,
1017							 union rdma_protocol_stats *stats);
1018	int		           (*query_device)(struct ib_device *device,
1019						   struct ib_device_attr *device_attr);
1020	int		           (*query_port)(struct ib_device *device,
1021						 u8 port_num,
1022						 struct ib_port_attr *port_attr);
1023	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
1024						     u8 port_num);
1025	int		           (*query_gid)(struct ib_device *device,
1026						u8 port_num, int index,
1027						union ib_gid *gid);
1028	int		           (*query_pkey)(struct ib_device *device,
1029						 u8 port_num, u16 index, u16 *pkey);
1030	int		           (*modify_device)(struct ib_device *device,
1031						    int device_modify_mask,
1032						    struct ib_device_modify *device_modify);
1033	int		           (*modify_port)(struct ib_device *device,
1034						  u8 port_num, int port_modify_mask,
1035						  struct ib_port_modify *port_modify);
1036	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
1037						     struct ib_udata *udata);
1038	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
1039	int                        (*mmap)(struct ib_ucontext *context,
1040					   struct vm_area_struct *vma);
1041	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
1042					       struct ib_ucontext *context,
1043					       struct ib_udata *udata);
1044	int                        (*dealloc_pd)(struct ib_pd *pd);
1045	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
1046						struct ib_ah_attr *ah_attr);
1047	int                        (*modify_ah)(struct ib_ah *ah,
1048						struct ib_ah_attr *ah_attr);
1049	int                        (*query_ah)(struct ib_ah *ah,
1050					       struct ib_ah_attr *ah_attr);
1051	int                        (*destroy_ah)(struct ib_ah *ah);
1052	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
1053						 struct ib_srq_init_attr *srq_init_attr,
1054						 struct ib_udata *udata);
1055	int                        (*modify_srq)(struct ib_srq *srq,
1056						 struct ib_srq_attr *srq_attr,
1057						 enum ib_srq_attr_mask srq_attr_mask,
1058						 struct ib_udata *udata);
1059	int                        (*query_srq)(struct ib_srq *srq,
1060						struct ib_srq_attr *srq_attr);
1061	int                        (*destroy_srq)(struct ib_srq *srq);
1062	int                        (*post_srq_recv)(struct ib_srq *srq,
1063						    struct ib_recv_wr *recv_wr,
1064						    struct ib_recv_wr **bad_recv_wr);
1065	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
1066						struct ib_qp_init_attr *qp_init_attr,
1067						struct ib_udata *udata);
1068	int                        (*modify_qp)(struct ib_qp *qp,
1069						struct ib_qp_attr *qp_attr,
1070						int qp_attr_mask,
1071						struct ib_udata *udata);
1072	int                        (*query_qp)(struct ib_qp *qp,
1073					       struct ib_qp_attr *qp_attr,
1074					       int qp_attr_mask,
1075					       struct ib_qp_init_attr *qp_init_attr);
1076	int                        (*destroy_qp)(struct ib_qp *qp);
1077	int                        (*post_send)(struct ib_qp *qp,
1078						struct ib_send_wr *send_wr,
1079						struct ib_send_wr **bad_send_wr);
1080	int                        (*post_recv)(struct ib_qp *qp,
1081						struct ib_recv_wr *recv_wr,
1082						struct ib_recv_wr **bad_recv_wr);
1083	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
1084						int comp_vector,
1085						struct ib_ucontext *context,
1086						struct ib_udata *udata);
1087	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1088						u16 cq_period);
1089	int                        (*destroy_cq)(struct ib_cq *cq);
1090	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
1091						struct ib_udata *udata);
1092	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
1093					      struct ib_wc *wc);
1094	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1095	int                        (*req_notify_cq)(struct ib_cq *cq,
1096						    enum ib_cq_notify_flags flags);
1097	int                        (*req_ncomp_notif)(struct ib_cq *cq,
1098						      int wc_cnt);
1099	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
1100						 int mr_access_flags);
1101	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
1102						  struct ib_phys_buf *phys_buf_array,
1103						  int num_phys_buf,
1104						  int mr_access_flags,
1105						  u64 *iova_start);
1106	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
1107						  u64 start, u64 length,
1108						  u64 virt_addr,
1109						  int mr_access_flags,
1110						  struct ib_udata *udata);
1111	int                        (*query_mr)(struct ib_mr *mr,
1112					       struct ib_mr_attr *mr_attr);
1113	int                        (*dereg_mr)(struct ib_mr *mr);
1114	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
1115					       int max_page_list_len);
1116	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1117								   int page_list_len);
1118	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1119	int                        (*rereg_phys_mr)(struct ib_mr *mr,
1120						    int mr_rereg_mask,
1121						    struct ib_pd *pd,
1122						    struct ib_phys_buf *phys_buf_array,
1123						    int num_phys_buf,
1124						    int mr_access_flags,
1125						    u64 *iova_start);
1126	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
1127	int                        (*bind_mw)(struct ib_qp *qp,
1128					      struct ib_mw *mw,
1129					      struct ib_mw_bind *mw_bind);
1130	int                        (*dealloc_mw)(struct ib_mw *mw);
1131	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
1132						int mr_access_flags,
1133						struct ib_fmr_attr *fmr_attr);
1134	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
1135						   u64 *page_list, int list_len,
1136						   u64 iova);
1137	int		           (*unmap_fmr)(struct list_head *fmr_list);
1138	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
1139	int                        (*attach_mcast)(struct ib_qp *qp,
1140						   union ib_gid *gid,
1141						   u16 lid);
1142	int                        (*detach_mcast)(struct ib_qp *qp,
1143						   union ib_gid *gid,
1144						   u16 lid);
1145	int                        (*process_mad)(struct ib_device *device,
1146						  int process_mad_flags,
1147						  u8 port_num,
1148						  struct ib_wc *in_wc,
1149						  struct ib_grh *in_grh,
1150						  struct ib_mad *in_mad,
1151						  struct ib_mad *out_mad);
1152
1153	struct ib_dma_mapping_ops   *dma_ops;
1154
1155	struct module               *owner;
1156	struct device                dev;
1157	struct kobject               *ports_parent;
1158	struct list_head             port_list;
1159
1160	enum {
1161		IB_DEV_UNINITIALIZED,
1162		IB_DEV_REGISTERED,
1163		IB_DEV_UNREGISTERED
1164	}                            reg_state;
1165
1166	int			     uverbs_abi_ver;
1167	u64			     uverbs_cmd_mask;
1168
1169	char			     node_desc[64];
1170	__be64			     node_guid;
1171	u32			     local_dma_lkey;
1172	u8                           node_type;
1173	u8                           phys_port_cnt;
1174};
1175
1176struct ib_client {
1177	char  *name;
1178	void (*add)   (struct ib_device *);
1179	void (*remove)(struct ib_device *);
1180
1181	struct list_head list;
1182};
1183
1184struct ib_device *ib_alloc_device(size_t size);
1185void ib_dealloc_device(struct ib_device *device);
1186
1187int ib_register_device(struct ib_device *device,
1188		       int (*port_callback)(struct ib_device *,
1189					    u8, struct kobject *));
1190void ib_unregister_device(struct ib_device *device);
1191
1192int ib_register_client   (struct ib_client *client);
1193void ib_unregister_client(struct ib_client *client);
1194
1195void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1196void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
1197			 void *data);
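/*
 * Illustrative sketch (editorial addition): the ib_client interface above is
 * how upper-layer protocols discover devices.  A minimal registration, with
 * "my_setup"/"my_teardown" as hypothetical helpers:
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		ib_set_client_data(device, &my_client, my_setup(device));
 *	}
 *
 *	static void my_remove_one(struct ib_device *device)
 *	{
 *		my_teardown(ib_get_client_data(device, &my_client));
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add_one,
 *		.remove = my_remove_one
 *	};
 *
 *	...
 *	ib_register_client(&my_client);
 */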
1198
1199static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1200{
1201	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1202}
1203
1204static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1205{
1206	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1207}
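/*
 * Illustrative sketch (editorial addition): a low-level driver's verbs
 * methods use these helpers to exchange ABI structures with userspace.  For
 * example, a create_cq method might report the CQE count actually allocated;
 * "struct my_create_cq_resp" is a hypothetical driver/user ABI structure:
 *
 *	struct my_create_cq_resp resp;
 *
 *	resp.cqe = actual_cqe_count;
 *	if (udata && ib_copy_to_udata(udata, &resp, sizeof resp)) {
 *		/* destroy the CQ just created, then: */
 *		return ERR_PTR(-EFAULT);
 *	}
 */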
1208
1209/**
1210 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1211 * contains all required attributes and no attributes not allowed for
1212 * the given QP state transition.
1213 * @cur_state: Current QP state
1214 * @next_state: Next QP state
1215 * @type: QP type
1216 * @mask: Mask of supplied QP attributes
1217 *
1218 * This function is a helper function that a low-level driver's
1219 * modify_qp method can use to validate the consumer's input.  It
1220 * checks that cur_state and next_state are valid QP states, that a
1221 * transition from cur_state to next_state is allowed by the IB spec,
1222 * and that the attribute mask supplied is allowed for the transition.
1223 */
1224int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1225		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
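/*
 * Illustrative sketch (editorial addition): a driver's modify_qp method would
 * typically validate the request with this helper before touching hardware:
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */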
1226
1227int ib_register_event_handler  (struct ib_event_handler *event_handler);
1228int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1229void ib_dispatch_event(struct ib_event *event);
1230
1231int ib_query_device(struct ib_device *device,
1232		    struct ib_device_attr *device_attr);
1233
1234int ib_query_port(struct ib_device *device,
1235		  u8 port_num, struct ib_port_attr *port_attr);
1236
1237enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1238					       u8 port_num);
1239
1240int ib_query_gid(struct ib_device *device,
1241		 u8 port_num, int index, union ib_gid *gid);
1242
1243int ib_query_pkey(struct ib_device *device,
1244		  u8 port_num, u16 index, u16 *pkey);
1245
1246int ib_modify_device(struct ib_device *device,
1247		     int device_modify_mask,
1248		     struct ib_device_modify *device_modify);
1249
1250int ib_modify_port(struct ib_device *device,
1251		   u8 port_num, int port_modify_mask,
1252		   struct ib_port_modify *port_modify);
1253
1254int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1255		u8 *port_num, u16 *index);
1256
1257int ib_find_pkey(struct ib_device *device,
1258		 u8 port_num, u16 pkey, u16 *index);
1259
1260/**
1261 * ib_alloc_pd - Allocates an unused protection domain.
1262 * @device: The device on which to allocate the protection domain.
1263 *
1264 * A protection domain object provides an association between QPs, shared
1265 * receive queues, address handles, memory regions, and memory windows.
1266 */
1267struct ib_pd *ib_alloc_pd(struct ib_device *device);
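/*
 * Illustrative sketch (editorial addition): allocating a PD is usually the
 * first step once a device is available.  The call returns an ERR_PTR on
 * failure, so check it with IS_ERR()/PTR_ERR() from <linux/err.h>:
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */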
1268
1269/**
1270 * ib_dealloc_pd - Deallocates a protection domain.
1271 * @pd: The protection domain to deallocate.
1272 */
1273int ib_dealloc_pd(struct ib_pd *pd);
1274
1275/**
1276 * ib_create_ah - Creates an address handle for the given address vector.
1277 * @pd: The protection domain associated with the address handle.
1278 * @ah_attr: The attributes of the address vector.
1279 *
1280 * The address handle is used to reference a local or global destination
1281 * in all UD QP post sends.
1282 */
1283struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1284
1285/**
1286 * ib_init_ah_from_wc - Initializes address handle attributes from a
1287 *   work completion.
1288 * @device: Device on which the received message arrived.
1289 * @port_num: Port on which the received message arrived.
1290 * @wc: Work completion associated with the received message.
1291 * @grh: References the received global route header.  This parameter is
1292 *   ignored unless the work completion indicates that the GRH is valid.
1293 * @ah_attr: Returned attributes that can be used when creating an address
1294 *   handle for replying to the message.
1295 */
1296int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1297		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1298
1299/**
1300 * ib_create_ah_from_wc - Creates an address handle associated with the
1301 *   sender of the specified work completion.
1302 * @pd: The protection domain associated with the address handle.
1303 * @wc: Work completion information associated with a received message.
1304 * @grh: References the received global route header.  This parameter is
1305 *   ignored unless the work completion indicates that the GRH is valid.
1306 * @port_num: The outbound port number to associate with the address.
1307 *
1308 * The address handle is used to reference a local or global destination
1309 * in all UD QP post sends.
1310 */
1311struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1312				   struct ib_grh *grh, u8 port_num);
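/*
 * Illustrative sketch (editorial addition): replying to a received UD message.
 * When wc->wc_flags has IB_WC_GRH set, the HCA has placed the 40-byte GRH at
 * the start of the receive buffer; "recv_buf" is a hypothetical, already
 * DMA-synced buffer:
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *) recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */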
1313
1314/**
1315 * ib_modify_ah - Modifies the address vector associated with an address
1316 *   handle.
1317 * @ah: The address handle to modify.
1318 * @ah_attr: The new address vector attributes to associate with the
1319 *   address handle.
1320 */
1321int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1322
1323/**
1324 * ib_query_ah - Queries the address vector associated with an address
1325 *   handle.
1326 * @ah: The address handle to query.
1327 * @ah_attr: The address vector attributes associated with the address
1328 *   handle.
1329 */
1330int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1331
1332/**
1333 * ib_destroy_ah - Destroys an address handle.
1334 * @ah: The address handle to destroy.
1335 */
1336int ib_destroy_ah(struct ib_ah *ah);
1337
1338/**
1339 * ib_create_srq - Creates a SRQ associated with the specified protection
1340 *   domain.
1341 * @pd: The protection domain associated with the SRQ.
1342 * @srq_init_attr: A list of initial attributes required to create the
1343 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1344 *   the actual capabilities of the created SRQ.
1345 *
 1346 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1347 * requested size of the SRQ, and set to the actual values allocated
1348 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1349 * will always be at least as large as the requested values.
1350 */
1351struct ib_srq *ib_create_srq(struct ib_pd *pd,
1352			     struct ib_srq_init_attr *srq_init_attr);
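/*
 * Illustrative sketch (editorial addition): creating an SRQ with room for 128
 * outstanding receives of one SGE each (the sizes are arbitrary).  On success
 * the attr fields hold the values actually allocated:
 *
 *	struct ib_srq_init_attr srq_init_attr = {
 *		.attr = {
 *			.max_wr  = 128,
 *			.max_sge = 1
 *		}
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_init_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */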
1353
1354/**
1355 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1356 * @srq: The SRQ to modify.
1357 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
1358 *   the current values of selected SRQ attributes are returned.
1359 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1360 *   are being modified.
1361 *
1362 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1363 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1364 * the number of receives queued drops below the limit.
1365 */
1366int ib_modify_srq(struct ib_srq *srq,
1367		  struct ib_srq_attr *srq_attr,
1368		  enum ib_srq_attr_mask srq_attr_mask);
1369
1370/**
1371 * ib_query_srq - Returns the attribute list and current values for the
1372 *   specified SRQ.
1373 * @srq: The SRQ to query.
1374 * @srq_attr: The attributes of the specified SRQ.
1375 */
1376int ib_query_srq(struct ib_srq *srq,
1377		 struct ib_srq_attr *srq_attr);
1378
1379/**
1380 * ib_destroy_srq - Destroys the specified SRQ.
1381 * @srq: The SRQ to destroy.
1382 */
1383int ib_destroy_srq(struct ib_srq *srq);
1384
1385/**
1386 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1387 * @srq: The SRQ to post the work request on.
1388 * @recv_wr: A list of work requests to post on the receive queue.
1389 * @bad_recv_wr: On an immediate failure, this parameter will reference
 1390 *   the work request that failed to be posted on the SRQ.
1391 */
1392static inline int ib_post_srq_recv(struct ib_srq *srq,
1393				   struct ib_recv_wr *recv_wr,
1394				   struct ib_recv_wr **bad_recv_wr)
1395{
1396	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1397}
1398
1399/**
1400 * ib_create_qp - Creates a QP associated with the specified protection
1401 *   domain.
1402 * @pd: The protection domain associated with the QP.
1403 * @qp_init_attr: A list of initial attributes required to create the
1404 *   QP.  If QP creation succeeds, then the attributes are updated to
1405 *   the actual capabilities of the created QP.
1406 */
1407struct ib_qp *ib_create_qp(struct ib_pd *pd,
1408			   struct ib_qp_init_attr *qp_init_attr);
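/*
 * Illustrative sketch (editorial addition): creating an RC QP whose send and
 * receive queues share one CQ.  The queue depths are arbitrary and
 * "my_qp_event" is a hypothetical event callback:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler = my_qp_event,
 *		.send_cq       = cq,
 *		.recv_cq       = cq,
 *		.cap           = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1
 *		},
 *		.sq_sig_type   = IB_SIGNAL_ALL_WR,
 *		.qp_type       = IB_QPT_RC
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */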
1409
1410/**
1411 * ib_modify_qp - Modifies the attributes for the specified QP and then
1412 *   transitions the QP to the given state.
1413 * @qp: The QP to modify.
1414 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
1415 *   the current values of selected QP attributes are returned.
1416 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1417 *   are being modified.
1418 */
1419int ib_modify_qp(struct ib_qp *qp,
1420		 struct ib_qp_attr *qp_attr,
1421		 int qp_attr_mask);
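/*
 * Illustrative sketch (editorial addition): the first transition of an RC QP,
 * RESET -> INIT, using the attribute mask required for that transition
 * ("port" and the access flags are placeholders):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = port,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE
 *	};
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */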
1422
1423/**
1424 * ib_query_qp - Returns the attribute list and current values for the
1425 *   specified QP.
1426 * @qp: The QP to query.
1427 * @qp_attr: The attributes of the specified QP.
1428 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1429 * @qp_init_attr: Additional attributes of the selected QP.
1430 *
1431 * The qp_attr_mask may be used to limit the query to gathering only the
1432 * selected attributes.
1433 */
1434int ib_query_qp(struct ib_qp *qp,
1435		struct ib_qp_attr *qp_attr,
1436		int qp_attr_mask,
1437		struct ib_qp_init_attr *qp_init_attr);
1438
1439/**
1440 * ib_destroy_qp - Destroys the specified QP.
1441 * @qp: The QP to destroy.
1442 */
1443int ib_destroy_qp(struct ib_qp *qp);
1444
1445/**
1446 * ib_post_send - Posts a list of work requests to the send queue of
1447 *   the specified QP.
1448 * @qp: The QP to post the work request on.
1449 * @send_wr: A list of work requests to post on the send queue.
1450 * @bad_send_wr: On an immediate failure, this parameter will reference
1451 *   the work request that failed to be posted on the QP.
1452 *
1453 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
1454 * error is returned, the QP state shall not be affected,
1455 * ib_post_send() will return an immediate error after queueing any
1456 * earlier work requests in the list.
1457 */
1458static inline int ib_post_send(struct ib_qp *qp,
1459			       struct ib_send_wr *send_wr,
1460			       struct ib_send_wr **bad_send_wr)
1461{
1462	return qp->device->post_send(qp, send_wr, bad_send_wr);
1463}
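/*
 * Illustrative sketch (editorial addition): posting a single signaled SEND of
 * one registered, DMA-mapped buffer.  "dma_addr", "len" and "mr" are assumed
 * to exist already (see ib_get_dma_mr() and ib_dma_map_single() below):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = 1,	/* opaque cookie echoed in the CQE */
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */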
1464
1465/**
1466 * ib_post_recv - Posts a list of work requests to the receive queue of
1467 *   the specified QP.
1468 * @qp: The QP to post the work request on.
1469 * @recv_wr: A list of work requests to post on the receive queue.
1470 * @bad_recv_wr: On an immediate failure, this parameter will reference
1471 *   the work request that failed to be posted on the QP.
1472 */
1473static inline int ib_post_recv(struct ib_qp *qp,
1474			       struct ib_recv_wr *recv_wr,
1475			       struct ib_recv_wr **bad_recv_wr)
1476{
1477	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1478}
1479
1480/**
1481 * ib_create_cq - Creates a CQ on the specified device.
1482 * @device: The device on which to create the CQ.
1483 * @comp_handler: A user-specified callback that is invoked when a
1484 *   completion event occurs on the CQ.
1485 * @event_handler: A user-specified callback that is invoked when an
1486 *   asynchronous event not associated with a completion occurs on the CQ.
1487 * @cq_context: Context associated with the CQ returned to the user via
1488 *   the associated completion and event handlers.
1489 * @cqe: The minimum size of the CQ.
 1490 * @comp_vector: Completion vector used to signal completion events.
1491 *     Must be >= 0 and < context->num_comp_vectors.
1492 *
1493 * Users can examine the cq structure to determine the actual CQ size.
1494 */
1495struct ib_cq *ib_create_cq(struct ib_device *device,
1496			   ib_comp_handler comp_handler,
1497			   void (*event_handler)(struct ib_event *, void *),
1498			   void *cq_context, int cqe, int comp_vector);
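/*
 * Illustrative sketch (editorial addition): creating a CQ with at least 256
 * entries on completion vector 0.  "my_comp_handler" is a hypothetical
 * ib_comp_handler and "priv" a hypothetical context pointer:
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, priv, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */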
1499
1500/**
1501 * ib_resize_cq - Modifies the capacity of the CQ.
1502 * @cq: The CQ to resize.
1503 * @cqe: The minimum size of the CQ.
1504 *
1505 * Users can examine the cq structure to determine the actual CQ size.
1506 */
1507int ib_resize_cq(struct ib_cq *cq, int cqe);
1508
1509/**
1510 * ib_modify_cq - Modifies moderation params of the CQ
1511 * @cq: The CQ to modify.
1512 * @cq_count: number of CQEs that will trigger an event
1513 * @cq_period: max period of time in usec before triggering an event
1514 *
1515 */
1516int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1517
1518/**
1519 * ib_destroy_cq - Destroys the specified CQ.
1520 * @cq: The CQ to destroy.
1521 */
1522int ib_destroy_cq(struct ib_cq *cq);
1523
1524/**
1525 * ib_poll_cq - poll a CQ for completion(s)
1526 * @cq:the CQ being polled
1527 * @num_entries:maximum number of completions to return
1528 * @wc:array of at least @num_entries &struct ib_wc where completions
1529 *   will be returned
1530 *
1531 * Poll a CQ for (possibly multiple) completions.  If the return value
1532 * is < 0, an error occurred.  If the return value is >= 0, it is the
1533 * number of completions returned.  If the return value is
1534 * non-negative and < num_entries, then the CQ was emptied.
1535 */
1536static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1537			     struct ib_wc *wc)
1538{
1539	return cq->device->poll_cq(cq, num_entries, wc);
1540}
1541
1542/**
1543 * ib_peek_cq - Returns the number of unreaped completions currently
1544 *   on the specified CQ.
1545 * @cq: The CQ to peek.
1546 * @wc_cnt: A minimum number of unreaped completions to check for.
1547 *
1548 * If the number of unreaped completions is greater than or equal to wc_cnt,
1549 * this function returns wc_cnt, otherwise, it returns the actual number of
1550 * unreaped completions.
1551 */
1552int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1553
1554/**
1555 * ib_req_notify_cq - Request completion notification on a CQ.
1556 * @cq: The CQ to generate an event for.
1557 * @flags:
1558 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1559 *   to request an event on the next solicited event or next work
 1560 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1561 *   may also be |ed in to request a hint about missed events, as
1562 *   described below.
1563 *
1564 * Return Value:
1565 *    < 0 means an error occurred while requesting notification
1566 *   == 0 means notification was requested successfully, and if
1567 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1568 *        were missed and it is safe to wait for another event.  In
 1569 *        this case it is guaranteed that any work completions added
1570 *        to the CQ since the last CQ poll will trigger a completion
1571 *        notification event.
1572 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1573 *        in.  It means that the consumer must poll the CQ again to
1574 *        make sure it is empty to avoid missing an event because of a
1575 *        race between requesting notification and an entry being
1576 *        added to the CQ.  This return value means it is possible
1577 *        (but not guaranteed) that a work completion has been added
1578 *        to the CQ since the last poll without triggering a
1579 *        completion notification event.
1580 */
1581static inline int ib_req_notify_cq(struct ib_cq *cq,
1582				   enum ib_cq_notify_flags flags)
1583{
1584	return cq->device->req_notify_cq(cq, flags);
1585}
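/*
 * Illustrative sketch (editorial addition): the polling idiom implied by the
 * IB_CQ_REPORT_MISSED_EVENTS description above: drain the CQ, re-arm it, and
 * poll again while the re-arm reports possibly missed completions
 * ("handle_completion" is a hypothetical consumer function):
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */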
1586
1587/**
1588 * ib_req_ncomp_notif - Request completion notification when there are
1589 *   at least the specified number of unreaped completions on the CQ.
1590 * @cq: The CQ to generate an event for.
1591 * @wc_cnt: The number of unreaped completions that should be on the
1592 *   CQ before an event is generated.
1593 */
1594static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1595{
1596	return cq->device->req_ncomp_notif ?
1597		cq->device->req_ncomp_notif(cq, wc_cnt) :
1598		-ENOSYS;
1599}
1600
1601/**
1602 * ib_get_dma_mr - Returns a memory region for system memory that is
1603 *   usable for DMA.
1604 * @pd: The protection domain associated with the memory region.
1605 * @mr_access_flags: Specifies the memory access rights.
1606 *
1607 * Note that the ib_dma_*() functions defined below must be used
1608 * to create/destroy addresses used with the Lkey or Rkey returned
1609 * by ib_get_dma_mr().
1610 */
1611struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1612
1613/**
1614 * ib_dma_mapping_error - check a DMA addr for error
1615 * @dev: The device for which the dma_addr was created
1616 * @dma_addr: The DMA address to check
1617 */
1618static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1619{
1620	if (dev->dma_ops)
1621		return dev->dma_ops->mapping_error(dev, dma_addr);
1622	return dma_mapping_error(dev->dma_device, dma_addr);
1623}
1624
1625/**
1626 * ib_dma_map_single - Map a kernel virtual address to DMA address
1627 * @dev: The device for which the dma_addr is to be created
1628 * @cpu_addr: The kernel virtual address
1629 * @size: The size of the region in bytes
1630 * @direction: The direction of the DMA
1631 */
1632static inline u64 ib_dma_map_single(struct ib_device *dev,
1633				    void *cpu_addr, size_t size,
1634				    enum dma_data_direction direction)
1635{
1636	if (dev->dma_ops)
1637		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1638	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1639}
1640
1641/**
1642 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1643 * @dev: The device for which the DMA address was created
1644 * @addr: The DMA address
1645 * @size: The size of the region in bytes
1646 * @direction: The direction of the DMA
1647 */
1648static inline void ib_dma_unmap_single(struct ib_device *dev,
1649				       u64 addr, size_t size,
1650				       enum dma_data_direction direction)
1651{
1652	if (dev->dma_ops)
1653		dev->dma_ops->unmap_single(dev, addr, size, direction);
1654	else
1655		dma_unmap_single(dev->dma_device, addr, size, direction);
1656}
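/*
 * Illustrative sketch (not part of the original header): mapping a kernel
 * buffer with the ib_dma_*() wrappers and checking the result before using
 * the address in work requests.
 */
#if 0	/* example only, not compiled */
static int map_buffer_example(struct ib_device *dev, void *buf, size_t len)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... post work requests whose SGEs reference dma_addr ... */

	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	return 0;
}
#endif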
1657
1658static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
1659					  void *cpu_addr, size_t size,
1660					  enum dma_data_direction direction,
1661					  struct dma_attrs *attrs)
1662{
1663	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
1664				    direction, attrs);
1665}
1666
1667static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1668					     u64 addr, size_t size,
1669					     enum dma_data_direction direction,
1670					     struct dma_attrs *attrs)
1671{
1672	return dma_unmap_single_attrs(dev->dma_device, addr, size,
1673				      direction, attrs);
1674}
1675
1676/**
1677 * ib_dma_map_page - Map a physical page to DMA address
1678 * @dev: The device for which the dma_addr is to be created
1679 * @page: The page to be mapped
1680 * @offset: The offset within the page
1681 * @size: The size of the region in bytes
1682 * @direction: The direction of the DMA
1683 */
1684static inline u64 ib_dma_map_page(struct ib_device *dev,
1685				  struct page *page,
1686				  unsigned long offset,
1687				  size_t size,
1688				  enum dma_data_direction direction)
1689{
1690	if (dev->dma_ops)
1691		return dev->dma_ops->map_page(dev, page, offset, size, direction);
1692	return dma_map_page(dev->dma_device, page, offset, size, direction);
1693}
1694
1695/**
1696 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1697 * @dev: The device for which the DMA address was created
1698 * @addr: The DMA address
1699 * @size: The size of the region in bytes
1700 * @direction: The direction of the DMA
1701 */
1702static inline void ib_dma_unmap_page(struct ib_device *dev,
1703				     u64 addr, size_t size,
1704				     enum dma_data_direction direction)
1705{
1706	if (dev->dma_ops)
1707		dev->dma_ops->unmap_page(dev, addr, size, direction);
1708	else
1709		dma_unmap_page(dev->dma_device, addr, size, direction);
1710}
1711
1712/**
1713 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1714 * @dev: The device for which the DMA addresses are to be created
1715 * @sg: The array of scatter/gather entries
1716 * @nents: The number of scatter/gather entries
1717 * @direction: The direction of the DMA
1718 */
1719static inline int ib_dma_map_sg(struct ib_device *dev,
1720				struct scatterlist *sg, int nents,
1721				enum dma_data_direction direction)
1722{
1723	if (dev->dma_ops)
1724		return dev->dma_ops->map_sg(dev, sg, nents, direction);
1725	return dma_map_sg(dev->dma_device, sg, nents, direction);
1726}
1727
1728/**
1729 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1730 * @dev: The device for which the DMA addresses were created
1731 * @sg: The array of scatter/gather entries
1732 * @nents: The number of scatter/gather entries
1733 * @direction: The direction of the DMA
1734 */
1735static inline void ib_dma_unmap_sg(struct ib_device *dev,
1736				   struct scatterlist *sg, int nents,
1737				   enum dma_data_direction direction)
1738{
1739	if (dev->dma_ops)
1740		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1741	else
1742		dma_unmap_sg(dev->dma_device, sg, nents, direction);
1743}
1744
1745static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1746				      struct scatterlist *sg, int nents,
1747				      enum dma_data_direction direction,
1748				      struct dma_attrs *attrs)
1749{
1750	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1751}
1752
1753static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1754					 struct scatterlist *sg, int nents,
1755					 enum dma_data_direction direction,
1756					 struct dma_attrs *attrs)
1757{
1758	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1759}
1760/**
1761 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1762 * @dev: The device for which the DMA addresses were created
1763 * @sg: The scatter/gather entry
1764 */
1765static inline u64 ib_sg_dma_address(struct ib_device *dev,
1766				    struct scatterlist *sg)
1767{
1768	if (dev->dma_ops)
1769		return dev->dma_ops->dma_address(dev, sg);
1770	return sg_dma_address(sg);
1771}
1772
1773/**
1774 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1775 * @dev: The device for which the DMA addresses were created
1776 * @sg: The scatter/gather entry
1777 */
1778static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1779					 struct scatterlist *sg)
1780{
1781	if (dev->dma_ops)
1782		return dev->dma_ops->dma_len(dev, sg);
1783	return sg_dma_len(sg);
1784}
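/*
 * Illustrative sketch (not part of the original header): mapping a
 * scatterlist and walking the mapped entries with ib_sg_dma_address() and
 * ib_sg_dma_len().  The fill_sge() helper is hypothetical.
 */
#if 0	/* example only, not compiled */
static int map_sg_example(struct ib_device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = ib_dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i)
		fill_sge(i, ib_sg_dma_address(dev, sg),	/* hypothetical */
			 ib_sg_dma_len(dev, sg));

	ib_dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	return 0;
}
#endif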
1785
1786/**
1787 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1788 * @dev: The device for which the DMA address was created
1789 * @addr: The DMA address
1790 * @size: The size of the region in bytes
1791 * @dir: The direction of the DMA
1792 */
1793static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1794					      u64 addr,
1795					      size_t size,
1796					      enum dma_data_direction dir)
1797{
1798	if (dev->dma_ops)
1799		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1800	else
1801		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1802}
1803
1804/**
1805 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1806 * @dev: The device for which the DMA address was created
1807 * @addr: The DMA address
1808 * @size: The size of the region in bytes
1809 * @dir: The direction of the DMA
1810 */
1811static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1812						 u64 addr,
1813						 size_t size,
1814						 enum dma_data_direction dir)
1815{
1816	if (dev->dma_ops)
1817		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1818	else
1819		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1820}
1821
1822/**
1823 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1824 * @dev: The device for which the DMA address is requested
1825 * @size: The size of the region to allocate in bytes
1826 * @dma_handle: A pointer for returning the DMA address of the region
1827 * @flag: memory allocator flags
1828 */
1829static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1830					   size_t size,
1831					   u64 *dma_handle,
1832					   gfp_t flag)
1833{
1834	if (dev->dma_ops)
1835		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1836	else {
1837		dma_addr_t handle;
1838		void *ret;
1839
1840		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1841		*dma_handle = handle;
1842		return ret;
1843	}
1844}
1845
1846/**
1847 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1848 * @dev: The device for which the DMA addresses were allocated
1849 * @size: The size of the region
1850 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1851 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1852 */
1853static inline void ib_dma_free_coherent(struct ib_device *dev,
1854					size_t size, void *cpu_addr,
1855					u64 dma_handle)
1856{
1857	if (dev->dma_ops)
1858		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1859	else
1860		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1861}
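/*
 * Illustrative sketch (not part of the original header): allocating a small
 * coherent region, e.g. for a descriptor ring shared with the HCA, and
 * releasing it again.
 */
#if 0	/* example only, not compiled */
static void coherent_example(struct ib_device *dev)
{
	u64 dma_handle;
	void *ring;

	ring = ib_dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!ring)
		return;

	/* ... hand dma_handle to the device, touch "ring" from the CPU ... */

	ib_dma_free_coherent(dev, PAGE_SIZE, ring, dma_handle);
}
#endif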
1862
1863/**
1864 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1865 *   by an HCA.
1866 * @pd: The protection domain associated with the registered region.
1867 * @phys_buf_array: Specifies a list of physical buffers to use in the
1868 *   memory region.
1869 * @num_phys_buf: Specifies the size of the phys_buf_array.
1870 * @mr_access_flags: Specifies the memory access rights.
1871 * @iova_start: The offset of the region's starting I/O virtual address.
1872 */
1873struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1874			     struct ib_phys_buf *phys_buf_array,
1875			     int num_phys_buf,
1876			     int mr_access_flags,
1877			     u64 *iova_start);
1878
1879/**
1880 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1881 *   Conceptually, this call performs a deregister memory region operation
1882 *   followed by a register physical memory region operation.  Where possible,
1883 *   resources are reused instead of deallocated and reallocated.
1884 * @mr: The memory region to modify.
1885 * @mr_rereg_mask: A bit-mask used to indicate which of the following
1886 *   properties of the memory region are being modified.
1887 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1888 *   the new protection domain to associate with the memory region,
1889 *   otherwise, this parameter is ignored.
1890 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1891 *   field specifies a list of physical buffers to use in the new
1892 *   translation, otherwise, this parameter is ignored.
1893 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1894 *   field specifies the size of the phys_buf_array, otherwise, this
1895 *   parameter is ignored.
1896 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1897 *   field specifies the new memory access rights, otherwise, this
1898 *   parameter is ignored.
1899 * @iova_start: The offset of the region's starting I/O virtual address.
1900 */
1901int ib_rereg_phys_mr(struct ib_mr *mr,
1902		     int mr_rereg_mask,
1903		     struct ib_pd *pd,
1904		     struct ib_phys_buf *phys_buf_array,
1905		     int num_phys_buf,
1906		     int mr_access_flags,
1907		     u64 *iova_start);
1908
1909/**
1910 * ib_query_mr - Retrieves information about a specific memory region.
1911 * @mr: The memory region to retrieve information about.
1912 * @mr_attr: The attributes of the specified memory region.
1913 */
1914int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1915
1916/**
1917 * ib_dereg_mr - Deregisters a memory region and removes it from the
1918 *   HCA translation table.
1919 * @mr: The memory region to deregister.
1920 */
1921int ib_dereg_mr(struct ib_mr *mr);
1922
1923/**
1924 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
1925 *   IB_WR_FAST_REG_MR send work request.
1926 * @pd: The protection domain associated with the region.
1927 * @max_page_list_len: requested max physical buffer list length to be
1928 *   used with fast register work requests for this MR.
1929 */
1930struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
1931
1932/**
1933 * ib_alloc_fast_reg_page_list - Allocates a page list array
1934 * @device - ib device pointer.
1935 * @page_list_len - size of the page list array to be allocated.
1936 *
1937 * This allocates and returns a struct ib_fast_reg_page_list * and a
1938 * page_list array that is at least page_list_len in size.  The actual
1939 * size is returned in max_page_list_len.  The caller is responsible
1940 * for initializing the contents of the page_list array before posting
1941 * a send work request with the IB_WR_FAST_REG_MR opcode.
1942 *
1943 * The page_list array entries must be translated using one of the
1944 * ib_dma_*() functions just like the addresses passed to
1945 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
1946 * ib_fast_reg_page_list must not be modified by the caller until the
1947 * IB_WR_FAST_REG_MR work request completes.
1948 */
1949struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
1950				struct ib_device *device, int page_list_len);
1951
1952/**
1953 * ib_free_fast_reg_page_list - Deallocates a previously allocated
1954 *   page list array.
1955 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
1956 */
1957void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
1958
1959/**
1960 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
1961 *   R_Key and L_Key.
1962 * @mr - struct ib_mr pointer to be updated.
1963 * @newkey - new key to be used.
1964 */
1965static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
1966{
1967	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
1968	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
1969}
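/*
 * Illustrative sketch (not part of the original header): preparing a
 * fast-register MR and page list, and bumping the key before each
 * re-registration so stale R_Keys from the previous cycle cannot be reused.
 * The page-list length of 32 is arbitrary.
 */
#if 0	/* example only, not compiled */
static int fast_reg_setup_example(struct ib_device *dev, struct ib_pd *pd)
{
	struct ib_fast_reg_page_list *pl;
	struct ib_mr *mr;
	static u8 key;

	mr = ib_alloc_fast_reg_mr(pd, 32);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	pl = ib_alloc_fast_reg_page_list(dev, 32);
	if (IS_ERR(pl)) {
		ib_dereg_mr(mr);
		return PTR_ERR(pl);
	}

	ib_update_fast_reg_key(mr, ++key);

	/* ... fill pl->page_list via ib_dma_*() and post IB_WR_FAST_REG_MR ... */

	ib_free_fast_reg_page_list(pl);
	return ib_dereg_mr(mr);
}
#endif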
1970
1971/**
1972 * ib_alloc_mw - Allocates a memory window.
1973 * @pd: The protection domain associated with the memory window.
1974 */
1975struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1976
1977/**
1978 * ib_bind_mw - Posts a work request to the send queue of the specified
1979 *   QP, which binds the memory window to the given address range and
1980 *   remote access attributes.
1981 * @qp: QP to post the bind work request on.
1982 * @mw: The memory window to bind.
1983 * @mw_bind: Specifies information about the memory window, including
1984 *   its address range, remote access rights, and associated memory region.
1985 */
1986static inline int ib_bind_mw(struct ib_qp *qp,
1987			     struct ib_mw *mw,
1988			     struct ib_mw_bind *mw_bind)
1989{
1990	/* XXX reference counting in corresponding MR? */
1991	return mw->device->bind_mw ?
1992		mw->device->bind_mw(qp, mw, mw_bind) :
1993		-ENOSYS;
1994}
1995
1996/**
1997 * ib_dealloc_mw - Deallocates a memory window.
1998 * @mw: The memory window to deallocate.
1999 */
2000int ib_dealloc_mw(struct ib_mw *mw);
2001
2002/**
2003 * ib_alloc_fmr - Allocates an unmapped fast memory region.
2004 * @pd: The protection domain associated with the unmapped region.
2005 * @mr_access_flags: Specifies the memory access rights.
2006 * @fmr_attr: Attributes of the unmapped region.
2007 *
2008 * A fast memory region must be mapped before it can be used as part of
2009 * a work request.
2010 */
2011struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2012			    int mr_access_flags,
2013			    struct ib_fmr_attr *fmr_attr);
2014
2015/**
2016 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2017 * @fmr: The fast memory region to associate with the pages.
2018 * @page_list: An array of physical pages to map to the fast memory region.
2019 * @list_len: The number of pages in page_list.
2020 * @iova: The I/O virtual address to use with the mapped region.
2021 */
2022static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2023				  u64 *page_list, int list_len,
2024				  u64 iova)
2025{
2026	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2027}
2028
2029/**
2030 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2031 * @fmr_list: A linked list of fast memory regions to unmap.
2032 */
2033int ib_unmap_fmr(struct list_head *fmr_list);
2034
2035/**
2036 * ib_dealloc_fmr - Deallocates a fast memory region.
2037 * @fmr: The fast memory region to deallocate.
2038 */
2039int ib_dealloc_fmr(struct ib_fmr *fmr);
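/*
 * Illustrative sketch (not part of the original header): the basic FMR life
 * cycle - allocate, map a page list, unmap, deallocate.  The access flags
 * and attribute values are arbitrary.
 */
#if 0	/* example only, not compiled */
static int fmr_example(struct ib_pd *pd, u64 *pages, int npages, u64 iova)
{
	struct ib_fmr_attr attr = {
		.max_pages	= npages,
		.max_maps	= 32,
		.page_shift	= PAGE_SHIFT,
	};
	struct ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
			   &attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	ret = ib_map_phys_fmr(fmr, pages, npages, iova);
	if (!ret) {
		/* ... use fmr->lkey / fmr->rkey in work requests ... */
		list_add_tail(&fmr->list, &fmr_list);
		ib_unmap_fmr(&fmr_list);
	}

	return ib_dealloc_fmr(fmr);
}
#endif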
2040
2041/**
2042 * ib_attach_mcast - Attaches the specified QP to a multicast group.
2043 * @qp: QP to attach to the multicast group.  The QP must be type
2044 *   IB_QPT_UD.
2045 * @gid: Multicast group GID.
2046 * @lid: Multicast group LID in host byte order.
2047 *
2048 * In order to send and receive multicast packets, subnet
2049 * administration must have created the multicast group and configured
2050 * the fabric appropriately.  The port associated with the specified
2051 * QP must also be a member of the multicast group.
2052 */
2053int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2054
2055/**
2056 * ib_detach_mcast - Detaches the specified QP from a multicast group.
2057 * @qp: QP to detach from the multicast group.
2058 * @gid: Multicast group GID.
2059 * @lid: Multicast group LID in host byte order.
2060 */
2061int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
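/*
 * Illustrative sketch (not part of the original header): joining and leaving
 * a multicast group with a UD QP.  The GID and LID would normally come from
 * the subnet administrator; they are plain parameters here.
 */
#if 0	/* example only, not compiled */
static int mcast_example(struct ib_qp *ud_qp, union ib_gid *mgid, u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(ud_qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... send to and receive from the group via ud_qp ... */

	return ib_detach_mcast(ud_qp, mgid, mlid);
}
#endif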
2062
2063#endif /* IB_VERBS_H */
v5.4
   1/*
   2 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
   3 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
   5 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   6 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
   7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   8 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   9 *
  10 * This software is available to you under a choice of one of two
  11 * licenses.  You may choose to be licensed under the terms of the GNU
  12 * General Public License (GPL) Version 2, available from the file
  13 * COPYING in the main directory of this source tree, or the
  14 * OpenIB.org BSD license below:
  15 *
  16 *     Redistribution and use in source and binary forms, with or
  17 *     without modification, are permitted provided that the following
  18 *     conditions are met:
  19 *
  20 *      - Redistributions of source code must retain the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer.
  23 *
  24 *      - Redistributions in binary form must reproduce the above
  25 *        copyright notice, this list of conditions and the following
  26 *        disclaimer in the documentation and/or other materials
  27 *        provided with the distribution.
  28 *
  29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36 * SOFTWARE.
  37 */
  38
  39#if !defined(IB_VERBS_H)
  40#define IB_VERBS_H
  41
  42#include <linux/types.h>
  43#include <linux/device.h>
  44#include <linux/dma-mapping.h>
  45#include <linux/kref.h>
  46#include <linux/list.h>
  47#include <linux/rwsem.h>
  48#include <linux/workqueue.h>
  49#include <linux/irq_poll.h>
  50#include <uapi/linux/if_ether.h>
  51#include <net/ipv6.h>
  52#include <net/ip.h>
  53#include <linux/string.h>
  54#include <linux/slab.h>
  55#include <linux/netdevice.h>
  56#include <linux/refcount.h>
  57#include <linux/if_link.h>
  58#include <linux/atomic.h>
  59#include <linux/mmu_notifier.h>
  60#include <linux/uaccess.h>
  61#include <linux/cgroup_rdma.h>
  62#include <linux/irqflags.h>
  63#include <linux/preempt.h>
  64#include <linux/dim.h>
  65#include <uapi/rdma/ib_user_verbs.h>
  66#include <rdma/rdma_counter.h>
  67#include <rdma/restrack.h>
  68#include <rdma/signature.h>
  69#include <uapi/rdma/rdma_user_ioctl.h>
  70#include <uapi/rdma/ib_user_ioctl_verbs.h>
  71
  72#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
  73
  74struct ib_umem_odp;
  75
  76extern struct workqueue_struct *ib_wq;
  77extern struct workqueue_struct *ib_comp_wq;
  78extern struct workqueue_struct *ib_comp_unbound_wq;
  79
  80__printf(3, 4) __cold
  81void ibdev_printk(const char *level, const struct ib_device *ibdev,
  82		  const char *format, ...);
  83__printf(2, 3) __cold
  84void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
  85__printf(2, 3) __cold
  86void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
  87__printf(2, 3) __cold
  88void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
  89__printf(2, 3) __cold
  90void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
  91__printf(2, 3) __cold
  92void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
  93__printf(2, 3) __cold
  94void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
  95__printf(2, 3) __cold
  96void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
  97
  98#if defined(CONFIG_DYNAMIC_DEBUG)
  99#define ibdev_dbg(__dev, format, args...)                       \
 100	dynamic_ibdev_dbg(__dev, format, ##args)
 101#else
 102__printf(2, 3) __cold
 103static inline
 104void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
 105#endif
 106
 107#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
 108do {                                                                    \
 109	static DEFINE_RATELIMIT_STATE(_rs,                              \
 110				      DEFAULT_RATELIMIT_INTERVAL,       \
 111				      DEFAULT_RATELIMIT_BURST);         \
 112	if (__ratelimit(&_rs))                                          \
 113		ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
 114} while (0)
 115
 116#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
 117	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
 118#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
 119	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
 120#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
 121	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
 122#define ibdev_err_ratelimited(ibdev, fmt, ...) \
 123	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
 124#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
 125	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
 126#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
 127	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
 128#define ibdev_info_ratelimited(ibdev, fmt, ...) \
 129	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
 130
 131#if defined(CONFIG_DYNAMIC_DEBUG)
 132/* descriptor check is first to prevent flooding with "callbacks suppressed" */
 133#define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
 134do {                                                                    \
 135	static DEFINE_RATELIMIT_STATE(_rs,                              \
 136				      DEFAULT_RATELIMIT_INTERVAL,       \
 137				      DEFAULT_RATELIMIT_BURST);         \
 138	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
 139	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
 140		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
 141				    ##__VA_ARGS__);                     \
 142} while (0)
 143#else
 144__printf(2, 3) __cold
 145static inline
 146void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
 147#endif
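/*
 * Illustrative sketch (not part of the original header): the ibdev_*()
 * helpers prefix messages with the device name; the *_ratelimited variants
 * are intended for paths that can fire per packet.
 */
#if 0	/* example only, not compiled */
static void report_port_example(struct ib_device *ibdev, u8 port)
{
	ibdev_info(ibdev, "port %u became active\n", port);
	ibdev_warn_ratelimited(ibdev, "dropping packet on port %u\n", port);
}
#endif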
 148
 149union ib_gid {
 150	u8	raw[16];
 151	struct {
 152		__be64	subnet_prefix;
 153		__be64	interface_id;
 154	} global;
 155};
 156
 157extern union ib_gid zgid;
 158
 159enum ib_gid_type {
 160	/* If link layer is Ethernet, this is RoCE V1 */
 161	IB_GID_TYPE_IB        = 0,
 162	IB_GID_TYPE_ROCE      = 0,
 163	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
 164	IB_GID_TYPE_SIZE
 165};
 166
 167#define ROCE_V2_UDP_DPORT      4791
 168struct ib_gid_attr {
 169	struct net_device __rcu	*ndev;
 170	struct ib_device	*device;
 171	union ib_gid		gid;
 172	enum ib_gid_type	gid_type;
 173	u16			index;
 174	u8			port_num;
 175};
 176
 177enum {
 178	/* set the local administered indication */
 179	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
 180};
 181
 182enum rdma_transport_type {
 183	RDMA_TRANSPORT_IB,
 184	RDMA_TRANSPORT_IWARP,
 185	RDMA_TRANSPORT_USNIC,
 186	RDMA_TRANSPORT_USNIC_UDP,
 187	RDMA_TRANSPORT_UNSPECIFIED,
 188};
 189
 190enum rdma_protocol_type {
 191	RDMA_PROTOCOL_IB,
 192	RDMA_PROTOCOL_IBOE,
 193	RDMA_PROTOCOL_IWARP,
 194	RDMA_PROTOCOL_USNIC_UDP
 195};
 196
 197__attribute_const__ enum rdma_transport_type
 198rdma_node_get_transport(unsigned int node_type);
 199
 200enum rdma_network_type {
 201	RDMA_NETWORK_IB,
 202	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
 203	RDMA_NETWORK_IPV4,
 204	RDMA_NETWORK_IPV6
 205};
 206
 207static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
 208{
 209	if (network_type == RDMA_NETWORK_IPV4 ||
 210	    network_type == RDMA_NETWORK_IPV6)
 211		return IB_GID_TYPE_ROCE_UDP_ENCAP;
 212
 213	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
 214	return IB_GID_TYPE_IB;
 215}
 216
 217static inline enum rdma_network_type
 218rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
 219{
 220	if (attr->gid_type == IB_GID_TYPE_IB)
 221		return RDMA_NETWORK_IB;
 222
 223	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
 224		return RDMA_NETWORK_IPV4;
 225	else
 226		return RDMA_NETWORK_IPV6;
 227}
 228
 229enum rdma_link_layer {
 230	IB_LINK_LAYER_UNSPECIFIED,
 231	IB_LINK_LAYER_INFINIBAND,
 232	IB_LINK_LAYER_ETHERNET,
 233};
 234
 235enum ib_device_cap_flags {
 236	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
 237	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
 238	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
 239	IB_DEVICE_RAW_MULTI			= (1 << 3),
 240	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
 241	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
 242	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
 243	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
 244	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
 245	/* Not in use, former INIT_TYPE		= (1 << 9),*/
 246	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
 247	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
 248	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
 249	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
 250	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),
 251
 252	/*
 253	 * This device supports a per-device lkey or stag that can be
 254	 * used without performing a memory registration for the local
 255	 * memory.  Note that ULPs should never check this flag, but
 256	 * instead use the local_dma_lkey flag in the ib_pd structure,
 257	 * which will always contain a usable lkey.
 258	 */
 259	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
 260	/* Reserved, old SEND_W_INV		= (1 << 16),*/
 261	IB_DEVICE_MEM_WINDOW			= (1 << 17),
 262	/*
 263	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
 264	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
 265	 * messages and can verify the validity of checksum for
 266	 * incoming messages.  Setting this flag implies that the
 267	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
 268	 */
 269	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
 270	IB_DEVICE_UD_TSO			= (1 << 19),
 271	IB_DEVICE_XRC				= (1 << 20),
 272
 273	/*
 274	 * This device supports the IB "base memory management extension",
 275	 * which includes support for fast registrations (IB_WR_REG_MR,
 276	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
 277	 * also be set by any iWarp device which must support FRs to comply
 278	 * to the iWarp verbs spec.  iWarp devices also support the
 279	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
 280	 * stag.
 281	 */
 282	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
 283	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
 284	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
 285	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
 286	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
 287	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
 288	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
 289	/*
 290	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
 291	 * support execution of WQEs that involve synchronization
 292	 * of I/O operations with single completion queue managed
 293	 * by hardware.
 294	 */
 295	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
 296	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
 297	IB_DEVICE_INTEGRITY_HANDOVER		= (1 << 30),
 298	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
 299	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
 300	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
 301	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
 302	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
 303	IB_DEVICE_RDMA_NETDEV_OPA_VNIC		= (1ULL << 35),
 304	/* The device supports padding incoming writes to cacheline. */
 305	IB_DEVICE_PCI_WRITE_END_PADDING		= (1ULL << 36),
 306	IB_DEVICE_ALLOW_USER_UNREG		= (1ULL << 37),
 307};
 308
 309enum ib_atomic_cap {
 310	IB_ATOMIC_NONE,
 311	IB_ATOMIC_HCA,
 312	IB_ATOMIC_GLOB
 313};
 314
 315enum ib_odp_general_cap_bits {
 316	IB_ODP_SUPPORT		= 1 << 0,
 317	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
 318};
 319
 320enum ib_odp_transport_cap_bits {
 321	IB_ODP_SUPPORT_SEND	= 1 << 0,
 322	IB_ODP_SUPPORT_RECV	= 1 << 1,
 323	IB_ODP_SUPPORT_WRITE	= 1 << 2,
 324	IB_ODP_SUPPORT_READ	= 1 << 3,
 325	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
 326	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
 327};
 328
 329struct ib_odp_caps {
 330	uint64_t general_caps;
 331	struct {
 332		uint32_t  rc_odp_caps;
 333		uint32_t  uc_odp_caps;
 334		uint32_t  ud_odp_caps;
 335		uint32_t  xrc_odp_caps;
 336	} per_transport_caps;
 337};
 338
 339struct ib_rss_caps {
 340	/* Corresponding bit will be set if qp type from
 341	 * 'enum ib_qp_type' is supported, e.g.
 342	 * supported_qpts |= 1 << IB_QPT_UD
 343	 */
 344	u32 supported_qpts;
 345	u32 max_rwq_indirection_tables;
 346	u32 max_rwq_indirection_table_size;
 347};
 348
 349enum ib_tm_cap_flags {
 350	/*  Support tag matching with rendezvous offload for RC transport */
 351	IB_TM_CAP_RNDV_RC = 1 << 0,
 352};
 353
 354struct ib_tm_caps {
 355	/* Max size of RNDV header */
 356	u32 max_rndv_hdr_size;
 357	/* Max number of entries in tag matching list */
 358	u32 max_num_tags;
 359	/* From enum ib_tm_cap_flags */
 360	u32 flags;
 361	/* Max number of outstanding list operations */
 362	u32 max_ops;
 363	/* Max number of SGE in tag matching entry */
 364	u32 max_sge;
 365};
 366
 367struct ib_cq_init_attr {
 368	unsigned int	cqe;
 369	u32		comp_vector;
 370	u32		flags;
 371};
 372
 373enum ib_cq_attr_mask {
 374	IB_CQ_MODERATE = 1 << 0,
 375};
 376
 377struct ib_cq_caps {
 378	u16     max_cq_moderation_count;
 379	u16     max_cq_moderation_period;
 380};
 381
 382struct ib_dm_mr_attr {
 383	u64		length;
 384	u64		offset;
 385	u32		access_flags;
 386};
 387
 388struct ib_dm_alloc_attr {
 389	u64	length;
 390	u32	alignment;
 391	u32	flags;
 392};
 393
 394struct ib_device_attr {
 395	u64			fw_ver;
 396	__be64			sys_image_guid;
 397	u64			max_mr_size;
 398	u64			page_size_cap;
 399	u32			vendor_id;
 400	u32			vendor_part_id;
 401	u32			hw_ver;
 402	int			max_qp;
 403	int			max_qp_wr;
 404	u64			device_cap_flags;
 405	int			max_send_sge;
 406	int			max_recv_sge;
 407	int			max_sge_rd;
 408	int			max_cq;
 409	int			max_cqe;
 410	int			max_mr;
 411	int			max_pd;
 412	int			max_qp_rd_atom;
 413	int			max_ee_rd_atom;
 414	int			max_res_rd_atom;
 415	int			max_qp_init_rd_atom;
 416	int			max_ee_init_rd_atom;
 417	enum ib_atomic_cap	atomic_cap;
 418	enum ib_atomic_cap	masked_atomic_cap;
 419	int			max_ee;
 420	int			max_rdd;
 421	int			max_mw;
 422	int			max_raw_ipv6_qp;
 423	int			max_raw_ethy_qp;
 424	int			max_mcast_grp;
 425	int			max_mcast_qp_attach;
 426	int			max_total_mcast_qp_attach;
 427	int			max_ah;
 428	int			max_fmr;
 429	int			max_map_per_fmr;
 430	int			max_srq;
 431	int			max_srq_wr;
 432	int			max_srq_sge;
 433	unsigned int		max_fast_reg_page_list_len;
 434	unsigned int		max_pi_fast_reg_page_list_len;
 435	u16			max_pkeys;
 436	u8			local_ca_ack_delay;
 437	int			sig_prot_cap;
 438	int			sig_guard_cap;
 439	struct ib_odp_caps	odp_caps;
 440	uint64_t		timestamp_mask;
 441	uint64_t		hca_core_clock; /* in KHZ */
 442	struct ib_rss_caps	rss_caps;
 443	u32			max_wq_type_rq;
 444	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
 445	struct ib_tm_caps	tm_caps;
 446	struct ib_cq_caps       cq_caps;
 447	u64			max_dm_size;
 448};
 449
 450enum ib_mtu {
 451	IB_MTU_256  = 1,
 452	IB_MTU_512  = 2,
 453	IB_MTU_1024 = 3,
 454	IB_MTU_2048 = 4,
 455	IB_MTU_4096 = 5
 456};
 457
 458static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
 459{
 460	switch (mtu) {
 461	case IB_MTU_256:  return  256;
 462	case IB_MTU_512:  return  512;
 463	case IB_MTU_1024: return 1024;
 464	case IB_MTU_2048: return 2048;
 465	case IB_MTU_4096: return 4096;
 466	default: 	  return -1;
 467	}
 468}
 469
 470static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
 471{
 472	if (mtu >= 4096)
 473		return IB_MTU_4096;
 474	else if (mtu >= 2048)
 475		return IB_MTU_2048;
 476	else if (mtu >= 1024)
 477		return IB_MTU_1024;
 478	else if (mtu >= 512)
 479		return IB_MTU_512;
 480	else
 481		return IB_MTU_256;
 482}
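/*
 * Illustrative sketch (not part of the original header): the MTU helpers
 * convert between byte counts and the IBTA encoding; arbitrary byte counts
 * are rounded down to the nearest supported MTU.
 */
#if 0	/* example only, not compiled */
static void mtu_example(void)
{
	int bytes = ib_mtu_enum_to_int(IB_MTU_4096);	/* 4096 */
	enum ib_mtu mtu = ib_mtu_int_to_enum(3000);	/* IB_MTU_2048 */

	(void)bytes;
	(void)mtu;
}
#endif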
 483
 484enum ib_port_state {
 485	IB_PORT_NOP		= 0,
 486	IB_PORT_DOWN		= 1,
 487	IB_PORT_INIT		= 2,
 488	IB_PORT_ARMED		= 3,
 489	IB_PORT_ACTIVE		= 4,
 490	IB_PORT_ACTIVE_DEFER	= 5
 491};
 492
 493enum ib_port_phys_state {
 494	IB_PORT_PHYS_STATE_SLEEP = 1,
 495	IB_PORT_PHYS_STATE_POLLING = 2,
 496	IB_PORT_PHYS_STATE_DISABLED = 3,
 497	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
 498	IB_PORT_PHYS_STATE_LINK_UP = 5,
 499	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
 500	IB_PORT_PHYS_STATE_PHY_TEST = 7,
 501};
 502
 503enum ib_port_width {
 504	IB_WIDTH_1X	= 1,
 505	IB_WIDTH_2X	= 16,
 506	IB_WIDTH_4X	= 2,
 507	IB_WIDTH_8X	= 4,
 508	IB_WIDTH_12X	= 8
 509};
 510
 511static inline int ib_width_enum_to_int(enum ib_port_width width)
 512{
 513	switch (width) {
 514	case IB_WIDTH_1X:  return  1;
 515	case IB_WIDTH_2X:  return  2;
 516	case IB_WIDTH_4X:  return  4;
 517	case IB_WIDTH_8X:  return  8;
 518	case IB_WIDTH_12X: return 12;
 519	default: 	  return -1;
 520	}
 521}
 522
 523enum ib_port_speed {
 524	IB_SPEED_SDR	= 1,
 525	IB_SPEED_DDR	= 2,
 526	IB_SPEED_QDR	= 4,
 527	IB_SPEED_FDR10	= 8,
 528	IB_SPEED_FDR	= 16,
 529	IB_SPEED_EDR	= 32,
 530	IB_SPEED_HDR	= 64
 531};
 532
 533/**
 534 * struct rdma_hw_stats
 535 * @lock - Mutex to protect parallel write access to lifespan and values
 536 *    of counters, which are 64 bits and not guaranteed to be written
 537 *    atomically on 32-bit systems.
 538 * @timestamp - Used by the core code to track when the last update was
 539 * @lifespan - Used by the core code to determine how old the counters
 540 *   should be before being updated again.  Stored in jiffies, defaults
 541 *   to 10 milliseconds; drivers can override the default by specifying
 542 *   their own value during their allocation routine.
 543 * @names - Array of pointers to static names used for the counters in
 544 *   the directory.
 545 * @num_counters - How many hardware counters there are.  If names is
 546 *   shorter than this number, a kernel oops will result.  Driver authors
 547 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 548 *   in their code to prevent this.
 549 * @value - Array of u64 counters that are accessed by the sysfs code and
 550 *   filled in by the drivers get_stats routine
 551 */
 552struct rdma_hw_stats {
 553	struct mutex	lock; /* Protect lifespan and values[] */
 554	unsigned long	timestamp;
 555	unsigned long	lifespan;
 556	const char * const *names;
 557	int		num_counters;
 558	u64		value[];
 559};
 560
 561#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
 562/**
 563 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 564 *   for drivers.
 565 * @names - Array of static const char *
 566 * @num_counters - How many elements in array
 567 * @lifespan - How many milliseconds between updates
 568 */
 569static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
 570		const char * const *names, int num_counters,
 571		unsigned long lifespan)
 572{
 573	struct rdma_hw_stats *stats;
 574
 575	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
 576			GFP_KERNEL);
 577	if (!stats)
 578		return NULL;
 579	stats->names = names;
 580	stats->num_counters = num_counters;
 581	stats->lifespan = msecs_to_jiffies(lifespan);
 582
 583	return stats;
 584}
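/*
 * Illustrative sketch (not part of the original header): how a driver might
 * allocate its rdma_hw_stats, including the BUILD_BUG_ON() guard suggested
 * by the kernel-doc above.  The counter names are made up.
 */
#if 0	/* example only, not compiled */
static const char * const example_counter_names[] = {
	"rx_packets",
	"tx_packets",
};

static struct rdma_hw_stats *example_alloc_stats(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(example_counter_names) < 2);
	return rdma_alloc_hw_stats_struct(example_counter_names,
					  ARRAY_SIZE(example_counter_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
#endif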
 585
 586
 587/* Define bits for the various functionality this port needs to be supported by
 588 * the core.
 589 */
 590/* Management                           0x00000FFF */
 591#define RDMA_CORE_CAP_IB_MAD            0x00000001
 592#define RDMA_CORE_CAP_IB_SMI            0x00000002
 593#define RDMA_CORE_CAP_IB_CM             0x00000004
 594#define RDMA_CORE_CAP_IW_CM             0x00000008
 595#define RDMA_CORE_CAP_IB_SA             0x00000010
 596#define RDMA_CORE_CAP_OPA_MAD           0x00000020
 597
 598/* Address format                       0x000FF000 */
 599#define RDMA_CORE_CAP_AF_IB             0x00001000
 600#define RDMA_CORE_CAP_ETH_AH            0x00002000
 601#define RDMA_CORE_CAP_OPA_AH            0x00004000
 602#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000
 603
 604/* Protocol                             0xFFF00000 */
 605#define RDMA_CORE_CAP_PROT_IB           0x00100000
 606#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
 607#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
 608#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
 609#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
 610#define RDMA_CORE_CAP_PROT_USNIC        0x02000000
 611
 612#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
 613					| RDMA_CORE_CAP_PROT_ROCE     \
 614					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
 615
 616#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
 617					| RDMA_CORE_CAP_IB_MAD \
 618					| RDMA_CORE_CAP_IB_SMI \
 619					| RDMA_CORE_CAP_IB_CM  \
 620					| RDMA_CORE_CAP_IB_SA  \
 621					| RDMA_CORE_CAP_AF_IB)
 622#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
 623					| RDMA_CORE_CAP_IB_MAD  \
 624					| RDMA_CORE_CAP_IB_CM   \
 625					| RDMA_CORE_CAP_AF_IB   \
 626					| RDMA_CORE_CAP_ETH_AH)
 627#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
 628					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
 629					| RDMA_CORE_CAP_IB_MAD  \
 630					| RDMA_CORE_CAP_IB_CM   \
 631					| RDMA_CORE_CAP_AF_IB   \
 632					| RDMA_CORE_CAP_ETH_AH)
 633#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
 634					| RDMA_CORE_CAP_IW_CM)
 635#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
 636					| RDMA_CORE_CAP_OPA_MAD)
 637
 638#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)
 639
 640#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
 641
 642struct ib_port_attr {
 643	u64			subnet_prefix;
 644	enum ib_port_state	state;
 645	enum ib_mtu		max_mtu;
 646	enum ib_mtu		active_mtu;
 647	int			gid_tbl_len;
 648	unsigned int		ip_gids:1;
 649	/* This is the value from PortInfo CapabilityMask, defined by IBA */
 650	u32			port_cap_flags;
 651	u32			max_msg_sz;
 652	u32			bad_pkey_cntr;
 653	u32			qkey_viol_cntr;
 654	u16			pkey_tbl_len;
 655	u32			sm_lid;
 656	u32			lid;
 657	u8			lmc;
 658	u8			max_vl_num;
 659	u8			sm_sl;
 660	u8			subnet_timeout;
 661	u8			init_type_reply;
 662	u8			active_width;
 663	u8			active_speed;
 664	u8                      phys_state;
 665	u16			port_cap_flags2;
 666};
 667
 668enum ib_device_modify_flags {
 669	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
 670	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
 671};
 672
 673#define IB_DEVICE_NODE_DESC_MAX 64
 674
 675struct ib_device_modify {
 676	u64	sys_image_guid;
 677	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
 678};
 679
 680enum ib_port_modify_flags {
 681	IB_PORT_SHUTDOWN		= 1,
 682	IB_PORT_INIT_TYPE		= (1<<2),
 683	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
 684	IB_PORT_OPA_MASK_CHG		= (1<<4)
 685};
 686
 687struct ib_port_modify {
 688	u32	set_port_cap_mask;
 689	u32	clr_port_cap_mask;
 690	u8	init_type;
 691};
 692
 693enum ib_event_type {
 694	IB_EVENT_CQ_ERR,
 695	IB_EVENT_QP_FATAL,
 696	IB_EVENT_QP_REQ_ERR,
 697	IB_EVENT_QP_ACCESS_ERR,
 698	IB_EVENT_COMM_EST,
 699	IB_EVENT_SQ_DRAINED,
 700	IB_EVENT_PATH_MIG,
 701	IB_EVENT_PATH_MIG_ERR,
 702	IB_EVENT_DEVICE_FATAL,
 703	IB_EVENT_PORT_ACTIVE,
 704	IB_EVENT_PORT_ERR,
 705	IB_EVENT_LID_CHANGE,
 706	IB_EVENT_PKEY_CHANGE,
 707	IB_EVENT_SM_CHANGE,
 708	IB_EVENT_SRQ_ERR,
 709	IB_EVENT_SRQ_LIMIT_REACHED,
 710	IB_EVENT_QP_LAST_WQE_REACHED,
 711	IB_EVENT_CLIENT_REREGISTER,
 712	IB_EVENT_GID_CHANGE,
 713	IB_EVENT_WQ_FATAL,
 714};
 715
 716const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
 717
 718struct ib_event {
 719	struct ib_device	*device;
 720	union {
 721		struct ib_cq	*cq;
 722		struct ib_qp	*qp;
 723		struct ib_srq	*srq;
 724		struct ib_wq	*wq;
 725		u8		port_num;
 726	} element;
 727	enum ib_event_type	event;
 728};
 729
 730struct ib_event_handler {
 731	struct ib_device *device;
 732	void            (*handler)(struct ib_event_handler *, struct ib_event *);
 733	struct list_head  list;
 734};
 735
 736#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
 737	do {							\
 738		(_ptr)->device  = _device;			\
 739		(_ptr)->handler = _handler;			\
 740		INIT_LIST_HEAD(&(_ptr)->list);			\
 741	} while (0)
 742
 743struct ib_global_route {
 744	const struct ib_gid_attr *sgid_attr;
 745	union ib_gid	dgid;
 746	u32		flow_label;
 747	u8		sgid_index;
 748	u8		hop_limit;
 749	u8		traffic_class;
 750};
 751
 752struct ib_grh {
 753	__be32		version_tclass_flow;
 754	__be16		paylen;
 755	u8		next_hdr;
 756	u8		hop_limit;
 757	union ib_gid	sgid;
 758	union ib_gid	dgid;
 759};
 760
 761union rdma_network_hdr {
 762	struct ib_grh ibgrh;
 763	struct {
 764		/* The IB spec states that if it's IPv4, the IP header
 765		 * is located in the last 20 bytes of the GRH.
 766		 */
 767		u8		reserved[20];
 768		struct iphdr	roce4grh;
 769	};
 770};
 771
 772#define IB_QPN_MASK		0xFFFFFF
 773
 774enum {
 775	IB_MULTICAST_QPN = 0xffffff
 776};
 777
 778#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
 779#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
 780
 781enum ib_ah_flags {
 782	IB_AH_GRH	= 1
 783};
 784
 785enum ib_rate {
 786	IB_RATE_PORT_CURRENT = 0,
 787	IB_RATE_2_5_GBPS = 2,
 788	IB_RATE_5_GBPS   = 5,
 789	IB_RATE_10_GBPS  = 3,
 790	IB_RATE_20_GBPS  = 6,
 791	IB_RATE_30_GBPS  = 4,
 792	IB_RATE_40_GBPS  = 7,
 793	IB_RATE_60_GBPS  = 8,
 794	IB_RATE_80_GBPS  = 9,
 795	IB_RATE_120_GBPS = 10,
 796	IB_RATE_14_GBPS  = 11,
 797	IB_RATE_56_GBPS  = 12,
 798	IB_RATE_112_GBPS = 13,
 799	IB_RATE_168_GBPS = 14,
 800	IB_RATE_25_GBPS  = 15,
 801	IB_RATE_100_GBPS = 16,
 802	IB_RATE_200_GBPS = 17,
 803	IB_RATE_300_GBPS = 18,
 804	IB_RATE_28_GBPS  = 19,
 805	IB_RATE_50_GBPS  = 20,
 806	IB_RATE_400_GBPS = 21,
 807	IB_RATE_600_GBPS = 22,
 808};
 809
 810/**
 811 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 812 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 813 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 814 * @rate: rate to convert.
 815 */
 816__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
 817
 818/**
 819 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 820 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 821 * @rate: rate to convert.
 822 */
 823__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
 824
 825
 826/**
 827 * enum ib_mr_type - memory region type
 828 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 829 *                            normal registration
 830 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
 831 *                            register any arbitrary sg lists (without
 832 *                            the normal mr constraints - see
 833 *                            ib_map_mr_sg)
 834 * @IB_MR_TYPE_DM:            memory region that is used for device
 835 *                            memory registration
 836 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 837 *                            application
 838 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 839 *                            without address translations (VA=PA)
 840 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 841 *                            data integrity operations
 842 */
 843enum ib_mr_type {
 844	IB_MR_TYPE_MEM_REG,
 845	IB_MR_TYPE_SG_GAPS,
 846	IB_MR_TYPE_DM,
 847	IB_MR_TYPE_USER,
 848	IB_MR_TYPE_DMA,
 849	IB_MR_TYPE_INTEGRITY,
 850};
 851
 852enum ib_mr_status_check {
 853	IB_MR_CHECK_SIG_STATUS = 1,
 854};
 855
 856/**
 857 * struct ib_mr_status - Memory region status container
 858 *
 859 * @fail_status: Bitmask of MR checks status. For each
 860 *     failed check a corresponding status bit is set.
 861 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 862 *     failure.
 863 */
 864struct ib_mr_status {
 865	u32		    fail_status;
 866	struct ib_sig_err   sig_err;
 867};
 868
 869/**
 870 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 871 * enum.
 872 * @mult: multiple to convert.
 873 */
 874__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
 875
 876enum rdma_ah_attr_type {
 877	RDMA_AH_ATTR_TYPE_UNDEFINED,
 878	RDMA_AH_ATTR_TYPE_IB,
 879	RDMA_AH_ATTR_TYPE_ROCE,
 880	RDMA_AH_ATTR_TYPE_OPA,
 881};
 882
 883struct ib_ah_attr {
 884	u16			dlid;
 885	u8			src_path_bits;
 886};
 887
 888struct roce_ah_attr {
 889	u8			dmac[ETH_ALEN];
 890};
 891
 892struct opa_ah_attr {
 893	u32			dlid;
 894	u8			src_path_bits;
 895	bool			make_grd;
 896};
 897
 898struct rdma_ah_attr {
 899	struct ib_global_route	grh;
 900	u8			sl;
 901	u8			static_rate;
 902	u8			port_num;
 903	u8			ah_flags;
 904	enum rdma_ah_attr_type type;
 905	union {
 906		struct ib_ah_attr ib;
 907		struct roce_ah_attr roce;
 908		struct opa_ah_attr opa;
 909	};
 910};
 911
 912enum ib_wc_status {
 913	IB_WC_SUCCESS,
 914	IB_WC_LOC_LEN_ERR,
 915	IB_WC_LOC_QP_OP_ERR,
 916	IB_WC_LOC_EEC_OP_ERR,
 917	IB_WC_LOC_PROT_ERR,
 918	IB_WC_WR_FLUSH_ERR,
 919	IB_WC_MW_BIND_ERR,
 920	IB_WC_BAD_RESP_ERR,
 921	IB_WC_LOC_ACCESS_ERR,
 922	IB_WC_REM_INV_REQ_ERR,
 923	IB_WC_REM_ACCESS_ERR,
 924	IB_WC_REM_OP_ERR,
 925	IB_WC_RETRY_EXC_ERR,
 926	IB_WC_RNR_RETRY_EXC_ERR,
 927	IB_WC_LOC_RDD_VIOL_ERR,
 928	IB_WC_REM_INV_RD_REQ_ERR,
 929	IB_WC_REM_ABORT_ERR,
 930	IB_WC_INV_EECN_ERR,
 931	IB_WC_INV_EEC_STATE_ERR,
 932	IB_WC_FATAL_ERR,
 933	IB_WC_RESP_TIMEOUT_ERR,
 934	IB_WC_GENERAL_ERR
 935};
 936
 937const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
 938
 939enum ib_wc_opcode {
 940	IB_WC_SEND,
 941	IB_WC_RDMA_WRITE,
 942	IB_WC_RDMA_READ,
 943	IB_WC_COMP_SWAP,
 944	IB_WC_FETCH_ADD,
 945	IB_WC_LSO,
 946	IB_WC_LOCAL_INV,
 947	IB_WC_REG_MR,
 948	IB_WC_MASKED_COMP_SWAP,
 949	IB_WC_MASKED_FETCH_ADD,
 950/*
 951 * Set value of IB_WC_RECV so consumers can test if a completion is a
 952 * receive by testing (opcode & IB_WC_RECV).
 953 */
 954	IB_WC_RECV			= 1 << 7,
 955	IB_WC_RECV_RDMA_WITH_IMM
 956};
 957
 958enum ib_wc_flags {
 959	IB_WC_GRH		= 1,
 960	IB_WC_WITH_IMM		= (1<<1),
 961	IB_WC_WITH_INVALIDATE	= (1<<2),
 962	IB_WC_IP_CSUM_OK	= (1<<3),
 963	IB_WC_WITH_SMAC		= (1<<4),
 964	IB_WC_WITH_VLAN		= (1<<5),
 965	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
 966};
 967
 968struct ib_wc {
 969	union {
 970		u64		wr_id;
 971		struct ib_cqe	*wr_cqe;
 972	};
 973	enum ib_wc_status	status;
 974	enum ib_wc_opcode	opcode;
 975	u32			vendor_err;
 976	u32			byte_len;
 977	struct ib_qp	       *qp;
 978	union {
 979		__be32		imm_data;
 980		u32		invalidate_rkey;
 981	} ex;
 982	u32			src_qp;
 983	u32			slid;
 984	int			wc_flags;
 985	u16			pkey_index;
 986	u8			sl;
 987	u8			dlid_path_bits;
 988	u8			port_num;	/* valid only for DR SMPs on switches */
 989	u8			smac[ETH_ALEN];
 990	u16			vlan_id;
 991	u8			network_hdr_type;
 992};
 993
 994enum ib_cq_notify_flags {
 995	IB_CQ_SOLICITED			= 1 << 0,
 996	IB_CQ_NEXT_COMP			= 1 << 1,
 997	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
 998	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
 999};
1000
1001enum ib_srq_type {
1002	IB_SRQT_BASIC,
1003	IB_SRQT_XRC,
1004	IB_SRQT_TM,
1005};
1006
1007static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1008{
1009	return srq_type == IB_SRQT_XRC ||
1010	       srq_type == IB_SRQT_TM;
1011}
1012
1013enum ib_srq_attr_mask {
1014	IB_SRQ_MAX_WR	= 1 << 0,
1015	IB_SRQ_LIMIT	= 1 << 1,
1016};
1017
1018struct ib_srq_attr {
1019	u32	max_wr;
1020	u32	max_sge;
1021	u32	srq_limit;
1022};
1023
1024struct ib_srq_init_attr {
1025	void		      (*event_handler)(struct ib_event *, void *);
1026	void		       *srq_context;
1027	struct ib_srq_attr	attr;
1028	enum ib_srq_type	srq_type;
1029
1030	struct {
1031		struct ib_cq   *cq;
1032		union {
1033			struct {
1034				struct ib_xrcd *xrcd;
1035			} xrc;
1036
1037			struct {
1038				u32		max_num_tags;
1039			} tag_matching;
1040		};
1041	} ext;
1042};
1043
1044struct ib_qp_cap {
1045	u32	max_send_wr;
1046	u32	max_recv_wr;
1047	u32	max_send_sge;
1048	u32	max_recv_sge;
1049	u32	max_inline_data;
1050
1051	/*
1052	 * Maximum number of rdma_rw_ctx structures in flight at a time.
1053	 * ib_create_qp() will calculate the right amount of needed WRs
1054	 * and MRs based on this.
1055	 */
1056	u32	max_rdma_ctxs;
1057};
1058
1059enum ib_sig_type {
1060	IB_SIGNAL_ALL_WR,
1061	IB_SIGNAL_REQ_WR
1062};
1063
1064enum ib_qp_type {
1065	/*
1066	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1067	 * here (and in that order) since the MAD layer uses them as
1068	 * indices into a 2-entry table.
1069	 */
1070	IB_QPT_SMI,
1071	IB_QPT_GSI,
1072
1073	IB_QPT_RC,
1074	IB_QPT_UC,
1075	IB_QPT_UD,
1076	IB_QPT_RAW_IPV6,
1077	IB_QPT_RAW_ETHERTYPE,
1078	IB_QPT_RAW_PACKET = 8,
1079	IB_QPT_XRC_INI = 9,
1080	IB_QPT_XRC_TGT,
1081	IB_QPT_MAX,
1082	IB_QPT_DRIVER = 0xFF,
1083	/* Reserve a range for qp types internal to the low level driver.
1084	 * These qp types will not be visible at the IB core layer, so the
1085	 * IB_QPT_MAX usages should not be affected in the core layer
1086	 */
1087	IB_QPT_RESERVED1 = 0x1000,
1088	IB_QPT_RESERVED2,
1089	IB_QPT_RESERVED3,
1090	IB_QPT_RESERVED4,
1091	IB_QPT_RESERVED5,
1092	IB_QPT_RESERVED6,
1093	IB_QPT_RESERVED7,
1094	IB_QPT_RESERVED8,
1095	IB_QPT_RESERVED9,
1096	IB_QPT_RESERVED10,
1097};
1098
1099enum ib_qp_create_flags {
1100	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1101	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
1102	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1103	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1104	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1105	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1106	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
1107	/* FREE					= 1 << 7, */
1108	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
1109	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
1110	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
1111	IB_QP_CREATE_PCI_WRITE_END_PADDING	= 1 << 11,
1112	/* reserve bits 26-31 for low level drivers' internal use */
1113	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1114	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1115};
1116
1117/*
1118 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1119 * callback to destroy the passed in QP.
1120 */
1121
1122struct ib_qp_init_attr {
1123	/* Consumer's event_handler callback must not block */
1124	void                  (*event_handler)(struct ib_event *, void *);
1125
1126	void		       *qp_context;
1127	struct ib_cq	       *send_cq;
1128	struct ib_cq	       *recv_cq;
1129	struct ib_srq	       *srq;
1130	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1131	struct ib_qp_cap	cap;
1132	enum ib_sig_type	sq_sig_type;
1133	enum ib_qp_type		qp_type;
1134	u32			create_flags;
1135
1136	/*
1137	 * Only needed for special QP types, or when using the RW API.
1138	 */
1139	u8			port_num;
1140	struct ib_rwq_ind_table *rwq_ind_tbl;
1141	u32			source_qpn;
1142};
1143
1144struct ib_qp_open_attr {
1145	void                  (*event_handler)(struct ib_event *, void *);
1146	void		       *qp_context;
1147	u32			qp_num;
1148	enum ib_qp_type		qp_type;
1149};
1150
1151enum ib_rnr_timeout {
1152	IB_RNR_TIMER_655_36 =  0,
1153	IB_RNR_TIMER_000_01 =  1,
1154	IB_RNR_TIMER_000_02 =  2,
1155	IB_RNR_TIMER_000_03 =  3,
1156	IB_RNR_TIMER_000_04 =  4,
1157	IB_RNR_TIMER_000_06 =  5,
1158	IB_RNR_TIMER_000_08 =  6,
1159	IB_RNR_TIMER_000_12 =  7,
1160	IB_RNR_TIMER_000_16 =  8,
1161	IB_RNR_TIMER_000_24 =  9,
1162	IB_RNR_TIMER_000_32 = 10,
1163	IB_RNR_TIMER_000_48 = 11,
1164	IB_RNR_TIMER_000_64 = 12,
1165	IB_RNR_TIMER_000_96 = 13,
1166	IB_RNR_TIMER_001_28 = 14,
1167	IB_RNR_TIMER_001_92 = 15,
1168	IB_RNR_TIMER_002_56 = 16,
1169	IB_RNR_TIMER_003_84 = 17,
1170	IB_RNR_TIMER_005_12 = 18,
1171	IB_RNR_TIMER_007_68 = 19,
1172	IB_RNR_TIMER_010_24 = 20,
1173	IB_RNR_TIMER_015_36 = 21,
1174	IB_RNR_TIMER_020_48 = 22,
1175	IB_RNR_TIMER_030_72 = 23,
1176	IB_RNR_TIMER_040_96 = 24,
1177	IB_RNR_TIMER_061_44 = 25,
1178	IB_RNR_TIMER_081_92 = 26,
1179	IB_RNR_TIMER_122_88 = 27,
1180	IB_RNR_TIMER_163_84 = 28,
1181	IB_RNR_TIMER_245_76 = 29,
1182	IB_RNR_TIMER_327_68 = 30,
1183	IB_RNR_TIMER_491_52 = 31
1184};
1185
1186enum ib_qp_attr_mask {
1187	IB_QP_STATE			= 1,
1188	IB_QP_CUR_STATE			= (1<<1),
1189	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1190	IB_QP_ACCESS_FLAGS		= (1<<3),
1191	IB_QP_PKEY_INDEX		= (1<<4),
1192	IB_QP_PORT			= (1<<5),
1193	IB_QP_QKEY			= (1<<6),
1194	IB_QP_AV			= (1<<7),
1195	IB_QP_PATH_MTU			= (1<<8),
1196	IB_QP_TIMEOUT			= (1<<9),
1197	IB_QP_RETRY_CNT			= (1<<10),
1198	IB_QP_RNR_RETRY			= (1<<11),
1199	IB_QP_RQ_PSN			= (1<<12),
1200	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1201	IB_QP_ALT_PATH			= (1<<14),
1202	IB_QP_MIN_RNR_TIMER		= (1<<15),
1203	IB_QP_SQ_PSN			= (1<<16),
1204	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1205	IB_QP_PATH_MIG_STATE		= (1<<18),
1206	IB_QP_CAP			= (1<<19),
1207	IB_QP_DEST_QPN			= (1<<20),
1208	IB_QP_RESERVED1			= (1<<21),
1209	IB_QP_RESERVED2			= (1<<22),
1210	IB_QP_RESERVED3			= (1<<23),
1211	IB_QP_RESERVED4			= (1<<24),
1212	IB_QP_RATE_LIMIT		= (1<<25),
1213};
1214
1215enum ib_qp_state {
1216	IB_QPS_RESET,
1217	IB_QPS_INIT,
1218	IB_QPS_RTR,
1219	IB_QPS_RTS,
1220	IB_QPS_SQD,
1221	IB_QPS_SQE,
1222	IB_QPS_ERR
1223};
1224
1225enum ib_mig_state {
1226	IB_MIG_MIGRATED,
1227	IB_MIG_REARM,
1228	IB_MIG_ARMED
1229};
1230
1231enum ib_mw_type {
1232	IB_MW_TYPE_1 = 1,
1233	IB_MW_TYPE_2 = 2
1234};
1235
1236struct ib_qp_attr {
1237	enum ib_qp_state	qp_state;
1238	enum ib_qp_state	cur_qp_state;
1239	enum ib_mtu		path_mtu;
1240	enum ib_mig_state	path_mig_state;
1241	u32			qkey;
1242	u32			rq_psn;
1243	u32			sq_psn;
1244	u32			dest_qp_num;
1245	int			qp_access_flags;
1246	struct ib_qp_cap	cap;
1247	struct rdma_ah_attr	ah_attr;
1248	struct rdma_ah_attr	alt_ah_attr;
1249	u16			pkey_index;
1250	u16			alt_pkey_index;
1251	u8			en_sqd_async_notify;
1252	u8			sq_draining;
1253	u8			max_rd_atomic;
1254	u8			max_dest_rd_atomic;
1255	u8			min_rnr_timer;
1256	u8			port_num;
1257	u8			timeout;
1258	u8			retry_cnt;
1259	u8			rnr_retry;
1260	u8			alt_port_num;
1261	u8			alt_timeout;
1262	u32			rate_limit;
1263};
1264
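/*
 * Example (illustrative sketch, not part of the original header): a consumer
 * moving an RC QP from RESET to INIT fills only the attributes named in the
 * mask it passes to ib_modify_qp().  The values below are hypothetical.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * The attribute mask must match what ib_modify_qp_is_ok() allows for the
 * requested state transition.
 */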
1265enum ib_wr_opcode {
1266	/* These are shared with userspace */
1267	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1268	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1269	IB_WR_SEND = IB_UVERBS_WR_SEND,
1270	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1271	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1272	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1273	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1274	IB_WR_LSO = IB_UVERBS_WR_TSO,
1275	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1276	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1277	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1278	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1279		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1280	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1281		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1282
1283	/* These are kernel only and can not be issued by userspace */
1284	IB_WR_REG_MR = 0x20,
1285	IB_WR_REG_MR_INTEGRITY,
1286
1287	/* Reserved values for low-level drivers' internal use.
1288	 * These values will not be used at all in the ib core layer.
1289	 */
1290	IB_WR_RESERVED1 = 0xf0,
1291	IB_WR_RESERVED2,
1292	IB_WR_RESERVED3,
1293	IB_WR_RESERVED4,
1294	IB_WR_RESERVED5,
1295	IB_WR_RESERVED6,
1296	IB_WR_RESERVED7,
1297	IB_WR_RESERVED8,
1298	IB_WR_RESERVED9,
1299	IB_WR_RESERVED10,
1300};
1301
1302enum ib_send_flags {
1303	IB_SEND_FENCE		= 1,
1304	IB_SEND_SIGNALED	= (1<<1),
1305	IB_SEND_SOLICITED	= (1<<2),
1306	IB_SEND_INLINE		= (1<<3),
1307	IB_SEND_IP_CSUM		= (1<<4),
1308
1309	/* reserve bits 26-31 for low level drivers' internal use */
1310	IB_SEND_RESERVED_START	= (1 << 26),
1311	IB_SEND_RESERVED_END	= (1 << 31),
1312};
1313
1314struct ib_sge {
1315	u64	addr;
1316	u32	length;
1317	u32	lkey;
1318};
1319
1320struct ib_cqe {
1321	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1322};
1323
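/*
 * Example (illustrative sketch, not part of the original header): with the
 * CQE-based completion model a consumer embeds struct ib_cqe in its own
 * request structure, points wr_cqe at it when posting, and recovers the
 * request with container_of() in the done callback.  "mydrv_req" and
 * "mydrv_send_done" are hypothetical names.
 *
 *	struct mydrv_req {
 *		struct ib_cqe	cqe;
 *		...
 *	};
 *
 *	static void mydrv_send_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct mydrv_req *req =
 *			container_of(wc->wr_cqe, struct mydrv_req, cqe);
 *		...
 *	}
 *
 *	req->cqe.done = mydrv_send_done;
 *	wr.wr_cqe = &req->cqe;
 */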
1324struct ib_send_wr {
1325	struct ib_send_wr      *next;
1326	union {
1327		u64		wr_id;
1328		struct ib_cqe	*wr_cqe;
1329	};
1330	struct ib_sge	       *sg_list;
1331	int			num_sge;
1332	enum ib_wr_opcode	opcode;
1333	int			send_flags;
1334	union {
1335		__be32		imm_data;
1336		u32		invalidate_rkey;
1337	} ex;
1338};
1339
1340struct ib_rdma_wr {
1341	struct ib_send_wr	wr;
1342	u64			remote_addr;
1343	u32			rkey;
1344};
1345
1346static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1347{
1348	return container_of(wr, struct ib_rdma_wr, wr);
1349}
1350
1351struct ib_atomic_wr {
1352	struct ib_send_wr	wr;
1353	u64			remote_addr;
1354	u64			compare_add;
1355	u64			swap;
1356	u64			compare_add_mask;
1357	u64			swap_mask;
1358	u32			rkey;
1359};
1360
1361static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1362{
1363	return container_of(wr, struct ib_atomic_wr, wr);
1364}
1365
1366struct ib_ud_wr {
1367	struct ib_send_wr	wr;
1368	struct ib_ah		*ah;
1369	void			*header;
1370	int			hlen;
1371	int			mss;
1372	u32			remote_qpn;
1373	u32			remote_qkey;
1374	u16			pkey_index; /* valid for GSI only */
1375	u8			port_num;   /* valid for DR SMPs on switch only */
1376};
1377
1378static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1379{
1380	return container_of(wr, struct ib_ud_wr, wr);
1381}
1382
1383struct ib_reg_wr {
1384	struct ib_send_wr	wr;
1385	struct ib_mr		*mr;
1386	u32			key;
1387	int			access;
1388};
1389
1390static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1391{
1392	return container_of(wr, struct ib_reg_wr, wr);
1393}
1394
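/*
 * Example (illustrative sketch, not part of the original header): the
 * wrappers above let a low-level driver's post_send recover the extended
 * work request from the embedded struct ib_send_wr, e.g.:
 *
 *	switch (wr->opcode) {
 *	case IB_WR_RDMA_WRITE:
 *	case IB_WR_RDMA_READ:
 *		remote_addr = rdma_wr(wr)->remote_addr;
 *		rkey	    = rdma_wr(wr)->rkey;
 *		break;
 *	case IB_WR_SEND:
 *		if (qp->qp_type == IB_QPT_UD)
 *			ah = ud_wr(wr)->ah;
 *		break;
 *	case IB_WR_REG_MR:
 *		mr = reg_wr(wr)->mr;
 *		break;
 *	default:
 *		break;
 *	}
 */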
1395struct ib_recv_wr {
1396	struct ib_recv_wr      *next;
1397	union {
1398		u64		wr_id;
1399		struct ib_cqe	*wr_cqe;
1400	};
1401	struct ib_sge	       *sg_list;
1402	int			num_sge;
1403};
1404
1405enum ib_access_flags {
1406	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1407	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1408	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1409	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1410	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1411	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1412	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1413	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1414
1415	IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
1416};
1417
1418/*
1419 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1420 * are hidden here instead of a uapi header!
1421 */
1422enum ib_mr_rereg_flags {
1423	IB_MR_REREG_TRANS	= 1,
1424	IB_MR_REREG_PD		= (1<<1),
1425	IB_MR_REREG_ACCESS	= (1<<2),
1426	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1427};
1428
1429struct ib_fmr_attr {
1430	int	max_pages;
1431	int	max_maps;
1432	u8	page_shift;
1433};
1434
1435struct ib_umem;
1436
1437enum rdma_remove_reason {
1438	/*
1439	 * Userspace requested uobject deletion or initial try
1440	 * to remove uobject via cleanup. Call could fail
1441	 */
1442	RDMA_REMOVE_DESTROY,
1443	/* Context deletion. This call should delete the actual object itself */
1444	RDMA_REMOVE_CLOSE,
1445	/* Driver is being hot-unplugged. This call should delete the actual object itself */
1446	RDMA_REMOVE_DRIVER_REMOVE,
1447	/* uobj is being cleaned-up before being committed */
1448	RDMA_REMOVE_ABORT,
1449};
1450
1451struct ib_rdmacg_object {
1452#ifdef CONFIG_CGROUP_RDMA
1453	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
1454#endif
1455};
1456
1457struct ib_ucontext {
1458	struct ib_device       *device;
1459	struct ib_uverbs_file  *ufile;
1460	/*
1461	 * 'closing' can be read by the driver only during a destroy callback,
1462	 * it is set when we are closing the file descriptor and indicates
1463	 * that mm_sem may be locked.
1464	 */
1465	bool closing;
1466
1467	bool cleanup_retryable;
1468
1469	struct ib_rdmacg_object	cg_obj;
1470	/*
1471	 * Implementation details of the RDMA core, don't use in drivers:
1472	 */
1473	struct rdma_restrack_entry res;
1474};
1475
1476struct ib_uobject {
1477	u64			user_handle;	/* handle given to us by userspace */
1478	/* ufile & ucontext owning this object */
1479	struct ib_uverbs_file  *ufile;
1480	/* FIXME, save memory: ufile->context == context */
1481	struct ib_ucontext     *context;	/* associated user context */
1482	void		       *object;		/* containing object */
1483	struct list_head	list;		/* link to context's list */
1484	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
1485	int			id;		/* index into kernel idr */
1486	struct kref		ref;
1487	atomic_t		usecnt;		/* protects exclusive access */
1488	struct rcu_head		rcu;		/* kfree_rcu() overhead */
1489
1490	const struct uverbs_api_object *uapi_object;
1491};
1492
1493struct ib_udata {
1494	const void __user *inbuf;
1495	void __user *outbuf;
1496	size_t       inlen;
1497	size_t       outlen;
1498};
1499
1500struct ib_pd {
1501	u32			local_dma_lkey;
1502	u32			flags;
1503	struct ib_device       *device;
1504	struct ib_uobject      *uobject;
1505	atomic_t          	usecnt; /* count all resources */
1506
1507	u32			unsafe_global_rkey;
1508
1509	/*
1510	 * Implementation details of the RDMA core, don't use in drivers:
1511	 */
1512	struct ib_mr	       *__internal_mr;
1513	struct rdma_restrack_entry res;
1514};
1515
1516struct ib_xrcd {
1517	struct ib_device       *device;
1518	atomic_t		usecnt; /* count all exposed resources */
1519	struct inode	       *inode;
1520
1521	struct mutex		tgt_qp_mutex;
1522	struct list_head	tgt_qp_list;
1523};
1524
1525struct ib_ah {
1526	struct ib_device	*device;
1527	struct ib_pd		*pd;
1528	struct ib_uobject	*uobject;
1529	const struct ib_gid_attr *sgid_attr;
1530	enum rdma_ah_attr_type	type;
1531};
1532
1533typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1534
1535enum ib_poll_context {
1536	IB_POLL_DIRECT,		   /* caller context, no hw completions */
1537	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
1538	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
1539	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1540};
1541
1542struct ib_cq {
1543	struct ib_device       *device;
1544	struct ib_uobject      *uobject;
1545	ib_comp_handler   	comp_handler;
1546	void                  (*event_handler)(struct ib_event *, void *);
1547	void                   *cq_context;
1548	int               	cqe;
1549	atomic_t          	usecnt; /* count number of work queues */
1550	enum ib_poll_context	poll_ctx;
1551	struct ib_wc		*wc;
1552	union {
1553		struct irq_poll		iop;
1554		struct work_struct	work;
1555	};
1556	struct workqueue_struct *comp_wq;
1557	struct dim *dim;
1558	/*
1559	 * Implementation details of the RDMA core, don't use in drivers:
1560	 */
1561	struct rdma_restrack_entry res;
1562};
1563
1564struct ib_srq {
1565	struct ib_device       *device;
1566	struct ib_pd	       *pd;
1567	struct ib_uobject      *uobject;
1568	void		      (*event_handler)(struct ib_event *, void *);
1569	void		       *srq_context;
1570	enum ib_srq_type	srq_type;
1571	atomic_t		usecnt;
1572
1573	struct {
1574		struct ib_cq   *cq;
1575		union {
1576			struct {
1577				struct ib_xrcd *xrcd;
1578				u32		srq_num;
1579			} xrc;
1580		};
1581	} ext;
1582};
1583
1584enum ib_raw_packet_caps {
1585	/* Stripping the cvlan from an incoming packet and reporting it in the
1586	 * matching work completion is supported.
1587	 */
1588	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
1589	/* Scattering the FCS field of an incoming packet to host memory is supported.
1590	 */
1591	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
1592	/* Checksum offloads are supported (for both send and receive). */
1593	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
1594	/* When a packet is received for an RQ with no receive WQEs, the
1595	 * packet processing is delayed.
1596	 */
1597	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
1598};
1599
1600enum ib_wq_type {
1601	IB_WQT_RQ
1602};
1603
1604enum ib_wq_state {
1605	IB_WQS_RESET,
1606	IB_WQS_RDY,
1607	IB_WQS_ERR
1608};
1609
1610struct ib_wq {
1611	struct ib_device       *device;
1612	struct ib_uobject      *uobject;
1613	void		    *wq_context;
1614	void		    (*event_handler)(struct ib_event *, void *);
1615	struct ib_pd	       *pd;
1616	struct ib_cq	       *cq;
1617	u32		wq_num;
1618	enum ib_wq_state       state;
1619	enum ib_wq_type	wq_type;
1620	atomic_t		usecnt;
1621};
1622
1623enum ib_wq_flags {
1624	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
1625	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
1626	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
1627	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1628};
1629
1630struct ib_wq_init_attr {
1631	void		       *wq_context;
1632	enum ib_wq_type	wq_type;
1633	u32		max_wr;
1634	u32		max_sge;
1635	struct	ib_cq	       *cq;
1636	void		    (*event_handler)(struct ib_event *, void *);
1637	u32		create_flags; /* Use enum ib_wq_flags */
1638};
1639
1640enum ib_wq_attr_mask {
1641	IB_WQ_STATE		= 1 << 0,
1642	IB_WQ_CUR_STATE		= 1 << 1,
1643	IB_WQ_FLAGS		= 1 << 2,
1644};
1645
1646struct ib_wq_attr {
1647	enum	ib_wq_state	wq_state;
1648	enum	ib_wq_state	curr_wq_state;
1649	u32			flags; /* Use enum ib_wq_flags */
1650	u32			flags_mask; /* Use enum ib_wq_flags */
1651};
1652
1653struct ib_rwq_ind_table {
1654	struct ib_device	*device;
1655	struct ib_uobject      *uobject;
1656	atomic_t		usecnt;
1657	u32		ind_tbl_num;
1658	u32		log_ind_tbl_size;
1659	struct ib_wq	**ind_tbl;
1660};
1661
1662struct ib_rwq_ind_table_init_attr {
1663	u32		log_ind_tbl_size;
1664	/* Each entry is a pointer to Receive Work Queue */
1665	struct ib_wq	**ind_tbl;
1666};
1667
1668enum port_pkey_state {
1669	IB_PORT_PKEY_NOT_VALID = 0,
1670	IB_PORT_PKEY_VALID = 1,
1671	IB_PORT_PKEY_LISTED = 2,
1672};
1673
1674struct ib_qp_security;
1675
1676struct ib_port_pkey {
1677	enum port_pkey_state	state;
1678	u16			pkey_index;
1679	u8			port_num;
1680	struct list_head	qp_list;
1681	struct list_head	to_error_list;
1682	struct ib_qp_security  *sec;
1683};
1684
1685struct ib_ports_pkeys {
1686	struct ib_port_pkey	main;
1687	struct ib_port_pkey	alt;
1688};
1689
1690struct ib_qp_security {
1691	struct ib_qp	       *qp;
1692	struct ib_device       *dev;
1693	/* Hold this mutex when changing port and pkey settings. */
1694	struct mutex		mutex;
1695	struct ib_ports_pkeys  *ports_pkeys;
1696	/* A list of all open shared QP handles.  Required to enforce security
1697	 * properly for all users of a shared QP.
1698	 */
1699	struct list_head        shared_qp_list;
1700	void                   *security;
1701	bool			destroying;
1702	atomic_t		error_list_count;
1703	struct completion	error_complete;
1704	int			error_comps_pending;
1705};
1706
1707/*
1708 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1709 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1710 */
1711struct ib_qp {
1712	struct ib_device       *device;
1713	struct ib_pd	       *pd;
1714	struct ib_cq	       *send_cq;
1715	struct ib_cq	       *recv_cq;
1716	spinlock_t		mr_lock;
1717	int			mrs_used;
1718	struct list_head	rdma_mrs;
1719	struct list_head	sig_mrs;
1720	struct ib_srq	       *srq;
1721	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1722	struct list_head	xrcd_list;
1723
1724	/* count times opened, mcast attaches, flow attaches */
1725	atomic_t		usecnt;
1726	struct list_head	open_list;
1727	struct ib_qp           *real_qp;
1728	struct ib_uobject      *uobject;
1729	void                  (*event_handler)(struct ib_event *, void *);
1730	void		       *qp_context;
1731	/* sgid_attrs associated with the AV's */
1732	const struct ib_gid_attr *av_sgid_attr;
1733	const struct ib_gid_attr *alt_path_sgid_attr;
1734	u32			qp_num;
1735	u32			max_write_sge;
1736	u32			max_read_sge;
1737	enum ib_qp_type		qp_type;
1738	struct ib_rwq_ind_table *rwq_ind_tbl;
1739	struct ib_qp_security  *qp_sec;
1740	u8			port;
1741
1742	bool			integrity_en;
1743	/*
1744	 * Implementation details of the RDMA core, don't use in drivers:
1745	 */
1746	struct rdma_restrack_entry     res;
1747
1748	/* The counter the qp is bound to */
1749	struct rdma_counter    *counter;
1750};
1751
1752struct ib_dm {
1753	struct ib_device  *device;
1754	u32		   length;
1755	u32		   flags;
1756	struct ib_uobject *uobject;
1757	atomic_t	   usecnt;
1758};
1759
1760struct ib_mr {
1761	struct ib_device  *device;
1762	struct ib_pd	  *pd;
1763	u32		   lkey;
1764	u32		   rkey;
1765	u64		   iova;
1766	u64		   length;
1767	unsigned int	   page_size;
1768	enum ib_mr_type	   type;
1769	bool		   need_inval;
1770	union {
1771		struct ib_uobject	*uobject;	/* user */
1772		struct list_head	qp_entry;	/* FR */
1773	};
1774
1775	struct ib_dm      *dm;
1776	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1777	/*
1778	 * Implementation details of the RDMA core, don't use in drivers:
1779	 */
1780	struct rdma_restrack_entry res;
1781};
1782
1783struct ib_mw {
1784	struct ib_device	*device;
1785	struct ib_pd		*pd;
1786	struct ib_uobject	*uobject;
1787	u32			rkey;
1788	enum ib_mw_type         type;
1789};
1790
1791struct ib_fmr {
1792	struct ib_device	*device;
1793	struct ib_pd		*pd;
1794	struct list_head	list;
1795	u32			lkey;
1796	u32			rkey;
1797};
1798
1799/* Supported steering options */
1800enum ib_flow_attr_type {
1801	/* steering according to rule specifications */
1802	IB_FLOW_ATTR_NORMAL		= 0x0,
1803	/* default unicast and multicast rule -
1804	 * receive all Eth traffic which isn't steered to any QP
1805	 */
1806	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1807	/* default multicast rule -
1808	 * receive all Eth multicast traffic which isn't steered to any QP
1809	 */
1810	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1811	/* sniffer rule - receive all port traffic */
1812	IB_FLOW_ATTR_SNIFFER		= 0x3
1813};
1814
1815/* Supported steering header types */
1816enum ib_flow_spec_type {
1817	/* L2 headers*/
1818	IB_FLOW_SPEC_ETH		= 0x20,
1819	IB_FLOW_SPEC_IB			= 0x22,
1820	/* L3 header*/
1821	IB_FLOW_SPEC_IPV4		= 0x30,
1822	IB_FLOW_SPEC_IPV6		= 0x31,
1823	IB_FLOW_SPEC_ESP                = 0x34,
1824	/* L4 headers*/
1825	IB_FLOW_SPEC_TCP		= 0x40,
1826	IB_FLOW_SPEC_UDP		= 0x41,
1827	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
1828	IB_FLOW_SPEC_GRE		= 0x51,
1829	IB_FLOW_SPEC_MPLS		= 0x60,
1830	IB_FLOW_SPEC_INNER		= 0x100,
1831	/* Actions */
1832	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1833	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1834	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
1835	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1836};
1837#define IB_FLOW_SPEC_LAYER_MASK	0xF0
1838#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1839
1840/* Flow steering rule priority is set according to its domain.
1841 * Lower domain value means higher priority.
1842 */
1843enum ib_flow_domain {
1844	IB_FLOW_DOMAIN_USER,
1845	IB_FLOW_DOMAIN_ETHTOOL,
1846	IB_FLOW_DOMAIN_RFS,
1847	IB_FLOW_DOMAIN_NIC,
1848	IB_FLOW_DOMAIN_NUM /* Must be last */
1849};
1850
1851enum ib_flow_flags {
1852	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1853	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1854	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1855};
1856
1857struct ib_flow_eth_filter {
1858	u8	dst_mac[6];
1859	u8	src_mac[6];
1860	__be16	ether_type;
1861	__be16	vlan_tag;
1862	/* Must be last */
1863	u8	real_sz[0];
1864};
1865
1866struct ib_flow_spec_eth {
1867	u32			  type;
1868	u16			  size;
1869	struct ib_flow_eth_filter val;
1870	struct ib_flow_eth_filter mask;
1871};
1872
1873struct ib_flow_ib_filter {
1874	__be16 dlid;
1875	__u8   sl;
1876	/* Must be last */
1877	u8	real_sz[0];
1878};
1879
1880struct ib_flow_spec_ib {
1881	u32			 type;
1882	u16			 size;
1883	struct ib_flow_ib_filter val;
1884	struct ib_flow_ib_filter mask;
1885};
1886
1887/* IPv4 header flags */
1888enum ib_ipv4_flags {
1889	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1890	IB_IPV4_MORE_FRAG = 0x4  /* All fragments except the last one
1891				    have this flag set */
1892};
1893
1894struct ib_flow_ipv4_filter {
1895	__be32	src_ip;
1896	__be32	dst_ip;
1897	u8	proto;
1898	u8	tos;
1899	u8	ttl;
1900	u8	flags;
1901	/* Must be last */
1902	u8	real_sz[0];
1903};
1904
1905struct ib_flow_spec_ipv4 {
1906	u32			   type;
1907	u16			   size;
1908	struct ib_flow_ipv4_filter val;
1909	struct ib_flow_ipv4_filter mask;
1910};
1911
1912struct ib_flow_ipv6_filter {
1913	u8	src_ip[16];
1914	u8	dst_ip[16];
1915	__be32	flow_label;
1916	u8	next_hdr;
1917	u8	traffic_class;
1918	u8	hop_limit;
1919	/* Must be last */
1920	u8	real_sz[0];
1921};
1922
1923struct ib_flow_spec_ipv6 {
1924	u32			   type;
1925	u16			   size;
1926	struct ib_flow_ipv6_filter val;
1927	struct ib_flow_ipv6_filter mask;
1928};
1929
1930struct ib_flow_tcp_udp_filter {
1931	__be16	dst_port;
1932	__be16	src_port;
1933	/* Must be last */
1934	u8	real_sz[0];
1935};
1936
1937struct ib_flow_spec_tcp_udp {
1938	u32			      type;
1939	u16			      size;
1940	struct ib_flow_tcp_udp_filter val;
1941	struct ib_flow_tcp_udp_filter mask;
1942};
1943
1944struct ib_flow_tunnel_filter {
1945	__be32	tunnel_id;
1946	u8	real_sz[0];
1947};
1948
1949/* ib_flow_spec_tunnel describes a VXLAN tunnel;
1950 * the tunnel_id in val holds the VNI value.
1951 */
1952struct ib_flow_spec_tunnel {
1953	u32			      type;
1954	u16			      size;
1955	struct ib_flow_tunnel_filter  val;
1956	struct ib_flow_tunnel_filter  mask;
1957};
1958
1959struct ib_flow_esp_filter {
1960	__be32	spi;
1961	__be32  seq;
1962	/* Must be last */
1963	u8	real_sz[0];
1964};
1965
1966struct ib_flow_spec_esp {
1967	u32                           type;
1968	u16			      size;
1969	struct ib_flow_esp_filter     val;
1970	struct ib_flow_esp_filter     mask;
1971};
1972
1973struct ib_flow_gre_filter {
1974	__be16 c_ks_res0_ver;
1975	__be16 protocol;
1976	__be32 key;
1977	/* Must be last */
1978	u8	real_sz[0];
1979};
1980
1981struct ib_flow_spec_gre {
1982	u32                           type;
1983	u16			      size;
1984	struct ib_flow_gre_filter     val;
1985	struct ib_flow_gre_filter     mask;
1986};
1987
1988struct ib_flow_mpls_filter {
1989	__be32 tag;
1990	/* Must be last */
1991	u8	real_sz[0];
1992};
1993
1994struct ib_flow_spec_mpls {
1995	u32                           type;
1996	u16			      size;
1997	struct ib_flow_mpls_filter     val;
1998	struct ib_flow_mpls_filter     mask;
1999};
2000
2001struct ib_flow_spec_action_tag {
2002	enum ib_flow_spec_type	      type;
2003	u16			      size;
2004	u32                           tag_id;
2005};
2006
2007struct ib_flow_spec_action_drop {
2008	enum ib_flow_spec_type	      type;
2009	u16			      size;
2010};
2011
2012struct ib_flow_spec_action_handle {
2013	enum ib_flow_spec_type	      type;
2014	u16			      size;
2015	struct ib_flow_action	     *act;
2016};
2017
2018enum ib_counters_description {
2019	IB_COUNTER_PACKETS,
2020	IB_COUNTER_BYTES,
2021};
2022
2023struct ib_flow_spec_action_count {
2024	enum ib_flow_spec_type type;
2025	u16 size;
2026	struct ib_counters *counters;
2027};
2028
2029union ib_flow_spec {
2030	struct {
2031		u32			type;
2032		u16			size;
2033	};
2034	struct ib_flow_spec_eth		eth;
2035	struct ib_flow_spec_ib		ib;
2036	struct ib_flow_spec_ipv4        ipv4;
2037	struct ib_flow_spec_tcp_udp	tcp_udp;
2038	struct ib_flow_spec_ipv6        ipv6;
2039	struct ib_flow_spec_tunnel      tunnel;
2040	struct ib_flow_spec_esp		esp;
2041	struct ib_flow_spec_gre		gre;
2042	struct ib_flow_spec_mpls	mpls;
2043	struct ib_flow_spec_action_tag  flow_tag;
2044	struct ib_flow_spec_action_drop drop;
2045	struct ib_flow_spec_action_handle action;
2046	struct ib_flow_spec_action_count flow_count;
2047};
2048
2049struct ib_flow_attr {
2050	enum ib_flow_attr_type type;
2051	u16	     size;
2052	u16	     priority;
2053	u32	     flags;
2054	u8	     num_of_specs;
2055	u8	     port;
2056	union ib_flow_spec flows[];
2057};
2058
2059struct ib_flow {
2060	struct ib_qp		*qp;
2061	struct ib_device	*device;
2062	struct ib_uobject	*uobject;
2063};
2064
2065enum ib_flow_action_type {
2066	IB_FLOW_ACTION_UNSPECIFIED,
2067	IB_FLOW_ACTION_ESP = 1,
2068};
2069
2070struct ib_flow_action_attrs_esp_keymats {
2071	enum ib_uverbs_flow_action_esp_keymat			protocol;
2072	union {
2073		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2074	} keymat;
2075};
2076
2077struct ib_flow_action_attrs_esp_replays {
2078	enum ib_uverbs_flow_action_esp_replay			protocol;
2079	union {
2080		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
2081	} replay;
2082};
2083
2084enum ib_flow_action_attrs_esp_flags {
2085	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2086	 * This is done in order to share the same flags between user-space and
2087	 * kernel and spare an unnecessary translation.
2088	 */
2089
2090	/* Kernel flags */
2091	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
2092	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
2093};
2094
2095struct ib_flow_spec_list {
2096	struct ib_flow_spec_list	*next;
2097	union ib_flow_spec		spec;
2098};
2099
2100struct ib_flow_action_attrs_esp {
2101	struct ib_flow_action_attrs_esp_keymats		*keymat;
2102	struct ib_flow_action_attrs_esp_replays		*replay;
2103	struct ib_flow_spec_list			*encap;
2104	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2105	 * Value of 0 is a valid value.
2106	 */
2107	u32						esn;
2108	u32						spi;
2109	u32						seq;
2110	u32						tfc_pad;
2111	/* Use enum ib_flow_action_attrs_esp_flags */
2112	u64						flags;
2113	u64						hard_limit_pkts;
2114};
2115
2116struct ib_flow_action {
2117	struct ib_device		*device;
2118	struct ib_uobject		*uobject;
2119	enum ib_flow_action_type	type;
2120	atomic_t			usecnt;
2121};
2122
2123struct ib_mad_hdr;
2124struct ib_grh;
2125
2126enum ib_process_mad_flags {
2127	IB_MAD_IGNORE_MKEY	= 1,
2128	IB_MAD_IGNORE_BKEY	= 2,
2129	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2130};
2131
2132enum ib_mad_result {
2133	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2134	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2135	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2136	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2137};
2138
2139struct ib_port_cache {
2140	u64		      subnet_prefix;
2141	struct ib_pkey_cache  *pkey;
2142	struct ib_gid_table   *gid;
2143	u8                     lmc;
2144	enum ib_port_state     port_state;
2145};
2146
2147struct ib_cache {
2148	rwlock_t                lock;
2149	struct ib_event_handler event_handler;
2150};
2151
2152struct ib_port_immutable {
2153	int                           pkey_tbl_len;
2154	int                           gid_tbl_len;
2155	u32                           core_cap_flags;
2156	u32                           max_mad_size;
2157};
2158
2159struct ib_port_data {
2160	struct ib_device *ib_dev;
2161
2162	struct ib_port_immutable immutable;
2163
2164	spinlock_t pkey_list_lock;
2165	struct list_head pkey_list;
2166
2167	struct ib_port_cache cache;
2168
2169	spinlock_t netdev_lock;
2170	struct net_device __rcu *netdev;
2171	struct hlist_node ndev_hash_link;
2172	struct rdma_port_counter port_counter;
2173	struct rdma_hw_stats *hw_stats;
2174};
2175
2176/* rdma netdev type - specifies protocol type */
2177enum rdma_netdev_t {
2178	RDMA_NETDEV_OPA_VNIC,
2179	RDMA_NETDEV_IPOIB,
2180};
2181
2182/**
2183 * struct rdma_netdev - rdma netdev
2184 * For cases where netstack interfacing is required.
2185 */
2186struct rdma_netdev {
2187	void              *clnt_priv;
2188	struct ib_device  *hca;
2189	u8                 port_num;
2190
2191	/*
2192	 * cleanup function must be specified.
2193	 * FIXME: This is only used for OPA_VNIC and that usage should be
2194	 * removed too.
2195	 */
2196	void (*free_rdma_netdev)(struct net_device *netdev);
2197
2198	/* control functions */
2199	void (*set_id)(struct net_device *netdev, int id);
2200	/* send packet */
2201	int (*send)(struct net_device *dev, struct sk_buff *skb,
2202		    struct ib_ah *address, u32 dqpn);
2203	/* multicast */
2204	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2205			    union ib_gid *gid, u16 mlid,
2206			    int set_qkey, u32 qkey);
2207	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2208			    union ib_gid *gid, u16 mlid);
2209};
2210
2211struct rdma_netdev_alloc_params {
2212	size_t sizeof_priv;
2213	unsigned int txqs;
2214	unsigned int rxqs;
2215	void *param;
2216
2217	int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2218				      struct net_device *netdev, void *param);
2219};
2220
2221struct ib_counters {
2222	struct ib_device	*device;
2223	struct ib_uobject	*uobject;
2224	/* num of objects attached */
2225	atomic_t	usecnt;
2226};
2227
2228struct ib_counters_read_attr {
2229	u64	*counters_buff;
2230	u32	ncounters;
2231	u32	flags; /* use enum ib_read_counters_flags */
2232};
2233
2234struct uverbs_attr_bundle;
2235struct iw_cm_id;
2236struct iw_cm_conn_param;
2237
2238#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2239	.size_##ib_struct =                                                    \
2240		(sizeof(struct drv_struct) +                                   \
2241		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2242		 BUILD_BUG_ON_ZERO(                                            \
2243			 !__same_type(((struct drv_struct *)NULL)->member,     \
2244				      struct ib_struct)))
2245
2246#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                         \
2247	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2248
2249#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2250	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2251
2252#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2253
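/*
 * Example (illustrative sketch, not part of the original header): a HW
 * driver embeds the ib_* object as the first member of its own structure,
 * advertises the combined size in its ib_device_ops with
 * INIT_RDMA_OBJ_SIZE(), and the core (or the driver, via
 * rdma_zalloc_drv_obj()) allocates that combined object.  "mydrv_pd" is a
 * hypothetical name.
 *
 *	struct mydrv_pd {
 *		struct ib_pd	ibpd;	must be the member named below
 *		u32		pdn;
 *	};
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		...
 *		INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
 *	};
 */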
2254/**
2255 * struct ib_device_ops - InfiniBand device operations
2256 * This structure defines all the InfiniBand device operations; providers
2257 * need to define the operations they support, otherwise they will be set to NULL.
2258 */
2259struct ib_device_ops {
2260	struct module *owner;
2261	enum rdma_driver_id driver_id;
2262	u32 uverbs_abi_ver;
2263	unsigned int uverbs_no_driver_id_binding:1;
2264
2265	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2266			 const struct ib_send_wr **bad_send_wr);
2267	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2268			 const struct ib_recv_wr **bad_recv_wr);
2269	void (*drain_rq)(struct ib_qp *qp);
2270	void (*drain_sq)(struct ib_qp *qp);
2271	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2272	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2273	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2274	int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2275	int (*post_srq_recv)(struct ib_srq *srq,
2276			     const struct ib_recv_wr *recv_wr,
2277			     const struct ib_recv_wr **bad_recv_wr);
2278	int (*process_mad)(struct ib_device *device, int process_mad_flags,
2279			   u8 port_num, const struct ib_wc *in_wc,
2280			   const struct ib_grh *in_grh,
2281			   const struct ib_mad_hdr *in_mad, size_t in_mad_size,
2282			   struct ib_mad_hdr *out_mad, size_t *out_mad_size,
2283			   u16 *out_mad_pkey_index);
2284	int (*query_device)(struct ib_device *device,
2285			    struct ib_device_attr *device_attr,
2286			    struct ib_udata *udata);
2287	int (*modify_device)(struct ib_device *device, int device_modify_mask,
2288			     struct ib_device_modify *device_modify);
2289	void (*get_dev_fw_str)(struct ib_device *device, char *str);
2290	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2291						     int comp_vector);
2292	int (*query_port)(struct ib_device *device, u8 port_num,
2293			  struct ib_port_attr *port_attr);
2294	int (*modify_port)(struct ib_device *device, u8 port_num,
2295			   int port_modify_mask,
2296			   struct ib_port_modify *port_modify);
2297	/**
2298	 * The following mandatory functions are used only at device
2299	 * registration.  Keep functions such as these at the end of this
2300	 * structure to avoid cache line misses when accessing struct ib_device
2301	 * in fast paths.
2302	 */
2303	int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2304				  struct ib_port_immutable *immutable);
2305	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2306					       u8 port_num);
2307	/**
2308	 * When calling get_netdev, the HW vendor's driver should return the
2309	 * net device of device @device at port @port_num or NULL if such
2310	 * a net device doesn't exist. The vendor driver should call dev_hold
2311	 * on this net device. The HW vendor's device driver must guarantee
2312	 * that this function returns NULL before the net device has finished
2313	 * NETDEV_UNREGISTER state.
2314	 */
2315	struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2316	/**
2317	 * rdma netdev operation
2318	 *
2319	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2320	 * must return -EOPNOTSUPP if it doesn't support the specified type.
2321	 */
2322	struct net_device *(*alloc_rdma_netdev)(
2323		struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2324		const char *name, unsigned char name_assign_type,
2325		void (*setup)(struct net_device *));
2326
2327	int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2328				      enum rdma_netdev_t type,
2329				      struct rdma_netdev_alloc_params *params);
2330	/**
2331	 * query_gid should return the GID value for @device when the @port_num
2332	 * link layer is either IB or iWARP. It is a no-op if the @port_num
2333	 * link layer is RoCE.
2334	 */
2335	int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2336			 union ib_gid *gid);
2337	/**
2338	 * When calling add_gid, the HW vendor's driver should add the gid
2339	 * of device of port at gid index available at @attr. Meta-info of
2340	 * that gid (for example, the network device related to this gid) is
2341	 * available at @attr. @context allows the HW vendor driver to store
2342	 * extra information together with a GID entry. The HW vendor driver may
2343	 * allocate memory to contain this information and store it in @context
2344	 * when a new GID entry is written to. Params are consistent until the
2345	 * next call of add_gid or delete_gid. The function should return 0 on
2346	 * success or error otherwise. The function could be called
2347	 * concurrently for different ports. This function is only called when
2348	 * roce_gid_table is used.
2349	 */
2350	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2351	/**
2352	 * When calling del_gid, the HW vendor's driver should delete the
2353	 * gid of device @device at gid index gid_index of port port_num
2354	 * available in @attr.
2355	 * Upon the deletion of a GID entry, the HW vendor must free any
2356	 * allocated memory. The caller will clear @context afterwards.
2357	 * This function is only called when roce_gid_table is used.
2358	 */
2359	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2360	int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2361			  u16 *pkey);
2362	int (*alloc_ucontext)(struct ib_ucontext *context,
2363			      struct ib_udata *udata);
2364	void (*dealloc_ucontext)(struct ib_ucontext *context);
2365	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2366	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2367	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2368	void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2369	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
2370			 u32 flags, struct ib_udata *udata);
2371	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2372	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2373	void (*destroy_ah)(struct ib_ah *ah, u32 flags);
2374	int (*create_srq)(struct ib_srq *srq,
2375			  struct ib_srq_init_attr *srq_init_attr,
2376			  struct ib_udata *udata);
2377	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2378			  enum ib_srq_attr_mask srq_attr_mask,
2379			  struct ib_udata *udata);
2380	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2381	void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2382	struct ib_qp *(*create_qp)(struct ib_pd *pd,
2383				   struct ib_qp_init_attr *qp_init_attr,
2384				   struct ib_udata *udata);
2385	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2386			 int qp_attr_mask, struct ib_udata *udata);
2387	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2388			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2389	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2390	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2391			 struct ib_udata *udata);
2392	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2393	void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2394	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2395	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2396	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2397				     u64 virt_addr, int mr_access_flags,
2398				     struct ib_udata *udata);
2399	int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2400			     u64 virt_addr, int mr_access_flags,
2401			     struct ib_pd *pd, struct ib_udata *udata);
2402	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2403	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2404				  u32 max_num_sg, struct ib_udata *udata);
2405	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2406					    u32 max_num_data_sg,
2407					    u32 max_num_meta_sg);
2408	int (*advise_mr)(struct ib_pd *pd,
2409			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2410			 struct ib_sge *sg_list, u32 num_sge,
2411			 struct uverbs_attr_bundle *attrs);
2412	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2413			 unsigned int *sg_offset);
2414	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2415			       struct ib_mr_status *mr_status);
2416	struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
2417				  struct ib_udata *udata);
2418	int (*dealloc_mw)(struct ib_mw *mw);
2419	struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
2420				    struct ib_fmr_attr *fmr_attr);
2421	int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
2422			    u64 iova);
2423	int (*unmap_fmr)(struct list_head *fmr_list);
2424	int (*dealloc_fmr)(struct ib_fmr *fmr);
2425	void (*invalidate_range)(struct ib_umem_odp *umem_odp,
2426				 unsigned long start, unsigned long end);
2427	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2428	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2429	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
2430				      struct ib_udata *udata);
2431	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2432	struct ib_flow *(*create_flow)(struct ib_qp *qp,
2433				       struct ib_flow_attr *flow_attr,
2434				       int domain, struct ib_udata *udata);
2435	int (*destroy_flow)(struct ib_flow *flow_id);
2436	struct ib_flow_action *(*create_flow_action_esp)(
2437		struct ib_device *device,
2438		const struct ib_flow_action_attrs_esp *attr,
2439		struct uverbs_attr_bundle *attrs);
2440	int (*destroy_flow_action)(struct ib_flow_action *action);
2441	int (*modify_flow_action_esp)(
2442		struct ib_flow_action *action,
2443		const struct ib_flow_action_attrs_esp *attr,
2444		struct uverbs_attr_bundle *attrs);
2445	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2446				 int state);
2447	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2448			     struct ifla_vf_info *ivf);
2449	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2450			    struct ifla_vf_stats *stats);
2451	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2452			   int type);
2453	struct ib_wq *(*create_wq)(struct ib_pd *pd,
2454				   struct ib_wq_init_attr *init_attr,
2455				   struct ib_udata *udata);
2456	void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2457	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2458			 u32 wq_attr_mask, struct ib_udata *udata);
2459	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
2460		struct ib_device *device,
2461		struct ib_rwq_ind_table_init_attr *init_attr,
2462		struct ib_udata *udata);
2463	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2464	struct ib_dm *(*alloc_dm)(struct ib_device *device,
2465				  struct ib_ucontext *context,
2466				  struct ib_dm_alloc_attr *attr,
2467				  struct uverbs_attr_bundle *attrs);
2468	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2469	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2470				   struct ib_dm_mr_attr *attr,
2471				   struct uverbs_attr_bundle *attrs);
2472	struct ib_counters *(*create_counters)(
2473		struct ib_device *device, struct uverbs_attr_bundle *attrs);
2474	int (*destroy_counters)(struct ib_counters *counters);
2475	int (*read_counters)(struct ib_counters *counters,
2476			     struct ib_counters_read_attr *counters_read_attr,
2477			     struct uverbs_attr_bundle *attrs);
2478	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2479			    int data_sg_nents, unsigned int *data_sg_offset,
2480			    struct scatterlist *meta_sg, int meta_sg_nents,
2481			    unsigned int *meta_sg_offset);
2482
2483	/**
2484	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2485	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
2486	 *   core when the device is removed.  A lifespan of -1 in the return
2487	 *   struct tells the core to set a default lifespan.
2488	 */
2489	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2490						u8 port_num);
2491	/**
2492	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2493	 * @index - The index in the value array we wish to have updated, or
2494	 *   num_counters if we want all stats updated
2495	 * Return codes -
2496	 *   < 0 - Error, no counters updated
2497	 *   index - Updated the single counter pointed to by index
2498	 *   num_counters - Updated all counters (will reset the timestamp
2499	 *     and prevent further calls for lifespan milliseconds)
2500	 * Drivers are allowed to update all counters in lieu of just the
2501	 *   one given in index, at their option.
2502	 */
2503	int (*get_hw_stats)(struct ib_device *device,
2504			    struct rdma_hw_stats *stats, u8 port, int index);
2505	/*
2506	 * This function is called once for each port when an ib device is
2507	 * registered.
2508	 */
2509	int (*init_port)(struct ib_device *device, u8 port_num,
2510			 struct kobject *port_sysfs);
2511	/**
2512	 * Allows rdma drivers to add their own restrack attributes.
2513	 */
2514	int (*fill_res_entry)(struct sk_buff *msg,
2515			      struct rdma_restrack_entry *entry);
2516
2517	/* Device lifecycle callbacks */
2518	/*
2519	 * Called after the device becomes registered, before clients are
2520	 * attached
2521	 */
2522	int (*enable_driver)(struct ib_device *dev);
2523	/*
2524	 * This is called as part of ib_dealloc_device().
2525	 */
2526	void (*dealloc_driver)(struct ib_device *dev);
2527
2528	/* iWarp CM callbacks */
2529	void (*iw_add_ref)(struct ib_qp *qp);
2530	void (*iw_rem_ref)(struct ib_qp *qp);
2531	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2532	int (*iw_connect)(struct iw_cm_id *cm_id,
2533			  struct iw_cm_conn_param *conn_param);
2534	int (*iw_accept)(struct iw_cm_id *cm_id,
2535			 struct iw_cm_conn_param *conn_param);
2536	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2537			 u8 pdata_len);
2538	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2539	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2540	/**
2541	 * counter_bind_qp - Bind a QP to a counter.
2542	 * @counter - The counter to be bound. If counter->id is zero then
2543	 *   the driver needs to allocate a new counter and set counter->id
2544	 */
2545	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2546	/**
2547	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2548	 *   counter and bind it onto the default one
2549	 */
2550	int (*counter_unbind_qp)(struct ib_qp *qp);
2551	/**
2552	 * counter_dealloc - Deallocate the hw counter
2553	 */
2554	int (*counter_dealloc)(struct rdma_counter *counter);
2555	/**
2556	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2557	 * the driver initialized data.
2558	 */
2559	struct rdma_hw_stats *(*counter_alloc_stats)(
2560		struct rdma_counter *counter);
2561	/**
2562	 * counter_update_stats - Query the stats value of this counter
2563	 */
2564	int (*counter_update_stats)(struct rdma_counter *counter);
2565
2566	DECLARE_RDMA_OBJ_SIZE(ib_ah);
2567	DECLARE_RDMA_OBJ_SIZE(ib_cq);
2568	DECLARE_RDMA_OBJ_SIZE(ib_pd);
2569	DECLARE_RDMA_OBJ_SIZE(ib_srq);
2570	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2571};
2572
2573struct ib_core_device {
2574	/* device must be the first element in the structure, as long as the
2575	 * union of ib_core_device and device exists in ib_device.
2576	 */
2577	struct device dev;
2578	possible_net_t rdma_net;
2579	struct kobject *ports_kobj;
2580	struct list_head port_list;
2581	struct ib_device *owner; /* reach back to owner ib_device */
2582};
2583
2584struct rdma_restrack_root;
2585struct ib_device {
2586	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2587	struct device                *dma_device;
2588	struct ib_device_ops	     ops;
2589	char                          name[IB_DEVICE_NAME_MAX];
2590	struct rcu_head rcu_head;
2591
2592	struct list_head              event_handler_list;
2593	spinlock_t                    event_handler_lock;
2594
2595	struct rw_semaphore	      client_data_rwsem;
2596	struct xarray                 client_data;
2597	struct mutex                  unregistration_lock;
2598
2599	struct ib_cache               cache;
2600	/**
2601	 * port_data is indexed by port number
2602	 */
2603	struct ib_port_data *port_data;
2604
2605	int			      num_comp_vectors;
2606
2607	union {
2608		struct device		dev;
2609		struct ib_core_device	coredev;
2610	};
2611
2612	/* First group is for device attributes,
2613	 * second group is for driver-provided attributes (optional).
2614	 * It is a NULL terminated array.
2615	 */
2616	const struct attribute_group	*groups[3];
2617
2618	u64			     uverbs_cmd_mask;
2619	u64			     uverbs_ex_cmd_mask;
2620
2621	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2622	__be64			     node_guid;
2623	u32			     local_dma_lkey;
2624	u16                          is_switch:1;
2625	/* Indicates kernel verbs support, should not be used in drivers */
2626	u16                          kverbs_provider:1;
2627	/* CQ adaptive moderation (RDMA DIM) */
2628	u16                          use_cq_dim:1;
2629	u8                           node_type;
2630	u8                           phys_port_cnt;
2631	struct ib_device_attr        attrs;
2632	struct attribute_group	     *hw_stats_ag;
2633	struct rdma_hw_stats         *hw_stats;
2634
2635#ifdef CONFIG_CGROUP_RDMA
2636	struct rdmacg_device         cg_device;
2637#endif
2638
2639	u32                          index;
2640	struct rdma_restrack_root *res;
2641
2642	const struct uapi_definition   *driver_def;
2643
2644	/*
2645	 * Positive refcount indicates that the device is currently
2646	 * registered and cannot be unregistered.
2647	 */
2648	refcount_t refcount;
2649	struct completion unreg_completion;
2650	struct work_struct unregistration_work;
2651
2652	const struct rdma_link_ops *link_ops;
2653
2654	/* Protects compat_devs xarray modifications */
2655	struct mutex compat_devs_mutex;
2656	/* Maintains compat devices for each net namespace */
2657	struct xarray compat_devs;
2658
2659	/* Used by iWarp CM */
2660	char iw_ifname[IFNAMSIZ];
2661	u32 iw_driver_flags;
2662};
2663
2664struct ib_client_nl_info;
2665struct ib_client {
2666	const char *name;
2667	void (*add)   (struct ib_device *);
2668	void (*remove)(struct ib_device *, void *client_data);
2669	void (*rename)(struct ib_device *dev, void *client_data);
2670	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2671			   struct ib_client_nl_info *res);
2672	int (*get_global_nl_info)(struct ib_client_nl_info *res);
2673
2674	/* Returns the net_dev belonging to this ib_client and matching the
2675	 * given parameters.
2676	 * @dev:	 An RDMA device that the net_dev uses for communication.
2677	 * @port:	 A physical port number on the RDMA device.
2678	 * @pkey:	 P_Key that the net_dev uses if applicable.
2679	 * @gid:	 A GID that the net_dev uses to communicate.
2680	 * @addr:	 An IP address the net_dev is configured with.
2681	 * @client_data: The device's client data set by ib_set_client_data().
2682	 *
2683	 * An ib_client that implements a net_dev on top of RDMA devices
2684	 * (such as IP over IB) should implement this callback, allowing the
2685	 * rdma_cm module to find the right net_dev for a given request.
2686	 *
2687	 * The caller is responsible for calling dev_put on the returned
2688	 * netdev. */
2689	struct net_device *(*get_net_dev_by_params)(
2690			struct ib_device *dev,
2691			u8 port,
2692			u16 pkey,
2693			const union ib_gid *gid,
2694			const struct sockaddr *addr,
2695			void *client_data);
2696
2697	refcount_t uses;
2698	struct completion uses_zero;
2699	u32 client_id;
2700
2701	/* kverbs are not required by the client */
2702	u8 no_kverbs_req:1;
2703};
2704
2705/*
2706 * IB block DMA iterator
2707 *
2708 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2709 * to a HW supported page size.
2710 */
2711struct ib_block_iter {
2712	/* internal states */
2713	struct scatterlist *__sg;	/* sg holding the current aligned block */
2714	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
2715	unsigned int __sg_nents;	/* number of SG entries */
2716	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
2717	unsigned int __pg_bit;		/* alignment of current block */
2718};
2719
2720struct ib_device *_ib_alloc_device(size_t size);
2721#define ib_alloc_device(drv_struct, member)                                    \
2722	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2723				      BUILD_BUG_ON_ZERO(offsetof(              \
2724					      struct drv_struct, member))),    \
2725		     struct drv_struct, member)
2726
2727void ib_dealloc_device(struct ib_device *device);
2728
2729void ib_get_device_fw_str(struct ib_device *device, char *str);
2730
2731int ib_register_device(struct ib_device *device, const char *name);
2732void ib_unregister_device(struct ib_device *device);
2733void ib_unregister_driver(enum rdma_driver_id driver_id);
2734void ib_unregister_device_and_put(struct ib_device *device);
2735void ib_unregister_device_queued(struct ib_device *ib_dev);
2736
2737int ib_register_client   (struct ib_client *client);
2738void ib_unregister_client(struct ib_client *client);
2739
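/*
 * Example (illustrative sketch, not part of the original header): a HW
 * driver wraps struct ib_device in its own device structure, allocates it
 * with ib_alloc_device(), sets its ops and registers it; on failure or
 * removal it uses ib_dealloc_device()/ib_unregister_device().  "mydrv_dev"
 * and "mydrv_dev_ops" are hypothetical names.
 *
 *	struct mydrv_dev {
 *		struct ib_device ibdev;		must be the named member
 *		...
 *	};
 *
 *	struct mydrv_dev *dev = ib_alloc_device(mydrv_dev, ibdev);
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	ib_set_device_ops(&dev->ibdev, &mydrv_dev_ops);
 *	ret = ib_register_device(&dev->ibdev, "mydrv%d");
 *	if (ret)
 *		ib_dealloc_device(&dev->ibdev);
 */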
2740void __rdma_block_iter_start(struct ib_block_iter *biter,
2741			     struct scatterlist *sglist,
2742			     unsigned int nents,
2743			     unsigned long pgsz);
2744bool __rdma_block_iter_next(struct ib_block_iter *biter);
2745
2746/**
2747 * rdma_block_iter_dma_address - get the aligned dma address of the current
2748 * block held by the block iterator.
2749 * @biter: block iterator holding the memory block
2750 */
2751static inline dma_addr_t
2752rdma_block_iter_dma_address(struct ib_block_iter *biter)
2753{
2754	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2755}
2756
2757/**
2758 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2759 * @sglist: sglist to iterate over
2760 * @biter: block iterator holding the memory block
2761 * @nents: maximum number of sg entries to iterate over
2762 * @pgsz: best HW supported page size to use
2763 *
2764 * Callers may use rdma_block_iter_dma_address() to get each
2765 * block's aligned DMA address.
2766 */
2767#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
2768	for (__rdma_block_iter_start(biter, sglist, nents,	\
2769				     pgsz);			\
2770	     __rdma_block_iter_next(biter);)
2771
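/*
 * Example (illustrative sketch, not part of the original header): a driver
 * building its HW page list from a DMA-mapped SG list can walk the list in
 * pgsz-aligned blocks.  "mydrv_set_page" is a hypothetical helper.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, PAGE_SIZE)
 *		mydrv_set_page(mr, rdma_block_iter_dma_address(&biter));
 */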
2772/**
2773 * ib_get_client_data - Get IB client context
2774 * @device:Device to get context for
2775 * @client:Client to get context for
2776 *
2777 * ib_get_client_data() returns the client context data set with
2778 * ib_set_client_data(). This can only be called while the client is
2779 * registered to the device, once the ib_client remove() callback returns this
2780 * cannot be called.
2781 */
2782static inline void *ib_get_client_data(struct ib_device *device,
2783				       struct ib_client *client)
2784{
2785	return xa_load(&device->client_data, client->client_id);
2786}
2787void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2788			 void *data);
2789void ib_set_device_ops(struct ib_device *device,
2790		       const struct ib_device_ops *ops);
2791
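/*
 * Example (illustrative sketch, not part of the original header): an
 * ib_client typically allocates per-device state in its add() callback,
 * publishes it with ib_set_client_data(), and receives it back as
 * client_data in remove().  All "mydrv_*" names are hypothetical.
 *
 *	static struct ib_client mydrv_client;
 *
 *	static void mydrv_add_one(struct ib_device *device)
 *	{
 *		struct mydrv_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return;
 *		ib_set_client_data(device, &mydrv_client, st);
 *	}
 *
 *	static struct ib_client mydrv_client = {
 *		.name	= "mydrv",
 *		.add	= mydrv_add_one,
 *		.remove	= mydrv_remove_one,
 *	};
 *
 *	ib_register_client(&mydrv_client);
 */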
2792#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
2793int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2794		      unsigned long pfn, unsigned long size, pgprot_t prot);
2795#else
2796static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
2797				    struct vm_area_struct *vma,
2798				    unsigned long pfn, unsigned long size,
2799				    pgprot_t prot)
2800{
2801	return -EINVAL;
2802}
2803#endif
2804
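/*
 * Example (illustrative sketch, not part of the original header): a typical
 * driver .mmap callback maps a single doorbell page to user space through
 * rdma_user_mmap_io().  "db_phys_addr" is a hypothetical per-context value.
 *
 *	static int mydrv_mmap(struct ib_ucontext *context,
 *			      struct vm_area_struct *vma)
 *	{
 *		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 *			return -EINVAL;
 *
 *		return rdma_user_mmap_io(context, vma,
 *					 db_phys_addr >> PAGE_SHIFT, PAGE_SIZE,
 *					 pgprot_noncached(vma->vm_page_prot));
 *	}
 */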
2805static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2806{
2807	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2808}
2809
2810static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2811{
2812	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2813}
2814
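/*
 * Example (illustrative sketch, not part of the original header): driver
 * verbs that take a struct ib_udata usually copy a request in and a
 * response out.  "mydrv_create_cq_req"/"mydrv_create_cq_resp" stand in for
 * hypothetical uapi command/response layouts.
 *
 *	struct mydrv_create_cq_req req = {};
 *	struct mydrv_create_cq_resp resp = {};
 *
 *	if (udata->inlen) {
 *		ret = ib_copy_from_udata(&req, udata,
 *					 min(udata->inlen, sizeof(req)));
 *		if (ret)
 *			return ret;
 *	}
 *	...
 *	if (udata->outlen) {
 *		ret = ib_copy_to_udata(udata, &resp,
 *				       min(udata->outlen, sizeof(resp)));
 *		if (ret)
 *			return ret;
 *	}
 */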
2815static inline bool ib_is_buffer_cleared(const void __user *p,
2816					size_t len)
2817{
2818	bool ret;
2819	u8 *buf;
2820
2821	if (len > USHRT_MAX)
2822		return false;
2823
2824	buf = memdup_user(p, len);
2825	if (IS_ERR(buf))
2826		return false;
2827
2828	ret = !memchr_inv(buf, 0, len);
2829	kfree(buf);
2830	return ret;
2831}
2832
2833static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2834				       size_t offset,
2835				       size_t len)
2836{
2837	return ib_is_buffer_cleared(udata->inbuf + offset, len);
2838}
2839
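/*
 * Example (illustrative sketch, not part of the original header): drivers
 * commonly use ib_is_udata_cleared() to reject requests whose trailing
 * bytes, beyond the command layout the driver knows about, are not zero.
 * "cmd" is a hypothetical command structure.
 *
 *	if (udata->inlen > sizeof(cmd) &&
 *	    !ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 */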
2840/**
2841 * ib_is_destroy_retryable - Check whether the uobject destruction
2842 * is retryable.
2843 * @ret: The initial destruction return code
2844 * @why: remove reason
2845 * @uobj: The uobject that is destroyed
2846 *
2847 * This function is a helper function that IB layer and low-level drivers
2848 * can use to consider whether the destruction of the given uobject is
2849 * retry-able.
2850 * It checks the original return code; if it was not success, whether the
2851 * destruction is retryable depends on the ucontext state (i.e. cleanup_retryable)
2852 * and the remove reason (i.e. why).
2853 * Must be called with the object locked for destroy.
2854 */
2855static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2856					   struct ib_uobject *uobj)
2857{
2858	return ret && (why == RDMA_REMOVE_DESTROY ||
2859		       uobj->context->cleanup_retryable);
2860}
2861
2862/**
2863 * ib_destroy_usecnt - Called during destruction to check the usecnt
2864 * @usecnt: The usecnt atomic
2865 * @why: remove reason
2866 * @uobj: The uobject that is destroyed
2867 *
2868 * Non-zero usecnts will block destruction unless destruction was triggered by
2869 * a ucontext cleanup.
2870 */
2871static inline int ib_destroy_usecnt(atomic_t *usecnt,
2872				    enum rdma_remove_reason why,
2873				    struct ib_uobject *uobj)
2874{
2875	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2876		return -EBUSY;
2877	return 0;
2878}
2879
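/*
 * Example (illustrative sketch, not part of the original header): a uobject
 * cleanup path can use both helpers to decide whether a failed or blocked
 * destruction should be retried.  "obj" and "driver_destroy_hw_object" are
 * hypothetical.
 *
 *	ret = ib_destroy_usecnt(&obj->usecnt, why, uobj);
 *	if (ret)
 *		return ret;
 *
 *	ret = driver_destroy_hw_object(obj);
 *	if (ib_is_destroy_retryable(ret, why, uobj))
 *		return ret;
 */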
2880/**
2881 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2882 * contains all required attributes and no attributes not allowed for
2883 * the given QP state transition.
2884 * @cur_state: Current QP state
2885 * @next_state: Next QP state
2886 * @type: QP type
2887 * @mask: Mask of supplied QP attributes
2888 *
2889 * This function is a helper function that a low-level driver's
2890 * modify_qp method can use to validate the consumer's input.  It
2891 * checks that cur_state and next_state are valid QP states, that a
2892 * transition from cur_state to next_state is allowed by the IB spec,
2893 * and that the attribute mask supplied is allowed for the transition.
2894 */
2895bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2896			enum ib_qp_type type, enum ib_qp_attr_mask mask);
2897
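/*
 * Example (illustrative sketch, not part of the original header): a driver's
 * modify_qp typically validates the requested transition before touching HW:
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
 *		return -EINVAL;
 */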
2898void ib_register_event_handler(struct ib_event_handler *event_handler);
2899void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2900void ib_dispatch_event(struct ib_event *event);
2901
2902int ib_query_port(struct ib_device *device,
2903		  u8 port_num, struct ib_port_attr *port_attr);
2904
2905enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2906					       u8 port_num);
2907
2908/**
2909 * rdma_cap_ib_switch - Check if the device is IB switch
2910 * @device: Device to check
2911 *
2912 * The device driver is responsible for setting the is_switch bit in the
2913 * ib_device structure at init time.
2914 *
2915 * Return: true if the device is IB switch.
2916 */
2917static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2918{
2919	return device->is_switch;
2920}
2921
2922/**
2923 * rdma_start_port - Return the first valid port number for the device
2924 * specified
2925 *
2926 * @device: Device to be checked
2927 *
2928 * Return start port number
2929 */
2930static inline u8 rdma_start_port(const struct ib_device *device)
2931{
2932	return rdma_cap_ib_switch(device) ? 0 : 1;
2933}
2934
2935/**
2936 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
2937 * @device - The struct ib_device * to iterate over
2938 * @iter - The unsigned int to store the port number
2939 */
2940#define rdma_for_each_port(device, iter)                                       \
2941	for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type(   \
2942						     unsigned int, iter)));    \
2943	     iter <= rdma_end_port(device); (iter)++)
2944
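/*
 * Example (illustrative sketch, not part of the original header): iterating
 * all valid ports of a device, e.g. to set up MAD handling only on ports
 * that support it (rdma_cap_ib_mad() is defined further below):
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port(device, port) {
 *		if (!rdma_cap_ib_mad(device, port))
 *			continue;
 *		...
 *	}
 */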
2945/**
2946 * rdma_end_port - Return the last valid port number for the device
2947 * specified
2948 *
2949 * @device: Device to be checked
2950 *
2951 * Return last port number
2952 */
2953static inline u8 rdma_end_port(const struct ib_device *device)
2954{
2955	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2956}
2957
2958static inline int rdma_is_port_valid(const struct ib_device *device,
2959				     unsigned int port)
2960{
2961	return (port >= rdma_start_port(device) &&
2962		port <= rdma_end_port(device));
2963}
2964
2965static inline bool rdma_is_grh_required(const struct ib_device *device,
2966					u8 port_num)
2967{
2968	return device->port_data[port_num].immutable.core_cap_flags &
2969	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
2970}
2971
2972static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2973{
2974	return device->port_data[port_num].immutable.core_cap_flags &
2975	       RDMA_CORE_CAP_PROT_IB;
2976}
2977
2978static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2979{
2980	return device->port_data[port_num].immutable.core_cap_flags &
2981	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2982}
2983
2984static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2985{
2986	return device->port_data[port_num].immutable.core_cap_flags &
2987	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2988}
2989
2990static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2991{
2992	return device->port_data[port_num].immutable.core_cap_flags &
2993	       RDMA_CORE_CAP_PROT_ROCE;
2994}
2995
2996static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2997{
2998	return device->port_data[port_num].immutable.core_cap_flags &
2999	       RDMA_CORE_CAP_PROT_IWARP;
3000}
3001
3002static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
3003{
3004	return rdma_protocol_ib(device, port_num) ||
3005		rdma_protocol_roce(device, port_num);
3006}
3007
3008static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3009{
3010	return device->port_data[port_num].immutable.core_cap_flags &
3011	       RDMA_CORE_CAP_PROT_RAW_PACKET;
3012}
3013
3014static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3015{
3016	return device->port_data[port_num].immutable.core_cap_flags &
3017	       RDMA_CORE_CAP_PROT_USNIC;
3018}
3019
3020/**
3021 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
3022 * Management Datagrams.
3023 * @device: Device to check
3024 * @port_num: Port number to check
3025 *
3026 * Management Datagrams (MAD) are a required part of the InfiniBand
3027 * specification and are supported on all InfiniBand devices.  A slightly
3028 * extended version is also supported on OPA interfaces.
3029 *
3030 * Return: true if the port supports sending/receiving of MAD packets.
3031 */
3032static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
3033{
3034	return device->port_data[port_num].immutable.core_cap_flags &
3035	       RDMA_CORE_CAP_IB_MAD;
3036}
3037
3038/**
3039 * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
3040 * Management Datagrams.
3041 * @device: Device to check
3042 * @port_num: Port number to check
3043 *
3044 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3045 * datagrams with their own versions.  These OPA MADs share many but not all of
3046 * the characteristics of InfiniBand MADs.
3047 *
3048 * OPA MADs differ in the following ways:
3049 *
3050 *    1) MADs are variable size up to 2K
3051 *       IBTA defined MADs remain fixed at 256 bytes
3052 *    2) OPA SMPs must carry valid PKeys
3053 *    3) OPA SMP packets use a different format
3054 *
3055 * Return: true if the port supports OPA MAD packet formats.
3056 */
3057static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3058{
3059	return device->port_data[port_num].immutable.core_cap_flags &
3060		RDMA_CORE_CAP_OPA_MAD;
3061}
3062
3063/**
3064 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
3065 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3066 * @device: Device to check
3067 * @port_num: Port number to check
3068 *
3069 * Each InfiniBand node is required to provide a Subnet Management Agent
3070 * that the subnet manager can access.  Prior to the fabric being fully
3071 * configured by the subnet manager, the SMA is accessed via a well known
3072 * interface called the Subnet Management Interface (SMI).  This interface
3073 * uses directed route packets to communicate with the SM to get around the
3074 * chicken and egg problem of the SM needing to know what's on the fabric
3075 * in order to configure the fabric, and needing to configure the fabric in
3076 * order to send packets to the devices on the fabric.  These directed
3077 * route packets do not need the fabric fully configured in order to reach
3078 * their destination.  The SMI is the only method allowed to send
3079 * directed route packets on an InfiniBand fabric.
3080 *
3081 * Return: true if the port provides an SMI.
3082 */
3083static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
3084{
3085	return device->port_data[port_num].immutable.core_cap_flags &
3086	       RDMA_CORE_CAP_IB_SMI;
3087}
3088
3089/**
3090 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
3091 * Communication Manager.
3092 * @device: Device to check
3093 * @port_num: Port number to check
3094 *
3095 * The InfiniBand Communication Manager is one of many pre-defined General
3096 * Service Agents (GSA) that are accessed via the General Service
3097 * Interface (GSI).  Its role is to facilitate the establishment of connections
3098 * between nodes as well as other management related tasks for established
3099 * connections.
3100 *
3101 * Return: true if the port supports an IB CM (this does not guarantee that
3102 * a CM is actually running however).
3103 */
3104static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
3105{
3106	return device->port_data[port_num].immutable.core_cap_flags &
3107	       RDMA_CORE_CAP_IB_CM;
3108}
3109
3110/**
3111 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
3112 * Communication Manager.
3113 * @device: Device to check
3114 * @port_num: Port number to check
3115 *
3116 * Similar to above, but specific to iWARP connections which have a different
3117 * management protocol than InfiniBand.
3118 *
3119 * Return: true if the port supports an iWARP CM (this does not guarantee that
3120 * a CM is actually running however).
3121 */
3122static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
3123{
3124	return device->port_data[port_num].immutable.core_cap_flags &
3125	       RDMA_CORE_CAP_IW_CM;
3126}
3127
3128/**
3129 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
3130 * Subnet Administration.
3131 * @device: Device to check
3132 * @port_num: Port number to check
3133 *
3134 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3135 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3136 * fabrics, devices should resolve routes to other hosts by contacting the
3137 * SA to query the proper route.
3138 *
3139 * Return: true if the port should act as a client to the fabric Subnet
3140 * Administration interface.  This does not imply that the SA service is
3141 * running locally.
3142 */
3143static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
3144{
3145	return device->port_data[port_num].immutable.core_cap_flags &
3146	       RDMA_CORE_CAP_IB_SA;
3147}
3148
3149/**
3150 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
3151 * Multicast.
3152 * @device: Device to check
3153 * @port_num: Port number to check
3154 *
3155 * InfiniBand multicast registration is more complex than normal IPv4 or
3156 * IPv6 multicast registration.  Each Host Channel Adapter must register
3157 * with the Subnet Manager when it wishes to join a multicast group.  It
3158 * should do so only once regardless of how many queue pairs it subscribes
3159 * to this group.  And it should leave the group only after all queue pairs
3160 * attached to the group have been detached.
3161 *
3162 * Return: true if the port must undertake the additional administrative
3163 * overhead of registering/unregistering with the SM and tracking of the
3164 * total number of queue pairs attached to the multicast group.
3165 */
3166static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
3167{
3168	return rdma_cap_ib_sa(device, port_num);
3169}
3170
3171/**
3172 * rdma_cap_af_ib - Check if the port of a device supports
3173 * Native InfiniBand Addressing.
3174 * @device: Device to check
3175 * @port_num: Port number to check
3176 *
3177 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3178 * GID.  RoCE uses a different mechanism, but still generates a GID via
3179 * a prescribed mechanism and port specific data.
3180 *
3181 * Return: true if the port uses a GID address to identify devices on the
3182 * network.
3183 */
3184static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3185{
3186	return device->port_data[port_num].immutable.core_cap_flags &
3187	       RDMA_CORE_CAP_AF_IB;
3188}
3189
3190/**
3191 * rdma_cap_eth_ah - Check if the port of a device supports
3192 * Ethernet Address Handles.
3193 * @device: Device to check
3194 * @port_num: Port number to check
3195 *
3196 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3197 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3198 * port.  Normally, packet headers are generated by the sending host
3199 * adapter, but when sending connectionless datagrams, we must manually
3200 * inject the proper headers for the fabric we are communicating over.
3201 *
3202 * Return: true if we are running as a RoCE port and must force the
3203 * addition of a Global Route Header built from our Ethernet Address
3204 * Handle into our header list for connectionless packets.
3205 */
3206static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3207{
3208	return device->port_data[port_num].immutable.core_cap_flags &
3209	       RDMA_CORE_CAP_ETH_AH;
3210}
3211
3212/**
3213 * rdma_cap_opa_ah - Check if the port of a device supports
3214 * OPA Address handles
3215 * @device: Device to check
3216 * @port_num: Port number to check
3217 *
3218 * Return: true if we are running on an OPA device which supports
3219 * the extended OPA addressing.
3220 */
3221static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3222{
3223	return (device->port_data[port_num].immutable.core_cap_flags &
3224		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3225}
3226
3227/**
3228 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3229 *
3230 * @device: Device
3231 * @port_num: Port number
3232 *
3233 * This MAD size includes the MAD headers and MAD payload.  No other headers
3234 * are included.
3235 *
3236 * Return the max MAD size required by the port.  Will return 0 if the port
3237 * does not support MADs.
3238 */
3239static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3240{
3241	return device->port_data[port_num].immutable.max_mad_size;
3242}
3243
3244/**
3245 * rdma_cap_roce_gid_table - Check if the port of a device uses roce_gid_table
3246 * @device: Device to check
3247 * @port_num: Port number to check
3248 *
3249 * RoCE GID table mechanism manages the various GIDs for a device.
3250 *
3251 * NOTE: if allocating the port's GID table has failed, this call will still
3252 * return true, but any RoCE GID table API will fail.
3253 *
3254 * Return: true if the port uses RoCE GID table mechanism in order to manage
3255 * its GIDs.
3256 */
3257static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3258					   u8 port_num)
3259{
3260	return rdma_protocol_roce(device, port_num) &&
3261		device->ops.add_gid && device->ops.del_gid;
3262}
3263
3264/*
3265 * Check if the device supports READ W/ INVALIDATE.
3266 */
3267static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3268{
3269	/*
3270	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3271	 * has support for it yet.
3272	 */
3273	return rdma_protocol_iwarp(dev, port_num);
3274}
3275
3276/**
3277 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
3278 *
3279 * @addr: address
3280 * @pgsz_bitmap: bitmap of HW supported page sizes
3281 */
3282static inline unsigned int rdma_find_pg_bit(unsigned long addr,
3283					    unsigned long pgsz_bitmap)
3284{
3285	unsigned long align;
3286	unsigned long pgsz;
3287
3288	align = addr & -addr;
3289
3290	/* Find page bit such that addr is aligned to the highest supported
3291	 * HW page size
3292	 */
3293	pgsz = pgsz_bitmap & ~(-align << 1);
3294	if (!pgsz)
3295		return __ffs(pgsz_bitmap);
3296
3297	return __fls(pgsz);
3298}
3299
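/*
 * Illustrative sketch (not part of this header): picking a HW page size for a
 * buffer that starts at "addr".  The bitmap below is an assumed example of a
 * device that supports 4K, 64K and 2M pages.
 *
 *	unsigned long pgsz_bitmap = SZ_4K | SZ_64K | SZ_2M;
 *	unsigned int pg_bit = rdma_find_pg_bit(addr, pgsz_bitmap);
 *
 * For addr == 0x210000 this yields pg_bit == 16 (64K), since the address is
 * 64K-aligned but not 2M-aligned.
 */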
3300int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3301			 int state);
3302int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3303		     struct ifla_vf_info *info);
3304int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3305		    struct ifla_vf_stats *stats);
3306int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3307		   int type);
3308
3309int ib_query_pkey(struct ib_device *device,
3310		  u8 port_num, u16 index, u16 *pkey);
3311
3312int ib_modify_device(struct ib_device *device,
3313		     int device_modify_mask,
3314		     struct ib_device_modify *device_modify);
3315
3316int ib_modify_port(struct ib_device *device,
3317		   u8 port_num, int port_modify_mask,
3318		   struct ib_port_modify *port_modify);
3319
3320int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3321		u8 *port_num, u16 *index);
3322
3323int ib_find_pkey(struct ib_device *device,
3324		 u8 port_num, u16 pkey, u16 *index);
3325
3326enum ib_pd_flags {
3327	/*
3328	 * Create a memory registration for all memory in the system and place
3329	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3330	 * ULPs to avoid the overhead of dynamic MRs.
3331	 *
3332	 * This flag is generally considered unsafe and must only be used in
3333 * extremely trusted environments.  Every use of it will log a warning
3334	 * in the kernel log.
3335	 */
3336	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3337};
3338
3339struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3340		const char *caller);
3341
3342#define ib_alloc_pd(device, flags) \
3343	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3344
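/*
 * Illustrative sketch (not part of this header): a kernel ULP allocating and
 * later releasing a PD.  "device" is assumed to be a struct ib_device the
 * caller already holds.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */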
3345/**
3346 * ib_dealloc_pd_user - Deallocate kernel/user PD
3347 * @pd: The protection domain
3348 * @udata: Valid user data or NULL for kernel objects
3349 */
3350void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3351
3352/**
3353 * ib_dealloc_pd - Deallocate kernel PD
3354 * @pd: The protection domain
3355 *
3356 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3357 */
3358static inline void ib_dealloc_pd(struct ib_pd *pd)
3359{
3360	ib_dealloc_pd_user(pd, NULL);
3361}
3362
3363enum rdma_create_ah_flags {
3364	/* In a sleepable context */
3365	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3366};
3367
3368/**
3369 * rdma_create_ah - Creates an address handle for the given address vector.
3370 * @pd: The protection domain associated with the address handle.
3371 * @ah_attr: The attributes of the address vector.
3372 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3373 *
3374 * The address handle is used to reference a local or global destination
3375 * in all UD QP post sends.
3376 */
3377struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3378			     u32 flags);
3379
3380/**
3381 * rdma_create_user_ah - Creates an address handle for the given address vector.
3382 * It resolves the destination MAC address for an AH attribute of RoCE type.
3383 * @pd: The protection domain associated with the address handle.
3384 * @ah_attr: The attributes of the address vector.
3385 * @udata: pointer to the user's input/output buffer information needed by
3386 *         the provider driver.
3387 *
3388 * It returns 0 on success and returns appropriate error code on error.
3389 * The address handle is used to reference a local or global destination
3390 * in all UD QP post sends.
3391 */
3392struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3393				  struct rdma_ah_attr *ah_attr,
3394				  struct ib_udata *udata);
3395/**
3396 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3397 *   work completion.
3398 * @hdr: the L3 header to parse
3399 * @net_type: type of header to parse
3400 * @sgid: place to store source gid
3401 * @dgid: place to store destination gid
3402 */
3403int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3404			      enum rdma_network_type net_type,
3405			      union ib_gid *sgid, union ib_gid *dgid);
3406
3407/**
3408 * ib_get_rdma_header_version - Get the header version
3409 * @hdr: the L3 header to parse
3410 */
3411int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3412
3413/**
3414 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3415 *   work completion.
3416 * @device: Device on which the received message arrived.
3417 * @port_num: Port on which the received message arrived.
3418 * @wc: Work completion associated with the received message.
3419 * @grh: References the received global route header.  This parameter is
3420 *   ignored unless the work completion indicates that the GRH is valid.
3421 * @ah_attr: Returned attributes that can be used when creating an address
3422 *   handle for replying to the message.
3423 * When ib_init_ah_attr_from_wc() returns success,
3424 * (a) for the IB link layer, ah_attr optionally contains a reference to an
3425 * SGID attribute when a GRH is present.
3426 * (b) for the RoCE link layer, ah_attr contains a reference to an SGID attribute.
3427 * The user must invoke rdma_destroy_ah_attr() to release the reference to SGID
3428 * attributes which are initialized using ib_init_ah_attr_from_wc().
3429 *
3430 */
3431int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3432			    const struct ib_wc *wc, const struct ib_grh *grh,
3433			    struct rdma_ah_attr *ah_attr);
3434
3435/**
3436 * ib_create_ah_from_wc - Creates an address handle associated with the
3437 *   sender of the specified work completion.
3438 * @pd: The protection domain associated with the address handle.
3439 * @wc: Work completion information associated with a received message.
3440 * @grh: References the received global route header.  This parameter is
3441 *   ignored unless the work completion indicates that the GRH is valid.
3442 * @port_num: The outbound port number to associate with the address.
3443 *
3444 * The address handle is used to reference a local or global destination
3445 * in all UD QP post sends.
3446 */
3447struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3448				   const struct ib_grh *grh, u8 port_num);
3449
3450/**
3451 * rdma_modify_ah - Modifies the address vector associated with an address
3452 *   handle.
3453 * @ah: The address handle to modify.
3454 * @ah_attr: The new address vector attributes to associate with the
3455 *   address handle.
3456 */
3457int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3458
3459/**
3460 * rdma_query_ah - Queries the address vector associated with an address
3461 *   handle.
3462 * @ah: The address handle to query.
3463 * @ah_attr: The address vector attributes associated with the address
3464 *   handle.
3465 */
3466int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3467
3468enum rdma_destroy_ah_flags {
3469	/* In a sleepable context */
3470	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3471};
3472
3473/**
3474 * rdma_destroy_ah_user - Destroys an address handle.
3475 * @ah: The address handle to destroy.
3476 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3477 * @udata: Valid user data or NULL for kernel objects
3478 */
3479int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3480
3481/**
3482 * rdma_destroy_ah - Destroys a kernel address handle.
3483 * @ah: The address handle to destroy.
3484 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3485 *
3486 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3487 */
3488static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3489{
3490	return rdma_destroy_ah_user(ah, flags, NULL);
3491}
3492
3493/**
3494 * ib_create_srq - Creates a SRQ associated with the specified protection
3495 *   domain.
3496 * @pd: The protection domain associated with the SRQ.
3497 * @srq_init_attr: A list of initial attributes required to create the
3498 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
3499 *   the actual capabilities of the created SRQ.
3500 *
3501 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
3502 * requested size of the SRQ, and set to the actual values allocated
3503 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
3504 * will always be at least as large as the requested values.
3505 */
3506struct ib_srq *ib_create_srq(struct ib_pd *pd,
3507			     struct ib_srq_init_attr *srq_init_attr);
3508
3509/**
3510 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3511 * @srq: The SRQ to modify.
3512 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3513 *   the current values of selected SRQ attributes are returned.
3514 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3515 *   are being modified.
3516 *
3517 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3518 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3519 * the number of receives queued drops below the limit.
3520 */
3521int ib_modify_srq(struct ib_srq *srq,
3522		  struct ib_srq_attr *srq_attr,
3523		  enum ib_srq_attr_mask srq_attr_mask);
3524
3525/**
3526 * ib_query_srq - Returns the attribute list and current values for the
3527 *   specified SRQ.
3528 * @srq: The SRQ to query.
3529 * @srq_attr: The attributes of the specified SRQ.
3530 */
3531int ib_query_srq(struct ib_srq *srq,
3532		 struct ib_srq_attr *srq_attr);
3533
3534/**
3535 * ib_destroy_srq_user - Destroys the specified SRQ.
3536 * @srq: The SRQ to destroy.
3537 * @udata: Valid user data or NULL for kernel objects
3538 */
3539int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3540
3541/**
3542 * ib_destroy_srq - Destroys the specified kernel SRQ.
3543 * @srq: The SRQ to destroy.
3544 *
3545 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3546 */
3547static inline int ib_destroy_srq(struct ib_srq *srq)
3548{
3549	return ib_destroy_srq_user(srq, NULL);
3550}
3551
3552/**
3553 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3554 * @srq: The SRQ to post the work request on.
3555 * @recv_wr: A list of work requests to post on the receive queue.
3556 * @bad_recv_wr: On an immediate failure, this parameter will reference
3557 *   the work request that failed to be posted on the SRQ.
3558 */
3559static inline int ib_post_srq_recv(struct ib_srq *srq,
3560				   const struct ib_recv_wr *recv_wr,
3561				   const struct ib_recv_wr **bad_recv_wr)
3562{
3563	const struct ib_recv_wr *dummy;
3564
3565	return srq->device->ops.post_srq_recv(srq, recv_wr,
3566					      bad_recv_wr ? : &dummy);
3567}
3568
3569/**
3570 * ib_create_qp_user - Creates a QP associated with the specified protection
3571 *   domain.
3572 * @pd: The protection domain associated with the QP.
3573 * @qp_init_attr: A list of initial attributes required to create the
3574 *   QP.  If QP creation succeeds, then the attributes are updated to
3575 *   the actual capabilities of the created QP.
3576 * @udata: Valid user data or NULL for kernel objects
3577 */
3578struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
3579				struct ib_qp_init_attr *qp_init_attr,
3580				struct ib_udata *udata);
3581
3582/**
3583 * ib_create_qp - Creates a kernel QP associated with the specified protection
3584 *   domain.
3585 * @pd: The protection domain associated with the QP.
3586 * @qp_init_attr: A list of initial attributes required to create the
3587 *   QP.  If QP creation succeeds, then the attributes are updated to
3588 *   the actual capabilities of the created QP.
3589 * @udata: Valid user data or NULL for kernel objects
3590 *
3591 * NOTE: for user qp use ib_create_qp_user with valid udata!
3592 */
3593static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3594					 struct ib_qp_init_attr *qp_init_attr)
3595{
3596	return ib_create_qp_user(pd, qp_init_attr, NULL);
3597}
3598
3599/**
3600 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3601 * @qp: The QP to modify.
3602 * @attr: On input, specifies the QP attributes to modify.  On output,
3603 *   the current values of selected QP attributes are returned.
3604 * @attr_mask: A bit-mask used to specify which attributes of the QP
3605 *   are being modified.
3606 * @udata: pointer to the user's input/output buffer information
3607 *   needed by the provider driver.
3608 * It returns 0 on success and returns appropriate error code on error.
3609 */
3610int ib_modify_qp_with_udata(struct ib_qp *qp,
3611			    struct ib_qp_attr *attr,
3612			    int attr_mask,
3613			    struct ib_udata *udata);
3614
3615/**
3616 * ib_modify_qp - Modifies the attributes for the specified QP and then
3617 *   transitions the QP to the given state.
3618 * @qp: The QP to modify.
3619 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3620 *   the current values of selected QP attributes are returned.
3621 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3622 *   are being modified.
3623 */
3624int ib_modify_qp(struct ib_qp *qp,
3625		 struct ib_qp_attr *qp_attr,
3626		 int qp_attr_mask);
3627
3628/**
3629 * ib_query_qp - Returns the attribute list and current values for the
3630 *   specified QP.
3631 * @qp: The QP to query.
3632 * @qp_attr: The attributes of the specified QP.
3633 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3634 * @qp_init_attr: Additional attributes of the selected QP.
3635 *
3636 * The qp_attr_mask may be used to limit the query to gathering only the
3637 * selected attributes.
3638 */
3639int ib_query_qp(struct ib_qp *qp,
3640		struct ib_qp_attr *qp_attr,
3641		int qp_attr_mask,
3642		struct ib_qp_init_attr *qp_init_attr);
3643
3644/**
3645 * ib_destroy_qp_user - Destroys the specified QP.
3646 * @qp: The QP to destroy.
3647 * @udata: Valid udata or NULL for kernel objects
3648 */
3649int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3650
3651/**
3652 * ib_destroy_qp - Destroys the specified kernel QP.
3653 * @qp: The QP to destroy.
3654 *
3655 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3656 */
3657static inline int ib_destroy_qp(struct ib_qp *qp)
3658{
3659	return ib_destroy_qp_user(qp, NULL);
3660}
3661
3662/**
3663 * ib_open_qp - Obtain a reference to an existing sharable QP.
3664 * @xrcd: XRC domain
3665 * @qp_open_attr: Attributes identifying the QP to open.
3666 *
3667 * Returns a reference to a sharable QP.
3668 */
3669struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3670			 struct ib_qp_open_attr *qp_open_attr);
3671
3672/**
3673 * ib_close_qp - Release an external reference to a QP.
3674 * @qp: The QP handle to release
3675 *
3676 * The opened QP handle is released by the caller.  The underlying
3677 * shared QP is not destroyed until all internal references are released.
3678 */
3679int ib_close_qp(struct ib_qp *qp);
3680
3681/**
3682 * ib_post_send - Posts a list of work requests to the send queue of
3683 *   the specified QP.
3684 * @qp: The QP to post the work request on.
3685 * @send_wr: A list of work requests to post on the send queue.
3686 * @bad_send_wr: On an immediate failure, this parameter will reference
3687 *   the work request that failed to be posted on the QP.
3688 *
3689 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3690 * error is returned, the QP state shall not be affected,
3691 * ib_post_send() will return an immediate error after queueing any
3692 * earlier work requests in the list.
3693 */
3694static inline int ib_post_send(struct ib_qp *qp,
3695			       const struct ib_send_wr *send_wr,
3696			       const struct ib_send_wr **bad_send_wr)
3697{
3698	const struct ib_send_wr *dummy;
3699
3700	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3701}
3702
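/*
 * Illustrative sketch (not part of this header): posting a single RDMA WRITE
 * work request.  "qp", "sge", "remote_addr" and "rkey" are assumed to have
 * been set up by the caller.
 *
 *	struct ib_rdma_wr wr = {};
 *	const struct ib_send_wr *bad_wr;
 *
 *	wr.wr.opcode = IB_WR_RDMA_WRITE;
 *	wr.wr.sg_list = &sge;
 *	wr.wr.num_sge = 1;
 *	wr.wr.send_flags = IB_SEND_SIGNALED;
 *	wr.remote_addr = remote_addr;
 *	wr.rkey = rkey;
 *
 *	if (ib_post_send(qp, &wr.wr, &bad_wr))
 *		pr_err("posting RDMA WRITE failed\n");
 */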
3703/**
3704 * ib_post_recv - Posts a list of work requests to the receive queue of
3705 *   the specified QP.
3706 * @qp: The QP to post the work request on.
3707 * @recv_wr: A list of work requests to post on the receive queue.
3708 * @bad_recv_wr: On an immediate failure, this parameter will reference
3709 *   the work request that failed to be posted on the QP.
3710 */
3711static inline int ib_post_recv(struct ib_qp *qp,
3712			       const struct ib_recv_wr *recv_wr,
3713			       const struct ib_recv_wr **bad_recv_wr)
3714{
3715	const struct ib_recv_wr *dummy;
3716
3717	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3718}
3719
3720struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3721				 int nr_cqe, int comp_vector,
3722				 enum ib_poll_context poll_ctx,
3723				 const char *caller, struct ib_udata *udata);
3724
3725/**
3726 * ib_alloc_cq_user: Allocate kernel/user CQ
3727 * @dev: The IB device
3728 * @private: Private data attached to the CQE
3729 * @nr_cqe: Number of CQEs in the CQ
3730 * @comp_vector: Completion vector used for the IRQs
3731 * @poll_ctx: Context used for polling the CQ
3732 * @udata: Valid user data or NULL for kernel objects
3733 */
3734static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
3735					     void *private, int nr_cqe,
3736					     int comp_vector,
3737					     enum ib_poll_context poll_ctx,
3738					     struct ib_udata *udata)
3739{
3740	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3741				  KBUILD_MODNAME, udata);
3742}
3743
3744/**
3745 * ib_alloc_cq: Allocate kernel CQ
3746 * @dev: The IB device
3747 * @private: Private data attached to the CQE
3748 * @nr_cqe: Number of CQEs in the CQ
3749 * @comp_vector: Completion vector used for the IRQs
3750 * @poll_ctx: Context used for polling the CQ
3751 *
3752 * NOTE: for user cq use ib_alloc_cq_user with valid udata!
3753 */
3754static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3755					int nr_cqe, int comp_vector,
3756					enum ib_poll_context poll_ctx)
3757{
3758	return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3759				NULL);
3760}
3761
3762struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3763				int nr_cqe, enum ib_poll_context poll_ctx,
3764				const char *caller);
3765
3766/**
3767 * ib_alloc_cq_any: Allocate kernel CQ
3768 * @dev: The IB device
3769 * @private: Private data attached to the CQE
3770 * @nr_cqe: Number of CQEs in the CQ
3771 * @poll_ctx: Context used for polling the CQ
3772 */
3773static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3774					    void *private, int nr_cqe,
3775					    enum ib_poll_context poll_ctx)
3776{
3777	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3778				 KBUILD_MODNAME);
3779}
3780
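/*
 * Illustrative sketch (not part of this header): allocating a kernel CQ for
 * 128 entries that is polled from soft-IRQ context, and freeing it again.
 * "device" is assumed to be a valid struct ib_device.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq_any(device, NULL, 128, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */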
3781/**
3782 * ib_free_cq_user - Free kernel/user CQ
3783 * @cq: The CQ to free
3784 * @udata: Valid user data or NULL for kernel objects
3785 */
3786void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3787
3788/**
3789 * ib_free_cq - Free kernel CQ
3790 * @cq: The CQ to free
3791 *
3792 * NOTE: for user cq use ib_free_cq_user with valid udata!
3793 */
3794static inline void ib_free_cq(struct ib_cq *cq)
3795{
3796	ib_free_cq_user(cq, NULL);
3797}
3798
3799int ib_process_cq_direct(struct ib_cq *cq, int budget);
3800
3801/**
3802 * ib_create_cq - Creates a CQ on the specified device.
3803 * @device: The device on which to create the CQ.
3804 * @comp_handler: A user-specified callback that is invoked when a
3805 *   completion event occurs on the CQ.
3806 * @event_handler: A user-specified callback that is invoked when an
3807 *   asynchronous event not associated with a completion occurs on the CQ.
3808 * @cq_context: Context associated with the CQ returned to the user via
3809 *   the associated completion and event handlers.
3810 * @cq_attr: The attributes the CQ should be created upon.
3811 *
3812 * Users can examine the cq structure to determine the actual CQ size.
3813 */
3814struct ib_cq *__ib_create_cq(struct ib_device *device,
3815			     ib_comp_handler comp_handler,
3816			     void (*event_handler)(struct ib_event *, void *),
3817			     void *cq_context,
3818			     const struct ib_cq_init_attr *cq_attr,
3819			     const char *caller);
3820#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3821	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3822
3823/**
3824 * ib_resize_cq - Modifies the capacity of the CQ.
3825 * @cq: The CQ to resize.
3826 * @cqe: The minimum size of the CQ.
3827 *
3828 * Users can examine the cq structure to determine the actual CQ size.
3829 */
3830int ib_resize_cq(struct ib_cq *cq, int cqe);
3831
3832/**
3833 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3834 * @cq: The CQ to modify.
3835 * @cq_count: number of CQEs that will trigger an event
3836 * @cq_period: max period of time in usec before triggering an event
3837 *
3838 */
3839int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3840
3841/**
3842 * ib_destroy_cq_user - Destroys the specified CQ.
3843 * @cq: The CQ to destroy.
3844 * @udata: Valid user data or NULL for kernel objects
3845 */
3846int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3847
3848/**
3849 * ib_destroy_cq - Destroys the specified kernel CQ.
3850 * @cq: The CQ to destroy.
3851 *
3852 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3853 */
3854static inline void ib_destroy_cq(struct ib_cq *cq)
3855{
3856	ib_destroy_cq_user(cq, NULL);
3857}
3858
3859/**
3860 * ib_poll_cq - poll a CQ for completion(s)
3861 * @cq: the CQ being polled
3862 * @num_entries: maximum number of completions to return
3863 * @wc: array of at least @num_entries &struct ib_wc where completions
3864 *   will be returned
3865 *
3866 * Poll a CQ for (possibly multiple) completions.  If the return value
3867 * is < 0, an error occurred.  If the return value is >= 0, it is the
3868 * number of completions returned.  If the return value is
3869 * non-negative and < num_entries, then the CQ was emptied.
3870 */
3871static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3872			     struct ib_wc *wc)
3873{
3874	return cq->device->ops.poll_cq(cq, num_entries, wc);
3875}
3876
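/*
 * Illustrative sketch (not part of this header): draining all currently
 * queued completions from a CQ.  "cq" is assumed valid; most consumers use
 * the ib_alloc_cq()/ib_cqe machinery instead of polling by hand.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("completion failed: %d\n", wc.status);
 *	}
 */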
3877/**
3878 * ib_req_notify_cq - Request completion notification on a CQ.
3879 * @cq: The CQ to generate an event for.
3880 * @flags:
3881 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3882 *   to request an event on the next solicited event or next work
3883 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3884 *   may also be |ed in to request a hint about missed events, as
3885 *   described below.
3886 *
3887 * Return Value:
3888 *    < 0 means an error occurred while requesting notification
3889 *   == 0 means notification was requested successfully, and if
3890 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3891 *        were missed and it is safe to wait for another event.  In
3892 *        this case is it guaranteed that any work completions added
3893 *        to the CQ since the last CQ poll will trigger a completion
3894 *        notification event.
3895 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3896 *        in.  It means that the consumer must poll the CQ again to
3897 *        make sure it is empty to avoid missing an event because of a
3898 *        race between requesting notification and an entry being
3899 *        added to the CQ.  This return value means it is possible
3900 *        (but not guaranteed) that a work completion has been added
3901 *        to the CQ since the last poll without triggering a
3902 *        completion notification event.
3903 */
3904static inline int ib_req_notify_cq(struct ib_cq *cq,
3905				   enum ib_cq_notify_flags flags)
3906{
3907	return cq->device->ops.req_notify_cq(cq, flags);
3908}
3909
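/*
 * Illustrative sketch (not part of this header) of the re-arm/re-poll pattern
 * described above.  "cq" is assumed valid and handle_completion() is a
 * hypothetical consumer hook.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */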
3910/**
3911 * ib_req_ncomp_notif - Request completion notification when there are
3912 *   at least the specified number of unreaped completions on the CQ.
3913 * @cq: The CQ to generate an event for.
3914 * @wc_cnt: The number of unreaped completions that should be on the
3915 *   CQ before an event is generated.
3916 */
3917static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3918{
3919	return cq->device->ops.req_ncomp_notif ?
3920		cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
3921		-ENOSYS;
3922}
3923
3924/**
3925 * ib_dma_mapping_error - check a DMA addr for error
3926 * @dev: The device for which the dma_addr was created
3927 * @dma_addr: The DMA address to check
3928 */
3929static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3930{
3931	return dma_mapping_error(dev->dma_device, dma_addr);
3932}
3933
3934/**
3935 * ib_dma_map_single - Map a kernel virtual address to DMA address
3936 * @dev: The device for which the dma_addr is to be created
3937 * @cpu_addr: The kernel virtual address
3938 * @size: The size of the region in bytes
3939 * @direction: The direction of the DMA
3940 */
3941static inline u64 ib_dma_map_single(struct ib_device *dev,
3942				    void *cpu_addr, size_t size,
3943				    enum dma_data_direction direction)
3944{
3945	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3946}
3947
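/*
 * Illustrative sketch (not part of this header): mapping a kernel buffer for
 * device access and checking the result.  "dev", "buf" and "len" are assumed
 * to be provided by the caller.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */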
3948/**
3949 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3950 * @dev: The device for which the DMA address was created
3951 * @addr: The DMA address
3952 * @size: The size of the region in bytes
3953 * @direction: The direction of the DMA
3954 */
3955static inline void ib_dma_unmap_single(struct ib_device *dev,
3956				       u64 addr, size_t size,
3957				       enum dma_data_direction direction)
3958{
3959	dma_unmap_single(dev->dma_device, addr, size, direction);
3960}
3961
3962/**
3963 * ib_dma_map_page - Map a physical page to DMA address
3964 * @dev: The device for which the dma_addr is to be created
3965 * @page: The page to be mapped
3966 * @offset: The offset within the page
3967 * @size: The size of the region in bytes
3968 * @direction: The direction of the DMA
3969 */
3970static inline u64 ib_dma_map_page(struct ib_device *dev,
3971				  struct page *page,
3972				  unsigned long offset,
3973				  size_t size,
3974				  enum dma_data_direction direction)
3975{
3976	return dma_map_page(dev->dma_device, page, offset, size, direction);
3977}
3978
3979/**
3980 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3981 * @dev: The device for which the DMA address was created
3982 * @addr: The DMA address
3983 * @size: The size of the region in bytes
3984 * @direction: The direction of the DMA
3985 */
3986static inline void ib_dma_unmap_page(struct ib_device *dev,
3987				     u64 addr, size_t size,
3988				     enum dma_data_direction direction)
3989{
3990	dma_unmap_page(dev->dma_device, addr, size, direction);
3991}
3992
3993/**
3994 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3995 * @dev: The device for which the DMA addresses are to be created
3996 * @sg: The array of scatter/gather entries
3997 * @nents: The number of scatter/gather entries
3998 * @direction: The direction of the DMA
3999 */
4000static inline int ib_dma_map_sg(struct ib_device *dev,
4001				struct scatterlist *sg, int nents,
4002				enum dma_data_direction direction)
4003{
4004	return dma_map_sg(dev->dma_device, sg, nents, direction);
4005}
4006
4007/**
4008 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4009 * @dev: The device for which the DMA addresses were created
4010 * @sg: The array of scatter/gather entries
4011 * @nents: The number of scatter/gather entries
4012 * @direction: The direction of the DMA
4013 */
4014static inline void ib_dma_unmap_sg(struct ib_device *dev,
4015				   struct scatterlist *sg, int nents,
4016				   enum dma_data_direction direction)
4017{
4018	dma_unmap_sg(dev->dma_device, sg, nents, direction);
4019}
4020
4021static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4022				      struct scatterlist *sg, int nents,
4023				      enum dma_data_direction direction,
4024				      unsigned long dma_attrs)
4025{
4026	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4027				dma_attrs);
4028}
4029
4030static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4031					 struct scatterlist *sg, int nents,
4032					 enum dma_data_direction direction,
4033					 unsigned long dma_attrs)
4034{
4035	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
4036}
4037
4038/**
4039 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4040 * @dev: The device to query
4041 *
4042 * The returned value represents a size in bytes.
4043 */
4044static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4045{
4046	struct device_dma_parameters *p = dev->dma_device->dma_parms;
4047
4048	return p ? p->max_segment_size : UINT_MAX;
4049}
4050
4051/**
4052 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4053 * @dev: The device for which the DMA address was created
4054 * @addr: The DMA address
4055 * @size: The size of the region in bytes
4056 * @dir: The direction of the DMA
4057 */
4058static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4059					      u64 addr,
4060					      size_t size,
4061					      enum dma_data_direction dir)
4062{
4063	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4064}
4065
4066/**
4067 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4068 * @dev: The device for which the DMA address was created
4069 * @addr: The DMA address
4070 * @size: The size of the region in bytes
4071 * @dir: The direction of the DMA
4072 */
4073static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4074						 u64 addr,
4075						 size_t size,
4076						 enum dma_data_direction dir)
4077{
4078	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4079}
4080
4081/**
4082 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
4083 * @dev: The device for which the DMA address is requested
4084 * @size: The size of the region to allocate in bytes
4085 * @dma_handle: A pointer for returning the DMA address of the region
4086 * @flag: memory allocator flags
4087 */
4088static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4089					   size_t size,
4090					   dma_addr_t *dma_handle,
4091					   gfp_t flag)
4092{
4093	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
4094}
4095
4096/**
4097 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
4098 * @dev: The device for which the DMA addresses were allocated
4099 * @size: The size of the region
4100 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
4101 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
4102 */
4103static inline void ib_dma_free_coherent(struct ib_device *dev,
4104					size_t size, void *cpu_addr,
4105					dma_addr_t dma_handle)
4106{
4107	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
4108}
4109
4110/**
4111 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4112 *   HCA translation table.
4113 * @mr: The memory region to deregister.
4114 * @udata: Valid user data or NULL for kernel object
4115 *
4116 * This function can fail if the memory region has memory windows bound to it.
4117 */
4118int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4119
4120/**
4121 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4122 *   HCA translation table.
4123 * @mr: The memory region to deregister.
4124 *
4125 * This function can fail if the memory region has memory windows bound to it.
4126 *
4127 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4128 */
4129static inline int ib_dereg_mr(struct ib_mr *mr)
4130{
4131	return ib_dereg_mr_user(mr, NULL);
4132}
4133
4134struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
4135			       u32 max_num_sg, struct ib_udata *udata);
4136
4137static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
4138					enum ib_mr_type mr_type, u32 max_num_sg)
4139{
4140	return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
4141}
4142
4143struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4144				    u32 max_num_data_sg,
4145				    u32 max_num_meta_sg);
4146
4147/**
4148 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4149 *   R_Key and L_Key.
4150 * @mr - struct ib_mr pointer to be updated.
4151 * @newkey - new key to be used.
4152 */
4153static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4154{
4155	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4156	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4157}
4158
4159/**
4160 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4161 * for calculating a new rkey for type 2 memory windows.
4162 * @rkey - the rkey to increment.
4163 */
4164static inline u32 ib_inc_rkey(u32 rkey)
4165{
4166	const u32 mask = 0x000000ff;
4167	return ((rkey + 1) & mask) | (rkey & ~mask);
4168}
4169
4170/**
4171 * ib_alloc_fmr - Allocates an unmapped fast memory region.
4172 * @pd: The protection domain associated with the unmapped region.
4173 * @mr_access_flags: Specifies the memory access rights.
4174 * @fmr_attr: Attributes of the unmapped region.
4175 *
4176 * A fast memory region must be mapped before it can be used as part of
4177 * a work request.
4178 */
4179struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
4180			    int mr_access_flags,
4181			    struct ib_fmr_attr *fmr_attr);
4182
4183/**
4184 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
4185 * @fmr: The fast memory region to associate with the pages.
4186 * @page_list: An array of physical pages to map to the fast memory region.
4187 * @list_len: The number of pages in page_list.
4188 * @iova: The I/O virtual address to use with the mapped region.
4189 */
4190static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
4191				  u64 *page_list, int list_len,
4192				  u64 iova)
4193{
4194	return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
4195}
4196
4197/**
4198 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
4199 * @fmr_list: A linked list of fast memory regions to unmap.
4200 */
4201int ib_unmap_fmr(struct list_head *fmr_list);
4202
4203/**
4204 * ib_dealloc_fmr - Deallocates a fast memory region.
4205 * @fmr: The fast memory region to deallocate.
4206 */
4207int ib_dealloc_fmr(struct ib_fmr *fmr);
4208
4209/**
4210 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4211 * @qp: QP to attach to the multicast group.  The QP must be type
4212 *   IB_QPT_UD.
4213 * @gid: Multicast group GID.
4214 * @lid: Multicast group LID in host byte order.
4215 *
4216 * In order to send and receive multicast packets, subnet
4217 * administration must have created the multicast group and configured
4218 * the fabric appropriately.  The port associated with the specified
4219 * QP must also be a member of the multicast group.
4220 */
4221int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4222
4223/**
4224 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4225 * @qp: QP to detach from the multicast group.
4226 * @gid: Multicast group GID.
4227 * @lid: Multicast group LID in host byte order.
4228 */
4229int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4230
4231/**
4232 * ib_alloc_xrcd - Allocates an XRC domain.
4233 * @device: The device on which to allocate the XRC domain.
4234 * @caller: Module name for kernel consumers
4235 */
4236struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
4237#define ib_alloc_xrcd(device) \
4238	__ib_alloc_xrcd((device), KBUILD_MODNAME)
4239
4240/**
4241 * ib_dealloc_xrcd - Deallocates an XRC domain.
4242 * @xrcd: The XRC domain to deallocate.
4243 * @udata: Valid user data or NULL for kernel object
4244 */
4245int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
4246
4247static inline int ib_check_mr_access(int flags)
4248{
4249	/*
4250	 * Local write permission is required if remote write or
4251	 * remote atomic permission is also requested.
4252	 */
4253	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4254	    !(flags & IB_ACCESS_LOCAL_WRITE))
4255		return -EINVAL;
4256
4257	return 0;
4258}
4259
4260static inline bool ib_access_writable(int access_flags)
4261{
4262	/*
4263	 * We have writable memory backing the MR if any of the following
4264	 * access flags are set.  "Local write" and "remote write" obviously
4265	 * require write access.  "Remote atomic" can do things like fetch and
4266	 * add, which will modify memory, and "MW bind" can change permissions
4267	 * by binding a window.
4268	 */
4269	return access_flags &
4270		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4271		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4272}
4273
4274/**
4275 * ib_check_mr_status: lightweight check of MR status.
4276 *     This routine may provide status checks on a selected
4277 *     ib_mr.  First use is for the signature status check.
4278 *
4279 * @mr: A memory region.
4280 * @check_mask: Bitmask of which checks to perform from
4281 *     ib_mr_status_check enumeration.
4282 * @mr_status: The container of relevant status checks.
4283 *     Failed checks will be indicated in the status bitmask
4284 *     and the relevant info shall be in the error item.
4285 */
4286int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4287		       struct ib_mr_status *mr_status);
4288
4289/**
4290 * ib_device_try_get: Hold a registration lock
4291 * @device: The device to lock
4292 *
4293 * A device under an active registration lock cannot become unregistered. It
4294 * is only possible to obtain a registration lock on a device that is fully
4295 * registered, otherwise this function returns false.
4296 *
4297 * The registration lock is only necessary for actions which require the
4298 * device to still be registered. Uses that only require the device pointer to
4299 * be valid should use get_device(&ibdev->dev) to hold the memory.
4300 *
4301 */
4302static inline bool ib_device_try_get(struct ib_device *dev)
4303{
4304	return refcount_inc_not_zero(&dev->refcount);
4305}
4306
4307void ib_device_put(struct ib_device *device);
4308struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4309					  enum rdma_driver_id driver_id);
4310struct ib_device *ib_device_get_by_name(const char *name,
4311					enum rdma_driver_id driver_id);
4312struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4313					    u16 pkey, const union ib_gid *gid,
4314					    const struct sockaddr *addr);
4315int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4316			 unsigned int port);
4317struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4318
4319struct ib_wq *ib_create_wq(struct ib_pd *pd,
4320			   struct ib_wq_init_attr *init_attr);
4321int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
4322int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4323		 u32 wq_attr_mask);
4324struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
4325						 struct ib_rwq_ind_table_init_attr*
4326						 wq_ind_table_init_attr);
4327int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
4328
4329int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4330		 unsigned int *sg_offset, unsigned int page_size);
4331int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4332		    int data_sg_nents, unsigned int *data_sg_offset,
4333		    struct scatterlist *meta_sg, int meta_sg_nents,
4334		    unsigned int *meta_sg_offset, unsigned int page_size);
4335
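/*
 * Illustrative sketch (not part of this header): registering a scatterlist
 * with a fast-registration MR.  "pd", "sg" and "sg_nents" are assumed to be
 * set up by the caller; PAGE_SIZE is used as the HW page size.  The MR still
 * has to be made live with an IB_WR_REG_MR work request before use.
 *
 *	struct ib_mr *mr;
 *	int n;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (n < sg_nents)
 *		pr_warn("only %d of %d entries mapped\n", n, sg_nents);
 */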
4336static inline int
4337ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4338		  unsigned int *sg_offset, unsigned int page_size)
4339{
4340	int n;
4341
4342	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4343	mr->iova = 0;
4344
4345	return n;
4346}
4347
4348int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4349		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4350
4351void ib_drain_rq(struct ib_qp *qp);
4352void ib_drain_sq(struct ib_qp *qp);
4353void ib_drain_qp(struct ib_qp *qp);
4354
4355int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
4356
4357static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4358{
4359	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4360		return attr->roce.dmac;
4361	return NULL;
4362}
4363
4364static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4365{
4366	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4367		attr->ib.dlid = (u16)dlid;
4368	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4369		attr->opa.dlid = dlid;
4370}
4371
4372static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4373{
4374	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4375		return attr->ib.dlid;
4376	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4377		return attr->opa.dlid;
4378	return 0;
4379}
4380
4381static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4382{
4383	attr->sl = sl;
4384}
4385
4386static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4387{
4388	return attr->sl;
4389}
4390
4391static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4392					 u8 src_path_bits)
4393{
4394	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4395		attr->ib.src_path_bits = src_path_bits;
4396	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4397		attr->opa.src_path_bits = src_path_bits;
4398}
4399
4400static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4401{
4402	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4403		return attr->ib.src_path_bits;
4404	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4405		return attr->opa.src_path_bits;
4406	return 0;
4407}
4408
4409static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4410					bool make_grd)
4411{
4412	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4413		attr->opa.make_grd = make_grd;
4414}
4415
4416static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4417{
4418	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4419		return attr->opa.make_grd;
4420	return false;
4421}
4422
4423static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4424{
4425	attr->port_num = port_num;
4426}
4427
4428static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4429{
4430	return attr->port_num;
4431}
4432
4433static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4434					   u8 static_rate)
4435{
4436	attr->static_rate = static_rate;
4437}
4438
4439static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4440{
4441	return attr->static_rate;
4442}
4443
4444static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4445					enum ib_ah_flags flag)
4446{
4447	attr->ah_flags = flag;
4448}
4449
4450static inline enum ib_ah_flags
4451		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4452{
4453	return attr->ah_flags;
4454}
4455
4456static inline const struct ib_global_route
4457		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4458{
4459	return &attr->grh;
4460}
4461
4462/* To retrieve and modify the GRH */
4463static inline struct ib_global_route
4464		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4465{
4466	return &attr->grh;
4467}
4468
4469static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4470{
4471	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4472
4473	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4474}
4475
4476static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4477					     __be64 prefix)
4478{
4479	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4480
4481	grh->dgid.global.subnet_prefix = prefix;
4482}
4483
4484static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4485					    __be64 if_id)
4486{
4487	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4488
4489	grh->dgid.global.interface_id = if_id;
4490}
4491
4492static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4493				   union ib_gid *dgid, u32 flow_label,
4494				   u8 sgid_index, u8 hop_limit,
4495				   u8 traffic_class)
4496{
4497	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4498
4499	attr->ah_flags = IB_AH_GRH;
4500	if (dgid)
4501		grh->dgid = *dgid;
4502	grh->flow_label = flow_label;
4503	grh->sgid_index = sgid_index;
4504	grh->hop_limit = hop_limit;
4505	grh->traffic_class = traffic_class;
4506	grh->sgid_attr = NULL;
4507}
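
/*
 * The setters above can be combined to describe a destination by hand. A
 * minimal sketch for an IB UD destination with a GRH (dlid, sl, dgid and the
 * other values are assumed to come from the caller, e.g. a path record):
 *
 *	struct rdma_ah_attr attr = { };
 *
 *	attr.type = RDMA_AH_ATTR_TYPE_IB;
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_sl(&attr, sl);
 *	rdma_ah_set_grh(&attr, &dgid, flow_label, sgid_index, hop_limit,
 *			traffic_class);
 *
 * The filled attribute can then be handed to rdma_create_ah() or used as
 * part of an ib_modify_qp() path change.
 */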
4508
4509void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4510void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4511			     u32 flow_label, u8 hop_limit, u8 traffic_class,
4512			     const struct ib_gid_attr *sgid_attr);
4513void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4514		       const struct rdma_ah_attr *src);
4515void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4516			  const struct rdma_ah_attr *new);
4517void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4518
4519/**
4520 * rdma_ah_find_type - Return address handle type.
4521 *
4522 * @dev: Device to be checked
4523 * @port_num: Port number
4524 */
4525static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4526						       u8 port_num)
4527{
4528	if (rdma_protocol_roce(dev, port_num))
4529		return RDMA_AH_ATTR_TYPE_ROCE;
4530	if (rdma_protocol_ib(dev, port_num)) {
4531		if (rdma_cap_opa_ah(dev, port_num))
4532			return RDMA_AH_ATTR_TYPE_OPA;
4533		return RDMA_AH_ATTR_TYPE_IB;
4534	}
4535
4536	return RDMA_AH_ATTR_TYPE_UNDEFINED;
4537}
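
/*
 * The typed setters earlier in this file silently do nothing when
 * attr->type does not match, so portable code usually derives the type from
 * the device and port first. A short sketch:
 *
 *	attr.type = rdma_ah_find_type(device, port_num);
 *	rdma_ah_set_dlid(&attr, dlid);
 *
 * With a RoCE port the rdma_ah_set_dlid() call above is a no-op, while IB
 * and OPA ports store the 16- or 32-bit LID respectively.
 */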
4538
4539/**
4540 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4541 *     In the current implementation the only way to get
4542 *     the 32bit lid for OPA is from other sources.
4543 *     For IB, lids will always be 16bits so cast the
4544 *     value accordingly.
4545 *
4546 * @lid: A 32bit LID
4547 */
4548static inline u16 ib_lid_cpu16(u32 lid)
4549{
4550	WARN_ON_ONCE(lid & 0xFFFF0000);
4551	return (u16)lid;
4552}
4553
4554/**
4555 * ib_lid_be16 - Return lid in 16bit BE encoding.
4556 *
4557 * @lid: A 32bit LID
4558 */
4559static inline __be16 ib_lid_be16(u32 lid)
4560{
4561	WARN_ON_ONCE(lid & 0xFFFF0000);
4562	return cpu_to_be16((u16)lid);
4563}
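
/*
 * A short sketch of the LID helpers above, assuming "ah_attr" comes from
 * the caller; the WARN_ON_ONCE() fires if a 32-bit OPA extended LID is
 * squeezed into a 16-bit field:
 *
 *	u32 lid = rdma_ah_get_dlid(ah_attr);
 *	__be16 wire_dlid = ib_lid_be16(lid);
 *	u16 host_dlid = ib_lid_cpu16(lid);
 */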
4564
4565/**
4566 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4567 *   vector
4568 * @device:         the rdma device
4569 * @comp_vector:    index of completion vector
4570 *
4571 * Returns the CPU map of the completion vector, or NULL if the vector
4572 * index is out of range or the device driver does not implement
4573 * get_vector_affinity.
4574 */
4575static inline const struct cpumask *
4576ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4577{
4578	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4579	    !device->ops.get_vector_affinity)
4580		return NULL;
4581
4582	return device->ops.get_vector_affinity(device, comp_vector);
4583
4584}
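
/*
 * A hypothetical sketch of how a ULP might pick a completion vector whose
 * interrupt affinity matches a given CPU, falling back to vector 0 when the
 * driver reports no affinity:
 *
 *	int vec;
 *
 *	for (vec = 0; vec < device->num_comp_vectors; vec++) {
 *		const struct cpumask *mask =
 *			ib_get_vector_affinity(device, vec);
 *
 *		if (mask && cpumask_test_cpu(cpu, mask))
 *			return vec;
 *	}
 *	return 0;
 */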
4585
4586/**
4587 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4588 * and add their gids, as needed, to the relevant RoCE devices.
4589 *
4590 * @device:         the rdma device
4591 */
4592void rdma_roce_rescan_device(struct ib_device *ibdev);
4593
4594struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4595
4596int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4597
4598struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4599				     enum rdma_netdev_t type, const char *name,
4600				     unsigned char name_assign_type,
4601				     void (*setup)(struct net_device *));
4602
4603int rdma_init_netdev(struct ib_device *device, u8 port_num,
4604		     enum rdma_netdev_t type, const char *name,
4605		     unsigned char name_assign_type,
4606		     void (*setup)(struct net_device *),
4607		     struct net_device *netdev);
4608
4609/**
4610 * rdma_set_device_sysfs_group - Set the device attribute group used to
4611 *				 expose driver-specific sysfs entries in
4612 *				 the infiniband class.
4613 *
4614 * @dev:	device pointer for which the attributes are to be created
4615 * @group:	Pointer to the group which should be added when the device
4616 *		is registered with sysfs.
4617 * rdma_set_device_sysfs_group() allows existing drivers to expose one
4618 * attribute group per device for sysfs attributes.
4619 *
4620 * NOTE: New drivers should not make use of this API; instead, new device
4621 * parameters should be exposed via netlink commands. This API and mechanism
4622 * exist only for existing drivers.
4623 */
4624static inline void
4625rdma_set_device_sysfs_group(struct ib_device *dev,
4626			    const struct attribute_group *group)
4627{
4628	dev->groups[1] = group;
4629}
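
/*
 * An illustrative sketch for an existing driver (the "my_*" and "hw_rev"
 * names are hypothetical); the group must be set before the device is
 * registered:
 *
 *	static ssize_t hw_rev_show(struct device *device,
 *				   struct device_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", 1);
 *	}
 *	static DEVICE_ATTR_RO(hw_rev);
 *
 *	static struct attribute *my_class_attributes[] = {
 *		&dev_attr_hw_rev.attr,
 *		NULL,
 *	};
 *	static const struct attribute_group my_attr_group = {
 *		.attrs = my_class_attributes,
 *	};
 *
 *	rdma_set_device_sysfs_group(ibdev, &my_attr_group);
 *	ib_register_device(ibdev, "my%d");
 */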
4630
4631/**
4632 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4633 *
4634 * @device:	device pointer from which to retrieve the ib_device pointer
4635 *
4636 * rdma_device_to_ibdev() retrieves the ib_device pointer from a device.
4637 *
4638 */
4639static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4640{
4641	struct ib_core_device *coredev =
4642		container_of(device, struct ib_core_device, dev);
4643
4644	return coredev->owner;
4645}
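
/*
 * Typical use is inside a sysfs show() callback registered through
 * rdma_set_device_sysfs_group(), where only the struct device pointer is
 * available. A sketch (the attribute name is hypothetical):
 *
 *	static ssize_t node_desc_show(struct device *device,
 *				      struct device_attribute *attr, char *buf)
 *	{
 *		struct ib_device *ibdev = rdma_device_to_ibdev(device);
 *
 *		return sprintf(buf, "%s\n", ibdev->node_desc);
 *	}
 */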
4646
4647/**
4648 * rdma_device_to_drv_device - Helper macro to reach back to the driver's
4649 *			       ib_device holder structure from a device pointer.
4650 *
4651 * NOTE: New drivers should not make use of this API; this API is only for
4652 * existing drivers that have exposed sysfs entries using
4653 * rdma_set_device_sysfs_group().
4654 */
4655#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
4656	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
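
/*
 * For a driver whose private structure embeds the ib_device, e.g. a
 * hypothetical "struct my_dev { struct ib_device ibdev; ... };", the macro
 * collapses the two-step lookup into a single expression:
 *
 *	struct my_dev *mdev = rdma_device_to_drv_device(device, struct my_dev,
 *							ibdev);
 */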
4657
4658bool rdma_dev_access_netns(const struct ib_device *device,
4659			   const struct net *net);
4660#endif /* IB_VERBS_H */