/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA 	= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	  return -1;
	}
}

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8                      phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

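/*
 * Example (illustrative, not part of the original header): a client
 * that wants asynchronous events can embed an ib_event_handler,
 * initialize it with INIT_IB_EVENT_HANDLER() and register it with
 * ib_register_event_handler(), declared further below.  The callback
 * name "my_event_handler" is hypothetical:
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %d is active\n",
 *				event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler my_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */
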
struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

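/*
 * Illustrative sketch of the conversion (the enum values above are
 * the IB spec encodings, not multiples, so an explicit mapping is
 * needed; the real implementation lives in the core, and this table
 * is believed to match it):
 *
 *	switch (rate) {
 *	case IB_RATE_2_5_GBPS: return  1;
 *	case IB_RATE_5_GBPS:   return  2;
 *	case IB_RATE_10_GBPS:  return  4;
 *	case IB_RATE_20_GBPS:  return  8;
 *	case IB_RATE_30_GBPS:  return 12;
 *	case IB_RATE_40_GBPS:  return 16;
 *	case IB_RATE_60_GBPS:  return 24;
 *	case IB_RATE_80_GBPS:  return 32;
 *	case IB_RATE_120_GBPS: return 48;
 *	default:	       return -1;
 *	}
 */
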
struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

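/*
 * Example (illustrative; "wc" is a completion returned by
 * ib_poll_cq(), and the two handlers are hypothetical): because every
 * receive opcode has bit 7 set, send and receive completions can be
 * told apart without enumerating every opcode:
 *
 *	if (wc.opcode & IB_WC_RECV)
 *		handle_recv(&wc);
 *	else
 *		handle_send(&wc);
 */
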
enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	int			csum_ok;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
};

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int     hlen;
			int     mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
	} wr;
};

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

struct ib_phys_buf {
	u64      addr;
	u64      size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t          	usecnt; /* count all resources */
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;
	atomic_t          	usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	atomic_t		usecnt;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
					       int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

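/*
 * Example (illustrative sketch): a low-level driver verb that takes a
 * struct ib_udata can use these helpers to exchange command/response
 * structures with userspace.  "struct my_cmd" and "struct my_resp"
 * are hypothetical driver-private ABI structures:
 *
 *	struct my_cmd cmd;
 *	struct my_resp resp;
 *
 *	if (ib_copy_from_udata(&cmd, udata, sizeof cmd))
 *		return ERR_PTR(-EFAULT);
 *
 *	(... create the object, fill in resp ...)
 *
 *	if (ib_copy_to_udata(udata, &resp, sizeof resp))
 *		return ERR_PTR(-EFAULT);
 */
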
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);

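/*
 * Illustrative sketch of how a driver's modify_qp method typically
 * uses this helper; "my_qp_current_state()" is a hypothetical lookup
 * of the driver's cached QP state:
 *
 *	cur_state = qp_attr_mask & IB_QP_CUR_STATE ?
 *		qp_attr->cur_qp_state : my_qp_current_state(qp);
 *	new_state = qp_attr_mask & IB_QP_STATE ?
 *		qp_attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type,
 *				qp_attr_mask))
 *		return -EINVAL;
 */
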
int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					       u8 port_num);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

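/*
 * Example (illustrative): like the other verbs that return objects,
 * ib_alloc_pd() reports failure through ERR_PTR() rather than NULL:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */
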
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

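/*
 * Example (illustrative): a UD service replying to a received message
 * can build the reply address handle straight from the work
 * completion; "wc" comes from ib_poll_cq() and "grh" points at the
 * GRH at the start of the receive buffer:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */
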
/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

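/*
 * Example (illustrative sketch, sizes are arbitrary): requesting an
 * SRQ and reading back the capabilities actually granted:
 *
 *	struct ib_srq_init_attr srq_init_attr = {
 *		.attr = {
 *			.max_wr	 = 128,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	(srq_init_attr.attr.max_wr now holds the value actually
 *	 allocated, which may exceed the request)
 */
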
/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}

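/*
 * Example (illustrative sketch): posting one signaled SEND of a
 * buffer that was mapped and registered earlier; "dma_addr", "len",
 * "mr" and MY_WRID are assumed to come from that setup:
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id		= MY_WRID,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *		.opcode		= IB_WR_SEND,
 *		.send_flags	= IB_SEND_SIGNALED,
 *	}, *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */
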
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

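/*
 * Example (illustrative; "my_comp_handler" and "my_ctx" are
 * hypothetical): creating a 256-entry CQ on completion vector 0 with
 * no asynchronous event handler:
 *
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, 256, 0);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */
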
/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

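/*
 * Example (illustrative): draining every available completion and
 * checking its status:
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("wr_id %llu failed with status %d\n",
 *			       (unsigned long long) wc.wr_id, wc.status);
 *	}
 */
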
/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}

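/*
 * Example (illustrative): the poll/arm/re-poll loop that the > 0
 * return value above is designed for; "handle_completion" is a
 * hypothetical consumer routine:
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
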
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

1641/**
1642 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1643 * @dev: The device for which the DMA address was created
1644 * @addr: The DMA address
1645 * @size: The size of the region in bytes
1646 * @direction: The direction of the DMA
1647 */
1648static inline void ib_dma_unmap_single(struct ib_device *dev,
1649				       u64 addr, size_t size,
1650				       enum dma_data_direction direction)
1651{
1652	if (dev->dma_ops)
1653		dev->dma_ops->unmap_single(dev, addr, size, direction);
1654	else
1655		dma_unmap_single(dev->dma_device, addr, size, direction);
1656}
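
/*
 * Editor's sketch (not part of the original header): the expected
 * lifecycle of a single-buffer streaming mapping.  Every
 * ib_dma_map_single() should be checked with ib_dma_mapping_error()
 * and balanced by an ib_dma_unmap_single() with the same size and
 * direction.
 */
static int example_map_use_unmap(struct ib_device *dev, void *buf,
				 size_t len)
{
	u64 dma;

	dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... post a work request whose ib_sge.addr is 'dma' ... */

	ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}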
1657
1658static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
1659					  void *cpu_addr, size_t size,
1660					  enum dma_data_direction direction,
1661					  struct dma_attrs *attrs)
1662{
1663	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
1664				    direction, attrs);
1665}
1666
1667static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1668					     u64 addr, size_t size,
1669					     enum dma_data_direction direction,
1670					     struct dma_attrs *attrs)
1671{
1672	return dma_unmap_single_attrs(dev->dma_device, addr, size,
1673				      direction, attrs);
1674}
1675
1676/**
1677 * ib_dma_map_page - Map a physical page to DMA address
1678 * @dev: The device for which the dma_addr is to be created
1679 * @page: The page to be mapped
1680 * @offset: The offset within the page
1681 * @size: The size of the region in bytes
1682 * @direction: The direction of the DMA
1683 */
1684static inline u64 ib_dma_map_page(struct ib_device *dev,
1685				  struct page *page,
1686				  unsigned long offset,
1687				  size_t size,
1688					 enum dma_data_direction direction)
1689{
1690	if (dev->dma_ops)
1691		return dev->dma_ops->map_page(dev, page, offset, size, direction);
1692	return dma_map_page(dev->dma_device, page, offset, size, direction);
1693}
1694
1695/**
1696 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1697 * @dev: The device for which the DMA address was created
1698 * @addr: The DMA address
1699 * @size: The size of the region in bytes
1700 * @direction: The direction of the DMA
1701 */
1702static inline void ib_dma_unmap_page(struct ib_device *dev,
1703				     u64 addr, size_t size,
1704				     enum dma_data_direction direction)
1705{
1706	if (dev->dma_ops)
1707		dev->dma_ops->unmap_page(dev, addr, size, direction);
1708	else
1709		dma_unmap_page(dev->dma_device, addr, size, direction);
1710}
1711
1712/**
1713 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1714 * @dev: The device for which the DMA addresses are to be created
1715 * @sg: The array of scatter/gather entries
1716 * @nents: The number of scatter/gather entries
1717 * @direction: The direction of the DMA
1718 */
1719static inline int ib_dma_map_sg(struct ib_device *dev,
1720				struct scatterlist *sg, int nents,
1721				enum dma_data_direction direction)
1722{
1723	if (dev->dma_ops)
1724		return dev->dma_ops->map_sg(dev, sg, nents, direction);
1725	return dma_map_sg(dev->dma_device, sg, nents, direction);
1726}
1727
1728/**
1729 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1730 * @dev: The device for which the DMA addresses were created
1731 * @sg: The array of scatter/gather entries
1732 * @nents: The number of scatter/gather entries
1733 * @direction: The direction of the DMA
1734 */
1735static inline void ib_dma_unmap_sg(struct ib_device *dev,
1736				   struct scatterlist *sg, int nents,
1737				   enum dma_data_direction direction)
1738{
1739	if (dev->dma_ops)
1740		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1741	else
1742		dma_unmap_sg(dev->dma_device, sg, nents, direction);
1743}
1744
1745static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1746				      struct scatterlist *sg, int nents,
1747				      enum dma_data_direction direction,
1748				      struct dma_attrs *attrs)
1749{
1750	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1751}
1752
1753static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1754					 struct scatterlist *sg, int nents,
1755					 enum dma_data_direction direction,
1756					 struct dma_attrs *attrs)
1757{
1758	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1759}
1760/**
1761 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1762 * @dev: The device for which the DMA addresses were created
1763 * @sg: The scatter/gather entry
1764 */
1765static inline u64 ib_sg_dma_address(struct ib_device *dev,
1766				    struct scatterlist *sg)
1767{
1768	if (dev->dma_ops)
1769		return dev->dma_ops->dma_address(dev, sg);
1770	return sg_dma_address(sg);
1771}
1772
1773/**
1774 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1775 * @dev: The device for which the DMA addresses were created
1776 * @sg: The scatter/gather entry
1777 */
1778static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1779					 struct scatterlist *sg)
1780{
1781	if (dev->dma_ops)
1782		return dev->dma_ops->dma_len(dev, sg);
1783	return sg_dma_len(sg);
1784}
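
/*
 * Editor's sketch (not part of the original header): mapping a
 * scatterlist and walking the mapped entries.  The count returned by
 * ib_dma_map_sg() may be smaller than nents if entries were coalesced,
 * and the accessors above -- not the raw sg_dma_address()/sg_dma_len()
 * macros -- must be used so devices with custom dma_ops keep working.
 */
static int example_map_and_walk_sg(struct ib_device *dev,
				   struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = ib_dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i)
		pr_debug("entry %d: addr 0x%llx len %u\n", i,
			 (unsigned long long)ib_sg_dma_address(dev, sg),
			 ib_sg_dma_len(dev, sg));

	ib_dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
	return 0;
}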
1785
1786/**
1787 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1788 * @dev: The device for which the DMA address was created
1789 * @addr: The DMA address
1790 * @size: The size of the region in bytes
1791 * @dir: The direction of the DMA
1792 */
1793static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1794					      u64 addr,
1795					      size_t size,
1796					      enum dma_data_direction dir)
1797{
1798	if (dev->dma_ops)
1799		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1800	else
1801		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1802}
1803
1804/**
1805 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1806 * @dev: The device for which the DMA address was created
1807 * @addr: The DMA address
1808 * @size: The size of the region in bytes
1809 * @dir: The direction of the DMA
1810 */
1811static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1812						 u64 addr,
1813						 size_t size,
1814						 enum dma_data_direction dir)
1815{
1816	if (dev->dma_ops)
1817		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1818	else
1819		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1820}
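
/*
 * Editor's sketch (not part of the original header): bracketing CPU
 * access to a streaming buffer that the device writes into (e.g. a
 * receive buffer).  Ownership must be handed back to the device before
 * the buffer is reused for another receive.
 */
static void example_touch_recv_buffer(struct ib_device *dev, u64 dma,
				      void *cpu_addr, size_t len)
{
	ib_dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read the data at cpu_addr ... */
	ib_dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}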
1821
1822/**
1823 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1824 * @dev: The device for which the DMA address is requested
1825 * @size: The size of the region to allocate in bytes
1826 * @dma_handle: A pointer for returning the DMA address of the region
1827 * @flag: memory allocator flags
1828 */
1829static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1830					   size_t size,
1831					   u64 *dma_handle,
1832					   gfp_t flag)
1833{
1834	if (dev->dma_ops)
1835		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1836	else {
1837		dma_addr_t handle;
1838		void *ret;
1839
1840		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1841		*dma_handle = handle;
1842		return ret;
1843	}
1844}
1845
1846/**
1847 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1848 * @dev: The device for which the DMA addresses were allocated
1849 * @size: The size of the region
1850 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1851 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1852 */
1853static inline void ib_dma_free_coherent(struct ib_device *dev,
1854					size_t size, void *cpu_addr,
1855					u64 dma_handle)
1856{
1857	if (dev->dma_ops)
1858		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1859	else
1860		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1861}
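
/*
 * Editor's sketch (not part of the original header): coherent memory
 * needs no sync calls; it stays mapped for the lifetime of the buffer
 * and is released with the same size and handle it was allocated with.
 */
static void *example_alloc_ring(struct ib_device *dev, size_t size,
				u64 *dma_handle)
{
	return ib_dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

static void example_free_ring(struct ib_device *dev, size_t size,
			      void *ring, u64 dma_handle)
{
	ib_dma_free_coherent(dev, size, ring, dma_handle);
}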
1862
1863/**
1864 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1865 *   by an HCA.
1866 * @pd: The protection domain assigned to the registered region.
1867 * @phys_buf_array: Specifies a list of physical buffers to use in the
1868 *   memory region.
1869 * @num_phys_buf: Specifies the size of the phys_buf_array.
1870 * @mr_access_flags: Specifies the memory access rights.
1871 * @iova_start: The offset of the region's starting I/O virtual address.
1872 */
1873struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1874			     struct ib_phys_buf *phys_buf_array,
1875			     int num_phys_buf,
1876			     int mr_access_flags,
1877			     u64 *iova_start);
1878
1879/**
1880 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1881 *   Conceptually, this call performs a deregister memory region operation
1882 *   followed by a register physical memory region operation.  Where possible,
1883 *   resources are reused instead of deallocated and reallocated.
1884 * @mr: The memory region to modify.
1885 * @mr_rereg_mask: A bit-mask used to indicate which of the following
1886 *   properties of the memory region are being modified.
1887 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1888 *   the new protection domain to associate with the memory region,
1889 *   otherwise, this parameter is ignored.
1890 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1891 *   field specifies a list of physical buffers to use in the new
1892 *   translation, otherwise, this parameter is ignored.
1893 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1894 *   field specifies the size of the phys_buf_array, otherwise, this
1895 *   parameter is ignored.
1896 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1897 *   field specifies the new memory access rights, otherwise, this
1898 *   parameter is ignored.
1899 * @iova_start: The offset of the region's starting I/O virtual address.
1900 */
1901int ib_rereg_phys_mr(struct ib_mr *mr,
1902		     int mr_rereg_mask,
1903		     struct ib_pd *pd,
1904		     struct ib_phys_buf *phys_buf_array,
1905		     int num_phys_buf,
1906		     int mr_access_flags,
1907		     u64 *iova_start);
1908
1909/**
1910 * ib_query_mr - Retrieves information about a specific memory region.
1911 * @mr: The memory region to retrieve information about.
1912 * @mr_attr: The attributes of the specified memory region.
1913 */
1914int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1915
1916/**
1917 * ib_dereg_mr - Deregisters a memory region and removes it from the
1918 *   HCA translation table.
1919 * @mr: The memory region to deregister.
1920 */
1921int ib_dereg_mr(struct ib_mr *mr);
1922
1923/**
1924 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
1925 *   IB_WR_FAST_REG_MR send work request.
1926 * @pd: The protection domain associated with the region.
1927 * @max_page_list_len: requested max physical buffer list length to be
1928 *   used with fast register work requests for this MR.
1929 */
1930struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
1931
1932/**
1933 * ib_alloc_fast_reg_page_list - Allocates a page list array
1934 * @device - ib device pointer.
1935 * @page_list_len - size of the page list array to be allocated.
1936 *
1937 * This allocates and returns a struct ib_fast_reg_page_list * and a
1938 * page_list array that is at least page_list_len in size.  The actual
1939 * size is returned in max_page_list_len.  The caller is responsible
1940 * for initializing the contents of the page_list array before posting
1941 * a send work request with the IB_WR_FAST_REG_MR opcode.
1942 *
1943 * The page_list array entries must be translated using one of the
1944 * ib_dma_*() functions just like the addresses passed to
1945 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
1946 * ib_fast_reg_page_list must not be modified by the caller until the
1947 * IB_WC_FAST_REG_MR work request completes.
1948 */
1949struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
1950				struct ib_device *device, int page_list_len);
1951
1952/**
1953 * ib_free_fast_reg_page_list - Deallocates a previously allocated
1954 *   page list array.
1955 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
1956 */
1957void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
1958
1959/**
1960 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
1961 *   R_Key and L_Key.
1962 * @mr - struct ib_mr pointer to be updated.
1963 * @newkey - new key to be used.
1964 */
1965static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
1966{
1967	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
1968	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
1969}
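
/*
 * Editor's sketch (not part of the original header): consumers commonly
 * bump the low byte of the key before each reuse of a fast-reg MR so
 * that stale accesses carrying the old R_Key are caught by the HCA.
 * The increment policy here is an assumption, not mandated by the API.
 */
static void example_refresh_fastreg_key(struct ib_mr *mr)
{
	u8 next_key = (mr->lkey & 0xff) + 1;

	ib_update_fast_reg_key(mr, next_key);
	/* mr->rkey now carries the new key for the next fast-reg WR */
}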
1970
1971/**
1972 * ib_alloc_mw - Allocates a memory window.
1973 * @pd: The protection domain associated with the memory window.
1974 */
1975struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1976
1977/**
1978 * ib_bind_mw - Posts a work request to the send queue of the specified
1979 *   QP, which binds the memory window to the given address range and
1980 *   remote access attributes.
1981 * @qp: QP to post the bind work request on.
1982 * @mw: The memory window to bind.
1983 * @mw_bind: Specifies information about the memory window, including
1984 *   its address range, remote access rights, and associated memory region.
1985 */
1986static inline int ib_bind_mw(struct ib_qp *qp,
1987			     struct ib_mw *mw,
1988			     struct ib_mw_bind *mw_bind)
1989{
1990	/* XXX reference counting in corresponding MR? */
1991	return mw->device->bind_mw ?
1992		mw->device->bind_mw(qp, mw, mw_bind) :
1993		-ENOSYS;
1994}
1995
1996/**
1997 * ib_dealloc_mw - Deallocates a memory window.
1998 * @mw: The memory window to deallocate.
1999 */
2000int ib_dealloc_mw(struct ib_mw *mw);
2001
2002/**
2003 * ib_alloc_fmr - Allocates an unmapped fast memory region.
2004 * @pd: The protection domain associated with the unmapped region.
2005 * @mr_access_flags: Specifies the memory access rights.
2006 * @fmr_attr: Attributes of the unmapped region.
2007 *
2008 * A fast memory region must be mapped before it can be used as part of
2009 * a work request.
2010 */
2011struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2012			    int mr_access_flags,
2013			    struct ib_fmr_attr *fmr_attr);
2014
2015/**
2016 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2017 * @fmr: The fast memory region to associate with the pages.
2018 * @page_list: An array of physical pages to map to the fast memory region.
2019 * @list_len: The number of pages in page_list.
2020 * @iova: The I/O virtual address to use with the mapped region.
2021 */
2022static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2023				  u64 *page_list, int list_len,
2024				  u64 iova)
2025{
2026	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2027}
2028
2029/**
2030 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2031 * @fmr_list: A linked list of fast memory regions to unmap.
2032 */
2033int ib_unmap_fmr(struct list_head *fmr_list);
2034
2035/**
2036 * ib_dealloc_fmr - Deallocates a fast memory region.
2037 * @fmr: The fast memory region to deallocate.
2038 */
2039int ib_dealloc_fmr(struct ib_fmr *fmr);
2040
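/*
 * Editor's sketch (not part of the original header): the FMR lifecycle.
 * An FMR is allocated once, (re)mapped cheaply up to max_maps times,
 * unmapped in batches via ib_unmap_fmr(), and finally deallocated.
 * The attribute values below are illustrative assumptions.
 */
static struct ib_fmr *example_fmr_setup(struct ib_pd *pd, u64 *pages,
					int npages, u64 iova)
{
	struct ib_fmr_attr attr = {
		.max_pages  = npages,
		.max_maps   = 32,		/* assumed remap budget */
		.page_shift = PAGE_SHIFT,
	};
	struct ib_fmr *fmr;
	int ret;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
	if (IS_ERR(fmr))
		return fmr;

	ret = ib_map_phys_fmr(fmr, pages, npages, iova);
	if (ret) {
		ib_dealloc_fmr(fmr);
		return ERR_PTR(ret);
	}
	return fmr;
}
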
2041/**
2042 * ib_attach_mcast - Attaches the specified QP to a multicast group.
2043 * @qp: QP to attach to the multicast group.  The QP must be type
2044 *   IB_QPT_UD.
2045 * @gid: Multicast group GID.
2046 * @lid: Multicast group LID in host byte order.
2047 *
2048 * In order to send and receive multicast packets, subnet
2049 * administration must have created the multicast group and configured
2050 * the fabric appropriately.  The port associated with the specified
2051 * QP must also be a member of the multicast group.
2052 */
2053int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2054
2055/**
2056 * ib_detach_mcast - Detaches the specified QP from a multicast group.
2057 * @qp: QP to detach from the multicast group.
2058 * @gid: Multicast group GID.
2059 * @lid: Multicast group LID in host byte order.
2060 */
2061int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
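
/*
 * Editor's sketch (not part of the original header): joining and
 * leaving a multicast group on a UD QP.  The mgid/mlid values are
 * assumed to have been obtained beforehand from subnet administration
 * (e.g. an SA multicast join).
 */
static int example_mcast_roundtrip(struct ib_qp *qp, union ib_gid *mgid,
				   u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... send and receive multicast traffic on the UD QP ... */

	return ib_detach_mcast(qp, mgid, mlid);
}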
2062
2063#endif /* IB_VERBS_H */
v4.17
   1/*
   2 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
   3 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
   5 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   6 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
   7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   8 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   9 *
  10 * This software is available to you under a choice of one of two
  11 * licenses.  You may choose to be licensed under the terms of the GNU
  12 * General Public License (GPL) Version 2, available from the file
  13 * COPYING in the main directory of this source tree, or the
  14 * OpenIB.org BSD license below:
  15 *
  16 *     Redistribution and use in source and binary forms, with or
  17 *     without modification, are permitted provided that the following
  18 *     conditions are met:
  19 *
  20 *      - Redistributions of source code must retain the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer.
  23 *
  24 *      - Redistributions in binary form must reproduce the above
  25 *        copyright notice, this list of conditions and the following
  26 *        disclaimer in the documentation and/or other materials
  27 *        provided with the distribution.
  28 *
  29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36 * SOFTWARE.
  37 */
  38
  39#if !defined(IB_VERBS_H)
  40#define IB_VERBS_H
  41
  42#include <linux/types.h>
  43#include <linux/device.h>
  44#include <linux/mm.h>
  45#include <linux/dma-mapping.h>
  46#include <linux/kref.h>
  47#include <linux/list.h>
  48#include <linux/rwsem.h>
  49#include <linux/scatterlist.h>
  50#include <linux/workqueue.h>
  51#include <linux/socket.h>
  52#include <linux/irq_poll.h>
  53#include <uapi/linux/if_ether.h>
  54#include <net/ipv6.h>
  55#include <net/ip.h>
  56#include <linux/string.h>
  57#include <linux/slab.h>
  58#include <linux/netdevice.h>
  59
  60#include <linux/if_link.h>
  61#include <linux/atomic.h>
  62#include <linux/mmu_notifier.h>
  63#include <linux/uaccess.h>
  64#include <linux/cgroup_rdma.h>
  65#include <uapi/rdma/ib_user_verbs.h>
  66#include <rdma/restrack.h>
  67#include <uapi/rdma/rdma_user_ioctl.h>
  68#include <uapi/rdma/ib_user_ioctl_verbs.h>
  69
  70#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
  71
  72extern struct workqueue_struct *ib_wq;
  73extern struct workqueue_struct *ib_comp_wq;
  74
  75union ib_gid {
  76	u8	raw[16];
  77	struct {
  78		__be64	subnet_prefix;
  79		__be64	interface_id;
  80	} global;
  81};
  82
  83extern union ib_gid zgid;
  84
  85enum ib_gid_type {
  86	/* If link layer is Ethernet, this is RoCE V1 */
  87	IB_GID_TYPE_IB        = 0,
  88	IB_GID_TYPE_ROCE      = 0,
  89	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
  90	IB_GID_TYPE_SIZE
  91};
  92
  93#define ROCE_V2_UDP_DPORT      4791
  94struct ib_gid_attr {
  95	struct net_device	*ndev;
  96	struct ib_device	*device;
  97	enum ib_gid_type	gid_type;
  98	u16			index;
  99	u8			port_num;
 100};
 101
 102enum rdma_node_type {
 103	/* IB values map to NodeInfo:NodeType. */
 104	RDMA_NODE_IB_CA 	= 1,
 105	RDMA_NODE_IB_SWITCH,
 106	RDMA_NODE_IB_ROUTER,
 107	RDMA_NODE_RNIC,
 108	RDMA_NODE_USNIC,
 109	RDMA_NODE_USNIC_UDP,
 110};
 111
 112enum {
 113	/* set the locally administered indication */
 114	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
 115};
 116
 117enum rdma_transport_type {
 118	RDMA_TRANSPORT_IB,
 119	RDMA_TRANSPORT_IWARP,
 120	RDMA_TRANSPORT_USNIC,
 121	RDMA_TRANSPORT_USNIC_UDP
 122};
 123
 124enum rdma_protocol_type {
 125	RDMA_PROTOCOL_IB,
 126	RDMA_PROTOCOL_IBOE,
 127	RDMA_PROTOCOL_IWARP,
 128	RDMA_PROTOCOL_USNIC_UDP
 129};
 130
 131__attribute_const__ enum rdma_transport_type
 132rdma_node_get_transport(enum rdma_node_type node_type);
 133
 134enum rdma_network_type {
 135	RDMA_NETWORK_IB,
 136	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
 137	RDMA_NETWORK_IPV4,
 138	RDMA_NETWORK_IPV6
 139};
 140
 141static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
 142{
 143	if (network_type == RDMA_NETWORK_IPV4 ||
 144	    network_type == RDMA_NETWORK_IPV6)
 145		return IB_GID_TYPE_ROCE_UDP_ENCAP;
 146
 147	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
 148	return IB_GID_TYPE_IB;
 149}
 150
 151static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
 152							    union ib_gid *gid)
 153{
 154	if (gid_type == IB_GID_TYPE_IB)
 155		return RDMA_NETWORK_IB;
 156
 157	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
 158		return RDMA_NETWORK_IPV4;
 159	else
 160		return RDMA_NETWORK_IPV6;
 161}
 162
 163enum rdma_link_layer {
 164	IB_LINK_LAYER_UNSPECIFIED,
 165	IB_LINK_LAYER_INFINIBAND,
 166	IB_LINK_LAYER_ETHERNET,
 167};
 168
 169enum ib_device_cap_flags {
 170	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
 171	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
 172	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
 173	IB_DEVICE_RAW_MULTI			= (1 << 3),
 174	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
 175	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
 176	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
 177	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
 178	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
 179	/* Not in use, former INIT_TYPE		= (1 << 9),*/
 180	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
 181	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
 182	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
 183	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
 184	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),
 185
 186	/*
 187	 * This device supports a per-device lkey or stag that can be
 188	 * used without performing a memory registration for the local
 189	 * memory.  Note that ULPs should never check this flag, but
 190	 * instead use the local_dma_lkey flag in the ib_pd structure,
 191	 * which will always contain a usable lkey.
 192	 */
 193	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
 194	/* Reserved, old SEND_W_INV		= (1 << 16),*/
 195	IB_DEVICE_MEM_WINDOW			= (1 << 17),
 196	/*
 197	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
 198	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
 199	 * messages and can verify the validity of checksum for
 200	 * incoming messages.  Setting this flag implies that the
 201	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
 202	 */
 203	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
 204	IB_DEVICE_UD_TSO			= (1 << 19),
 205	IB_DEVICE_XRC				= (1 << 20),
 206
 207	/*
 208	 * This device supports the IB "base memory management extension",
 209	 * which includes support for fast registrations (IB_WR_REG_MR,
 210	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
 211	 * also be set by any iWarp device which must support FRs to comply
 212	 * with the iWarp verbs spec.  iWarp devices also support the
 213	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
 214	 * stag.
 215	 */
 216	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
 217	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
 218	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
 219	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
 220	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
 221	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
 222	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
 223	/*
 224	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
 225	 * support execution of WQEs that involve synchronization
 226	 * of I/O operations with single completion queue managed
 227	 * by hardware.
 228	 */
 229	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
 230	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
 231	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
 232	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
 233	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
 234	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
 235	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
 236	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
 237	IB_DEVICE_RDMA_NETDEV_OPA_VNIC		= (1ULL << 35),
 238	/* The device supports padding incoming writes to cacheline. */
 239	IB_DEVICE_PCI_WRITE_END_PADDING		= (1ULL << 36),
 240};
 241
 242enum ib_signature_prot_cap {
 243	IB_PROT_T10DIF_TYPE_1 = 1,
 244	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
 245	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
 246};
 247
 248enum ib_signature_guard_cap {
 249	IB_GUARD_T10DIF_CRC	= 1,
 250	IB_GUARD_T10DIF_CSUM	= 1 << 1,
 251};
 252
 253enum ib_atomic_cap {
 254	IB_ATOMIC_NONE,
 255	IB_ATOMIC_HCA,
 256	IB_ATOMIC_GLOB
 257};
 258
 259enum ib_odp_general_cap_bits {
 260	IB_ODP_SUPPORT		= 1 << 0,
 261	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
 262};
 263
 264enum ib_odp_transport_cap_bits {
 265	IB_ODP_SUPPORT_SEND	= 1 << 0,
 266	IB_ODP_SUPPORT_RECV	= 1 << 1,
 267	IB_ODP_SUPPORT_WRITE	= 1 << 2,
 268	IB_ODP_SUPPORT_READ	= 1 << 3,
 269	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
 270};
 271
 272struct ib_odp_caps {
 273	uint64_t general_caps;
 274	struct {
 275		uint32_t  rc_odp_caps;
 276		uint32_t  uc_odp_caps;
 277		uint32_t  ud_odp_caps;
 278	} per_transport_caps;
 279};
 280
 281struct ib_rss_caps {
 282	/* Corresponding bit will be set if qp type from
 283	 * 'enum ib_qp_type' is supported, e.g.
 284	 * supported_qpts |= 1 << IB_QPT_UD
 285	 */
 286	u32 supported_qpts;
 287	u32 max_rwq_indirection_tables;
 288	u32 max_rwq_indirection_table_size;
 289};
 290
 291enum ib_tm_cap_flags {
 292	/*  Support tag matching on RC transport */
 293	IB_TM_CAP_RC		    = 1 << 0,
 294};
 295
 296struct ib_tm_caps {
 297	/* Max size of RNDV header */
 298	u32 max_rndv_hdr_size;
 299	/* Max number of entries in tag matching list */
 300	u32 max_num_tags;
 301	/* From enum ib_tm_cap_flags */
 302	u32 flags;
 303	/* Max number of outstanding list operations */
 304	u32 max_ops;
 305	/* Max number of SGE in tag matching entry */
 306	u32 max_sge;
 307};
 308
 309struct ib_cq_init_attr {
 310	unsigned int	cqe;
 311	int		comp_vector;
 312	u32		flags;
 313};
 314
 315enum ib_cq_attr_mask {
 316	IB_CQ_MODERATE = 1 << 0,
 317};
 318
 319struct ib_cq_caps {
 320	u16     max_cq_moderation_count;
 321	u16     max_cq_moderation_period;
 322};
 323
 324struct ib_dm_mr_attr {
 325	u64		length;
 326	u64		offset;
 327	u32		access_flags;
 328};
 329
 330struct ib_dm_alloc_attr {
 331	u64	length;
 332	u32	alignment;
 333	u32	flags;
 334};
 335
 336struct ib_device_attr {
 337	u64			fw_ver;
 338	__be64			sys_image_guid;
 339	u64			max_mr_size;
 340	u64			page_size_cap;
 341	u32			vendor_id;
 342	u32			vendor_part_id;
 343	u32			hw_ver;
 344	int			max_qp;
 345	int			max_qp_wr;
 346	u64			device_cap_flags;
 347	int			max_sge;
 348	int			max_sge_rd;
 349	int			max_cq;
 350	int			max_cqe;
 351	int			max_mr;
 352	int			max_pd;
 353	int			max_qp_rd_atom;
 354	int			max_ee_rd_atom;
 355	int			max_res_rd_atom;
 356	int			max_qp_init_rd_atom;
 357	int			max_ee_init_rd_atom;
 358	enum ib_atomic_cap	atomic_cap;
 359	enum ib_atomic_cap	masked_atomic_cap;
 360	int			max_ee;
 361	int			max_rdd;
 362	int			max_mw;
 363	int			max_raw_ipv6_qp;
 364	int			max_raw_ethy_qp;
 365	int			max_mcast_grp;
 366	int			max_mcast_qp_attach;
 367	int			max_total_mcast_qp_attach;
 368	int			max_ah;
 369	int			max_fmr;
 370	int			max_map_per_fmr;
 371	int			max_srq;
 372	int			max_srq_wr;
 373	int			max_srq_sge;
 374	unsigned int		max_fast_reg_page_list_len;
 375	u16			max_pkeys;
 376	u8			local_ca_ack_delay;
 377	int			sig_prot_cap;
 378	int			sig_guard_cap;
 379	struct ib_odp_caps	odp_caps;
 380	uint64_t		timestamp_mask;
 381	uint64_t		hca_core_clock; /* in KHZ */
 382	struct ib_rss_caps	rss_caps;
 383	u32			max_wq_type_rq;
 384	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
 385	struct ib_tm_caps	tm_caps;
 386	struct ib_cq_caps       cq_caps;
 387	u64			max_dm_size;
 388};
 389
 390enum ib_mtu {
 391	IB_MTU_256  = 1,
 392	IB_MTU_512  = 2,
 393	IB_MTU_1024 = 3,
 394	IB_MTU_2048 = 4,
 395	IB_MTU_4096 = 5
 396};
 397
 398static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
 399{
 400	switch (mtu) {
 401	case IB_MTU_256:  return  256;
 402	case IB_MTU_512:  return  512;
 403	case IB_MTU_1024: return 1024;
 404	case IB_MTU_2048: return 2048;
 405	case IB_MTU_4096: return 4096;
 406	default: 	  return -1;
 407	}
 408}
 409
 410static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
 411{
 412	if (mtu >= 4096)
 413		return IB_MTU_4096;
 414	else if (mtu >= 2048)
 415		return IB_MTU_2048;
 416	else if (mtu >= 1024)
 417		return IB_MTU_1024;
 418	else if (mtu >= 512)
 419		return IB_MTU_512;
 420	else
 421		return IB_MTU_256;
 422}
 423
 424enum ib_port_state {
 425	IB_PORT_NOP		= 0,
 426	IB_PORT_DOWN		= 1,
 427	IB_PORT_INIT		= 2,
 428	IB_PORT_ARMED		= 3,
 429	IB_PORT_ACTIVE		= 4,
 430	IB_PORT_ACTIVE_DEFER	= 5
 431};
 432
 433enum ib_port_cap_flags {
 434	IB_PORT_SM				= 1 <<  1,
 435	IB_PORT_NOTICE_SUP			= 1 <<  2,
 436	IB_PORT_TRAP_SUP			= 1 <<  3,
 437	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
 438	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
 439	IB_PORT_SL_MAP_SUP			= 1 <<  6,
 440	IB_PORT_MKEY_NVRAM			= 1 <<  7,
 441	IB_PORT_PKEY_NVRAM			= 1 <<  8,
 442	IB_PORT_LED_INFO_SUP			= 1 <<  9,
 443	IB_PORT_SM_DISABLED			= 1 << 10,
 444	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
 445	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
 446	IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
 447	IB_PORT_CM_SUP				= 1 << 16,
 448	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
 449	IB_PORT_REINIT_SUP			= 1 << 18,
 450	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
 451	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
 452	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
 453	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
 454	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
 455	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
 456	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
 457	IB_PORT_IP_BASED_GIDS			= 1 << 26,
 458};
 459
 460enum ib_port_width {
 461	IB_WIDTH_1X	= 1,
 462	IB_WIDTH_4X	= 2,
 463	IB_WIDTH_8X	= 4,
 464	IB_WIDTH_12X	= 8
 465};
 466
 467static inline int ib_width_enum_to_int(enum ib_port_width width)
 468{
 469	switch (width) {
 470	case IB_WIDTH_1X:  return  1;
 471	case IB_WIDTH_4X:  return  4;
 472	case IB_WIDTH_8X:  return  8;
 473	case IB_WIDTH_12X: return 12;
 474	default: 	  return -1;
 475	}
 476}
 477
 478enum ib_port_speed {
 479	IB_SPEED_SDR	= 1,
 480	IB_SPEED_DDR	= 2,
 481	IB_SPEED_QDR	= 4,
 482	IB_SPEED_FDR10	= 8,
 483	IB_SPEED_FDR	= 16,
 484	IB_SPEED_EDR	= 32,
 485	IB_SPEED_HDR	= 64
 486};
 487
 488/**
 489 * struct rdma_hw_stats
 490 * @lock - Mutex to protect parallel write access to lifespan and values
 491 *    of counters, which are 64 bits wide and not guaranteed to be written
 492 *    atomically on 32-bit systems.
 493 * @timestamp - Used by the core code to track when the last update was
 494 * @lifespan - Used by the core code to determine how old the counters
 495 *   should be before being updated again.  Stored in jiffies, defaults
 496 *   to 10 milliseconds; drivers can override the default by specifying
 497 *   their own value during their allocation routine.
 498 * @names - Array of pointers to static names used for the counters in
 499 *   the sysfs directory.
 500 * @num_counters - How many hardware counters there are.  If names is
 501 *   shorter than this number, a kernel oops will result.  Driver authors
 502 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 503 *   in their code to prevent this.
 504 * @value - Array of u64 counters that are accessed by the sysfs code and
 505 *   filled in by the drivers get_stats routine
 506 */
 507struct rdma_hw_stats {
 508	struct mutex	lock; /* Protect lifespan and values[] */
 509	unsigned long	timestamp;
 510	unsigned long	lifespan;
 511	const char * const *names;
 512	int		num_counters;
 513	u64		value[];
 514};
 515
 516#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
 517/**
 518 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 519 *   for drivers.
 520 * @names - Array of static const char *
 521 * @num_counters - How many elements in array
 522 * @lifespan - How many milliseconds between updates
 523 */
 524static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
 525		const char * const *names, int num_counters,
 526		unsigned long lifespan)
 527{
 528	struct rdma_hw_stats *stats;
 529
 530	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
 531			GFP_KERNEL);
 532	if (!stats)
 533		return NULL;
 534	stats->names = names;
 535	stats->num_counters = num_counters;
 536	stats->lifespan = msecs_to_jiffies(lifespan);
 537
 538	return stats;
 539}
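
/*
 * Editor's sketch (not part of the original header): how a driver might
 * use the helper above.  The counter names and the BUILD_BUG_ON follow
 * the advice in the struct rdma_hw_stats documentation; the names
 * themselves are illustrative assumptions.
 */
static const char * const example_counter_names[] = {
	"rx_packets",
	"tx_packets",
};

static struct rdma_hw_stats *example_alloc_stats(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(example_counter_names) < 2);
	return rdma_alloc_hw_stats_struct(example_counter_names,
					  ARRAY_SIZE(example_counter_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}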
 540
 541
 542/* Define bits for the various functionality this port needs to be supported by
 543 * the core.
 544 */
 545/* Management                           0x00000FFF */
 546#define RDMA_CORE_CAP_IB_MAD            0x00000001
 547#define RDMA_CORE_CAP_IB_SMI            0x00000002
 548#define RDMA_CORE_CAP_IB_CM             0x00000004
 549#define RDMA_CORE_CAP_IW_CM             0x00000008
 550#define RDMA_CORE_CAP_IB_SA             0x00000010
 551#define RDMA_CORE_CAP_OPA_MAD           0x00000020
 552
 553/* Address format                       0x000FF000 */
 554#define RDMA_CORE_CAP_AF_IB             0x00001000
 555#define RDMA_CORE_CAP_ETH_AH            0x00002000
 556#define RDMA_CORE_CAP_OPA_AH            0x00004000
 557
 558/* Protocol                             0xFFF00000 */
 559#define RDMA_CORE_CAP_PROT_IB           0x00100000
 560#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
 561#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
 562#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
 563#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
 564#define RDMA_CORE_CAP_PROT_USNIC        0x02000000
 565
 566#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
 567					| RDMA_CORE_CAP_IB_MAD \
 568					| RDMA_CORE_CAP_IB_SMI \
 569					| RDMA_CORE_CAP_IB_CM  \
 570					| RDMA_CORE_CAP_IB_SA  \
 571					| RDMA_CORE_CAP_AF_IB)
 572#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
 573					| RDMA_CORE_CAP_IB_MAD  \
 574					| RDMA_CORE_CAP_IB_CM   \
 575					| RDMA_CORE_CAP_AF_IB   \
 576					| RDMA_CORE_CAP_ETH_AH)
 577#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
 578					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
 579					| RDMA_CORE_CAP_IB_MAD  \
 580					| RDMA_CORE_CAP_IB_CM   \
 581					| RDMA_CORE_CAP_AF_IB   \
 582					| RDMA_CORE_CAP_ETH_AH)
 583#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
 584					| RDMA_CORE_CAP_IW_CM)
 585#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
 586					| RDMA_CORE_CAP_OPA_MAD)
 587
 588#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)
 589
 590#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
 591
 592struct ib_port_attr {
 593	u64			subnet_prefix;
 594	enum ib_port_state	state;
 595	enum ib_mtu		max_mtu;
 596	enum ib_mtu		active_mtu;
 597	int			gid_tbl_len;
 598	u32			port_cap_flags;
 599	u32			max_msg_sz;
 600	u32			bad_pkey_cntr;
 601	u32			qkey_viol_cntr;
 602	u16			pkey_tbl_len;
 603	u32			sm_lid;
 604	u32			lid;
 605	u8			lmc;
 606	u8			max_vl_num;
 607	u8			sm_sl;
 608	u8			subnet_timeout;
 609	u8			init_type_reply;
 610	u8			active_width;
 611	u8			active_speed;
 612	u8                      phys_state;
 613	bool			grh_required;
 614};
 615
 616enum ib_device_modify_flags {
 617	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
 618	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
 619};
 620
 621#define IB_DEVICE_NODE_DESC_MAX 64
 622
 623struct ib_device_modify {
 624	u64	sys_image_guid;
 625	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
 626};
 627
 628enum ib_port_modify_flags {
 629	IB_PORT_SHUTDOWN		= 1,
 630	IB_PORT_INIT_TYPE		= (1<<2),
 631	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
 632	IB_PORT_OPA_MASK_CHG		= (1<<4)
 633};
 634
 635struct ib_port_modify {
 636	u32	set_port_cap_mask;
 637	u32	clr_port_cap_mask;
 638	u8	init_type;
 639};
 640
 641enum ib_event_type {
 642	IB_EVENT_CQ_ERR,
 643	IB_EVENT_QP_FATAL,
 644	IB_EVENT_QP_REQ_ERR,
 645	IB_EVENT_QP_ACCESS_ERR,
 646	IB_EVENT_COMM_EST,
 647	IB_EVENT_SQ_DRAINED,
 648	IB_EVENT_PATH_MIG,
 649	IB_EVENT_PATH_MIG_ERR,
 650	IB_EVENT_DEVICE_FATAL,
 651	IB_EVENT_PORT_ACTIVE,
 652	IB_EVENT_PORT_ERR,
 653	IB_EVENT_LID_CHANGE,
 654	IB_EVENT_PKEY_CHANGE,
 655	IB_EVENT_SM_CHANGE,
 656	IB_EVENT_SRQ_ERR,
 657	IB_EVENT_SRQ_LIMIT_REACHED,
 658	IB_EVENT_QP_LAST_WQE_REACHED,
 659	IB_EVENT_CLIENT_REREGISTER,
 660	IB_EVENT_GID_CHANGE,
 661	IB_EVENT_WQ_FATAL,
 662};
 663
 664const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
 665
 666struct ib_event {
 667	struct ib_device	*device;
 668	union {
 669		struct ib_cq	*cq;
 670		struct ib_qp	*qp;
 671		struct ib_srq	*srq;
 672		struct ib_wq	*wq;
 673		u8		port_num;
 674	} element;
 675	enum ib_event_type	event;
 676};
 677
 678struct ib_event_handler {
 679	struct ib_device *device;
 680	void            (*handler)(struct ib_event_handler *, struct ib_event *);
 681	struct list_head  list;
 682};
 683
 684#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
 685	do {							\
 686		(_ptr)->device  = _device;			\
 687		(_ptr)->handler = _handler;			\
 688		INIT_LIST_HEAD(&(_ptr)->list);			\
 689	} while (0)
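
/*
 * Editor's sketch (not part of the original header): wiring up an
 * asynchronous event handler with the macro above.  The registration
 * call, ib_register_event_handler(), is declared further down in this
 * header, so it is only referenced in a comment here.
 */
static void example_event_handler(struct ib_event_handler *handler,
				  struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE)
		pr_info("port %u became active\n", event->element.port_num);
}

static struct ib_event_handler example_handler;

static void example_setup_events(struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&example_handler, device, example_event_handler);
	/* ib_register_event_handler(&example_handler); */
}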
 690
 691struct ib_global_route {
 692	union ib_gid	dgid;
 693	u32		flow_label;
 694	u8		sgid_index;
 695	u8		hop_limit;
 696	u8		traffic_class;
 697};
 698
 699struct ib_grh {
 700	__be32		version_tclass_flow;
 701	__be16		paylen;
 702	u8		next_hdr;
 703	u8		hop_limit;
 704	union ib_gid	sgid;
 705	union ib_gid	dgid;
 706};
 707
 708union rdma_network_hdr {
 709	struct ib_grh ibgrh;
 710	struct {
 711		/* The IB spec states that if it's IPv4, the IPv4 header
 712		 * is located in the last 20 bytes of the GRH.
 713		 */
 714		u8		reserved[20];
 715		struct iphdr	roce4grh;
 716	};
 717};
 718
 719#define IB_QPN_MASK		0xFFFFFF
 720
 721enum {
 722	IB_MULTICAST_QPN = 0xffffff
 723};
 724
 725#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
 726#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
 727
 728enum ib_ah_flags {
 729	IB_AH_GRH	= 1
 730};
 731
 732enum ib_rate {
 733	IB_RATE_PORT_CURRENT = 0,
 734	IB_RATE_2_5_GBPS = 2,
 735	IB_RATE_5_GBPS   = 5,
 736	IB_RATE_10_GBPS  = 3,
 737	IB_RATE_20_GBPS  = 6,
 738	IB_RATE_30_GBPS  = 4,
 739	IB_RATE_40_GBPS  = 7,
 740	IB_RATE_60_GBPS  = 8,
 741	IB_RATE_80_GBPS  = 9,
 742	IB_RATE_120_GBPS = 10,
 743	IB_RATE_14_GBPS  = 11,
 744	IB_RATE_56_GBPS  = 12,
 745	IB_RATE_112_GBPS = 13,
 746	IB_RATE_168_GBPS = 14,
 747	IB_RATE_25_GBPS  = 15,
 748	IB_RATE_100_GBPS = 16,
 749	IB_RATE_200_GBPS = 17,
 750	IB_RATE_300_GBPS = 18
 751};
 752
 753/**
 754 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 755 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 756 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 757 * @rate: rate to convert.
 758 */
 759__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
 760
 761/**
 762 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 763 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 764 * @rate: rate to convert.
 765 */
 766__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
 767
 768
 769/**
 770 * enum ib_mr_type - memory region type
 771 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 772 *                            normal registration
 773 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 774 *                            signature operations (data-integrity
 775 *                            capable regions)
 776 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of
 777 *                            registering any arbitrary sg lists (without
 778 *                            the normal mr constraints - see
 779 *                            ib_map_mr_sg)
 780 */
 781enum ib_mr_type {
 782	IB_MR_TYPE_MEM_REG,
 783	IB_MR_TYPE_SIGNATURE,
 784	IB_MR_TYPE_SG_GAPS,
 785};
 786
 787/**
 788 * Signature types
 789 * IB_SIG_TYPE_NONE: Unprotected.
 790 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 791 */
 792enum ib_signature_type {
 793	IB_SIG_TYPE_NONE,
 794	IB_SIG_TYPE_T10_DIF,
 795};
 796
 797/**
 798 * Signature T10-DIF block-guard types
 799 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 800 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 801 */
 802enum ib_t10_dif_bg_type {
 803	IB_T10DIF_CRC,
 804	IB_T10DIF_CSUM
 805};
 806
 807/**
 808 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 809 *     domain.
 810 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 811 * @pi_interval: protection information interval.
 812 * @bg: seed of guard computation.
 813 * @app_tag: application tag of guard block
 814 * @ref_tag: initial guard block reference tag.
 815 * @ref_remap: Indicate whether the reftag increments each block
 816 * @app_escape: Indicate to skip block check if apptag=0xffff
 817 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 818 * @apptag_check_mask: check bitmask of application tag.
 819 */
 820struct ib_t10_dif_domain {
 821	enum ib_t10_dif_bg_type bg_type;
 822	u16			pi_interval;
 823	u16			bg;
 824	u16			app_tag;
 825	u32			ref_tag;
 826	bool			ref_remap;
 827	bool			app_escape;
 828	bool			ref_escape;
 829	u16			apptag_check_mask;
 830};
 831
 832/**
 833 * struct ib_sig_domain - Parameters for signature domain
 834 * @sig_type: specific signature type
 835 * @sig: union of all signature domain attributes that may
 836 *     be used to set domain layout.
 837 */
 838struct ib_sig_domain {
 839	enum ib_signature_type sig_type;
 840	union {
 841		struct ib_t10_dif_domain dif;
 842	} sig;
 843};
 844
 845/**
 846 * struct ib_sig_attrs - Parameters for signature handover operation
 847 * @check_mask: bitmask for signature byte check (8 bytes)
 848 * @mem: memory domain layout descriptor.
 849 * @wire: wire domain layout descriptor.
 850 */
 851struct ib_sig_attrs {
 852	u8			check_mask;
 853	struct ib_sig_domain	mem;
 854	struct ib_sig_domain	wire;
 855};
 856
 857enum ib_sig_err_type {
 858	IB_SIG_BAD_GUARD,
 859	IB_SIG_BAD_REFTAG,
 860	IB_SIG_BAD_APPTAG,
 861};
 862
 863/**
 864 * struct ib_sig_err - signature error descriptor
 865 */
 866struct ib_sig_err {
 867	enum ib_sig_err_type	err_type;
 868	u32			expected;
 869	u32			actual;
 870	u64			sig_err_offset;
 871	u32			key;
 872};
 873
 874enum ib_mr_status_check {
 875	IB_MR_CHECK_SIG_STATUS = 1,
 876};
 877
 878/**
 879 * struct ib_mr_status - Memory region status container
 880 *
 881 * @fail_status: Bitmask of MR checks status. For each
 882 *     failed check a corresponding status bit is set.
 883 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 884 *     failure.
 885 */
 886struct ib_mr_status {
 887	u32		    fail_status;
 888	struct ib_sig_err   sig_err;
 889};
 890
 891/**
 892 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 893 * enum.
 894 * @mult: multiple to convert.
 895 */
 896__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
 897
 898enum rdma_ah_attr_type {
 899	RDMA_AH_ATTR_TYPE_UNDEFINED,
 900	RDMA_AH_ATTR_TYPE_IB,
 901	RDMA_AH_ATTR_TYPE_ROCE,
 902	RDMA_AH_ATTR_TYPE_OPA,
 903};
 904
 905struct ib_ah_attr {
 906	u16			dlid;
 907	u8			src_path_bits;
 908};
 909
 910struct roce_ah_attr {
 911	u8			dmac[ETH_ALEN];
 912};
 913
 914struct opa_ah_attr {
 915	u32			dlid;
 916	u8			src_path_bits;
 917	bool			make_grd;
 918};
 919
 920struct rdma_ah_attr {
 921	struct ib_global_route	grh;
 922	u8			sl;
 923	u8			static_rate;
 924	u8			port_num;
 925	u8			ah_flags;
 926	enum rdma_ah_attr_type type;
 927	union {
 928		struct ib_ah_attr ib;
 929		struct roce_ah_attr roce;
 930		struct opa_ah_attr opa;
 931	};
 932};
 933
 934enum ib_wc_status {
 935	IB_WC_SUCCESS,
 936	IB_WC_LOC_LEN_ERR,
 937	IB_WC_LOC_QP_OP_ERR,
 938	IB_WC_LOC_EEC_OP_ERR,
 939	IB_WC_LOC_PROT_ERR,
 940	IB_WC_WR_FLUSH_ERR,
 941	IB_WC_MW_BIND_ERR,
 942	IB_WC_BAD_RESP_ERR,
 943	IB_WC_LOC_ACCESS_ERR,
 944	IB_WC_REM_INV_REQ_ERR,
 945	IB_WC_REM_ACCESS_ERR,
 946	IB_WC_REM_OP_ERR,
 947	IB_WC_RETRY_EXC_ERR,
 948	IB_WC_RNR_RETRY_EXC_ERR,
 949	IB_WC_LOC_RDD_VIOL_ERR,
 950	IB_WC_REM_INV_RD_REQ_ERR,
 951	IB_WC_REM_ABORT_ERR,
 952	IB_WC_INV_EECN_ERR,
 953	IB_WC_INV_EEC_STATE_ERR,
 954	IB_WC_FATAL_ERR,
 955	IB_WC_RESP_TIMEOUT_ERR,
 956	IB_WC_GENERAL_ERR
 957};
 958
 959const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
 960
 961enum ib_wc_opcode {
 962	IB_WC_SEND,
 963	IB_WC_RDMA_WRITE,
 964	IB_WC_RDMA_READ,
 965	IB_WC_COMP_SWAP,
 966	IB_WC_FETCH_ADD,
 967	IB_WC_LSO,
 968	IB_WC_LOCAL_INV,
 969	IB_WC_REG_MR,
 970	IB_WC_MASKED_COMP_SWAP,
 971	IB_WC_MASKED_FETCH_ADD,
 972/*
 973 * Set value of IB_WC_RECV so consumers can test if a completion is a
 974 * receive by testing (opcode & IB_WC_RECV).
 975 */
 976	IB_WC_RECV			= 1 << 7,
 977	IB_WC_RECV_RDMA_WITH_IMM
 978};
 979
 980enum ib_wc_flags {
 981	IB_WC_GRH		= 1,
 982	IB_WC_WITH_IMM		= (1<<1),
 983	IB_WC_WITH_INVALIDATE	= (1<<2),
 984	IB_WC_IP_CSUM_OK	= (1<<3),
 985	IB_WC_WITH_SMAC		= (1<<4),
 986	IB_WC_WITH_VLAN		= (1<<5),
 987	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
 988};
 989
 990struct ib_wc {
 991	union {
 992		u64		wr_id;
 993		struct ib_cqe	*wr_cqe;
 994	};
 995	enum ib_wc_status	status;
 996	enum ib_wc_opcode	opcode;
 997	u32			vendor_err;
 998	u32			byte_len;
 999	struct ib_qp	       *qp;
1000	union {
1001		__be32		imm_data;
1002		u32		invalidate_rkey;
1003	} ex;
1004	u32			src_qp;
1005	u32			slid;
1006	int			wc_flags;
1007	u16			pkey_index;
1008	u8			sl;
1009	u8			dlid_path_bits;
1010	u8			port_num;	/* valid only for DR SMPs on switches */
1011	u8			smac[ETH_ALEN];
1012	u16			vlan_id;
1013	u8			network_hdr_type;
1014};
1015
1016enum ib_cq_notify_flags {
1017	IB_CQ_SOLICITED			= 1 << 0,
1018	IB_CQ_NEXT_COMP			= 1 << 1,
1019	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1020	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
1021};
1022
1023enum ib_srq_type {
1024	IB_SRQT_BASIC,
1025	IB_SRQT_XRC,
1026	IB_SRQT_TM,
1027};
1028
1029static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1030{
1031	return srq_type == IB_SRQT_XRC ||
1032	       srq_type == IB_SRQT_TM;
1033}
1034
1035enum ib_srq_attr_mask {
1036	IB_SRQ_MAX_WR	= 1 << 0,
1037	IB_SRQ_LIMIT	= 1 << 1,
1038};
1039
1040struct ib_srq_attr {
1041	u32	max_wr;
1042	u32	max_sge;
1043	u32	srq_limit;
1044};
1045
1046struct ib_srq_init_attr {
1047	void		      (*event_handler)(struct ib_event *, void *);
1048	void		       *srq_context;
1049	struct ib_srq_attr	attr;
1050	enum ib_srq_type	srq_type;
1051
1052	struct {
1053		struct ib_cq   *cq;
1054		union {
1055			struct {
1056				struct ib_xrcd *xrcd;
1057			} xrc;
1058
1059			struct {
1060				u32		max_num_tags;
1061			} tag_matching;
1062		};
1063	} ext;
1064};
1065
1066struct ib_qp_cap {
1067	u32	max_send_wr;
1068	u32	max_recv_wr;
1069	u32	max_send_sge;
1070	u32	max_recv_sge;
1071	u32	max_inline_data;
1072
1073	/*
1074	 * Maximum number of rdma_rw_ctx structures in flight at a time.
1075	 * ib_create_qp() will calculate the right number of needed WRs
1076	 * and MRs based on this.
1077	 */
1078	u32	max_rdma_ctxs;
1079};
1080
1081enum ib_sig_type {
1082	IB_SIGNAL_ALL_WR,
1083	IB_SIGNAL_REQ_WR
1084};
1085
1086enum ib_qp_type {
1087	/*
1088	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1089	 * here (and in that order) since the MAD layer uses them as
1090	 * indices into a 2-entry table.
1091	 */
1092	IB_QPT_SMI,
1093	IB_QPT_GSI,
1094
1095	IB_QPT_RC,
1096	IB_QPT_UC,
1097	IB_QPT_UD,
1098	IB_QPT_RAW_IPV6,
1099	IB_QPT_RAW_ETHERTYPE,
1100	IB_QPT_RAW_PACKET = 8,
1101	IB_QPT_XRC_INI = 9,
1102	IB_QPT_XRC_TGT,
1103	IB_QPT_MAX,
1104	IB_QPT_DRIVER = 0xFF,
1105	/* Reserve a range for qp types internal to the low level driver.
1106	 * These qp types will not be visible at the IB core layer, so the
1107	 * IB_QPT_MAX usages should not be affected in the core layer
1108	 */
1109	IB_QPT_RESERVED1 = 0x1000,
1110	IB_QPT_RESERVED2,
1111	IB_QPT_RESERVED3,
1112	IB_QPT_RESERVED4,
1113	IB_QPT_RESERVED5,
1114	IB_QPT_RESERVED6,
1115	IB_QPT_RESERVED7,
1116	IB_QPT_RESERVED8,
1117	IB_QPT_RESERVED9,
1118	IB_QPT_RESERVED10,
1119};
1120
1121enum ib_qp_create_flags {
1122	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1123	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
1124	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1125	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1126	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1127	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1128	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
1129	/* FREE					= 1 << 7, */
1130	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
1131	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
1132	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
1133	IB_QP_CREATE_PCI_WRITE_END_PADDING	= 1 << 11,
1134	/* reserve bits 26-31 for low level drivers' internal use */
1135	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1136	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1137};
1138
1139/*
1140 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1141 * callback to destroy the passed in QP.
1142 */
1143
1144struct ib_qp_init_attr {
1145	void                  (*event_handler)(struct ib_event *, void *);
1146	void		       *qp_context;
1147	struct ib_cq	       *send_cq;
1148	struct ib_cq	       *recv_cq;
1149	struct ib_srq	       *srq;
1150	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1151	struct ib_qp_cap	cap;
1152	enum ib_sig_type	sq_sig_type;
1153	enum ib_qp_type		qp_type;
1154	enum ib_qp_create_flags	create_flags;
1155
1156	/*
1157	 * Only needed for special QP types, or when using the RW API.
1158	 */
1159	u8			port_num;
1160	struct ib_rwq_ind_table *rwq_ind_tbl;
1161	u32			source_qpn;
1162};
1163
1164struct ib_qp_open_attr {
1165	void                  (*event_handler)(struct ib_event *, void *);
1166	void		       *qp_context;
1167	u32			qp_num;
1168	enum ib_qp_type		qp_type;
1169};
1170
1171enum ib_rnr_timeout {
1172	IB_RNR_TIMER_655_36 =  0,
1173	IB_RNR_TIMER_000_01 =  1,
1174	IB_RNR_TIMER_000_02 =  2,
1175	IB_RNR_TIMER_000_03 =  3,
1176	IB_RNR_TIMER_000_04 =  4,
1177	IB_RNR_TIMER_000_06 =  5,
1178	IB_RNR_TIMER_000_08 =  6,
1179	IB_RNR_TIMER_000_12 =  7,
1180	IB_RNR_TIMER_000_16 =  8,
1181	IB_RNR_TIMER_000_24 =  9,
1182	IB_RNR_TIMER_000_32 = 10,
1183	IB_RNR_TIMER_000_48 = 11,
1184	IB_RNR_TIMER_000_64 = 12,
1185	IB_RNR_TIMER_000_96 = 13,
1186	IB_RNR_TIMER_001_28 = 14,
1187	IB_RNR_TIMER_001_92 = 15,
1188	IB_RNR_TIMER_002_56 = 16,
1189	IB_RNR_TIMER_003_84 = 17,
1190	IB_RNR_TIMER_005_12 = 18,
1191	IB_RNR_TIMER_007_68 = 19,
1192	IB_RNR_TIMER_010_24 = 20,
1193	IB_RNR_TIMER_015_36 = 21,
1194	IB_RNR_TIMER_020_48 = 22,
1195	IB_RNR_TIMER_030_72 = 23,
1196	IB_RNR_TIMER_040_96 = 24,
1197	IB_RNR_TIMER_061_44 = 25,
1198	IB_RNR_TIMER_081_92 = 26,
1199	IB_RNR_TIMER_122_88 = 27,
1200	IB_RNR_TIMER_163_84 = 28,
1201	IB_RNR_TIMER_245_76 = 29,
1202	IB_RNR_TIMER_327_68 = 30,
1203	IB_RNR_TIMER_491_52 = 31
1204};
1205
1206enum ib_qp_attr_mask {
1207	IB_QP_STATE			= 1,
1208	IB_QP_CUR_STATE			= (1<<1),
1209	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1210	IB_QP_ACCESS_FLAGS		= (1<<3),
1211	IB_QP_PKEY_INDEX		= (1<<4),
1212	IB_QP_PORT			= (1<<5),
1213	IB_QP_QKEY			= (1<<6),
1214	IB_QP_AV			= (1<<7),
1215	IB_QP_PATH_MTU			= (1<<8),
1216	IB_QP_TIMEOUT			= (1<<9),
1217	IB_QP_RETRY_CNT			= (1<<10),
1218	IB_QP_RNR_RETRY			= (1<<11),
1219	IB_QP_RQ_PSN			= (1<<12),
1220	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1221	IB_QP_ALT_PATH			= (1<<14),
1222	IB_QP_MIN_RNR_TIMER		= (1<<15),
1223	IB_QP_SQ_PSN			= (1<<16),
1224	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1225	IB_QP_PATH_MIG_STATE		= (1<<18),
1226	IB_QP_CAP			= (1<<19),
1227	IB_QP_DEST_QPN			= (1<<20),
1228	IB_QP_RESERVED1			= (1<<21),
1229	IB_QP_RESERVED2			= (1<<22),
1230	IB_QP_RESERVED3			= (1<<23),
1231	IB_QP_RESERVED4			= (1<<24),
1232	IB_QP_RATE_LIMIT		= (1<<25),
1233};
1234
1235enum ib_qp_state {
1236	IB_QPS_RESET,
1237	IB_QPS_INIT,
1238	IB_QPS_RTR,
1239	IB_QPS_RTS,
1240	IB_QPS_SQD,
1241	IB_QPS_SQE,
1242	IB_QPS_ERR
1243};
1244
1245enum ib_mig_state {
1246	IB_MIG_MIGRATED,
1247	IB_MIG_REARM,
1248	IB_MIG_ARMED
1249};
1250
1251enum ib_mw_type {
1252	IB_MW_TYPE_1 = 1,
1253	IB_MW_TYPE_2 = 2
1254};
1255
1256struct ib_qp_attr {
1257	enum ib_qp_state	qp_state;
1258	enum ib_qp_state	cur_qp_state;
1259	enum ib_mtu		path_mtu;
1260	enum ib_mig_state	path_mig_state;
1261	u32			qkey;
1262	u32			rq_psn;
1263	u32			sq_psn;
1264	u32			dest_qp_num;
1265	int			qp_access_flags;
1266	struct ib_qp_cap	cap;
1267	struct rdma_ah_attr	ah_attr;
1268	struct rdma_ah_attr	alt_ah_attr;
1269	u16			pkey_index;
1270	u16			alt_pkey_index;
1271	u8			en_sqd_async_notify;
1272	u8			sq_draining;
1273	u8			max_rd_atomic;
1274	u8			max_dest_rd_atomic;
1275	u8			min_rnr_timer;
1276	u8			port_num;
1277	u8			timeout;
1278	u8			retry_cnt;
1279	u8			rnr_retry;
1280	u8			alt_port_num;
1281	u8			alt_timeout;
1282	u32			rate_limit;
1283};
1284
1285enum ib_wr_opcode {
1286	IB_WR_RDMA_WRITE,
1287	IB_WR_RDMA_WRITE_WITH_IMM,
1288	IB_WR_SEND,
1289	IB_WR_SEND_WITH_IMM,
1290	IB_WR_RDMA_READ,
1291	IB_WR_ATOMIC_CMP_AND_SWP,
1292	IB_WR_ATOMIC_FETCH_AND_ADD,
1293	IB_WR_LSO,
1294	IB_WR_SEND_WITH_INV,
1295	IB_WR_RDMA_READ_WITH_INV,
1296	IB_WR_LOCAL_INV,
1297	IB_WR_REG_MR,
1298	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1299	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1300	IB_WR_REG_SIG_MR,
1301	/* reserve values for low level drivers' internal use.
1302	 * These values will not be used at all in the ib core layer.
1303	 */
1304	IB_WR_RESERVED1 = 0xf0,
1305	IB_WR_RESERVED2,
1306	IB_WR_RESERVED3,
1307	IB_WR_RESERVED4,
1308	IB_WR_RESERVED5,
1309	IB_WR_RESERVED6,
1310	IB_WR_RESERVED7,
1311	IB_WR_RESERVED8,
1312	IB_WR_RESERVED9,
1313	IB_WR_RESERVED10,
1314};
1315
1316enum ib_send_flags {
1317	IB_SEND_FENCE		= 1,
1318	IB_SEND_SIGNALED	= (1<<1),
1319	IB_SEND_SOLICITED	= (1<<2),
1320	IB_SEND_INLINE		= (1<<3),
1321	IB_SEND_IP_CSUM		= (1<<4),
1322
1323	/* reserve bits 26-31 for low level drivers' internal use */
1324	IB_SEND_RESERVED_START	= (1 << 26),
1325	IB_SEND_RESERVED_END	= (1 << 31),
1326};
1327
1328struct ib_sge {
1329	u64	addr;
1330	u32	length;
1331	u32	lkey;
1332};
1333
1334struct ib_cqe {
1335	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1336};
1337
1338struct ib_send_wr {
1339	struct ib_send_wr      *next;
1340	union {
1341		u64		wr_id;
1342		struct ib_cqe	*wr_cqe;
1343	};
1344	struct ib_sge	       *sg_list;
1345	int			num_sge;
1346	enum ib_wr_opcode	opcode;
1347	int			send_flags;
1348	union {
1349		__be32		imm_data;
1350		u32		invalidate_rkey;
1351	} ex;
1352};
1353
1354struct ib_rdma_wr {
1355	struct ib_send_wr	wr;
1356	u64			remote_addr;
1357	u32			rkey;
1358};
1359
1360static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
1361{
1362	return container_of(wr, struct ib_rdma_wr, wr);
1363}
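
/*
 * Editor's sketch (not part of the original header): building an RDMA
 * WRITE work request with the container struct above.  rdma_wr()
 * recovers the container from the embedded struct ib_send_wr when a
 * driver walks a posted WR chain.
 */
static void example_init_rdma_write(struct ib_rdma_wr *rdma,
				    struct ib_sge *sge, int num_sge,
				    u64 remote_addr, u32 rkey)
{
	memset(rdma, 0, sizeof(*rdma));
	rdma->wr.opcode     = IB_WR_RDMA_WRITE;
	rdma->wr.sg_list    = sge;
	rdma->wr.num_sge    = num_sge;
	rdma->wr.send_flags = IB_SEND_SIGNALED;
	rdma->remote_addr   = remote_addr;
	rdma->rkey          = rkey;
}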
1364
1365struct ib_atomic_wr {
1366	struct ib_send_wr	wr;
1367	u64			remote_addr;
1368	u64			compare_add;
1369	u64			swap;
1370	u64			compare_add_mask;
1371	u64			swap_mask;
1372	u32			rkey;
1373};
1374
1375static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
1376{
1377	return container_of(wr, struct ib_atomic_wr, wr);
1378}
1379
1380struct ib_ud_wr {
1381	struct ib_send_wr	wr;
1382	struct ib_ah		*ah;
1383	void			*header;
1384	int			hlen;
1385	int			mss;
1386	u32			remote_qpn;
1387	u32			remote_qkey;
1388	u16			pkey_index; /* valid for GSI only */
1389	u8			port_num;   /* valid for DR SMPs on switch only */
1390};
1391
1392static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
1393{
1394	return container_of(wr, struct ib_ud_wr, wr);
1395}
1396
1397struct ib_reg_wr {
1398	struct ib_send_wr	wr;
1399	struct ib_mr		*mr;
1400	u32			key;
1401	int			access;
1402};
1403
1404static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
1405{
1406	return container_of(wr, struct ib_reg_wr, wr);
1407}
1408
1409struct ib_sig_handover_wr {
1410	struct ib_send_wr	wr;
1411	struct ib_sig_attrs    *sig_attrs;
1412	struct ib_mr	       *sig_mr;
1413	int			access_flags;
1414	struct ib_sge	       *prot;
1415};
1416
1417static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
1418{
1419	return container_of(wr, struct ib_sig_handover_wr, wr);
1420}
1421
1422struct ib_recv_wr {
1423	struct ib_recv_wr      *next;
1424	union {
1425		u64		wr_id;
1426		struct ib_cqe	*wr_cqe;
1427	};
1428	struct ib_sge	       *sg_list;
1429	int			num_sge;
1430};
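/*
 * Example (illustrative sketch, not part of the original header): posting a
 * single receive buffer with ib_post_recv(), declared further down in this
 * file.  dma_addr, len and req are placeholders for an already mapped
 * buffer and its tracking structure.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_cqe	 = &req->cqe,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */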
1431
1432enum ib_access_flags {
1433	IB_ACCESS_LOCAL_WRITE	= 1,
1434	IB_ACCESS_REMOTE_WRITE	= (1<<1),
1435	IB_ACCESS_REMOTE_READ	= (1<<2),
1436	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
1437	IB_ACCESS_MW_BIND	= (1<<4),
1438	IB_ZERO_BASED		= (1<<5),
1439	IB_ACCESS_ON_DEMAND     = (1<<6),
1440	IB_ACCESS_HUGETLB	= (1<<7),
1441};
1442
1443/*
1444 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1445 * are hidden here instead of a uapi header!
1446 */
1447enum ib_mr_rereg_flags {
1448	IB_MR_REREG_TRANS	= 1,
1449	IB_MR_REREG_PD		= (1<<1),
1450	IB_MR_REREG_ACCESS	= (1<<2),
1451	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1452};
1453
1454struct ib_fmr_attr {
1455	int	max_pages;
1456	int	max_maps;
1457	u8	page_shift;
1458};
1459
1460struct ib_umem;
1461
1462enum rdma_remove_reason {
1463	/* Userspace requested uobject deletion. Call could fail */
1464	RDMA_REMOVE_DESTROY,
1465	/* Context deletion. This call should delete the actual object itself */
1466	RDMA_REMOVE_CLOSE,
1467	/* Driver is being hot-unplugged. This call should delete the actual object itself */
1468	RDMA_REMOVE_DRIVER_REMOVE,
1469	/* Context is being cleaned-up, but commit was just completed */
1470	RDMA_REMOVE_DURING_CLEANUP,
1471};
1472
1473struct ib_rdmacg_object {
1474#ifdef CONFIG_CGROUP_RDMA
1475	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
1476#endif
1477};
1478
1479struct ib_ucontext {
1480	struct ib_device       *device;
1481	struct ib_uverbs_file  *ufile;
1482	int			closing;
1483
1484	/* locking the uobjects_list */
1485	struct mutex		uobjects_lock;
1486	struct list_head	uobjects;
1487	/* protects cleanup process from other actions */
1488	struct rw_semaphore	cleanup_rwsem;
1489	enum rdma_remove_reason cleanup_reason;
1490
1491	struct pid             *tgid;
1492#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1493	struct rb_root_cached   umem_tree;
1494	/*
1495	 * Protects umem_tree, as well as odp_mrs_count and
1496	 * mmu notifiers registration.
1497	 */
1498	struct rw_semaphore	umem_rwsem;
1499	void (*invalidate_range)(struct ib_umem *umem,
1500				 unsigned long start, unsigned long end);
1501
1502	struct mmu_notifier	mn;
1503	atomic_t		notifier_count;
1504	/* A list of umems that don't have private mmu notifier counters yet. */
1505	struct list_head	no_private_counters;
1506	int                     odp_mrs_count;
1507#endif
1508
1509	struct ib_rdmacg_object	cg_obj;
1510};
1511
1512struct ib_uobject {
1513	u64			user_handle;	/* handle given to us by userspace */
1514	struct ib_ucontext     *context;	/* associated user context */
1515	void		       *object;		/* containing object */
1516	struct list_head	list;		/* link to context's list */
1517	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
1518	int			id;		/* index into kernel idr */
1519	struct kref		ref;
1520	atomic_t		usecnt;		/* protects exclusive access */
1521	struct rcu_head		rcu;		/* kfree_rcu() overhead */
1522
1523	const struct uverbs_obj_type *type;
1524};
1525
1526struct ib_uobject_file {
1527	struct ib_uobject	uobj;
1528	/* ufile contains the lock between context release and file close */
1529	struct ib_uverbs_file	*ufile;
1530};
1531
1532struct ib_udata {
1533	const void __user *inbuf;
1534	void __user *outbuf;
1535	size_t       inlen;
1536	size_t       outlen;
1537};
1538
1539struct ib_pd {
1540	u32			local_dma_lkey;
1541	u32			flags;
1542	struct ib_device       *device;
1543	struct ib_uobject      *uobject;
1544	atomic_t          	usecnt; /* count all resources */
1545
1546	u32			unsafe_global_rkey;
1547
1548	/*
1549	 * Implementation details of the RDMA core, don't use in drivers:
1550	 */
1551	struct ib_mr	       *__internal_mr;
1552	struct rdma_restrack_entry res;
1553};
1554
1555struct ib_xrcd {
1556	struct ib_device       *device;
1557	atomic_t		usecnt; /* count all exposed resources */
1558	struct inode	       *inode;
1559
1560	struct mutex		tgt_qp_mutex;
1561	struct list_head	tgt_qp_list;
1562};
1563
1564struct ib_ah {
1565	struct ib_device	*device;
1566	struct ib_pd		*pd;
1567	struct ib_uobject	*uobject;
1568	enum rdma_ah_attr_type	type;
1569};
1570
1571typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1572
1573enum ib_poll_context {
1574	IB_POLL_DIRECT,		/* caller context, no hw completions */
1575	IB_POLL_SOFTIRQ,	/* poll from softirq context */
1576	IB_POLL_WORKQUEUE,	/* poll from workqueue */
1577};
1578
1579struct ib_cq {
1580	struct ib_device       *device;
1581	struct ib_uobject      *uobject;
1582	ib_comp_handler   	comp_handler;
1583	void                  (*event_handler)(struct ib_event *, void *);
1584	void                   *cq_context;
1585	int               	cqe;
1586	atomic_t          	usecnt; /* count number of work queues */
1587	enum ib_poll_context	poll_ctx;
1588	struct ib_wc		*wc;
1589	union {
1590		struct irq_poll		iop;
1591		struct work_struct	work;
1592	};
1593	/*
1594	 * Implementation details of the RDMA core, don't use in drivers:
1595	 */
1596	struct rdma_restrack_entry res;
1597};
1598
1599struct ib_srq {
1600	struct ib_device       *device;
1601	struct ib_pd	       *pd;
1602	struct ib_uobject      *uobject;
1603	void		      (*event_handler)(struct ib_event *, void *);
1604	void		       *srq_context;
1605	enum ib_srq_type	srq_type;
1606	atomic_t		usecnt;
1607
1608	struct {
1609		struct ib_cq   *cq;
1610		union {
1611			struct {
1612				struct ib_xrcd *xrcd;
1613				u32		srq_num;
1614			} xrc;
1615		};
1616	} ext;
1617};
1618
1619enum ib_raw_packet_caps {
1620	/* Stripping the CVLAN from an incoming packet and reporting it in the
1621	 * matching work completion is supported.
1622	 */
1623	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
1624	/* Scattering the FCS field of an incoming packet to host memory is supported.
1625	 */
1626	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
1627	/* Checksum offloads are supported (for both send and receive). */
1628	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
1629	/* When a packet is received for an RQ with no receive WQEs, the
1630	 * packet processing is delayed.
1631	 */
1632	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
1633};
1634
1635enum ib_wq_type {
1636	IB_WQT_RQ
1637};
1638
1639enum ib_wq_state {
1640	IB_WQS_RESET,
1641	IB_WQS_RDY,
1642	IB_WQS_ERR
1643};
1644
1645struct ib_wq {
1646	struct ib_device       *device;
1647	struct ib_uobject      *uobject;
1648	void		    *wq_context;
1649	void		    (*event_handler)(struct ib_event *, void *);
1650	struct ib_pd	       *pd;
1651	struct ib_cq	       *cq;
1652	u32		wq_num;
1653	enum ib_wq_state       state;
1654	enum ib_wq_type	wq_type;
1655	atomic_t		usecnt;
1656};
1657
1658enum ib_wq_flags {
1659	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
1660	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
1661	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
1662	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1663};
1664
1665struct ib_wq_init_attr {
1666	void		       *wq_context;
1667	enum ib_wq_type	wq_type;
1668	u32		max_wr;
1669	u32		max_sge;
1670	struct	ib_cq	       *cq;
1671	void		    (*event_handler)(struct ib_event *, void *);
1672	u32		create_flags; /* Use enum ib_wq_flags */
1673};
1674
1675enum ib_wq_attr_mask {
1676	IB_WQ_STATE		= 1 << 0,
1677	IB_WQ_CUR_STATE		= 1 << 1,
1678	IB_WQ_FLAGS		= 1 << 2,
1679};
1680
1681struct ib_wq_attr {
1682	enum	ib_wq_state	wq_state;
1683	enum	ib_wq_state	curr_wq_state;
1684	u32			flags; /* Use enum ib_wq_flags */
1685	u32			flags_mask; /* Use enum ib_wq_flags */
1686};
1687
1688struct ib_rwq_ind_table {
1689	struct ib_device	*device;
1690	struct ib_uobject      *uobject;
1691	atomic_t		usecnt;
1692	u32		ind_tbl_num;
1693	u32		log_ind_tbl_size;
1694	struct ib_wq	**ind_tbl;
1695};
1696
1697struct ib_rwq_ind_table_init_attr {
1698	u32		log_ind_tbl_size;
1699	/* Each entry is a pointer to Receive Work Queue */
1700	struct ib_wq	**ind_tbl;
1701};
1702
1703enum port_pkey_state {
1704	IB_PORT_PKEY_NOT_VALID = 0,
1705	IB_PORT_PKEY_VALID = 1,
1706	IB_PORT_PKEY_LISTED = 2,
1707};
1708
1709struct ib_qp_security;
1710
1711struct ib_port_pkey {
1712	enum port_pkey_state	state;
1713	u16			pkey_index;
1714	u8			port_num;
1715	struct list_head	qp_list;
1716	struct list_head	to_error_list;
1717	struct ib_qp_security  *sec;
1718};
1719
1720struct ib_ports_pkeys {
1721	struct ib_port_pkey	main;
1722	struct ib_port_pkey	alt;
1723};
1724
1725struct ib_qp_security {
1726	struct ib_qp	       *qp;
1727	struct ib_device       *dev;
1728	/* Hold this mutex when changing port and pkey settings. */
1729	struct mutex		mutex;
1730	struct ib_ports_pkeys  *ports_pkeys;
1731	/* A list of all open shared QP handles.  Required to enforce security
1732	 * properly for all users of a shared QP.
1733	 */
1734	struct list_head        shared_qp_list;
1735	void                   *security;
1736	bool			destroying;
1737	atomic_t		error_list_count;
1738	struct completion	error_complete;
1739	int			error_comps_pending;
1740};
1741
1742/*
1743 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1744 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1745 */
1746struct ib_qp {
1747	struct ib_device       *device;
1748	struct ib_pd	       *pd;
1749	struct ib_cq	       *send_cq;
1750	struct ib_cq	       *recv_cq;
1751	spinlock_t		mr_lock;
1752	int			mrs_used;
1753	struct list_head	rdma_mrs;
1754	struct list_head	sig_mrs;
1755	struct ib_srq	       *srq;
1756	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1757	struct list_head	xrcd_list;
1758
1759	/* count times opened, mcast attaches, flow attaches */
1760	atomic_t		usecnt;
1761	struct list_head	open_list;
1762	struct ib_qp           *real_qp;
1763	struct ib_uobject      *uobject;
1764	void                  (*event_handler)(struct ib_event *, void *);
1765	void		       *qp_context;
1766	u32			qp_num;
1767	u32			max_write_sge;
1768	u32			max_read_sge;
1769	enum ib_qp_type		qp_type;
1770	struct ib_rwq_ind_table *rwq_ind_tbl;
1771	struct ib_qp_security  *qp_sec;
1772	u8			port;
1773
1774	/*
1775	 * Implementation details of the RDMA core, don't use in drivers:
1776	 */
1777	struct rdma_restrack_entry     res;
1778};
1779
1780struct ib_dm {
1781	struct ib_device  *device;
1782	u32		   length;
1783	u32		   flags;
1784	struct ib_uobject *uobject;
1785	atomic_t	   usecnt;
1786};
1787
1788struct ib_mr {
1789	struct ib_device  *device;
1790	struct ib_pd	  *pd;
1791	u32		   lkey;
1792	u32		   rkey;
1793	u64		   iova;
1794	u64		   length;
1795	unsigned int	   page_size;
1796	bool		   need_inval;
1797	union {
1798		struct ib_uobject	*uobject;	/* user */
1799		struct list_head	qp_entry;	/* FR */
1800	};
1801
1802	struct ib_dm      *dm;
1803
1804	/*
1805	 * Implementation details of the RDMA core, don't use in drivers:
1806	 */
1807	struct rdma_restrack_entry res;
1808};
1809
1810struct ib_mw {
1811	struct ib_device	*device;
1812	struct ib_pd		*pd;
1813	struct ib_uobject	*uobject;
1814	u32			rkey;
1815	enum ib_mw_type         type;
1816};
1817
1818struct ib_fmr {
1819	struct ib_device	*device;
1820	struct ib_pd		*pd;
1821	struct list_head	list;
1822	u32			lkey;
1823	u32			rkey;
1824};
1825
1826/* Supported steering options */
1827enum ib_flow_attr_type {
1828	/* steering according to rule specifications */
1829	IB_FLOW_ATTR_NORMAL		= 0x0,
1830	/* default unicast and multicast rule -
1831	 * receive all Eth traffic which isn't steered to any QP
1832	 */
1833	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1834	/* default multicast rule -
1835	 * receive all Eth multicast traffic which isn't steered to any QP
1836	 */
1837	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1838	/* sniffer rule - receive all port traffic */
1839	IB_FLOW_ATTR_SNIFFER		= 0x3
1840};
1841
1842/* Supported steering header types */
1843enum ib_flow_spec_type {
1844	/* L2 headers*/
1845	IB_FLOW_SPEC_ETH		= 0x20,
1846	IB_FLOW_SPEC_IB			= 0x22,
1847	/* L3 header*/
1848	IB_FLOW_SPEC_IPV4		= 0x30,
1849	IB_FLOW_SPEC_IPV6		= 0x31,
1850	IB_FLOW_SPEC_ESP                = 0x34,
1851	/* L4 headers*/
1852	IB_FLOW_SPEC_TCP		= 0x40,
1853	IB_FLOW_SPEC_UDP		= 0x41,
1854	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
1855	IB_FLOW_SPEC_INNER		= 0x100,
1856	/* Actions */
1857	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1858	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1859	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
1860};
1861#define IB_FLOW_SPEC_LAYER_MASK	0xF0
1862#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
1863
1864/* Flow steering rule priority is set according to its domain.
1865 * A lower domain value means a higher priority.
1866 */
1867enum ib_flow_domain {
1868	IB_FLOW_DOMAIN_USER,
1869	IB_FLOW_DOMAIN_ETHTOOL,
1870	IB_FLOW_DOMAIN_RFS,
1871	IB_FLOW_DOMAIN_NIC,
1872	IB_FLOW_DOMAIN_NUM /* Must be last */
1873};
1874
1875enum ib_flow_flags {
1876	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1877	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1878	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1879};
1880
1881struct ib_flow_eth_filter {
1882	u8	dst_mac[6];
1883	u8	src_mac[6];
1884	__be16	ether_type;
1885	__be16	vlan_tag;
1886	/* Must be last */
1887	u8	real_sz[0];
1888};
1889
1890struct ib_flow_spec_eth {
1891	u32			  type;
1892	u16			  size;
1893	struct ib_flow_eth_filter val;
1894	struct ib_flow_eth_filter mask;
1895};
1896
1897struct ib_flow_ib_filter {
1898	__be16 dlid;
1899	__u8   sl;
1900	/* Must be last */
1901	u8	real_sz[0];
1902};
1903
1904struct ib_flow_spec_ib {
1905	u32			 type;
1906	u16			 size;
1907	struct ib_flow_ib_filter val;
1908	struct ib_flow_ib_filter mask;
1909};
1910
1911/* IPv4 header flags */
1912enum ib_ipv4_flags {
1913	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1914	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1915				    last one have this flag set */
1916};
1917
1918struct ib_flow_ipv4_filter {
1919	__be32	src_ip;
1920	__be32	dst_ip;
1921	u8	proto;
1922	u8	tos;
1923	u8	ttl;
1924	u8	flags;
1925	/* Must be last */
1926	u8	real_sz[0];
1927};
1928
1929struct ib_flow_spec_ipv4 {
1930	u32			   type;
1931	u16			   size;
1932	struct ib_flow_ipv4_filter val;
1933	struct ib_flow_ipv4_filter mask;
1934};
1935
1936struct ib_flow_ipv6_filter {
1937	u8	src_ip[16];
1938	u8	dst_ip[16];
1939	__be32	flow_label;
1940	u8	next_hdr;
1941	u8	traffic_class;
1942	u8	hop_limit;
1943	/* Must be last */
1944	u8	real_sz[0];
1945};
1946
1947struct ib_flow_spec_ipv6 {
1948	u32			   type;
1949	u16			   size;
1950	struct ib_flow_ipv6_filter val;
1951	struct ib_flow_ipv6_filter mask;
1952};
1953
1954struct ib_flow_tcp_udp_filter {
1955	__be16	dst_port;
1956	__be16	src_port;
1957	/* Must be last */
1958	u8	real_sz[0];
1959};
1960
1961struct ib_flow_spec_tcp_udp {
1962	u32			      type;
1963	u16			      size;
1964	struct ib_flow_tcp_udp_filter val;
1965	struct ib_flow_tcp_udp_filter mask;
1966};
1967
1968struct ib_flow_tunnel_filter {
1969	__be32	tunnel_id;
1970	u8	real_sz[0];
1971};
1972
1973/* ib_flow_spec_tunnel describes the VXLAN tunnel;
1974 * the tunnel_id in val holds the VNI value.
1975 */
1976struct ib_flow_spec_tunnel {
1977	u32			      type;
1978	u16			      size;
1979	struct ib_flow_tunnel_filter  val;
1980	struct ib_flow_tunnel_filter  mask;
1981};
1982
1983struct ib_flow_esp_filter {
1984	__be32	spi;
1985	__be32  seq;
1986	/* Must be last */
1987	u8	real_sz[0];
1988};
1989
1990struct ib_flow_spec_esp {
1991	u32                           type;
1992	u16			      size;
1993	struct ib_flow_esp_filter     val;
1994	struct ib_flow_esp_filter     mask;
1995};
1996
1997struct ib_flow_spec_action_tag {
1998	enum ib_flow_spec_type	      type;
1999	u16			      size;
2000	u32                           tag_id;
2001};
2002
2003struct ib_flow_spec_action_drop {
2004	enum ib_flow_spec_type	      type;
2005	u16			      size;
2006};
2007
2008struct ib_flow_spec_action_handle {
2009	enum ib_flow_spec_type	      type;
2010	u16			      size;
2011	struct ib_flow_action	     *act;
2012};
2013
2014union ib_flow_spec {
2015	struct {
2016		u32			type;
2017		u16			size;
2018	};
2019	struct ib_flow_spec_eth		eth;
2020	struct ib_flow_spec_ib		ib;
2021	struct ib_flow_spec_ipv4        ipv4;
2022	struct ib_flow_spec_tcp_udp	tcp_udp;
2023	struct ib_flow_spec_ipv6        ipv6;
2024	struct ib_flow_spec_tunnel      tunnel;
2025	struct ib_flow_spec_esp		esp;
2026	struct ib_flow_spec_action_tag  flow_tag;
2027	struct ib_flow_spec_action_drop drop;
2028	struct ib_flow_spec_action_handle action;
2029};
2030
2031struct ib_flow_attr {
2032	enum ib_flow_attr_type type;
2033	u16	     size;
2034	u16	     priority;
2035	u32	     flags;
2036	u8	     num_of_specs;
2037	u8	     port;
2038	/* Following are the optional layers according to user request
2039	 * struct ib_flow_spec_xxx
2040	 * struct ib_flow_spec_yyy
2041	 */
2042};
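/*
 * Example (illustrative sketch, not part of the original header): because the
 * specs trail the fixed part of ib_flow_attr in memory, callers typically
 * allocate one buffer for both and lay the specs out back to back before
 * handing the attr to the device's create_flow method.  The MAC and sizes
 * here are placeholders.
 *
 *	struct ib_flow_attr *attr;
 *	struct ib_flow_spec_eth *eth;
 *
 *	attr = kzalloc(sizeof(*attr) + sizeof(*eth), GFP_KERNEL);
 *	attr->type	   = IB_FLOW_ATTR_NORMAL;
 *	attr->size	   = sizeof(*attr) + sizeof(*eth);
 *	attr->num_of_specs = 1;
 *	attr->port	   = 1;
 *
 *	eth = (struct ib_flow_spec_eth *)(attr + 1);
 *	eth->type = IB_FLOW_SPEC_ETH;
 *	eth->size = sizeof(*eth);
 *	ether_addr_copy(eth->val.dst_mac, mac);
 *	eth_broadcast_addr(eth->mask.dst_mac);
 */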
2043
2044struct ib_flow {
2045	struct ib_qp		*qp;
2046	struct ib_uobject	*uobject;
2047};
2048
2049enum ib_flow_action_type {
2050	IB_FLOW_ACTION_UNSPECIFIED,
2051	IB_FLOW_ACTION_ESP = 1,
2052};
2053
2054struct ib_flow_action_attrs_esp_keymats {
2055	enum ib_uverbs_flow_action_esp_keymat			protocol;
2056	union {
2057		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2058	} keymat;
2059};
2060
2061struct ib_flow_action_attrs_esp_replays {
2062	enum ib_uverbs_flow_action_esp_replay			protocol;
2063	union {
2064		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
2065	} replay;
2066};
2067
2068enum ib_flow_action_attrs_esp_flags {
2069	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2070	 * This is done in order to share the same flags between user-space and
2071	 * kernel and avoid an unnecessary translation.
2072	 */
2073
2074	/* Kernel flags */
2075	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
2076	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
2077};
2078
2079struct ib_flow_spec_list {
2080	struct ib_flow_spec_list	*next;
2081	union ib_flow_spec		spec;
2082};
2083
2084struct ib_flow_action_attrs_esp {
2085	struct ib_flow_action_attrs_esp_keymats		*keymat;
2086	struct ib_flow_action_attrs_esp_replays		*replay;
2087	struct ib_flow_spec_list			*encap;
2088	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2089	 * Value of 0 is a valid value.
2090	 */
2091	u32						esn;
2092	u32						spi;
2093	u32						seq;
2094	u32						tfc_pad;
2095	/* Use enum ib_flow_action_attrs_esp_flags */
2096	u64						flags;
2097	u64						hard_limit_pkts;
2098};
2099
2100struct ib_flow_action {
2101	struct ib_device		*device;
2102	struct ib_uobject		*uobject;
2103	enum ib_flow_action_type	type;
2104	atomic_t			usecnt;
2105};
2106
2107struct ib_mad_hdr;
2108struct ib_grh;
2109
2110enum ib_process_mad_flags {
2111	IB_MAD_IGNORE_MKEY	= 1,
2112	IB_MAD_IGNORE_BKEY	= 2,
2113	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2114};
2115
2116enum ib_mad_result {
2117	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2118	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2119	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2120	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2121};
2122
2123struct ib_port_cache {
2124	u64		      subnet_prefix;
2125	struct ib_pkey_cache  *pkey;
2126	struct ib_gid_table   *gid;
2127	u8                     lmc;
2128	enum ib_port_state     port_state;
2129};
2130
2131struct ib_cache {
2132	rwlock_t                lock;
2133	struct ib_event_handler event_handler;
2134	struct ib_port_cache   *ports;
2135};
2136
2137struct iw_cm_verbs;
2138
2139struct ib_port_immutable {
2140	int                           pkey_tbl_len;
2141	int                           gid_tbl_len;
2142	u32                           core_cap_flags;
2143	u32                           max_mad_size;
2144};
2145
2146/* rdma netdev type - specifies protocol type */
2147enum rdma_netdev_t {
2148	RDMA_NETDEV_OPA_VNIC,
2149	RDMA_NETDEV_IPOIB,
2150};
2151
2152/**
2153 * struct rdma_netdev - rdma netdev
2154 * For cases where netstack interfacing is required.
2155 */
2156struct rdma_netdev {
2157	void              *clnt_priv;
2158	struct ib_device  *hca;
2159	u8                 port_num;
2160
2161	/* cleanup function must be specified */
2162	void (*free_rdma_netdev)(struct net_device *netdev);
2163
2164	/* control functions */
2165	void (*set_id)(struct net_device *netdev, int id);
2166	/* send packet */
2167	int (*send)(struct net_device *dev, struct sk_buff *skb,
2168		    struct ib_ah *address, u32 dqpn);
2169	/* multicast */
2170	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2171			    union ib_gid *gid, u16 mlid,
2172			    int set_qkey, u32 qkey);
2173	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2174			    union ib_gid *gid, u16 mlid);
2175};
2176
2177struct ib_port_pkey_list {
2178	/* Lock to hold while modifying the list. */
2179	spinlock_t                    list_lock;
2180	struct list_head              pkey_list;
2181};
2182
2183struct uverbs_attr_bundle;
2184
2185struct ib_device {
2186	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2187	struct device                *dma_device;
2188
2189	char                          name[IB_DEVICE_NAME_MAX];
2190
2191	struct list_head              event_handler_list;
2192	spinlock_t                    event_handler_lock;
2193
2194	spinlock_t                    client_data_lock;
2195	struct list_head              core_list;
2196	/* Access to the client_data_list is protected by the client_data_lock
2197	 * spinlock and the lists_rwsem read-write semaphore */
2198	struct list_head              client_data_list;
2199
2200	struct ib_cache               cache;
2201	/**
2202	 * port_immutable is indexed by port number
2203	 */
2204	struct ib_port_immutable     *port_immutable;
2205
2206	int			      num_comp_vectors;
2207
2208	struct ib_port_pkey_list     *port_pkey_list;
2209
2210	struct iw_cm_verbs	     *iwcm;
2211
2212	/**
2213	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2214	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
2215	 *   core when the device is removed.  A lifespan of -1 in the return
2216	 *   struct tells the core to set a default lifespan.
2217	 */
2218	struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
2219						     u8 port_num);
2220	/**
2221	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2222	 * @index - The index in the value array we wish to have updated, or
2223	 *   num_counters if we want all stats updated
2224	 * Return codes -
2225	 *   < 0 - Error, no counters updated
2226	 *   index - Updated the single counter pointed to by index
2227	 *   num_counters - Updated all counters (will reset the timestamp
2228	 *     and prevent further calls for lifespan milliseconds)
2229	 * Drivers are allowed to update all counters in lieu of just the
2230	 *   one given in index at their option
2231	 */
2232	int		           (*get_hw_stats)(struct ib_device *device,
2233						   struct rdma_hw_stats *stats,
2234						   u8 port, int index);
2235	int		           (*query_device)(struct ib_device *device,
2236						   struct ib_device_attr *device_attr,
2237						   struct ib_udata *udata);
2238	int		           (*query_port)(struct ib_device *device,
2239						 u8 port_num,
2240						 struct ib_port_attr *port_attr);
2241	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
2242						     u8 port_num);
2243	/* When calling get_netdev, the HW vendor's driver should return the
2244	 * net device of device @device at port @port_num or NULL if such
2245	 * a net device doesn't exist. The vendor driver should call dev_hold
2246	 * on this net device. The HW vendor's device driver must guarantee
2247	 * that this function returns NULL before the net device has finished
2248	 * NETDEV_UNREGISTER state.
2249	 */
2250	struct net_device	  *(*get_netdev)(struct ib_device *device,
2251						 u8 port_num);
2252	/* query_gid should return the GID value for @device when the @port_num
2253	 * link layer is either IB or iWARP. It is a no-op if the @port_num port
2254	 * link layer is RoCE.
2255	 */
2256	int		           (*query_gid)(struct ib_device *device,
2257						u8 port_num, int index,
2258						union ib_gid *gid);
2259	/* When calling add_gid, the HW vendor's driver should add the gid
2260	 * of device of port at gid index available at @attr. Meta-info of
2261	 * that gid (for example, the network device related to this gid) is
2262	 * available at @attr. @context allows the HW vendor driver to store
2263	 * extra information together with a GID entry. The HW vendor driver may
2264	 * allocate memory to contain this information and store it in @context
2265	 * when a new GID entry is written. Params are consistent until the
2266	 * next call of add_gid or delete_gid. The function should return 0 on
2267	 * success or an error otherwise. The function could be called
2268	 * concurrently for different ports. This function is only called when
2269	 * roce_gid_table is used.
2270	 */
2271	int		           (*add_gid)(const union ib_gid *gid,
2272					      const struct ib_gid_attr *attr,
2273					      void **context);
2274	/* When calling del_gid, the HW vendor's driver should delete the
2275	 * gid of device @device at gid index gid_index of port port_num
2276	 * available in @attr.
2277	 * Upon the deletion of a GID entry, the HW vendor must free any
2278	 * allocated memory. The caller will clear @context afterwards.
2279	 * This function is only called when roce_gid_table is used.
2280	 */
2281	int		           (*del_gid)(const struct ib_gid_attr *attr,
2282					      void **context);
2283	int		           (*query_pkey)(struct ib_device *device,
2284						 u8 port_num, u16 index, u16 *pkey);
2285	int		           (*modify_device)(struct ib_device *device,
2286						    int device_modify_mask,
2287						    struct ib_device_modify *device_modify);
2288	int		           (*modify_port)(struct ib_device *device,
2289						  u8 port_num, int port_modify_mask,
2290						  struct ib_port_modify *port_modify);
2291	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
2292						     struct ib_udata *udata);
2293	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
2294	int                        (*mmap)(struct ib_ucontext *context,
2295					   struct vm_area_struct *vma);
2296	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
2297					       struct ib_ucontext *context,
2298					       struct ib_udata *udata);
2299	int                        (*dealloc_pd)(struct ib_pd *pd);
2300	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
2301						struct rdma_ah_attr *ah_attr,
2302						struct ib_udata *udata);
2303	int                        (*modify_ah)(struct ib_ah *ah,
2304						struct rdma_ah_attr *ah_attr);
2305	int                        (*query_ah)(struct ib_ah *ah,
2306					       struct rdma_ah_attr *ah_attr);
2307	int                        (*destroy_ah)(struct ib_ah *ah);
2308	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
2309						 struct ib_srq_init_attr *srq_init_attr,
2310						 struct ib_udata *udata);
2311	int                        (*modify_srq)(struct ib_srq *srq,
2312						 struct ib_srq_attr *srq_attr,
2313						 enum ib_srq_attr_mask srq_attr_mask,
2314						 struct ib_udata *udata);
2315	int                        (*query_srq)(struct ib_srq *srq,
2316						struct ib_srq_attr *srq_attr);
2317	int                        (*destroy_srq)(struct ib_srq *srq);
2318	int                        (*post_srq_recv)(struct ib_srq *srq,
2319						    struct ib_recv_wr *recv_wr,
2320						    struct ib_recv_wr **bad_recv_wr);
2321	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
2322						struct ib_qp_init_attr *qp_init_attr,
2323						struct ib_udata *udata);
2324	int                        (*modify_qp)(struct ib_qp *qp,
2325						struct ib_qp_attr *qp_attr,
2326						int qp_attr_mask,
2327						struct ib_udata *udata);
2328	int                        (*query_qp)(struct ib_qp *qp,
2329					       struct ib_qp_attr *qp_attr,
2330					       int qp_attr_mask,
2331					       struct ib_qp_init_attr *qp_init_attr);
2332	int                        (*destroy_qp)(struct ib_qp *qp);
2333	int                        (*post_send)(struct ib_qp *qp,
2334						struct ib_send_wr *send_wr,
2335						struct ib_send_wr **bad_send_wr);
2336	int                        (*post_recv)(struct ib_qp *qp,
2337						struct ib_recv_wr *recv_wr,
2338						struct ib_recv_wr **bad_recv_wr);
2339	struct ib_cq *             (*create_cq)(struct ib_device *device,
2340						const struct ib_cq_init_attr *attr,
2341						struct ib_ucontext *context,
2342						struct ib_udata *udata);
2343	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2344						u16 cq_period);
2345	int                        (*destroy_cq)(struct ib_cq *cq);
2346	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
2347						struct ib_udata *udata);
2348	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
2349					      struct ib_wc *wc);
2350	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2351	int                        (*req_notify_cq)(struct ib_cq *cq,
2352						    enum ib_cq_notify_flags flags);
2353	int                        (*req_ncomp_notif)(struct ib_cq *cq,
2354						      int wc_cnt);
2355	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
2356						 int mr_access_flags);
2357	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
2358						  u64 start, u64 length,
2359						  u64 virt_addr,
2360						  int mr_access_flags,
2361						  struct ib_udata *udata);
2362	int			   (*rereg_user_mr)(struct ib_mr *mr,
2363						    int flags,
2364						    u64 start, u64 length,
2365						    u64 virt_addr,
2366						    int mr_access_flags,
2367						    struct ib_pd *pd,
2368						    struct ib_udata *udata);
2369	int                        (*dereg_mr)(struct ib_mr *mr);
2370	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
2371					       enum ib_mr_type mr_type,
2372					       u32 max_num_sg);
2373	int                        (*map_mr_sg)(struct ib_mr *mr,
2374						struct scatterlist *sg,
2375						int sg_nents,
2376						unsigned int *sg_offset);
2377	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
2378					       enum ib_mw_type type,
2379					       struct ib_udata *udata);
2380	int                        (*dealloc_mw)(struct ib_mw *mw);
2381	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
2382						int mr_access_flags,
2383						struct ib_fmr_attr *fmr_attr);
2384	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
2385						   u64 *page_list, int list_len,
2386						   u64 iova);
2387	int		           (*unmap_fmr)(struct list_head *fmr_list);
2388	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
2389	int                        (*attach_mcast)(struct ib_qp *qp,
2390						   union ib_gid *gid,
2391						   u16 lid);
2392	int                        (*detach_mcast)(struct ib_qp *qp,
2393						   union ib_gid *gid,
2394						   u16 lid);
2395	int                        (*process_mad)(struct ib_device *device,
2396						  int process_mad_flags,
2397						  u8 port_num,
2398						  const struct ib_wc *in_wc,
2399						  const struct ib_grh *in_grh,
2400						  const struct ib_mad_hdr *in_mad,
2401						  size_t in_mad_size,
2402						  struct ib_mad_hdr *out_mad,
2403						  size_t *out_mad_size,
2404						  u16 *out_mad_pkey_index);
2405	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
2406						 struct ib_ucontext *ucontext,
2407						 struct ib_udata *udata);
2408	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2409	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
2410						  struct ib_flow_attr
2411						  *flow_attr,
2412						  int domain);
2413	int			   (*destroy_flow)(struct ib_flow *flow_id);
2414	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2415						      struct ib_mr_status *mr_status);
2416	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2417	void			   (*drain_rq)(struct ib_qp *qp);
2418	void			   (*drain_sq)(struct ib_qp *qp);
2419	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2420							int state);
2421	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2422						   struct ifla_vf_info *ivf);
2423	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2424						   struct ifla_vf_stats *stats);
2425	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2426						  int type);
2427	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
2428						struct ib_wq_init_attr *init_attr,
2429						struct ib_udata *udata);
2430	int			   (*destroy_wq)(struct ib_wq *wq);
2431	int			   (*modify_wq)(struct ib_wq *wq,
2432						struct ib_wq_attr *attr,
2433						u32 wq_attr_mask,
2434						struct ib_udata *udata);
2435	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
2436							   struct ib_rwq_ind_table_init_attr *init_attr,
2437							   struct ib_udata *udata);
2438	int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2439	struct ib_flow_action *	   (*create_flow_action_esp)(struct ib_device *device,
2440							     const struct ib_flow_action_attrs_esp *attr,
2441							     struct uverbs_attr_bundle *attrs);
2442	int			   (*destroy_flow_action)(struct ib_flow_action *action);
2443	int			   (*modify_flow_action_esp)(struct ib_flow_action *action,
2444							     const struct ib_flow_action_attrs_esp *attr,
2445							     struct uverbs_attr_bundle *attrs);
2446	struct ib_dm *             (*alloc_dm)(struct ib_device *device,
2447					       struct ib_ucontext *context,
2448					       struct ib_dm_alloc_attr *attr,
2449					       struct uverbs_attr_bundle *attrs);
2450	int                        (*dealloc_dm)(struct ib_dm *dm);
2451	struct ib_mr *             (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2452						struct ib_dm_mr_attr *attr,
2453						struct uverbs_attr_bundle *attrs);
2454	/**
2455	 * rdma netdev operation
2456	 *
2457	 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
2458	 * doesn't support the specified rdma netdev type.
2459	 */
2460	struct net_device *(*alloc_rdma_netdev)(
2461					struct ib_device *device,
2462					u8 port_num,
2463					enum rdma_netdev_t type,
2464					const char *name,
2465					unsigned char name_assign_type,
2466					void (*setup)(struct net_device *));
2467
2468	struct module               *owner;
2469	struct device                dev;
2470	struct kobject               *ports_parent;
2471	struct list_head             port_list;
2472
2473	enum {
2474		IB_DEV_UNINITIALIZED,
2475		IB_DEV_REGISTERED,
2476		IB_DEV_UNREGISTERED
2477	}                            reg_state;
2478
2479	int			     uverbs_abi_ver;
2480	u64			     uverbs_cmd_mask;
2481	u64			     uverbs_ex_cmd_mask;
2482
2483	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2484	__be64			     node_guid;
2485	u32			     local_dma_lkey;
2486	u16                          is_switch:1;
2487	u8                           node_type;
2488	u8                           phys_port_cnt;
2489	struct ib_device_attr        attrs;
2490	struct attribute_group	     *hw_stats_ag;
2491	struct rdma_hw_stats         *hw_stats;
2492
2493#ifdef CONFIG_CGROUP_RDMA
2494	struct rdmacg_device         cg_device;
2495#endif
2496
2497	u32                          index;
2498	/*
2499	 * Implementation details of the RDMA core, don't use in drivers
2500	 */
2501	struct rdma_restrack_root     res;
2502
2503	/**
2504	 * The following mandatory functions are used only at device
2505	 * registration.  Keep functions such as these at the end of this
2506	 * structure to avoid cache line misses when accessing struct ib_device
2507	 * in fast paths.
2508	 */
2509	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2510	void (*get_dev_fw_str)(struct ib_device *, char *str);
2511	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2512						     int comp_vector);
2513
2514	struct uverbs_root_spec		*specs_root;
2515	enum rdma_driver_id		driver_id;
2516};
2517
2518struct ib_client {
2519	char  *name;
2520	void (*add)   (struct ib_device *);
2521	void (*remove)(struct ib_device *, void *client_data);
2522
2523	/* Returns the net_dev belonging to this ib_client and matching the
2524	 * given parameters.
2525	 * @dev:	 An RDMA device that the net_dev uses for communication.
2526	 * @port:	 A physical port number on the RDMA device.
2527	 * @pkey:	 P_Key that the net_dev uses if applicable.
2528	 * @gid:	 A GID that the net_dev uses to communicate.
2529	 * @addr:	 An IP address the net_dev is configured with.
2530	 * @client_data: The device's client data set by ib_set_client_data().
2531	 *
2532	 * An ib_client that implements a net_dev on top of RDMA devices
2533	 * (such as IP over IB) should implement this callback, allowing the
2534	 * rdma_cm module to find the right net_dev for a given request.
2535	 *
2536	 * The caller is responsible for calling dev_put on the returned
2537	 * netdev. */
2538	struct net_device *(*get_net_dev_by_params)(
2539			struct ib_device *dev,
2540			u8 port,
2541			u16 pkey,
2542			const union ib_gid *gid,
2543			const struct sockaddr *addr,
2544			void *client_data);
2545	struct list_head list;
2546};
2547
2548struct ib_device *ib_alloc_device(size_t size);
2549void ib_dealloc_device(struct ib_device *device);
2550
2551void ib_get_device_fw_str(struct ib_device *device, char *str);
2552
2553int ib_register_device(struct ib_device *device,
2554		       int (*port_callback)(struct ib_device *,
2555					    u8, struct kobject *));
2556void ib_unregister_device(struct ib_device *device);
2557
2558int ib_register_client   (struct ib_client *client);
2559void ib_unregister_client(struct ib_client *client);
2560
2561void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2562void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2563			 void *data);
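/*
 * Example (illustrative sketch, not part of the original header): a minimal
 * ib_client that is notified as devices are registered and removed, and that
 * stashes per-device state with ib_set_client_data().  All "my_*" names are
 * made up.
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		ib_set_client_data(device, &my_client, data);
 *	}
 *
 *	static void my_remove_one(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add_one,
 *		.remove	= my_remove_one,
 *	};
 *
 *	ret = ib_register_client(&my_client);
 */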
2564
2565static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2566{
2567	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2568}
2569
2570static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2571{
2572	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2573}
2574
2575static inline bool ib_is_buffer_cleared(const void __user *p,
2576					size_t len)
2577{
2578	bool ret;
2579	u8 *buf;
2580
2581	if (len > USHRT_MAX)
2582		return false;
2583
2584	buf = memdup_user(p, len);
2585	if (IS_ERR(buf))
2586		return false;
2587
2588	ret = !memchr_inv(buf, 0, len);
2589	kfree(buf);
2590	return ret;
2591}
2592
2593static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2594				       size_t offset,
2595				       size_t len)
2596{
2597	return ib_is_buffer_cleared(udata->inbuf + offset, len);
2598}
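/*
 * Example (illustrative sketch, not part of the original header): a provider
 * typically copies a fixed-size command from udata and, to stay forward
 * compatible, checks that any trailing bytes a newer userspace may have
 * appended are zeroed.  "my_cmd" and "resp" are made-up structures.
 *
 *	struct my_cmd cmd = {};
 *
 *	if (udata->inlen < sizeof(cmd))
 *		return -EINVAL;
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return -EFAULT;
 *	if (udata->inlen > sizeof(cmd) &&
 *	    !ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 *	...
 *	if (ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)))
 *		return -EFAULT;
 */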
2599
2600/**
2601 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2602 * contains all required attributes and no attributes not allowed for
2603 * the given QP state transition.
2604 * @cur_state: Current QP state
2605 * @next_state: Next QP state
2606 * @type: QP type
2607 * @mask: Mask of supplied QP attributes
2608 * @ll : link layer of port
2609 *
2610 * This function is a helper function that a low-level driver's
2611 * modify_qp method can use to validate the consumer's input.  It
2612 * checks that cur_state and next_state are valid QP states, that a
2613 * transition from cur_state to next_state is allowed by the IB spec,
2614 * and that the attribute mask supplied is allowed for the transition.
2615 */
2616bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2617			enum ib_qp_type type, enum ib_qp_attr_mask mask,
2618			enum rdma_link_layer ll);
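/*
 * Example (illustrative sketch, not part of the original header): a driver's
 * modify_qp method would typically validate the requested transition before
 * touching hardware.  Locking and the driver's own state tracking
 * ("my_qp->state") are omitted.
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state :
 *						  my_qp->state;
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask, IB_LINK_LAYER_INFINIBAND))
 *		return -EINVAL;
 */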
2619
2620void ib_register_event_handler(struct ib_event_handler *event_handler);
2621void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2622void ib_dispatch_event(struct ib_event *event);
2623
2624int ib_query_port(struct ib_device *device,
2625		  u8 port_num, struct ib_port_attr *port_attr);
2626
2627enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2628					       u8 port_num);
2629
2630/**
2631 * rdma_cap_ib_switch - Check if the device is IB switch
2632 * @device: Device to check
2633 *
2634 * The device driver is responsible for setting the is_switch bit
2635 * in the ib_device structure at init time.
2636 *
2637 * Return: true if the device is IB switch.
2638 */
2639static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2640{
2641	return device->is_switch;
2642}
2643
2644/**
2645 * rdma_start_port - Return the first valid port number for the device
2646 * specified
2647 *
2648 * @device: Device to be checked
2649 *
2650 * Return start port number
2651 */
2652static inline u8 rdma_start_port(const struct ib_device *device)
2653{
2654	return rdma_cap_ib_switch(device) ? 0 : 1;
2655}
2656
2657/**
2658 * rdma_end_port - Return the last valid port number for the device
2659 * specified
2660 *
2661 * @device: Device to be checked
2662 *
2663 * Return last port number
2664 */
2665static inline u8 rdma_end_port(const struct ib_device *device)
2666{
2667	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2668}
2669
2670static inline int rdma_is_port_valid(const struct ib_device *device,
2671				     unsigned int port)
2672{
2673	return (port >= rdma_start_port(device) &&
2674		port <= rdma_end_port(device));
2675}
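/*
 * Example (illustrative sketch, not part of the original header): because
 * switches use port 0 while HCAs number ports from 1, code that walks every
 * port should use these helpers rather than hard-coding the range.
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++) {
 *		if (!rdma_protocol_ib(device, port))
 *			continue;
 *		...
 *	}
 */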
2676
2677static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2678{
2679	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2680}
2681
2682static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2683{
2684	return device->port_immutable[port_num].core_cap_flags &
2685		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2686}
2687
2688static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2689{
2690	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2691}
2692
2693static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2694{
2695	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2696}
2697
2698static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2699{
2700	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2701}
2702
2703static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2704{
2705	return rdma_protocol_ib(device, port_num) ||
2706		rdma_protocol_roce(device, port_num);
2707}
2708
2709static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2710{
2711	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2712}
2713
2714static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2715{
2716	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2717}
2718
2719/**
2720 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
2721 * Management Datagrams.
2722 * @device: Device to check
2723 * @port_num: Port number to check
2724 *
2725 * Management Datagrams (MAD) are a required part of the InfiniBand
2726 * specification and are supported on all InfiniBand devices.  A slightly
2727 * extended version is also supported on OPA interfaces.
2728 *
2729 * Return: true if the port supports sending/receiving of MAD packets.
2730 */
2731static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2732{
2733	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2734}
2735
2736/**
2737 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2738 * Management Datagrams.
2739 * @device: Device to check
2740 * @port_num: Port number to check
2741 *
2742 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2743 * datagrams with their own versions.  These OPA MADs share many but not all of
2744 * the characteristics of InfiniBand MADs.
2745 *
2746 * OPA MADs differ in the following ways:
2747 *
2748 *    1) MADs are variable size up to 2K
2749 *       IBTA defined MADs remain fixed at 256 bytes
2750 *    2) OPA SMPs must carry valid PKeys
2751 *    3) OPA SMP packets are a different format
2752 *
2753 * Return: true if the port supports OPA MAD packet formats.
2754 */
2755static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2756{
2757	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2758		== RDMA_CORE_CAP_OPA_MAD;
2759}
2760
2761/**
2762 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
2763 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2764 * @device: Device to check
2765 * @port_num: Port number to check
2766 *
2767 * Each InfiniBand node is required to provide a Subnet Management Agent
2768 * that the subnet manager can access.  Prior to the fabric being fully
2769 * configured by the subnet manager, the SMA is accessed via a well known
2770 * interface called the Subnet Management Interface (SMI).  This interface
2771 * uses directed route packets to communicate with the SM to get around the
2772 * chicken and egg problem of the SM needing to know what's on the fabric
2773 * in order to configure the fabric, and needing to configure the fabric in
2774 * order to send packets to the devices on the fabric.  These directed
2775 * route packets do not need the fabric fully configured in order to reach
2776 * their destination.  The SMI is the only method allowed to send
2777 * directed route packets on an InfiniBand fabric.
2778 *
2779 * Return: true if the port provides an SMI.
2780 */
2781static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2782{
2783	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2784}
2785
2786/**
2787 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
2788 * Communication Manager.
2789 * @device: Device to check
2790 * @port_num: Port number to check
2791 *
2792 * The InfiniBand Communication Manager is one of many pre-defined General
2793 * Service Agents (GSA) that are accessed via the General Service
2794 * Interface (GSI).  Its role is to facilitate establishment of connections
2795 * between nodes as well as other management related tasks for established
2796 * connections.
2797 *
2798 * Return: true if the port supports an IB CM (this does not guarantee that
2799 * a CM is actually running however).
2800 */
2801static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2802{
2803	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2804}
2805
2806/**
2807 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
2808 * Communication Manager.
2809 * @device: Device to check
2810 * @port_num: Port number to check
2811 *
2812 * Similar to above, but specific to iWARP connections, which have a different
2813 * management protocol from InfiniBand.
2814 *
2815 * Return: true if the port supports an iWARP CM (this does not guarantee that
2816 * a CM is actually running however).
2817 */
2818static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2819{
2820	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2821}
2822
2823/**
2824 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
2825 * Subnet Administration.
2826 * @device: Device to check
2827 * @port_num: Port number to check
2828 *
2829 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2830 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2831 * fabrics, devices should resolve routes to other hosts by contacting the
2832 * SA to query the proper route.
2833 *
2834 * Return: true if the port should act as a client to the fabric Subnet
2835 * Administration interface.  This does not imply that the SA service is
2836 * running locally.
2837 */
2838static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2839{
2840	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2841}
2842
2843/**
2844 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
2845 * Multicast.
2846 * @device: Device to check
2847 * @port_num: Port number to check
2848 *
2849 * InfiniBand multicast registration is more complex than normal IPv4 or
2850 * IPv6 multicast registration.  Each Host Channel Adapter must register
2851 * with the Subnet Manager when it wishes to join a multicast group.  It
2852 * should do so only once regardless of how many queue pairs it subscribes
2853 * to this group.  And it should leave the group only after all queue pairs
2854 * attached to the group have been detached.
2855 *
2856 * Return: true if the port must undertake the additional administrative
2857 * overhead of registering/unregistering with the SM and tracking of the
2858 * total number of queue pairs attached to the multicast group.
2859 */
2860static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2861{
2862	return rdma_cap_ib_sa(device, port_num);
2863}
2864
2865/**
2866 * rdma_cap_af_ib - Check if the port of device has the capability
2867 * Native Infiniband Address.
2868 * @device: Device to check
2869 * @port_num: Port number to check
2870 *
2871 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2872 * GID.  RoCE uses a different mechanism, but still generates a GID via
2873 * a prescribed mechanism and port specific data.
2874 *
2875 * Return: true if the port uses a GID address to identify devices on the
2876 * network.
2877 */
2878static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2879{
2880	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2881}
2882
2883/**
2884 * rdma_cap_eth_ah - Check if the port of device has the capability
2885 * Ethernet Address Handle.
2886 * @device: Device to check
2887 * @port_num: Port number to check
2888 *
2889 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2890 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2891 * port.  Normally, packet headers are generated by the sending host
2892 * adapter, but when sending connectionless datagrams, we must manually
2893 * inject the proper headers for the fabric we are communicating over.
2894 *
2895 * Return: true if we are running as a RoCE port and must force the
2896 * addition of a Global Route Header built from our Ethernet Address
2897 * Handle into our header list for connectionless packets.
2898 */
2899static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2900{
2901	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2902}
2903
2904/**
2905 * rdma_cap_opa_ah - Check if the port of device supports
2906 * OPA Address handles
2907 * @device: Device to check
2908 * @port_num: Port number to check
2909 *
2910 * Return: true if we are running on an OPA device which supports
2911 * the extended OPA addressing.
2912 */
2913static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
2914{
2915	return (device->port_immutable[port_num].core_cap_flags &
2916		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
2917}
2918
2919/**
2920 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2921 *
2922 * @device: Device
2923 * @port_num: Port number
2924 *
2925 * This MAD size includes the MAD headers and MAD payload.  No other headers
2926 * are included.
2927 *
2928 * Return the max MAD size required by the Port.  Will return 0 if the port
2929 * does not support MADs
2930 */
2931static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2932{
2933	return device->port_immutable[port_num].max_mad_size;
2934}
2935
2936/**
2937 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
2938 * @device: Device to check
2939 * @port_num: Port number to check
2940 *
2941 * RoCE GID table mechanism manages the various GIDs for a device.
2942 *
2943 * NOTE: if allocating the port's GID table has failed, this call will still
2944 * return true, but any RoCE GID table API will fail.
2945 *
2946 * Return: true if the port uses RoCE GID table mechanism in order to manage
2947 * its GIDs.
2948 */
2949static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2950					   u8 port_num)
2951{
2952	return rdma_protocol_roce(device, port_num) &&
2953		device->add_gid && device->del_gid;
2954}
2955
2956/*
2957 * Check if the device supports READ W/ INVALIDATE.
2958 */
2959static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
2960{
2961	/*
2962	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
2963	 * has support for it yet.
2964	 */
2965	return rdma_protocol_iwarp(dev, port_num);
2966}
2967
2968int ib_query_gid(struct ib_device *device,
2969		 u8 port_num, int index, union ib_gid *gid,
2970		 struct ib_gid_attr *attr);
2971
2972int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2973			 int state);
2974int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2975		     struct ifla_vf_info *info);
2976int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2977		    struct ifla_vf_stats *stats);
2978int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2979		   int type);
2980
2981int ib_query_pkey(struct ib_device *device,
2982		  u8 port_num, u16 index, u16 *pkey);
2983
2984int ib_modify_device(struct ib_device *device,
2985		     int device_modify_mask,
2986		     struct ib_device_modify *device_modify);
2987
2988int ib_modify_port(struct ib_device *device,
2989		   u8 port_num, int port_modify_mask,
2990		   struct ib_port_modify *port_modify);
2991
2992int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2993		u8 *port_num, u16 *index);
2994
2995int ib_find_pkey(struct ib_device *device,
2996		 u8 port_num, u16 pkey, u16 *index);
2997
2998enum ib_pd_flags {
2999	/*
3000	 * Create a memory registration for all memory in the system and place
3001	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3002	 * ULPs to avoid the overhead of dynamic MRs.
3003	 *
3004	 * This flag is generally considered unsafe and must only be used in
3005	 * extremely trusted environments.  Every use of it will log a warning
3006	 * in the kernel log.
3007	 */
3008	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3009};
3010
3011struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3012		const char *caller);
3013#define ib_alloc_pd(device, flags) \
3014	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3015void ib_dealloc_pd(struct ib_pd *pd);
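/*
 * Example (illustrative sketch, not part of the original header): most ULPs
 * allocate a PD with no flags and use pd->local_dma_lkey for local buffers;
 * only legacy consumers should ever pass IB_PD_UNSAFE_GLOBAL_RKEY.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */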
3016
3017/**
3018 * rdma_create_ah - Creates an address handle for the given address vector.
3019 * @pd: The protection domain associated with the address handle.
3020 * @ah_attr: The attributes of the address vector.
3021 *
3022 * The address handle is used to reference a local or global destination
3023 * in all UD QP post sends.
3024 */
3025struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
3026
3027/**
3028 * rdma_create_user_ah - Creates an address handle for the given address vector.
3029 * It resolves the destination MAC address for an ah attribute of RoCE type.
3030 * @pd: The protection domain associated with the address handle.
3031 * @ah_attr: The attributes of the address vector.
3032 * @udata: pointer to the user's input/output buffer information needed by
3033 *         the provider driver.
3034 *
3035 * It returns a valid address handle on success and an error pointer on error.
3036 * The address handle is used to reference a local or global destination
3037 * in all UD QP post sends.
3038 */
3039struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3040				  struct rdma_ah_attr *ah_attr,
3041				  struct ib_udata *udata);
3042/**
3043 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from a GRH or IPv4
3044 *   header.
3045 * @hdr: the L3 header to parse
3046 * @net_type: type of header to parse
3047 * @sgid: place to store source gid
3048 * @dgid: place to store destination gid
3049 */
3050int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3051			      enum rdma_network_type net_type,
3052			      union ib_gid *sgid, union ib_gid *dgid);
3053
3054/**
3055 * ib_get_rdma_header_version - Get the header version
3056 * @hdr: the L3 header to parse
3057 */
3058int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3059
3060/**
3061 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3062 *   work completion.
3063 * @device: Device on which the received message arrived.
3064 * @port_num: Port on which the received message arrived.
3065 * @wc: Work completion associated with the received message.
3066 * @grh: References the received global route header.  This parameter is
3067 *   ignored unless the work completion indicates that the GRH is valid.
3068 * @ah_attr: Returned attributes that can be used when creating an address
3069 *   handle for replying to the message.
3070 */
3071int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3072			    const struct ib_wc *wc, const struct ib_grh *grh,
3073			    struct rdma_ah_attr *ah_attr);
3074
3075/**
3076 * ib_create_ah_from_wc - Creates an address handle associated with the
3077 *   sender of the specified work completion.
3078 * @pd: The protection domain associated with the address handle.
3079 * @wc: Work completion information associated with a received message.
3080 * @grh: References the received global route header.  This parameter is
3081 *   ignored unless the work completion indicates that the GRH is valid.
3082 * @port_num: The outbound port number to associate with the address.
3083 *
3084 * The address handle is used to reference a local or global destination
3085 * in all UD QP post sends.
3086 */
3087struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3088				   const struct ib_grh *grh, u8 port_num);
3089
3090/**
3091 * rdma_modify_ah - Modifies the address vector associated with an address
3092 *   handle.
3093 * @ah: The address handle to modify.
3094 * @ah_attr: The new address vector attributes to associate with the
3095 *   address handle.
3096 */
3097int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3098
3099/**
3100 * rdma_query_ah - Queries the address vector associated with an address
3101 *   handle.
3102 * @ah: The address handle to query.
3103 * @ah_attr: The address vector attributes associated with the address
3104 *   handle.
3105 */
3106int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3107
3108/**
3109 * rdma_destroy_ah - Destroys an address handle.
3110 * @ah: The address handle to destroy.
3111 */
3112int rdma_destroy_ah(struct ib_ah *ah);
3113
3114/**
3115 * ib_create_srq - Creates an SRQ associated with the specified protection
3116 *   domain.
3117 * @pd: The protection domain associated with the SRQ.
3118 * @srq_init_attr: A list of initial attributes required to create the
3119 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
3120 *   the actual capabilities of the created SRQ.
3121 *
3122 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
3123 * requested size of the SRQ, and set to the actual values allocated
3124 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
3125 * will always be at least as large as the requested values.
3126 */
3127struct ib_srq *ib_create_srq(struct ib_pd *pd,
3128			     struct ib_srq_init_attr *srq_init_attr);
3129
3130/**
3131 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3132 * @srq: The SRQ to modify.
3133 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3134 *   the current values of selected SRQ attributes are returned.
3135 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3136 *   are being modified.
3137 *
3138 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3139 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3140 * the number of receives queued drops below the limit.
3141 */
3142int ib_modify_srq(struct ib_srq *srq,
3143		  struct ib_srq_attr *srq_attr,
3144		  enum ib_srq_attr_mask srq_attr_mask);
3145
3146/**
3147 * ib_query_srq - Returns the attribute list and current values for the
3148 *   specified SRQ.
3149 * @srq: The SRQ to query.
3150 * @srq_attr: The attributes of the specified SRQ.
3151 */
3152int ib_query_srq(struct ib_srq *srq,
3153		 struct ib_srq_attr *srq_attr);
3154
3155/**
3156 * ib_destroy_srq - Destroys the specified SRQ.
3157 * @srq: The SRQ to destroy.
3158 */
3159int ib_destroy_srq(struct ib_srq *srq);
3160
3161/**
3162 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3163 * @srq: The SRQ to post the work request on.
3164 * @recv_wr: A list of work requests to post on the receive queue.
3165 * @bad_recv_wr: On an immediate failure, this parameter will reference
3166 *   the work request that failed to be posted on the SRQ.
3167 */
3168static inline int ib_post_srq_recv(struct ib_srq *srq,
3169				   struct ib_recv_wr *recv_wr,
3170				   struct ib_recv_wr **bad_recv_wr)
3171{
3172	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
3173}
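/*
 * Illustrative sketch with hypothetical names: posting one receive
 * buffer to an SRQ.  "my_sge" is assumed to describe DMA-mapped memory
 * covered by a valid lkey.
 *
 *	struct ib_recv_wr wr = {}, *bad_wr;
 *
 *	wr.wr_id   = MY_RECV_COOKIE;
 *	wr.sg_list = &my_sge;
 *	wr.num_sge = 1;
 *	if (ib_post_srq_recv(srq, &wr, &bad_wr))
 *		pr_err("SRQ post failed at wr_id %llu\n", bad_wr->wr_id);
 */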
3174
3175/**
3176 * ib_create_qp - Creates a QP associated with the specified protection
3177 *   domain.
3178 * @pd: The protection domain associated with the QP.
3179 * @qp_init_attr: A list of initial attributes required to create the
3180 *   QP.  If QP creation succeeds, then the attributes are updated to
3181 *   the actual capabilities of the created QP.
3182 */
3183struct ib_qp *ib_create_qp(struct ib_pd *pd,
3184			   struct ib_qp_init_attr *qp_init_attr);
3185
3186/**
3187 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3188 * @qp: The QP to modify.
3189 * @attr: On input, specifies the QP attributes to modify.  On output,
3190 *   the current values of selected QP attributes are returned.
3191 * @attr_mask: A bit-mask used to specify which attributes of the QP
3192 *   are being modified.
3193 * @udata: pointer to user's input/output buffer information
3194 *   needed by the provider driver.
3195 * It returns 0 on success and an appropriate error code on error.
3196 */
3197int ib_modify_qp_with_udata(struct ib_qp *qp,
3198			    struct ib_qp_attr *attr,
3199			    int attr_mask,
3200			    struct ib_udata *udata);
3201
3202/**
3203 * ib_modify_qp - Modifies the attributes for the specified QP and then
3204 *   transitions the QP to the given state.
3205 * @qp: The QP to modify.
3206 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3207 *   the current values of selected QP attributes are returned.
3208 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3209 *   are being modified.
3210 */
3211int ib_modify_qp(struct ib_qp *qp,
3212		 struct ib_qp_attr *qp_attr,
3213		 int qp_attr_mask);
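/*
 * Illustrative sketch with hypothetical values: the minimal
 * RESET -> INIT transition for an RC QP.  Later transitions to RTR
 * and RTS set additional attributes (path, PSNs, timeouts).
 *
 *	struct ib_qp_attr attr = {};
 *
 *	attr.qp_state        = IB_QPS_INIT;
 *	attr.pkey_index      = 0;
 *	attr.port_num        = port_num;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 *	if (ib_modify_qp(qp, &attr,
 *			 IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			 IB_QP_PORT | IB_QP_ACCESS_FLAGS))
 *		goto err;
 */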
3214
3215/**
3216 * ib_query_qp - Returns the attribute list and current values for the
3217 *   specified QP.
3218 * @qp: The QP to query.
3219 * @qp_attr: The attributes of the specified QP.
3220 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3221 * @qp_init_attr: Additional attributes of the selected QP.
3222 *
3223 * The qp_attr_mask may be used to limit the query to gathering only the
3224 * selected attributes.
3225 */
3226int ib_query_qp(struct ib_qp *qp,
3227		struct ib_qp_attr *qp_attr,
3228		int qp_attr_mask,
3229		struct ib_qp_init_attr *qp_init_attr);
3230
3231/**
3232 * ib_destroy_qp - Destroys the specified QP.
3233 * @qp: The QP to destroy.
3234 */
3235int ib_destroy_qp(struct ib_qp *qp);
3236
3237/**
3238 * ib_open_qp - Obtain a reference to an existing sharable QP.
3239 * @xrcd: XRC domain
3240 * @qp_open_attr: Attributes identifying the QP to open.
3241 *
3242 * Returns a reference to a sharable QP.
3243 */
3244struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3245			 struct ib_qp_open_attr *qp_open_attr);
3246
3247/**
3248 * ib_close_qp - Release an external reference to a QP.
3249 * @qp: The QP handle to release
3250 *
3251 * The opened QP handle is released by the caller.  The underlying
3252 * shared QP is not destroyed until all internal references are released.
3253 */
3254int ib_close_qp(struct ib_qp *qp);
3255
3256/**
3257 * ib_post_send - Posts a list of work requests to the send queue of
3258 *   the specified QP.
3259 * @qp: The QP to post the work request on.
3260 * @send_wr: A list of work requests to post on the send queue.
3261 * @bad_send_wr: On an immediate failure, this parameter will reference
3262 *   the work request that failed to be posted on the QP.
3263 *
3264 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3265 * error is returned, the QP state shall not be affected,
3266 * ib_post_send() will return an immediate error after queueing any
3267 * earlier work requests in the list.
3268 */
3269static inline int ib_post_send(struct ib_qp *qp,
3270			       struct ib_send_wr *send_wr,
3271			       struct ib_send_wr **bad_send_wr)
3272{
3273	return qp->device->post_send(qp, send_wr, bad_send_wr);
3274}
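/*
 * Illustrative sketch with hypothetical names: posting a single
 * signaled SEND.  The sge must reference memory registered under the
 * QP's PD; on failure, *bad_wr points at the offending request.
 *
 *	struct ib_send_wr wr = {}, *bad_wr;
 *
 *	wr.wr_id      = MY_SEND_COOKIE;
 *	wr.sg_list    = &my_sge;
 *	wr.num_sge    = 1;
 *	wr.opcode     = IB_WR_SEND;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		pr_err("post_send failed\n");
 */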
3275
3276/**
3277 * ib_post_recv - Posts a list of work requests to the receive queue of
3278 *   the specified QP.
3279 * @qp: The QP to post the work request on.
3280 * @recv_wr: A list of work requests to post on the receive queue.
3281 * @bad_recv_wr: On an immediate failure, this parameter will reference
3282 *   the work request that failed to be posted on the QP.
3283 */
3284static inline int ib_post_recv(struct ib_qp *qp,
3285			       struct ib_recv_wr *recv_wr,
3286			       struct ib_recv_wr **bad_recv_wr)
3287{
3288	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
3289}
3290
3291struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
3292			    int nr_cqe, int comp_vector,
3293			    enum ib_poll_context poll_ctx, const char *caller);
3294#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
3295	__ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
3296
3297void ib_free_cq(struct ib_cq *cq);
3298int ib_process_cq_direct(struct ib_cq *cq, int budget);
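/*
 * Illustrative sketch: the ib_alloc_cq()/ib_free_cq() pair.  The CQ
 * depth "depth" and the private context "my_ctx" are hypothetical;
 * the allocator returns an ERR_PTR on failure.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(device, my_ctx, depth, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */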
3299
3300/**
3301 * ib_create_cq - Creates a CQ on the specified device.
3302 * @device: The device on which to create the CQ.
3303 * @comp_handler: A user-specified callback that is invoked when a
3304 *   completion event occurs on the CQ.
3305 * @event_handler: A user-specified callback that is invoked when an
3306 *   asynchronous event not associated with a completion occurs on the CQ.
3307 * @cq_context: Context associated with the CQ returned to the user via
3308 *   the associated completion and event handlers.
3309 * @cq_attr: The attributes with which the CQ should be created.
3310 *
3311 * Users can examine the cq structure to determine the actual CQ size.
3312 */
3313struct ib_cq *ib_create_cq(struct ib_device *device,
3314			   ib_comp_handler comp_handler,
3315			   void (*event_handler)(struct ib_event *, void *),
3316			   void *cq_context,
3317			   const struct ib_cq_init_attr *cq_attr);
3318
3319/**
3320 * ib_resize_cq - Modifies the capacity of the CQ.
3321 * @cq: The CQ to resize.
3322 * @cqe: The minimum size of the CQ.
3323 *
3324 * Users can examine the cq structure to determine the actual CQ size.
3325 */
3326int ib_resize_cq(struct ib_cq *cq, int cqe);
3327
3328/**
3329 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3330 * @cq: The CQ to modify.
3331 * @cq_count: number of CQEs that will trigger an event
3332 * @cq_period: max period of time in usec before triggering an event
3333 *
3334 */
3335int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3336
3337/**
3338 * ib_destroy_cq - Destroys the specified CQ.
3339 * @cq: The CQ to destroy.
3340 */
3341int ib_destroy_cq(struct ib_cq *cq);
3342
3343/**
3344 * ib_poll_cq - poll a CQ for completion(s)
3345 * @cq: the CQ being polled
3346 * @num_entries: maximum number of completions to return
3347 * @wc: array of at least @num_entries &struct ib_wc where completions
3348 *   will be returned
3349 *
3350 * Poll a CQ for (possibly multiple) completions.  If the return value
3351 * is < 0, an error occurred.  If the return value is >= 0, it is the
3352 * number of completions returned.  If the return value is
3353 * non-negative and < num_entries, then the CQ was emptied.
3354 */
3355static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3356			     struct ib_wc *wc)
3357{
3358	return cq->device->poll_cq(cq, num_entries, wc);
3359}
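/*
 * Illustrative sketch: draining a CQ in batches.  Each returned
 * wc[i].status should be checked before the matching wr_id is
 * completed; handle_completion() is a hypothetical helper.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 */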
3360
3361/**
3362 * ib_req_notify_cq - Request completion notification on a CQ.
3363 * @cq: The CQ to generate an event for.
3364 * @flags:
3365 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3366 *   to request an event on the next solicited event or next work
3367 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3368 *   may also be |ed in to request a hint about missed events, as
3369 *   described below.
3370 *
3371 * Return Value:
3372 *    < 0 means an error occurred while requesting notification
3373 *   == 0 means notification was requested successfully, and if
3374 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3375 *        were missed and it is safe to wait for another event.  In
3376 *        this case it is guaranteed that any work completions added
3377 *        to the CQ since the last CQ poll will trigger a completion
3378 *        notification event.
3379 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3380 *        in.  It means that the consumer must poll the CQ again to
3381 *        make sure it is empty to avoid missing an event because of a
3382 *        race between requesting notification and an entry being
3383 *        added to the CQ.  This return value means it is possible
3384 *        (but not guaranteed) that a work completion has been added
3385 *        to the CQ since the last poll without triggering a
3386 *        completion notification event.
3387 */
3388static inline int ib_req_notify_cq(struct ib_cq *cq,
3389				   enum ib_cq_notify_flags flags)
3390{
3391	return cq->device->req_notify_cq(cq, flags);
3392}
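/*
 * Illustrative sketch of the race-free re-arm pattern implied by the
 * return values above: poll until empty, re-arm, and poll again while
 * the hint reports possibly missed events.  drain_cq() is a
 * hypothetical helper that calls ib_poll_cq() until it returns 0.
 *
 *	do {
 *		drain_cq(cq);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */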
3393
3394/**
3395 * ib_req_ncomp_notif - Request completion notification when there are
3396 *   at least the specified number of unreaped completions on the CQ.
3397 * @cq: The CQ to generate an event for.
3398 * @wc_cnt: The number of unreaped completions that should be on the
3399 *   CQ before an event is generated.
3400 */
3401static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3402{
3403	return cq->device->req_ncomp_notif ?
3404		cq->device->req_ncomp_notif(cq, wc_cnt) :
3405		-ENOSYS;
3406}
3407
3408/**
3409 * ib_dma_mapping_error - check a DMA addr for error
3410 * @dev: The device for which the dma_addr was created
3411 * @dma_addr: The DMA address to check
3412 */
3413static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3414{
3415	return dma_mapping_error(dev->dma_device, dma_addr);
3416}
3417
3418/**
3419 * ib_dma_map_single - Map a kernel virtual address to DMA address
3420 * @dev: The device for which the dma_addr is to be created
3421 * @cpu_addr: The kernel virtual address
3422 * @size: The size of the region in bytes
3423 * @direction: The direction of the DMA
3424 */
3425static inline u64 ib_dma_map_single(struct ib_device *dev,
3426				    void *cpu_addr, size_t size,
3427				    enum dma_data_direction direction)
3428{
3429	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3430}
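/*
 * Illustrative sketch: mapping a kernel buffer and validating the
 * result before handing the address to the HCA.  "buf" and "len" are
 * hypothetical.
 *
 *	u64 dma;
 *
 *	dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */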
3431
3432/**
3433 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3434 * @dev: The device for which the DMA address was created
3435 * @addr: The DMA address
3436 * @size: The size of the region in bytes
3437 * @direction: The direction of the DMA
3438 */
3439static inline void ib_dma_unmap_single(struct ib_device *dev,
3440				       u64 addr, size_t size,
3441				       enum dma_data_direction direction)
3442{
3443	dma_unmap_single(dev->dma_device, addr, size, direction);
3444}
3445
3446/**
3447 * ib_dma_map_page - Map a physical page to DMA address
3448 * @dev: The device for which the dma_addr is to be created
3449 * @page: The page to be mapped
3450 * @offset: The offset within the page
3451 * @size: The size of the region in bytes
3452 * @direction: The direction of the DMA
3453 */
3454static inline u64 ib_dma_map_page(struct ib_device *dev,
3455				  struct page *page,
3456				  unsigned long offset,
3457				  size_t size,
3458				  enum dma_data_direction direction)
3459{
3460	return dma_map_page(dev->dma_device, page, offset, size, direction);
3461}
3462
3463/**
3464 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3465 * @dev: The device for which the DMA address was created
3466 * @addr: The DMA address
3467 * @size: The size of the region in bytes
3468 * @direction: The direction of the DMA
3469 */
3470static inline void ib_dma_unmap_page(struct ib_device *dev,
3471				     u64 addr, size_t size,
3472				     enum dma_data_direction direction)
3473{
3474	dma_unmap_page(dev->dma_device, addr, size, direction);
3475}
3476
3477/**
3478 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3479 * @dev: The device for which the DMA addresses are to be created
3480 * @sg: The array of scatter/gather entries
3481 * @nents: The number of scatter/gather entries
3482 * @direction: The direction of the DMA
3483 */
3484static inline int ib_dma_map_sg(struct ib_device *dev,
3485				struct scatterlist *sg, int nents,
3486				enum dma_data_direction direction)
3487{
3488	return dma_map_sg(dev->dma_device, sg, nents, direction);
3489}
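/*
 * Illustrative sketch: ib_dma_map_sg() returns the number of mapped
 * entries, which may be fewer than nents if entries were coalesced,
 * or 0 on failure.
 *
 *	int count;
 *
 *	count = ib_dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
 *	if (!count)
 *		return -ENOMEM;
 */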
3490
3491/**
3492 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3493 * @dev: The device for which the DMA addresses were created
3494 * @sg: The array of scatter/gather entries
3495 * @nents: The number of scatter/gather entries
3496 * @direction: The direction of the DMA
3497 */
3498static inline void ib_dma_unmap_sg(struct ib_device *dev,
3499				   struct scatterlist *sg, int nents,
3500				   enum dma_data_direction direction)
3501{
3502	dma_unmap_sg(dev->dma_device, sg, nents, direction);
3503}
3504
3505static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3506				      struct scatterlist *sg, int nents,
3507				      enum dma_data_direction direction,
3508				      unsigned long dma_attrs)
3509{
3510	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3511				dma_attrs);
3512}
3513
3514static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3515					 struct scatterlist *sg, int nents,
3516					 enum dma_data_direction direction,
3517					 unsigned long dma_attrs)
3518{
3519	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3520}
3521/**
3522 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3523 * @dev: The device for which the DMA addresses were created
3524 * @sg: The scatter/gather entry
3525 *
3526 * Note: this function is obsolete. To do: change all occurrences of
3527 * ib_sg_dma_address() into sg_dma_address().
3528 */
3529static inline u64 ib_sg_dma_address(struct ib_device *dev,
3530				    struct scatterlist *sg)
3531{
3532	return sg_dma_address(sg);
3533}
3534
3535/**
3536 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3537 * @dev: The device for which the DMA addresses were created
3538 * @sg: The scatter/gather entry
3539 *
3540 * Note: this function is obsolete. To do: change all occurrences of
3541 * ib_sg_dma_len() into sg_dma_len().
3542 */
3543static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3544					 struct scatterlist *sg)
3545{
3546	return sg_dma_len(sg);
3547}
3548
3549/**
3550 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3551 * @dev: The device for which the DMA address was created
3552 * @addr: The DMA address
3553 * @size: The size of the region in bytes
3554 * @dir: The direction of the DMA
3555 */
3556static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3557					      u64 addr,
3558					      size_t size,
3559					      enum dma_data_direction dir)
3560{
3561	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3562}
3563
3564/**
3565 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3566 * @dev: The device for which the DMA address was created
3567 * @addr: The DMA address
3568 * @size: The size of the region in bytes
3569 * @dir: The direction of the DMA
3570 */
3571static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3572						 u64 addr,
3573						 size_t size,
3574						 enum dma_data_direction dir)
3575{
3576	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3577}
3578
3579/**
3580 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3581 * @dev: The device for which the DMA address is requested
3582 * @size: The size of the region to allocate in bytes
3583 * @dma_handle: A pointer for returning the DMA address of the region
3584 * @flag: memory allocator flags
3585 */
3586static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3587					   size_t size,
3588					   dma_addr_t *dma_handle,
3589					   gfp_t flag)
3590{
3591	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3592}
3593
3594/**
3595 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3596 * @dev: The device for which the DMA addresses were allocated
3597 * @size: The size of the region
3598 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3599 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3600 */
3601static inline void ib_dma_free_coherent(struct ib_device *dev,
3602					size_t size, void *cpu_addr,
3603					dma_addr_t dma_handle)
3604{
3605	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3606}
3607
3608/**
3609 * ib_dereg_mr - Deregisters a memory region and removes it from the
3610 *   HCA translation table.
3611 * @mr: The memory region to deregister.
3612 *
3613 * This function can fail if the memory region has memory windows bound to it.
3614 */
3615int ib_dereg_mr(struct ib_mr *mr);
3616
3617struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3618			  enum ib_mr_type mr_type,
3619			  u32 max_num_sg);
3620
3621/**
3622 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3623 *   R_Key and L_Key.
3624 * @mr: struct ib_mr pointer to be updated.
3625 * @newkey: new key to be used.
3626 */
3627static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3628{
3629	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3630	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3631}
3632
3633/**
3634 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3635 * for calculating a new rkey for type 2 memory windows.
3637 * @rkey: the rkey to increment.
3637 */
3638static inline u32 ib_inc_rkey(u32 rkey)
3639{
3640	const u32 mask = 0x000000ff;
3641	return ((rkey + 1) & mask) | (rkey & ~mask);
3642}
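/*
 * Illustrative sketch: generating a fresh key before reusing a
 * fast-registration MR.  Only the low byte of the new rkey is
 * consumed by ib_update_fast_reg_key().
 *
 *	u32 rkey = ib_inc_rkey(mr->rkey);
 *
 *	ib_update_fast_reg_key(mr, rkey);
 */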
3643
3644/**
3645 * ib_alloc_fmr - Allocates an unmapped fast memory region.
3646 * @pd: The protection domain associated with the unmapped region.
3647 * @mr_access_flags: Specifies the memory access rights.
3648 * @fmr_attr: Attributes of the unmapped region.
3649 *
3650 * A fast memory region must be mapped before it can be used as part of
3651 * a work request.
3652 */
3653struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3654			    int mr_access_flags,
3655			    struct ib_fmr_attr *fmr_attr);
3656
3657/**
3658 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3659 * @fmr: The fast memory region to associate with the pages.
3660 * @page_list: An array of physical pages to map to the fast memory region.
3661 * @list_len: The number of pages in page_list.
3662 * @iova: The I/O virtual address to use with the mapped region.
3663 */
3664static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3665				  u64 *page_list, int list_len,
3666				  u64 iova)
3667{
3668	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3669}
3670
3671/**
3672 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3673 * @fmr_list: A linked list of fast memory regions to unmap.
3674 */
3675int ib_unmap_fmr(struct list_head *fmr_list);
3676
3677/**
3678 * ib_dealloc_fmr - Deallocates a fast memory region.
3679 * @fmr: The fast memory region to deallocate.
3680 */
3681int ib_dealloc_fmr(struct ib_fmr *fmr);
3682
3683/**
3684 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3685 * @qp: QP to attach to the multicast group.  The QP must be type
3686 *   IB_QPT_UD.
3687 * @gid: Multicast group GID.
3688 * @lid: Multicast group LID in host byte order.
3689 *
3690 * In order to send and receive multicast packets, subnet
3691 * administration must have created the multicast group and configured
3692 * the fabric appropriately.  The port associated with the specified
3693 * QP must also be a member of the multicast group.
3694 */
3695int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3696
3697/**
3698 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3699 * @qp: QP to detach from the multicast group.
3700 * @gid: Multicast group GID.
3701 * @lid: Multicast group LID in host byte order.
3702 */
3703int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3704
3705/**
3706 * ib_alloc_xrcd - Allocates an XRC domain.
3707 * @device: The device on which to allocate the XRC domain.
3708 * @caller: Module name for kernel consumers
3709 */
3710struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
3711#define ib_alloc_xrcd(device) \
3712	__ib_alloc_xrcd((device), KBUILD_MODNAME)
3713
3714/**
3715 * ib_dealloc_xrcd - Deallocates an XRC domain.
3716 * @xrcd: The XRC domain to deallocate.
3717 */
3718int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3719
3720struct ib_flow *ib_create_flow(struct ib_qp *qp,
3721			       struct ib_flow_attr *flow_attr, int domain);
3722int ib_destroy_flow(struct ib_flow *flow_id);
3723
3724static inline int ib_check_mr_access(int flags)
3725{
3726	/*
3727	 * Local write permission is required if remote write or
3728	 * remote atomic permission is also requested.
3729	 */
3730	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3731	    !(flags & IB_ACCESS_LOCAL_WRITE))
3732		return -EINVAL;
3733
3734	return 0;
3735}
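/*
 * Illustrative sketch: rejecting inconsistent user-supplied access
 * flags before registering an MR.  Requesting remote write without
 * local write access fails the check.
 *
 *	int flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 *
 *	if (ib_check_mr_access(flags))
 *		return -EINVAL;
 */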
3736
3737/**
3738 * ib_check_mr_status: lightweight check of MR status.
3739 *     This routine may provide status checks on a selected
3740 *     ib_mr. The first use is for signature status checks.
3741 *
3742 * @mr: A memory region.
3743 * @check_mask: Bitmask of which checks to perform from
3744 *     ib_mr_status_check enumeration.
3745 * @mr_status: The container of relevant status checks.
3746 *     Failed checks will be indicated in the status bitmask
3747 *     and the relevant info will be in the error item.
3748 */
3749int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3750		       struct ib_mr_status *mr_status);
3751
3752struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3753					    u16 pkey, const union ib_gid *gid,
3754					    const struct sockaddr *addr);
3755struct ib_wq *ib_create_wq(struct ib_pd *pd,
3756			   struct ib_wq_init_attr *init_attr);
3757int ib_destroy_wq(struct ib_wq *wq);
3758int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3759		 u32 wq_attr_mask);
3760struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3761						 struct ib_rwq_ind_table_init_attr*
3762						 wq_ind_table_init_attr);
3763int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3764
3765int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3766		 unsigned int *sg_offset, unsigned int page_size);
3767
3768static inline int
3769ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3770		  unsigned int *sg_offset, unsigned int page_size)
3771{
3772	int n;
3773
3774	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3775	mr->iova = 0;
3776
3777	return n;
3778}
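/*
 * Illustrative sketch, mirroring common ULP usage: mapping a
 * scatterlist onto a fast-registration MR.  A short or negative
 * return means the MR could not cover the list at this page_size.
 *
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n < nents)
 *		return n < 0 ? n : -EINVAL;
 */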
3779
3780int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3781		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3782
3783void ib_drain_rq(struct ib_qp *qp);
3784void ib_drain_sq(struct ib_qp *qp);
3785void ib_drain_qp(struct ib_qp *qp);
3786
3787int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
3788
3789static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3790{
3791	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3792		return attr->roce.dmac;
3793	return NULL;
3794}
3795
3796static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3797{
3798	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3799		attr->ib.dlid = (u16)dlid;
3800	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3801		attr->opa.dlid = dlid;
3802}
3803
3804static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3805{
3806	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3807		return attr->ib.dlid;
3808	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3809		return attr->opa.dlid;
3810	return 0;
3811}
3812
3813static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
3814{
3815	attr->sl = sl;
3816}
3817
3818static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
3819{
3820	return attr->sl;
3821}
3822
3823static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3824					 u8 src_path_bits)
3825{
3826	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3827		attr->ib.src_path_bits = src_path_bits;
3828	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3829		attr->opa.src_path_bits = src_path_bits;
3830}
3831
3832static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3833{
3834	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3835		return attr->ib.src_path_bits;
3836	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3837		return attr->opa.src_path_bits;
3838	return 0;
3839}
3840
3841static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
3842					bool make_grd)
3843{
3844	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3845		attr->opa.make_grd = make_grd;
3846}
3847
3848static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
3849{
3850	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3851		return attr->opa.make_grd;
3852	return false;
3853}
3854
3855static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
3856{
3857	attr->port_num = port_num;
3858}
3859
3860static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
3861{
3862	return attr->port_num;
3863}
3864
3865static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
3866					   u8 static_rate)
3867{
3868	attr->static_rate = static_rate;
3869}
3870
3871static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
3872{
3873	return attr->static_rate;
3874}
3875
3876static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
3877					enum ib_ah_flags flag)
3878{
3879	attr->ah_flags = flag;
3880}
3881
3882static inline enum ib_ah_flags
3883		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
3884{
3885	return attr->ah_flags;
3886}
3887
3888static inline const struct ib_global_route
3889		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
3890{
3891	return &attr->grh;
3892}
3893
3894/* To retrieve and modify the GRH */
3895static inline struct ib_global_route
3896		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
3897{
3898	return &attr->grh;
3899}
3900
3901static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
3902{
3903	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3904
3905	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
3906}
3907
3908static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
3909					     __be64 prefix)
3910{
3911	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3912
3913	grh->dgid.global.subnet_prefix = prefix;
3914}
3915
3916static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
3917					    __be64 if_id)
3918{
3919	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3920
3921	grh->dgid.global.interface_id = if_id;
3922}
3923
3924static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
3925				   union ib_gid *dgid, u32 flow_label,
3926				   u8 sgid_index, u8 hop_limit,
3927				   u8 traffic_class)
3928{
3929	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3930
3931	attr->ah_flags = IB_AH_GRH;
3932	if (dgid)
3933		grh->dgid = *dgid;
3934	grh->flow_label = flow_label;
3935	grh->sgid_index = sgid_index;
3936	grh->hop_limit = hop_limit;
3937	grh->traffic_class = traffic_class;
3938}
3939
3940/**
3941 * rdma_ah_find_type - Return address handle type.
3942 *
3943 * @dev: Device to be checked
3944 * @port_num: Port number
3945 */
3946static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
3947						       u8 port_num)
3948{
3949	if (rdma_protocol_roce(dev, port_num))
3950		return RDMA_AH_ATTR_TYPE_ROCE;
3951	if (rdma_protocol_ib(dev, port_num)) {
3952		if (rdma_cap_opa_ah(dev, port_num))
3953			return RDMA_AH_ATTR_TYPE_OPA;
3954		return RDMA_AH_ATTR_TYPE_IB;
3955	}
3956
3957	return RDMA_AH_ATTR_TYPE_UNDEFINED;
3958}
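/*
 * Illustrative sketch with hypothetical values: building address
 * handle attributes with the accessors above and creating the AH.
 * "dgid" and "sgid_index" are assumed to come from path resolution.
 *
 *	struct rdma_ah_attr attr = {};
 *	struct ib_ah *ah;
 *
 *	attr.type = rdma_ah_find_type(dev, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_grh(&attr, &dgid, 0, sgid_index, 64, 0);
 *	ah = rdma_create_ah(pd, &attr);
 */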
3959
3960/**
3961 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
3962 *     In the current implementation the only way to
3963 *     get the 32bit lid is from other sources for OPA.
3964 *     For IB, lids will always be 16bits so cast the
3965 *     value accordingly.
3966 *
3967 * @lid: A 32bit LID
3968 */
3969static inline u16 ib_lid_cpu16(u32 lid)
3970{
3971	WARN_ON_ONCE(lid & 0xFFFF0000);
3972	return (u16)lid;
3973}
3974
3975/**
3976 * ib_lid_be16 - Return lid in 16bit BE encoding.
3977 *
3978 * @lid: A 32bit LID
3979 */
3980static inline __be16 ib_lid_be16(u32 lid)
3981{
3982	WARN_ON_ONCE(lid & 0xFFFF0000);
3983	return cpu_to_be16((u16)lid);
3984}
3985
3986/**
3987 * ib_get_vector_affinity - Get the affinity mappings of a given completion
3988 *   vector
3989 * @device:         the rdma device
3990 * @comp_vector:    index of completion vector
3991 *
3992 * Returns NULL on failure, or if the device driver does not implement
3993 * get_vector_affinity; otherwise the cpu map of the given completion
3994 * vector.
3995 */
3996static inline const struct cpumask *
3997ib_get_vector_affinity(struct ib_device *device, int comp_vector)
3998{
3999	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4000	    !device->get_vector_affinity)
4001		return NULL;
4002
4003	return device->get_vector_affinity(device, comp_vector);
4004
4005}
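/*
 * Illustrative sketch: steering a per-CPU queue onto a completion
 * vector whose interrupt affinity covers that CPU; pick_vector() is
 * a hypothetical selection helper.
 *
 *	const struct cpumask *mask;
 *
 *	mask = ib_get_vector_affinity(device, comp_vector);
 *	if (mask && cpumask_test_cpu(cpu, mask))
 *		pick_vector(comp_vector);
 */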
4006
4007/**
4008 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4009 * and add their gids, as needed, to the relevant RoCE devices.
4010 *
4011 * @ibdev:          the rdma device
4012 */
4013void rdma_roce_rescan_device(struct ib_device *ibdev);
4014
4015#endif /* IB_VERBS_H */