   1/*
   2 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
   3 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
   5 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   6 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
   7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   8 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   9 *
  10 * This software is available to you under a choice of one of two
  11 * licenses.  You may choose to be licensed under the terms of the GNU
  12 * General Public License (GPL) Version 2, available from the file
  13 * COPYING in the main directory of this source tree, or the
  14 * OpenIB.org BSD license below:
  15 *
  16 *     Redistribution and use in source and binary forms, with or
  17 *     without modification, are permitted provided that the following
  18 *     conditions are met:
  19 *
  20 *      - Redistributions of source code must retain the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer.
  23 *
  24 *      - Redistributions in binary form must reproduce the above
  25 *        copyright notice, this list of conditions and the following
  26 *        disclaimer in the documentation and/or other materials
  27 *        provided with the distribution.
  28 *
  29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36 * SOFTWARE.
  37 */
  38
  39#if !defined(IB_VERBS_H)
  40#define IB_VERBS_H
  41
 
  42#include <linux/types.h>
  43#include <linux/device.h>
  44#include <linux/mm.h>
  45#include <linux/dma-mapping.h>
  46#include <linux/kref.h>
  47#include <linux/list.h>
  48#include <linux/rwsem.h>
  49#include <linux/scatterlist.h>
  50#include <linux/workqueue.h>
  51
  52#include <linux/atomic.h>
  53#include <asm/uaccess.h>
  54
  55extern struct workqueue_struct *ib_wq;
  56
  57union ib_gid {
  58	u8	raw[16];
  59	struct {
  60		__be64	subnet_prefix;
  61		__be64	interface_id;
  62	} global;
  63};
  64
  65enum rdma_node_type {
  66	/* IB values map to NodeInfo:NodeType. */
  67	RDMA_NODE_IB_CA 	= 1,
  68	RDMA_NODE_IB_SWITCH,
  69	RDMA_NODE_IB_ROUTER,
  70	RDMA_NODE_RNIC
  71};
  72
  73enum rdma_transport_type {
  74	RDMA_TRANSPORT_IB,
  75	RDMA_TRANSPORT_IWARP
  76};
  77
  78enum rdma_transport_type
  79rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
  80
  81enum rdma_link_layer {
  82	IB_LINK_LAYER_UNSPECIFIED,
  83	IB_LINK_LAYER_INFINIBAND,
  84	IB_LINK_LAYER_ETHERNET,
  85};
  86
  87enum ib_device_cap_flags {
  88	IB_DEVICE_RESIZE_MAX_WR		= 1,
  89	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
  90	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
  91	IB_DEVICE_RAW_MULTI		= (1<<3),
  92	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
  93	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
  94	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
  95	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
  96	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
  97	IB_DEVICE_INIT_TYPE		= (1<<9),
  98	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
  99	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
 100	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
 101	IB_DEVICE_SRQ_RESIZE		= (1<<13),
 102	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
 103	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
 104	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
 105	IB_DEVICE_MEM_WINDOW		= (1<<17),
 106	/*
 107	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
 108	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
 109	 * messages and can verify the validity of checksum for
 110	 * incoming messages.  Setting this flag implies that the
 111	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
 112	 */
 113	IB_DEVICE_UD_IP_CSUM		= (1<<18),
 114	IB_DEVICE_UD_TSO		= (1<<19),
 115	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
 116	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
 117};
 118
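/*
 * Usage sketch (illustrative, not taken from a specific driver): testing a
 * capability bit such as IB_DEVICE_UD_IP_CSUM after querying the device
 * (ib_query_device() is declared further down in this header).  The helper
 * name is hypothetical.
 *
 *	static bool example_has_ud_csum(struct ib_device *device)
 *	{
 *		struct ib_device_attr attr;
 *
 *		if (ib_query_device(device, &attr))
 *			return false;
 *		return !!(attr.device_cap_flags & IB_DEVICE_UD_IP_CSUM);
 *	}
 */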
 119enum ib_atomic_cap {
 120	IB_ATOMIC_NONE,
 121	IB_ATOMIC_HCA,
 122	IB_ATOMIC_GLOB
 123};
 124
 125struct ib_device_attr {
 126	u64			fw_ver;
 127	__be64			sys_image_guid;
 128	u64			max_mr_size;
 129	u64			page_size_cap;
 130	u32			vendor_id;
 131	u32			vendor_part_id;
 132	u32			hw_ver;
 133	int			max_qp;
 134	int			max_qp_wr;
 135	int			device_cap_flags;
 136	int			max_sge;
 
 
 137	int			max_sge_rd;
 138	int			max_cq;
 139	int			max_cqe;
 140	int			max_mr;
 141	int			max_pd;
 142	int			max_qp_rd_atom;
 143	int			max_ee_rd_atom;
 144	int			max_res_rd_atom;
 145	int			max_qp_init_rd_atom;
 146	int			max_ee_init_rd_atom;
 147	enum ib_atomic_cap	atomic_cap;
 148	enum ib_atomic_cap	masked_atomic_cap;
 149	int			max_ee;
 150	int			max_rdd;
 151	int			max_mw;
 152	int			max_raw_ipv6_qp;
 153	int			max_raw_ethy_qp;
 154	int			max_mcast_grp;
 155	int			max_mcast_qp_attach;
 156	int			max_total_mcast_qp_attach;
 157	int			max_ah;
 158	int			max_fmr;
 159	int			max_map_per_fmr;
 160	int			max_srq;
 161	int			max_srq_wr;
 162	int			max_srq_sge;
 163	unsigned int		max_fast_reg_page_list_len;
 
 164	u16			max_pkeys;
 165	u8			local_ca_ack_delay;
 166};
 167
 168enum ib_mtu {
 169	IB_MTU_256  = 1,
 170	IB_MTU_512  = 2,
 171	IB_MTU_1024 = 3,
 172	IB_MTU_2048 = 4,
 173	IB_MTU_4096 = 5
 174};
 175
 
 
 
 
 
 176static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
 177{
 178	switch (mtu) {
 179	case IB_MTU_256:  return  256;
 180	case IB_MTU_512:  return  512;
 181	case IB_MTU_1024: return 1024;
 182	case IB_MTU_2048: return 2048;
 183	case IB_MTU_4096: return 4096;
 184	default: 	  return -1;
 185	}
 186}
 187
 188enum ib_port_state {
 189	IB_PORT_NOP		= 0,
 190	IB_PORT_DOWN		= 1,
 191	IB_PORT_INIT		= 2,
 192	IB_PORT_ARMED		= 3,
 193	IB_PORT_ACTIVE		= 4,
 194	IB_PORT_ACTIVE_DEFER	= 5
 195};
 196
 197enum ib_port_cap_flags {
 198	IB_PORT_SM				= 1 <<  1,
 199	IB_PORT_NOTICE_SUP			= 1 <<  2,
 200	IB_PORT_TRAP_SUP			= 1 <<  3,
 201	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
 202	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
 203	IB_PORT_SL_MAP_SUP			= 1 <<  6,
 204	IB_PORT_MKEY_NVRAM			= 1 <<  7,
 205	IB_PORT_PKEY_NVRAM			= 1 <<  8,
 206	IB_PORT_LED_INFO_SUP			= 1 <<  9,
 207	IB_PORT_SM_DISABLED			= 1 << 10,
 208	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
 209	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
 210	IB_PORT_CM_SUP				= 1 << 16,
 211	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
 212	IB_PORT_REINIT_SUP			= 1 << 18,
 213	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
 214	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
 215	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
 216	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
 217	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
 218	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
 219	IB_PORT_CLIENT_REG_SUP			= 1 << 25
 220};
 221
 222enum ib_port_width {
 223	IB_WIDTH_1X	= 1,
 
 224	IB_WIDTH_4X	= 2,
 225	IB_WIDTH_8X	= 4,
 226	IB_WIDTH_12X	= 8
 227};
 228
 229static inline int ib_width_enum_to_int(enum ib_port_width width)
 230{
 231	switch (width) {
 232	case IB_WIDTH_1X:  return  1;
 
 233	case IB_WIDTH_4X:  return  4;
 234	case IB_WIDTH_8X:  return  8;
 235	case IB_WIDTH_12X: return 12;
 236	default: 	  return -1;
 237	}
 238}
 239
 240struct ib_protocol_stats {
 241	/* TBD... */
 242};
 243
 244struct iw_protocol_stats {
 245	u64	ipInReceives;
 246	u64	ipInHdrErrors;
 247	u64	ipInTooBigErrors;
 248	u64	ipInNoRoutes;
 249	u64	ipInAddrErrors;
 250	u64	ipInUnknownProtos;
 251	u64	ipInTruncatedPkts;
 252	u64	ipInDiscards;
 253	u64	ipInDelivers;
 254	u64	ipOutForwDatagrams;
 255	u64	ipOutRequests;
 256	u64	ipOutDiscards;
 257	u64	ipOutNoRoutes;
 258	u64	ipReasmTimeout;
 259	u64	ipReasmReqds;
 260	u64	ipReasmOKs;
 261	u64	ipReasmFails;
 262	u64	ipFragOKs;
 263	u64	ipFragFails;
 264	u64	ipFragCreates;
 265	u64	ipInMcastPkts;
 266	u64	ipOutMcastPkts;
 267	u64	ipInBcastPkts;
 268	u64	ipOutBcastPkts;
 269
 270	u64	tcpRtoAlgorithm;
 271	u64	tcpRtoMin;
 272	u64	tcpRtoMax;
 273	u64	tcpMaxConn;
 274	u64	tcpActiveOpens;
 275	u64	tcpPassiveOpens;
 276	u64	tcpAttemptFails;
 277	u64	tcpEstabResets;
 278	u64	tcpCurrEstab;
 279	u64	tcpInSegs;
 280	u64	tcpOutSegs;
 281	u64	tcpRetransSegs;
 282	u64	tcpInErrs;
 283	u64	tcpOutRsts;
 284};
 285
 286union rdma_protocol_stats {
 287	struct ib_protocol_stats	ib;
 288	struct iw_protocol_stats	iw;
 289};
 290
 291struct ib_port_attr {
 
 292	enum ib_port_state	state;
 293	enum ib_mtu		max_mtu;
 294	enum ib_mtu		active_mtu;
 
 295	int			gid_tbl_len;
 
 
 296	u32			port_cap_flags;
 297	u32			max_msg_sz;
 298	u32			bad_pkey_cntr;
 299	u32			qkey_viol_cntr;
 300	u16			pkey_tbl_len;
 301	u16			lid;
 302	u16			sm_lid;
 303	u8			lmc;
 304	u8			max_vl_num;
 305	u8			sm_sl;
 306	u8			subnet_timeout;
 307	u8			init_type_reply;
 308	u8			active_width;
 309	u8			active_speed;
 310	u8                      phys_state;
 
 311};
 312
 313enum ib_device_modify_flags {
 314	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
 315	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
 316};
 317
 
 
 318struct ib_device_modify {
 319	u64	sys_image_guid;
 320	char	node_desc[64];
 321};
 322
 323enum ib_port_modify_flags {
 324	IB_PORT_SHUTDOWN		= 1,
 325	IB_PORT_INIT_TYPE		= (1<<2),
 326	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
 
 327};
 328
 329struct ib_port_modify {
 330	u32	set_port_cap_mask;
 331	u32	clr_port_cap_mask;
 332	u8	init_type;
 333};
 334
 335enum ib_event_type {
 336	IB_EVENT_CQ_ERR,
 337	IB_EVENT_QP_FATAL,
 338	IB_EVENT_QP_REQ_ERR,
 339	IB_EVENT_QP_ACCESS_ERR,
 340	IB_EVENT_COMM_EST,
 341	IB_EVENT_SQ_DRAINED,
 342	IB_EVENT_PATH_MIG,
 343	IB_EVENT_PATH_MIG_ERR,
 344	IB_EVENT_DEVICE_FATAL,
 345	IB_EVENT_PORT_ACTIVE,
 346	IB_EVENT_PORT_ERR,
 347	IB_EVENT_LID_CHANGE,
 348	IB_EVENT_PKEY_CHANGE,
 349	IB_EVENT_SM_CHANGE,
 350	IB_EVENT_SRQ_ERR,
 351	IB_EVENT_SRQ_LIMIT_REACHED,
 352	IB_EVENT_QP_LAST_WQE_REACHED,
 353	IB_EVENT_CLIENT_REREGISTER,
 354	IB_EVENT_GID_CHANGE,
 
 355};
 356
 
 
 357struct ib_event {
 358	struct ib_device	*device;
 359	union {
 360		struct ib_cq	*cq;
 361		struct ib_qp	*qp;
 362		struct ib_srq	*srq;
 363		u8		port_num;
 
 364	} element;
 365	enum ib_event_type	event;
 366};
 367
 368struct ib_event_handler {
 369	struct ib_device *device;
 370	void            (*handler)(struct ib_event_handler *, struct ib_event *);
 371	struct list_head  list;
 372};
 373
 374#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
 375	do {							\
 376		(_ptr)->device  = _device;			\
 377		(_ptr)->handler = _handler;			\
 378		INIT_LIST_HEAD(&(_ptr)->list);			\
 379	} while (0)
 380
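/*
 * Usage sketch: a consumer registers for asynchronous events by embedding a
 * struct ib_event_handler, initializing it with INIT_IB_EVENT_HANDLER and
 * passing it to ib_register_event_handler() (declared further down in this
 * header).  The function and variable names are hypothetical.
 *
 *	static void example_event(struct ib_event_handler *handler,
 *				  struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("%s: port %d active\n",
 *				event->device->name, event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler example_handler;
 *
 *	Then, once a device is known (e.g. in an ib_client add callback):
 *		INIT_IB_EVENT_HANDLER(&example_handler, device, example_event);
 *		ib_register_event_handler(&example_handler);
 */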
 381struct ib_global_route {
 
 382	union ib_gid	dgid;
 383	u32		flow_label;
 384	u8		sgid_index;
 385	u8		hop_limit;
 386	u8		traffic_class;
 387};
 388
 389struct ib_grh {
 390	__be32		version_tclass_flow;
 391	__be16		paylen;
 392	u8		next_hdr;
 393	u8		hop_limit;
 394	union ib_gid	sgid;
 395	union ib_gid	dgid;
 396};
 397
 398enum {
 399	IB_MULTICAST_QPN = 0xffffff
 400};
 401
 402#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
 
 403
 404enum ib_ah_flags {
 405	IB_AH_GRH	= 1
 406};
 407
 408enum ib_rate {
 409	IB_RATE_PORT_CURRENT = 0,
 410	IB_RATE_2_5_GBPS = 2,
 411	IB_RATE_5_GBPS   = 5,
 412	IB_RATE_10_GBPS  = 3,
 413	IB_RATE_20_GBPS  = 6,
 414	IB_RATE_30_GBPS  = 4,
 415	IB_RATE_40_GBPS  = 7,
 416	IB_RATE_60_GBPS  = 8,
 417	IB_RATE_80_GBPS  = 9,
 418	IB_RATE_120_GBPS = 10
 419};
 420
 421/**
 422 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 423 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 424 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 425 * @rate: rate to convert.
 426 */
 427int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
 428
 429/**
 430 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 431 * enum.
 432 * @mult: multiple to convert.
 433 */
 434enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
 435
 436struct ib_ah_attr {
 437	struct ib_global_route	grh;
 438	u16			dlid;
 439	u8			sl;
 440	u8			src_path_bits;
 441	u8			static_rate;
 
 442	u8			ah_flags;
 443	u8			port_num;
 
 
 
 
 
 444};
 445
 446enum ib_wc_status {
 447	IB_WC_SUCCESS,
 448	IB_WC_LOC_LEN_ERR,
 449	IB_WC_LOC_QP_OP_ERR,
 450	IB_WC_LOC_EEC_OP_ERR,
 451	IB_WC_LOC_PROT_ERR,
 452	IB_WC_WR_FLUSH_ERR,
 453	IB_WC_MW_BIND_ERR,
 454	IB_WC_BAD_RESP_ERR,
 455	IB_WC_LOC_ACCESS_ERR,
 456	IB_WC_REM_INV_REQ_ERR,
 457	IB_WC_REM_ACCESS_ERR,
 458	IB_WC_REM_OP_ERR,
 459	IB_WC_RETRY_EXC_ERR,
 460	IB_WC_RNR_RETRY_EXC_ERR,
 461	IB_WC_LOC_RDD_VIOL_ERR,
 462	IB_WC_REM_INV_RD_REQ_ERR,
 463	IB_WC_REM_ABORT_ERR,
 464	IB_WC_INV_EECN_ERR,
 465	IB_WC_INV_EEC_STATE_ERR,
 466	IB_WC_FATAL_ERR,
 467	IB_WC_RESP_TIMEOUT_ERR,
 468	IB_WC_GENERAL_ERR
 469};
 470
 
 
 471enum ib_wc_opcode {
 472	IB_WC_SEND,
 473	IB_WC_RDMA_WRITE,
 474	IB_WC_RDMA_READ,
 475	IB_WC_COMP_SWAP,
 476	IB_WC_FETCH_ADD,
 477	IB_WC_BIND_MW,
 478	IB_WC_LSO,
 479	IB_WC_LOCAL_INV,
 480	IB_WC_FAST_REG_MR,
 
 481	IB_WC_MASKED_COMP_SWAP,
 482	IB_WC_MASKED_FETCH_ADD,
 
 483/*
 484 * Set value of IB_WC_RECV so consumers can test if a completion is a
 485 * receive by testing (opcode & IB_WC_RECV).
 486 */
 487	IB_WC_RECV			= 1 << 7,
 488	IB_WC_RECV_RDMA_WITH_IMM
 489};
 490
 491enum ib_wc_flags {
 492	IB_WC_GRH		= 1,
 493	IB_WC_WITH_IMM		= (1<<1),
 494	IB_WC_WITH_INVALIDATE	= (1<<2),
 
 
 
 
 495};
 496
 497struct ib_wc {
 498	u64			wr_id;
 
 
 
 499	enum ib_wc_status	status;
 500	enum ib_wc_opcode	opcode;
 501	u32			vendor_err;
 502	u32			byte_len;
 503	struct ib_qp	       *qp;
 504	union {
 505		__be32		imm_data;
 506		u32		invalidate_rkey;
 507	} ex;
 508	u32			src_qp;
 
 509	int			wc_flags;
 510	u16			pkey_index;
 511	u16			slid;
 512	u8			sl;
 513	u8			dlid_path_bits;
 514	u8			port_num;	/* valid only for DR SMPs on switches */
 515	int			csum_ok;
 
 
 516};
 517
 518enum ib_cq_notify_flags {
 519	IB_CQ_SOLICITED			= 1 << 0,
 520	IB_CQ_NEXT_COMP			= 1 << 1,
 521	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
 522	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
 523};
 524
 525enum ib_srq_attr_mask {
 526	IB_SRQ_MAX_WR	= 1 << 0,
 527	IB_SRQ_LIMIT	= 1 << 1,
 528};
 529
 530struct ib_srq_attr {
 531	u32	max_wr;
 532	u32	max_sge;
 533	u32	srq_limit;
 534};
 535
 536struct ib_srq_init_attr {
 537	void		      (*event_handler)(struct ib_event *, void *);
 538	void		       *srq_context;
 539	struct ib_srq_attr	attr;
 540};
 541
 542struct ib_qp_cap {
 543	u32	max_send_wr;
 544	u32	max_recv_wr;
 545	u32	max_send_sge;
 546	u32	max_recv_sge;
 547	u32	max_inline_data;
 548};
 549
 550enum ib_sig_type {
 551	IB_SIGNAL_ALL_WR,
 552	IB_SIGNAL_REQ_WR
 553};
 554
 555enum ib_qp_type {
 556	/*
 557	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
 558	 * here (and in that order) since the MAD layer uses them as
 559	 * indices into a 2-entry table.
 560	 */
 561	IB_QPT_SMI,
 562	IB_QPT_GSI,
 563
 564	IB_QPT_RC,
 565	IB_QPT_UC,
 566	IB_QPT_UD,
 567	IB_QPT_RAW_IPV6,
 568	IB_QPT_RAW_ETHERTYPE
 569};
 570
 571enum ib_qp_create_flags {
 572	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
 573	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
 574};
 575
 
 
 
 
 
 576struct ib_qp_init_attr {
 
 577	void                  (*event_handler)(struct ib_event *, void *);
 
 578	void		       *qp_context;
 579	struct ib_cq	       *send_cq;
 580	struct ib_cq	       *recv_cq;
 581	struct ib_srq	       *srq;
 
 582	struct ib_qp_cap	cap;
 583	enum ib_sig_type	sq_sig_type;
 584	enum ib_qp_type		qp_type;
 585	enum ib_qp_create_flags	create_flags;
 586	u8			port_num; /* special QP types only */
 587};
 588
 589enum ib_rnr_timeout {
 590	IB_RNR_TIMER_655_36 =  0,
 591	IB_RNR_TIMER_000_01 =  1,
 592	IB_RNR_TIMER_000_02 =  2,
 593	IB_RNR_TIMER_000_03 =  3,
 594	IB_RNR_TIMER_000_04 =  4,
 595	IB_RNR_TIMER_000_06 =  5,
 596	IB_RNR_TIMER_000_08 =  6,
 597	IB_RNR_TIMER_000_12 =  7,
 598	IB_RNR_TIMER_000_16 =  8,
 599	IB_RNR_TIMER_000_24 =  9,
 600	IB_RNR_TIMER_000_32 = 10,
 601	IB_RNR_TIMER_000_48 = 11,
 602	IB_RNR_TIMER_000_64 = 12,
 603	IB_RNR_TIMER_000_96 = 13,
 604	IB_RNR_TIMER_001_28 = 14,
 605	IB_RNR_TIMER_001_92 = 15,
 606	IB_RNR_TIMER_002_56 = 16,
 607	IB_RNR_TIMER_003_84 = 17,
 608	IB_RNR_TIMER_005_12 = 18,
 609	IB_RNR_TIMER_007_68 = 19,
 610	IB_RNR_TIMER_010_24 = 20,
 611	IB_RNR_TIMER_015_36 = 21,
 612	IB_RNR_TIMER_020_48 = 22,
 613	IB_RNR_TIMER_030_72 = 23,
 614	IB_RNR_TIMER_040_96 = 24,
 615	IB_RNR_TIMER_061_44 = 25,
 616	IB_RNR_TIMER_081_92 = 26,
 617	IB_RNR_TIMER_122_88 = 27,
 618	IB_RNR_TIMER_163_84 = 28,
 619	IB_RNR_TIMER_245_76 = 29,
 620	IB_RNR_TIMER_327_68 = 30,
 621	IB_RNR_TIMER_491_52 = 31
 622};
 623
 624enum ib_qp_attr_mask {
 625	IB_QP_STATE			= 1,
 626	IB_QP_CUR_STATE			= (1<<1),
 627	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
 628	IB_QP_ACCESS_FLAGS		= (1<<3),
 629	IB_QP_PKEY_INDEX		= (1<<4),
 630	IB_QP_PORT			= (1<<5),
 631	IB_QP_QKEY			= (1<<6),
 632	IB_QP_AV			= (1<<7),
 633	IB_QP_PATH_MTU			= (1<<8),
 634	IB_QP_TIMEOUT			= (1<<9),
 635	IB_QP_RETRY_CNT			= (1<<10),
 636	IB_QP_RNR_RETRY			= (1<<11),
 637	IB_QP_RQ_PSN			= (1<<12),
 638	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
 639	IB_QP_ALT_PATH			= (1<<14),
 640	IB_QP_MIN_RNR_TIMER		= (1<<15),
 641	IB_QP_SQ_PSN			= (1<<16),
 642	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
 643	IB_QP_PATH_MIG_STATE		= (1<<18),
 644	IB_QP_CAP			= (1<<19),
 645	IB_QP_DEST_QPN			= (1<<20)
 
 
 
 
 
 
 
 646};
 647
 648enum ib_qp_state {
 649	IB_QPS_RESET,
 650	IB_QPS_INIT,
 651	IB_QPS_RTR,
 652	IB_QPS_RTS,
 653	IB_QPS_SQD,
 654	IB_QPS_SQE,
 655	IB_QPS_ERR
 656};
 657
 658enum ib_mig_state {
 659	IB_MIG_MIGRATED,
 660	IB_MIG_REARM,
 661	IB_MIG_ARMED
 662};
 663
 
 
 
 
 
 664struct ib_qp_attr {
 665	enum ib_qp_state	qp_state;
 666	enum ib_qp_state	cur_qp_state;
 667	enum ib_mtu		path_mtu;
 668	enum ib_mig_state	path_mig_state;
 669	u32			qkey;
 670	u32			rq_psn;
 671	u32			sq_psn;
 672	u32			dest_qp_num;
 673	int			qp_access_flags;
 674	struct ib_qp_cap	cap;
 675	struct ib_ah_attr	ah_attr;
 676	struct ib_ah_attr	alt_ah_attr;
 677	u16			pkey_index;
 678	u16			alt_pkey_index;
 679	u8			en_sqd_async_notify;
 680	u8			sq_draining;
 681	u8			max_rd_atomic;
 682	u8			max_dest_rd_atomic;
 683	u8			min_rnr_timer;
 684	u8			port_num;
 685	u8			timeout;
 686	u8			retry_cnt;
 687	u8			rnr_retry;
 688	u8			alt_port_num;
 689	u8			alt_timeout;
 
 
 690};
 691
 692enum ib_wr_opcode {
 693	IB_WR_RDMA_WRITE,
 694	IB_WR_RDMA_WRITE_WITH_IMM,
 695	IB_WR_SEND,
 696	IB_WR_SEND_WITH_IMM,
 697	IB_WR_RDMA_READ,
 698	IB_WR_ATOMIC_CMP_AND_SWP,
 699	IB_WR_ATOMIC_FETCH_AND_ADD,
 700	IB_WR_LSO,
 701	IB_WR_SEND_WITH_INV,
 702	IB_WR_RDMA_READ_WITH_INV,
 703	IB_WR_LOCAL_INV,
 704	IB_WR_FAST_REG_MR,
 705	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
 706	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
 707};
 708
 709enum ib_send_flags {
 710	IB_SEND_FENCE		= 1,
 711	IB_SEND_SIGNALED	= (1<<1),
 712	IB_SEND_SOLICITED	= (1<<2),
 713	IB_SEND_INLINE		= (1<<3),
 714	IB_SEND_IP_CSUM		= (1<<4)
 
 
 
 
 715};
 716
 717struct ib_sge {
 718	u64	addr;
 719	u32	length;
 720	u32	lkey;
 721};
 722
 723struct ib_fast_reg_page_list {
 724	struct ib_device       *device;
 725	u64		       *page_list;
 726	unsigned int		max_page_list_len;
 727};
 728
 729struct ib_send_wr {
 730	struct ib_send_wr      *next;
 731	u64			wr_id;
 
 
 
 732	struct ib_sge	       *sg_list;
 733	int			num_sge;
 734	enum ib_wr_opcode	opcode;
 735	int			send_flags;
 736	union {
 737		__be32		imm_data;
 738		u32		invalidate_rkey;
 739	} ex;
 740	union {
 741		struct {
 742			u64	remote_addr;
 743			u32	rkey;
 744		} rdma;
 745		struct {
 746			u64	remote_addr;
 747			u64	compare_add;
 748			u64	swap;
 749			u64	compare_add_mask;
 750			u64	swap_mask;
 751			u32	rkey;
 752		} atomic;
 753		struct {
 754			struct ib_ah *ah;
 755			void   *header;
 756			int     hlen;
 757			int     mss;
 758			u32	remote_qpn;
 759			u32	remote_qkey;
 760			u16	pkey_index; /* valid for GSI only */
 761			u8	port_num;   /* valid for DR SMPs on switch only */
 762		} ud;
 763		struct {
 764			u64				iova_start;
 765			struct ib_fast_reg_page_list   *page_list;
 766			unsigned int			page_shift;
 767			unsigned int			page_list_len;
 768			u32				length;
 769			int				access_flags;
 770			u32				rkey;
 771		} fast_reg;
 772	} wr;
 773};
 774
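/*
 * Usage sketch: filling in an ib_send_wr for a single-SGE RDMA WRITE.  The
 * local DMA address and lkey, and the remote address and rkey, are assumed
 * to have been obtained elsewhere; all variable names are hypothetical.
 *
 *	struct ib_sge sge = {
 *		.addr   = local_dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = (u64) (unsigned long) ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *
 *	wr.wr.rdma.remote_addr = remote_addr;
 *	wr.wr.rdma.rkey        = remote_rkey;
 */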
 775struct ib_recv_wr {
 776	struct ib_recv_wr      *next;
 777	u64			wr_id;
 
 
 
 778	struct ib_sge	       *sg_list;
 779	int			num_sge;
 780};
 781
 782enum ib_access_flags {
 783	IB_ACCESS_LOCAL_WRITE	= 1,
 784	IB_ACCESS_REMOTE_WRITE	= (1<<1),
 785	IB_ACCESS_REMOTE_READ	= (1<<2),
 786	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
 787	IB_ACCESS_MW_BIND	= (1<<4)
 788};
 789
 790struct ib_phys_buf {
 791	u64      addr;
 792	u64      size;
 793};
 794
 795struct ib_mr_attr {
 796	struct ib_pd	*pd;
 797	u64		device_virt_addr;
 798	u64		size;
 799	int		mr_access_flags;
 800	u32		lkey;
 801	u32		rkey;
 802};
 803
 
 
 
 
 804enum ib_mr_rereg_flags {
 805	IB_MR_REREG_TRANS	= 1,
 806	IB_MR_REREG_PD		= (1<<1),
 807	IB_MR_REREG_ACCESS	= (1<<2)
 808};
 809
 810struct ib_mw_bind {
 811	struct ib_mr   *mr;
 812	u64		wr_id;
 813	u64		addr;
 814	u32		length;
 815	int		send_flags;
 816	int		mw_access_flags;
 817};
 818
 819struct ib_fmr_attr {
 820	int	max_pages;
 821	int	max_maps;
 822	u8	page_shift;
 823};
 824
 825struct ib_ucontext {
 826	struct ib_device       *device;
 827	struct list_head	pd_list;
 828	struct list_head	mr_list;
 829	struct list_head	mw_list;
 830	struct list_head	cq_list;
 831	struct list_head	qp_list;
 832	struct list_head	srq_list;
 833	struct list_head	ah_list;
 834	int			closing;
 835};
 836
 837struct ib_uobject {
 838	u64			user_handle;	/* handle given to us by userspace */
 
 
 
 839	struct ib_ucontext     *context;	/* associated user context */
 840	void		       *object;		/* containing object */
 841	struct list_head	list;		/* link to context's list */
 
 842	int			id;		/* index into kernel idr */
 843	struct kref		ref;
 844	struct rw_semaphore	mutex;		/* protects .live */
 845	int			live;
 
 
 846};
 847
 848struct ib_udata {
 849	void __user *inbuf;
 850	void __user *outbuf;
 851	size_t       inlen;
 852	size_t       outlen;
 853};
 854
 855struct ib_pd {
 
 
 856	struct ib_device       *device;
 857	struct ib_uobject      *uobject;
 858	atomic_t          	usecnt; /* count all resources */
 859};
 860
 861struct ib_ah {
 862	struct ib_device	*device;
 863	struct ib_pd		*pd;
 864	struct ib_uobject	*uobject;
 
 
 865};
 866
 867typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
 868
 
 
 
 
 
 
 
 
 
 869struct ib_cq {
 870	struct ib_device       *device;
 871	struct ib_uobject      *uobject;
 872	ib_comp_handler   	comp_handler;
 873	void                  (*event_handler)(struct ib_event *, void *);
 874	void                   *cq_context;
 875	int               	cqe;
 
 876	atomic_t          	usecnt; /* count number of work queues */
 877};
 878
 879struct ib_srq {
 880	struct ib_device       *device;
 881	struct ib_pd	       *pd;
 882	struct ib_uobject      *uobject;
 883	void		      (*event_handler)(struct ib_event *, void *);
 884	void		       *srq_context;
 885	atomic_t		usecnt;
 886};
 887
 
 
 
 
 888struct ib_qp {
 889	struct ib_device       *device;
 890	struct ib_pd	       *pd;
 891	struct ib_cq	       *send_cq;
 892	struct ib_cq	       *recv_cq;
 
 
 
 
 893	struct ib_srq	       *srq;
 894	struct ib_uobject      *uobject;
 895	void                  (*event_handler)(struct ib_event *, void *);
 
 896	void		       *qp_context;
 
 
 
 897	u32			qp_num;
 
 
 898	enum ib_qp_type		qp_type;
 899};
 900
 901struct ib_mr {
 902	struct ib_device  *device;
 903	struct ib_pd	  *pd;
 904	struct ib_uobject *uobject;
 905	u32		   lkey;
 906	u32		   rkey;
 907	atomic_t	   usecnt; /* count number of MWs */
 908};
 909
 910struct ib_mw {
 911	struct ib_device	*device;
 912	struct ib_pd		*pd;
 913	struct ib_uobject	*uobject;
 914	u32			rkey;
 915};
 916
 917struct ib_fmr {
 918	struct ib_device	*device;
 919	struct ib_pd		*pd;
 920	struct list_head	list;
 921	u32			lkey;
 922	u32			rkey;
 923};
 924
 925struct ib_mad;
 926struct ib_grh;
 927
 928enum ib_process_mad_flags {
 929	IB_MAD_IGNORE_MKEY	= 1,
 930	IB_MAD_IGNORE_BKEY	= 2,
 931	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
 932};
 933
 934enum ib_mad_result {
 935	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
 936	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
 937	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
 938	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
 939};
 940
 941#define IB_DEVICE_NAME_MAX 64
 942
 943struct ib_cache {
 944	rwlock_t                lock;
 945	struct ib_event_handler event_handler;
 946	struct ib_pkey_cache  **pkey_cache;
 947	struct ib_gid_cache   **gid_cache;
 948	u8                     *lmc_cache;
 949};
 950
 951struct ib_dma_mapping_ops {
 952	int		(*mapping_error)(struct ib_device *dev,
 953					 u64 dma_addr);
 954	u64		(*map_single)(struct ib_device *dev,
 955				      void *ptr, size_t size,
 956				      enum dma_data_direction direction);
 957	void		(*unmap_single)(struct ib_device *dev,
 958					u64 addr, size_t size,
 959					enum dma_data_direction direction);
 960	u64		(*map_page)(struct ib_device *dev,
 961				    struct page *page, unsigned long offset,
 962				    size_t size,
 963				    enum dma_data_direction direction);
 964	void		(*unmap_page)(struct ib_device *dev,
 965				      u64 addr, size_t size,
 966				      enum dma_data_direction direction);
 967	int		(*map_sg)(struct ib_device *dev,
 968				  struct scatterlist *sg, int nents,
 969				  enum dma_data_direction direction);
 970	void		(*unmap_sg)(struct ib_device *dev,
 971				    struct scatterlist *sg, int nents,
 972				    enum dma_data_direction direction);
 973	u64		(*dma_address)(struct ib_device *dev,
 974				       struct scatterlist *sg);
 975	unsigned int	(*dma_len)(struct ib_device *dev,
 976				   struct scatterlist *sg);
 977	void		(*sync_single_for_cpu)(struct ib_device *dev,
 978					       u64 dma_handle,
 979					       size_t size,
 980					       enum dma_data_direction dir);
 981	void		(*sync_single_for_device)(struct ib_device *dev,
 982						  u64 dma_handle,
 983						  size_t size,
 984						  enum dma_data_direction dir);
 985	void		*(*alloc_coherent)(struct ib_device *dev,
 986					   size_t size,
 987					   u64 *dma_handle,
 988					   gfp_t flag);
 989	void		(*free_coherent)(struct ib_device *dev,
 990					 size_t size, void *cpu_addr,
 991					 u64 dma_handle);
 992};
 993
 994struct iw_cm_verbs;
 995
 996struct ib_device {
 
 997	struct device                *dma_device;
 998
 999	char                          name[IB_DEVICE_NAME_MAX];
 
1000
1001	struct list_head              event_handler_list;
1002	spinlock_t                    event_handler_lock;
 
1003
1004	spinlock_t                    client_data_lock;
1005	struct list_head              core_list;
1006	struct list_head              client_data_list;
1007
1008	struct ib_cache               cache;
1009	int                          *pkey_tbl_len;
1010	int                          *gid_tbl_len;
1011
1012	int			      num_comp_vectors;
1013
1014	struct iw_cm_verbs	     *iwcm;
1015
1016	int		           (*get_protocol_stats)(struct ib_device *device,
1017							 union rdma_protocol_stats *stats);
1018	int		           (*query_device)(struct ib_device *device,
1019						   struct ib_device_attr *device_attr);
1020	int		           (*query_port)(struct ib_device *device,
1021						 u8 port_num,
1022						 struct ib_port_attr *port_attr);
1023	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
1024						     u8 port_num);
1025	int		           (*query_gid)(struct ib_device *device,
1026						u8 port_num, int index,
1027						union ib_gid *gid);
1028	int		           (*query_pkey)(struct ib_device *device,
1029						 u8 port_num, u16 index, u16 *pkey);
1030	int		           (*modify_device)(struct ib_device *device,
1031						    int device_modify_mask,
1032						    struct ib_device_modify *device_modify);
1033	int		           (*modify_port)(struct ib_device *device,
1034						  u8 port_num, int port_modify_mask,
1035						  struct ib_port_modify *port_modify);
1036	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
1037						     struct ib_udata *udata);
1038	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
1039	int                        (*mmap)(struct ib_ucontext *context,
1040					   struct vm_area_struct *vma);
1041	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
1042					       struct ib_ucontext *context,
1043					       struct ib_udata *udata);
1044	int                        (*dealloc_pd)(struct ib_pd *pd);
1045	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
1046						struct ib_ah_attr *ah_attr);
1047	int                        (*modify_ah)(struct ib_ah *ah,
1048						struct ib_ah_attr *ah_attr);
1049	int                        (*query_ah)(struct ib_ah *ah,
1050					       struct ib_ah_attr *ah_attr);
1051	int                        (*destroy_ah)(struct ib_ah *ah);
1052	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
1053						 struct ib_srq_init_attr *srq_init_attr,
1054						 struct ib_udata *udata);
1055	int                        (*modify_srq)(struct ib_srq *srq,
1056						 struct ib_srq_attr *srq_attr,
1057						 enum ib_srq_attr_mask srq_attr_mask,
1058						 struct ib_udata *udata);
1059	int                        (*query_srq)(struct ib_srq *srq,
1060						struct ib_srq_attr *srq_attr);
1061	int                        (*destroy_srq)(struct ib_srq *srq);
1062	int                        (*post_srq_recv)(struct ib_srq *srq,
1063						    struct ib_recv_wr *recv_wr,
1064						    struct ib_recv_wr **bad_recv_wr);
1065	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
1066						struct ib_qp_init_attr *qp_init_attr,
1067						struct ib_udata *udata);
1068	int                        (*modify_qp)(struct ib_qp *qp,
1069						struct ib_qp_attr *qp_attr,
1070						int qp_attr_mask,
1071						struct ib_udata *udata);
1072	int                        (*query_qp)(struct ib_qp *qp,
1073					       struct ib_qp_attr *qp_attr,
1074					       int qp_attr_mask,
1075					       struct ib_qp_init_attr *qp_init_attr);
1076	int                        (*destroy_qp)(struct ib_qp *qp);
1077	int                        (*post_send)(struct ib_qp *qp,
1078						struct ib_send_wr *send_wr,
1079						struct ib_send_wr **bad_send_wr);
1080	int                        (*post_recv)(struct ib_qp *qp,
1081						struct ib_recv_wr *recv_wr,
1082						struct ib_recv_wr **bad_recv_wr);
1083	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
1084						int comp_vector,
1085						struct ib_ucontext *context,
1086						struct ib_udata *udata);
1087	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1088						u16 cq_period);
1089	int                        (*destroy_cq)(struct ib_cq *cq);
1090	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
1091						struct ib_udata *udata);
1092	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
1093					      struct ib_wc *wc);
1094	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1095	int                        (*req_notify_cq)(struct ib_cq *cq,
1096						    enum ib_cq_notify_flags flags);
1097	int                        (*req_ncomp_notif)(struct ib_cq *cq,
1098						      int wc_cnt);
1099	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
1100						 int mr_access_flags);
1101	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
1102						  struct ib_phys_buf *phys_buf_array,
1103						  int num_phys_buf,
1104						  int mr_access_flags,
1105						  u64 *iova_start);
1106	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
1107						  u64 start, u64 length,
1108						  u64 virt_addr,
1109						  int mr_access_flags,
1110						  struct ib_udata *udata);
1111	int                        (*query_mr)(struct ib_mr *mr,
1112					       struct ib_mr_attr *mr_attr);
1113	int                        (*dereg_mr)(struct ib_mr *mr);
1114	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
1115					       int max_page_list_len);
1116	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1117								   int page_list_len);
1118	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1119	int                        (*rereg_phys_mr)(struct ib_mr *mr,
1120						    int mr_rereg_mask,
1121						    struct ib_pd *pd,
1122						    struct ib_phys_buf *phys_buf_array,
1123						    int num_phys_buf,
1124						    int mr_access_flags,
1125						    u64 *iova_start);
1126	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
1127	int                        (*bind_mw)(struct ib_qp *qp,
1128					      struct ib_mw *mw,
1129					      struct ib_mw_bind *mw_bind);
1130	int                        (*dealloc_mw)(struct ib_mw *mw);
1131	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
1132						int mr_access_flags,
1133						struct ib_fmr_attr *fmr_attr);
1134	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
1135						   u64 *page_list, int list_len,
1136						   u64 iova);
1137	int		           (*unmap_fmr)(struct list_head *fmr_list);
1138	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
1139	int                        (*attach_mcast)(struct ib_qp *qp,
1140						   union ib_gid *gid,
1141						   u16 lid);
1142	int                        (*detach_mcast)(struct ib_qp *qp,
1143						   union ib_gid *gid,
1144						   u16 lid);
1145	int                        (*process_mad)(struct ib_device *device,
1146						  int process_mad_flags,
1147						  u8 port_num,
1148						  struct ib_wc *in_wc,
1149						  struct ib_grh *in_grh,
1150						  struct ib_mad *in_mad,
1151						  struct ib_mad *out_mad);
1152
1153	struct ib_dma_mapping_ops   *dma_ops;
1154
1155	struct module               *owner;
1156	struct device                dev;
1157	struct kobject               *ports_parent;
1158	struct list_head             port_list;
1159
1160	enum {
1161		IB_DEV_UNINITIALIZED,
1162		IB_DEV_REGISTERED,
1163		IB_DEV_UNREGISTERED
1164	}                            reg_state;
1165
1166	int			     uverbs_abi_ver;
1167	u64			     uverbs_cmd_mask;
1168
1169	char			     node_desc[64];
1170	__be64			     node_guid;
1171	u32			     local_dma_lkey;
 
 
 
 
 
1172	u8                           node_type;
1173	u8                           phys_port_cnt;
1174};
1175
1176struct ib_client {
1177	char  *name;
1178	void (*add)   (struct ib_device *);
1179	void (*remove)(struct ib_device *);
1180
1181	struct list_head list;
 
1182};
1183
1184struct ib_device *ib_alloc_device(size_t size);
1185void ib_dealloc_device(struct ib_device *device);
1186
1187int ib_register_device(struct ib_device *device,
1188		       int (*port_callback)(struct ib_device *,
1189					    u8, struct kobject *));
 
1190void ib_unregister_device(struct ib_device *device);
 
 
 
1191
1192int ib_register_client   (struct ib_client *client);
1193void ib_unregister_client(struct ib_client *client);
1194
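/*
 * Usage sketch: an upper-layer module typically declares a struct ib_client
 * with add/remove callbacks and registers it at module init, so it is called
 * back for every RDMA device in the system.  Names are hypothetical.
 *
 *	static void example_add_one(struct ib_device *device);
 *	static void example_remove_one(struct ib_device *device);
 *
 *	static struct ib_client example_client = {
 *		.name   = "example",
 *		.add    = example_add_one,
 *		.remove = example_remove_one,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return ib_register_client(&example_client);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		ib_unregister_client(&example_client);
 *	}
 */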
1195void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1196void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
1197			 void *data);
1198
1199static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1200{
1201	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1202}
1203
1204static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1205{
1206	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1207}
1208
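/*
 * Usage sketch: a low-level driver's verb implementation uses these helpers
 * to exchange ABI structures with userspace, e.g. when completing a
 * create_cq request.  The response structure and variable names are
 * hypothetical.
 *
 *	struct example_create_cq_resp resp;
 *
 *	resp.cqe = cq->cqe;
 *	if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
 *		ret = -EFAULT;
 *		goto err_free;
 *	}
 */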
1209/**
1210 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1211 * contains all required attributes and no attributes not allowed for
1212 * the given QP state transition.
1213 * @cur_state: Current QP state
1214 * @next_state: Next QP state
1215 * @type: QP type
1216 * @mask: Mask of supplied QP attributes
1217 *
1218 * This function is a helper function that a low-level driver's
1219 * modify_qp method can use to validate the consumer's input.  It
1220 * checks that cur_state and next_state are valid QP states, that a
1221 * transition from cur_state to next_state is allowed by the IB spec,
1222 * and that the attribute mask supplied is allowed for the transition.
1223 */
1224int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1225		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
1226
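/*
 * Usage sketch: a driver's modify_qp method validates the caller-supplied
 * attribute mask before touching hardware.  The driver-private qp->state
 * field and variable names are hypothetical.
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ?
 *			attr->cur_qp_state : qp->state;
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */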
1227int ib_register_event_handler  (struct ib_event_handler *event_handler);
1228int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1229void ib_dispatch_event(struct ib_event *event);
1230
1231int ib_query_device(struct ib_device *device,
1232		    struct ib_device_attr *device_attr);
1233
1234int ib_query_port(struct ib_device *device,
1235		  u8 port_num, struct ib_port_attr *port_attr);
1236
1237enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1238					       u8 port_num);
1239
1240int ib_query_gid(struct ib_device *device,
1241		 u8 port_num, int index, union ib_gid *gid);
1242
1243int ib_query_pkey(struct ib_device *device,
1244		  u8 port_num, u16 index, u16 *pkey);
1245
1246int ib_modify_device(struct ib_device *device,
1247		     int device_modify_mask,
1248		     struct ib_device_modify *device_modify);
1249
1250int ib_modify_port(struct ib_device *device,
1251		   u8 port_num, int port_modify_mask,
1252		   struct ib_port_modify *port_modify);
1253
1254int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1255		u8 *port_num, u16 *index);
1256
1257int ib_find_pkey(struct ib_device *device,
1258		 u8 port_num, u16 pkey, u16 *index);
1259
1260/**
1261 * ib_alloc_pd - Allocates an unused protection domain.
1262 * @device: The device on which to allocate the protection domain.
 
1263 *
1264 * A protection domain object provides an association between QPs, shared
1265 * receive queues, address handles, memory regions, and memory windows.
1266 */
1267struct ib_pd *ib_alloc_pd(struct ib_device *device);
1268
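/*
 * Usage sketch: allocating a PD, typically from an ib_client add callback,
 * and checking the ERR_PTR-style return value.  Names are hypothetical.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */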
1269/**
1270 * ib_dealloc_pd - Deallocates a protection domain.
1271 * @pd: The protection domain to deallocate.
 
 
 
 
 
1272 */
1273int ib_dealloc_pd(struct ib_pd *pd);
 
1274
1275/**
1276 * ib_create_ah - Creates an address handle for the given address vector.
 
1277 * @pd: The protection domain associated with the address handle.
1278 * @ah_attr: The attributes of the address vector.
 
 
1279 *
 
1280 * The address handle is used to reference a local or global destination
1281 * in all UD QP post sends.
1282 */
1283struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1284
1285/**
1286 * ib_init_ah_from_wc - Initializes address handle attributes from a
1287 *   work completion.
1288 * @device: Device on which the received message arrived.
1289 * @port_num: Port on which the received message arrived.
1290 * @wc: Work completion associated with the received message.
1291 * @grh: References the received global route header.  This parameter is
1292 *   ignored unless the work completion indicates that the GRH is valid.
1293 * @ah_attr: Returned attributes that can be used when creating an address
1294 *   handle for replying to the message.
1295 */
1296int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1297		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);
 
1298
1299/**
1300 * ib_create_ah_from_wc - Creates an address handle associated with the
1301 *   sender of the specified work completion.
1302 * @pd: The protection domain associated with the address handle.
1303 * @wc: Work completion information associated with a received message.
1304 * @grh: References the received global route header.  This parameter is
1305 *   ignored unless the work completion indicates that the GRH is valid.
1306 * @port_num: The outbound port number to associate with the address.
1307 *
1308 * The address handle is used to reference a local or global destination
1309 * in all UD QP post sends.
1310 */
1311struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1312				   struct ib_grh *grh, u8 port_num);
1313
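/*
 * Usage sketch: replying to a received UD message by building an address
 * handle directly from the work completion and the received GRH.  The
 * receive buffer is assumed to begin with the 40-byte GRH, as it does for
 * UD QPs; variable names are hypothetical.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, &wc, (struct ib_grh *) recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */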
1314/**
1315 * ib_modify_ah - Modifies the address vector associated with an address
1316 *   handle.
1317 * @ah: The address handle to modify.
1318 * @ah_attr: The new address vector attributes to associate with the
1319 *   address handle.
1320 */
1321int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1322
1323/**
1324 * ib_query_ah - Queries the address vector associated with an address
1325 *   handle.
1326 * @ah: The address handle to query.
1327 * @ah_attr: The address vector attributes associated with the address
1328 *   handle.
1329 */
1330int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
 
 
 
 
 
1331
1332/**
1333 * ib_destroy_ah - Destroys an address handle.
1334 * @ah: The address handle to destroy.
 
 
1335 */
1336int ib_destroy_ah(struct ib_ah *ah);
1337
1338/**
1339 * ib_create_srq - Creates a SRQ associated with the specified protection
1340 *   domain.
1341 * @pd: The protection domain associated with the SRQ.
1342 * @srq_init_attr: A list of initial attributes required to create the
1343 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1344 *   the actual capabilities of the created SRQ.
1345 *
1346 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1347 * requested size of the SRQ, and set to the actual values allocated
1348 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1349 * will always be at least as large as the requested values.
1350 */
1351struct ib_srq *ib_create_srq(struct ib_pd *pd,
1352			     struct ib_srq_init_attr *srq_init_attr);
1353
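/*
 * Usage sketch: creating an SRQ sized for a given number of outstanding
 * receives.  The event handler, sizes, and variable names are hypothetical.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.event_handler = example_srq_event,
 *		.srq_context   = ctx,
 *		.attr = {
 *			.max_wr  = 1024,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */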
1354/**
1355 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1356 * @srq: The SRQ to modify.
1357 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
1358 *   the current values of selected SRQ attributes are returned.
1359 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1360 *   are being modified.
1361 *
1362 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1363 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1364 * the number of receives queued drops below the limit.
1365 */
1366int ib_modify_srq(struct ib_srq *srq,
1367		  struct ib_srq_attr *srq_attr,
1368		  enum ib_srq_attr_mask srq_attr_mask);
1369
1370/**
1371 * ib_query_srq - Returns the attribute list and current values for the
1372 *   specified SRQ.
1373 * @srq: The SRQ to query.
1374 * @srq_attr: The attributes of the specified SRQ.
1375 */
1376int ib_query_srq(struct ib_srq *srq,
1377		 struct ib_srq_attr *srq_attr);
1378
1379/**
1380 * ib_destroy_srq - Destroys the specified SRQ.
1381 * @srq: The SRQ to destroy.
 
 
1382 */
1383int ib_destroy_srq(struct ib_srq *srq);
 
 
 
 
 
1384
1385/**
1386 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1387 * @srq: The SRQ to post the work request on.
1388 * @recv_wr: A list of work requests to post on the receive queue.
1389 * @bad_recv_wr: On an immediate failure, this parameter will reference
1390 *   the work request that failed to be posted on the QP.
1391 */
1392static inline int ib_post_srq_recv(struct ib_srq *srq,
1393				   struct ib_recv_wr *recv_wr,
1394				   struct ib_recv_wr **bad_recv_wr)
1395{
1396	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
 
 
 
1397}
1398
 
 
 
1399/**
1400 * ib_create_qp - Creates a QP associated with the specified protection
1401 *   domain.
1402 * @pd: The protection domain associated with the QP.
1403 * @qp_init_attr: A list of initial attributes required to create the
1404 *   QP.  If QP creation succeeds, then the attributes are updated to
1405 *   the actual capabilities of the created QP.
1406 */
1407struct ib_qp *ib_create_qp(struct ib_pd *pd,
1408			   struct ib_qp_init_attr *qp_init_attr);
1409
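/*
 * Usage sketch: creating an RC QP with separate send and receive CQs.  The
 * capability values are illustrative only; on success the cap fields are
 * updated to reflect what was actually allocated.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap     = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */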
1410/**
1411 * ib_modify_qp - Modifies the attributes for the specified QP and then
1412 *   transitions the QP to the given state.
1413 * @qp: The QP to modify.
1414 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
1415 *   the current values of selected QP attributes are returned.
1416 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1417 *   are being modified.
1418 */
1419int ib_modify_qp(struct ib_qp *qp,
1420		 struct ib_qp_attr *qp_attr,
1421		 int qp_attr_mask);
1422
1423/**
1424 * ib_query_qp - Returns the attribute list and current values for the
1425 *   specified QP.
1426 * @qp: The QP to query.
1427 * @qp_attr: The attributes of the specified QP.
1428 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1429 * @qp_init_attr: Additional attributes of the selected QP.
1430 *
1431 * The qp_attr_mask may be used to limit the query to gathering only the
1432 * selected attributes.
1433 */
1434int ib_query_qp(struct ib_qp *qp,
1435		struct ib_qp_attr *qp_attr,
1436		int qp_attr_mask,
1437		struct ib_qp_init_attr *qp_init_attr);
1438
1439/**
1440 * ib_destroy_qp - Destroys the specified QP.
1441 * @qp: The QP to destroy.
1442 */
1443int ib_destroy_qp(struct ib_qp *qp);
1444
1445/**
1446 * ib_post_send - Posts a list of work requests to the send queue of
1447 *   the specified QP.
1448 * @qp: The QP to post the work request on.
1449 * @send_wr: A list of work requests to post on the send queue.
1450 * @bad_send_wr: On an immediate failure, this parameter will reference
1451 *   the work request that failed to be posted on the QP.
1452 *
1453 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
1454 * error is returned, the QP state shall not be affected,
1455 * ib_post_send() will return an immediate error after queueing any
1456 * earlier work requests in the list.
1457 */
1458static inline int ib_post_send(struct ib_qp *qp,
1459			       struct ib_send_wr *send_wr,
1460			       struct ib_send_wr **bad_send_wr)
1461{
1462	return qp->device->post_send(qp, send_wr, bad_send_wr);
 
 
1463}
1464
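/*
 * Usage sketch: posting a send work request (such as the RDMA WRITE built
 * earlier) and reporting which request failed on an immediate error.
 *
 *	struct ib_send_wr *bad_wr;
 *	int ret;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 *	if (ret)
 *		pr_err("post_send failed (%d) at wr_id %llu\n",
 *		       ret, (unsigned long long) bad_wr->wr_id);
 */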
1465/**
1466 * ib_post_recv - Posts a list of work requests to the receive queue of
1467 *   the specified QP.
1468 * @qp: The QP to post the work request on.
1469 * @recv_wr: A list of work requests to post on the receive queue.
1470 * @bad_recv_wr: On an immediate failure, this parameter will reference
1471 *   the work request that failed to be posted on the QP.
1472 */
1473static inline int ib_post_recv(struct ib_qp *qp,
1474			       struct ib_recv_wr *recv_wr,
1475			       struct ib_recv_wr **bad_recv_wr)
1476{
1477	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
 
1478}
1479
1480/**
1481 * ib_create_cq - Creates a CQ on the specified device.
1482 * @device: The device on which to create the CQ.
1483 * @comp_handler: A user-specified callback that is invoked when a
1484 *   completion event occurs on the CQ.
1485 * @event_handler: A user-specified callback that is invoked when an
1486 *   asynchronous event not associated with a completion occurs on the CQ.
1487 * @cq_context: Context associated with the CQ returned to the user via
1488 *   the associated completion and event handlers.
1489 * @cqe: The minimum size of the CQ.
1490 * @comp_vector - Completion vector used to signal completion events.
1491 *     Must be >= 0 and < device->num_comp_vectors.
1492 *
1493 * Users can examine the cq structure to determine the actual CQ size.
1494 */
1495struct ib_cq *ib_create_cq(struct ib_device *device,
1496			   ib_comp_handler comp_handler,
1497			   void (*event_handler)(struct ib_event *, void *),
1498			   void *cq_context, int cqe, int comp_vector);
 
 
 
 
1499
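/*
 * Usage sketch: creating a CQ with a completion handler on completion
 * vector 0 and then arming it for notification.  The handler and context
 * names are hypothetical.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, example_comp_handler, NULL, ctx, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */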
1500/**
1501 * ib_resize_cq - Modifies the capacity of the CQ.
1502 * @cq: The CQ to resize.
1503 * @cqe: The minimum size of the CQ.
1504 *
1505 * Users can examine the cq structure to determine the actual CQ size.
1506 */
1507int ib_resize_cq(struct ib_cq *cq, int cqe);
1508
1509/**
1510 * ib_modify_cq - Modifies moderation params of the CQ
1511 * @cq: The CQ to modify.
1512 * @cq_count: number of CQEs that will trigger an event
1513 * @cq_period: max period of time in usec before triggering an event
1514 *
1515 */
1516int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1517
1518/**
1519 * ib_destroy_cq - Destroys the specified CQ.
1520 * @cq: The CQ to destroy.
 
 
1521 */
1522int ib_destroy_cq(struct ib_cq *cq);
 
 
 
 
 
1523
1524/**
1525 * ib_poll_cq - poll a CQ for completion(s)
1526 * @cq:the CQ being polled
1527 * @num_entries:maximum number of completions to return
1528 * @wc:array of at least @num_entries &struct ib_wc where completions
1529 *   will be returned
1530 *
1531 * Poll a CQ for (possibly multiple) completions.  If the return value
1532 * is < 0, an error occurred.  If the return value is >= 0, it is the
1533 * number of completions returned.  If the return value is
1534 * non-negative and < num_entries, then the CQ was emptied.
1535 */
1536static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1537			     struct ib_wc *wc)
1538{
1539	return cq->device->poll_cq(cq, num_entries, wc);
1540}
1541
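/*
 * Usage sketch: draining a CQ, typically from the completion handler, and
 * checking each completion's status before dispatching on wc.opcode.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS) {
 *			pr_err("wr_id %llu failed with status %d\n",
 *			       (unsigned long long) wc.wr_id, wc.status);
 *			continue;
 *		}
 *		... handle wc.opcode ...
 *	}
 */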
1542/**
1543 * ib_peek_cq - Returns the number of unreaped completions currently
1544 *   on the specified CQ.
1545 * @cq: The CQ to peek.
1546 * @wc_cnt: A minimum number of unreaped completions to check for.
1547 *
1548 * If the number of unreaped completions is greater than or equal to wc_cnt,
1549 * this function returns wc_cnt, otherwise, it returns the actual number of
1550 * unreaped completions.
1551 */
1552int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1553
1554/**
1555 * ib_req_notify_cq - Request completion notification on a CQ.
1556 * @cq: The CQ to generate an event for.
1557 * @flags:
1558 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1559 *   to request an event on the next solicited event or next work
1560 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1561 *   may also be |ed in to request a hint about missed events, as
1562 *   described below.
1563 *
1564 * Return Value:
1565 *    < 0 means an error occurred while requesting notification
1566 *   == 0 means notification was requested successfully, and if
1567 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1568 *        were missed and it is safe to wait for another event.  In
1569 *        this case it is guaranteed that any work completions added
1570 *        to the CQ since the last CQ poll will trigger a completion
1571 *        notification event.
1572 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1573 *        in.  It means that the consumer must poll the CQ again to
1574 *        make sure it is empty to avoid missing an event because of a
1575 *        race between requesting notification and an entry being
1576 *        added to the CQ.  This return value means it is possible
1577 *        (but not guaranteed) that a work completion has been added
1578 *        to the CQ since the last poll without triggering a
1579 *        completion notification event.
1580 */
1581static inline int ib_req_notify_cq(struct ib_cq *cq,
1582				   enum ib_cq_notify_flags flags)
1583{
1584	return cq->device->req_notify_cq(cq, flags);
1585}
1586
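/*
 * Example (editor's sketch, not part of the original header): the usual
 * "drain, re-arm, drain again" pattern that closes the race described
 * above.  process_cq() is a hypothetical helper that calls ib_poll_cq()
 * until the CQ is empty.
 *
 *	process_cq(cq);
 *	while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		process_cq(cq);
 */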
1587/**
1588 * ib_req_ncomp_notif - Request completion notification when there are
1589 *   at least the specified number of unreaped completions on the CQ.
1590 * @cq: The CQ to generate an event for.
1591 * @wc_cnt: The number of unreaped completions that should be on the
1592 *   CQ before an event is generated.
1593 */
1594static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1595{
1596	return cq->device->req_ncomp_notif ?
1597		cq->device->req_ncomp_notif(cq, wc_cnt) :
1598		-ENOSYS;
1599}
1600
1601/**
1602 * ib_get_dma_mr - Returns a memory region for system memory that is
1603 *   usable for DMA.
1604 * @pd: The protection domain associated with the memory region.
1605 * @mr_access_flags: Specifies the memory access rights.
1606 *
1607 * Note that the ib_dma_*() functions defined below must be used
1608 * to create/destroy addresses used with the Lkey or Rkey returned
1609 * by ib_get_dma_mr().
1610 */
1611struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1612
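/*
 * Example (editor's sketch, not part of the original header): obtaining a
 * DMA MR whose lkey lets the HCA access kernel memory that has been mapped
 * with the ib_dma_*() helpers below.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *	struct ib_mr *mr;
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */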
1613/**
1614 * ib_dma_mapping_error - check a DMA addr for error
1615 * @dev: The device for which the dma_addr was created
1616 * @dma_addr: The DMA address to check
1617 */
1618static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1619{
1620	if (dev->dma_ops)
1621		return dev->dma_ops->mapping_error(dev, dma_addr);
1622	return dma_mapping_error(dev->dma_device, dma_addr);
1623}
1624
1625/**
1626 * ib_dma_map_single - Map a kernel virtual address to DMA address
1627 * @dev: The device for which the dma_addr is to be created
1628 * @cpu_addr: The kernel virtual address
1629 * @size: The size of the region in bytes
1630 * @direction: The direction of the DMA
1631 */
1632static inline u64 ib_dma_map_single(struct ib_device *dev,
1633				    void *cpu_addr, size_t size,
1634				    enum dma_data_direction direction)
1635{
1636	if (dev->dma_ops)
1637		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1638	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1639}
1640
1641/**
1642 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1643 * @dev: The device for which the DMA address was created
1644 * @addr: The DMA address
1645 * @size: The size of the region in bytes
1646 * @direction: The direction of the DMA
1647 */
1648static inline void ib_dma_unmap_single(struct ib_device *dev,
1649				       u64 addr, size_t size,
1650				       enum dma_data_direction direction)
1651{
1652	if (dev->dma_ops)
1653		dev->dma_ops->unmap_single(dev, addr, size, direction);
1654	else
1655		dma_unmap_single(dev->dma_device, addr, size, direction);
1656}
1657
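/*
 * Example (editor's sketch, not part of the original header): map a kernel
 * buffer for the device to read, check the mapping, and unmap it once the
 * transfer has completed.  buf and len are assumed caller state.
 *
 *	u64 dma;
 *
 *	dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... post a work request whose sge references dma/len ...
 *	ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */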
1658static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
1659					  void *cpu_addr, size_t size,
1660					  enum dma_data_direction direction,
1661					  struct dma_attrs *attrs)
1662{
1663	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
1664				    direction, attrs);
1665}
1666
1667static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1668					     u64 addr, size_t size,
1669					     enum dma_data_direction direction,
1670					     struct dma_attrs *attrs)
1671{
1672	return dma_unmap_single_attrs(dev->dma_device, addr, size,
1673				      direction, attrs);
1674}
1675
1676/**
1677 * ib_dma_map_page - Map a physical page to DMA address
1678 * @dev: The device for which the dma_addr is to be created
1679 * @page: The page to be mapped
1680 * @offset: The offset within the page
1681 * @size: The size of the region in bytes
1682 * @direction: The direction of the DMA
1683 */
1684static inline u64 ib_dma_map_page(struct ib_device *dev,
1685				  struct page *page,
1686				  unsigned long offset,
1687				  size_t size,
1688					 enum dma_data_direction direction)
1689{
1690	if (dev->dma_ops)
1691		return dev->dma_ops->map_page(dev, page, offset, size, direction);
1692	return dma_map_page(dev->dma_device, page, offset, size, direction);
1693}
1694
1695/**
1696 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1697 * @dev: The device for which the DMA address was created
1698 * @addr: The DMA address
1699 * @size: The size of the region in bytes
1700 * @direction: The direction of the DMA
1701 */
1702static inline void ib_dma_unmap_page(struct ib_device *dev,
1703				     u64 addr, size_t size,
1704				     enum dma_data_direction direction)
1705{
1706	if (dev->dma_ops)
1707		dev->dma_ops->unmap_page(dev, addr, size, direction);
1708	else
1709		dma_unmap_page(dev->dma_device, addr, size, direction);
1710}
1711
1712/**
1713 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1714 * @dev: The device for which the DMA addresses are to be created
1715 * @sg: The array of scatter/gather entries
1716 * @nents: The number of scatter/gather entries
1717 * @direction: The direction of the DMA
1718 */
1719static inline int ib_dma_map_sg(struct ib_device *dev,
1720				struct scatterlist *sg, int nents,
1721				enum dma_data_direction direction)
1722{
1723	if (dev->dma_ops)
1724		return dev->dma_ops->map_sg(dev, sg, nents, direction);
1725	return dma_map_sg(dev->dma_device, sg, nents, direction);
1726}
1727
1728/**
1729 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1730 * @dev: The device for which the DMA addresses were created
1731 * @sg: The array of scatter/gather entries
1732 * @nents: The number of scatter/gather entries
1733 * @direction: The direction of the DMA
1734 */
1735static inline void ib_dma_unmap_sg(struct ib_device *dev,
1736				   struct scatterlist *sg, int nents,
1737				   enum dma_data_direction direction)
1738{
1739	if (dev->dma_ops)
1740		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1741	else
1742		dma_unmap_sg(dev->dma_device, sg, nents, direction);
1743}
1744
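/*
 * Example (editor's sketch, not part of the original header): map a
 * scatterlist and walk the mapped entries with the ib_sg_dma_*() accessors
 * declared below.  use_segment() is a hypothetical consumer; note that the
 * unmap uses the original nents, not the value returned by ib_dma_map_sg().
 *
 *	struct scatterlist *sg;
 *	int i, n;
 *
 *	n = ib_dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, n, i)
 *		use_segment(ib_sg_dma_address(dev, sg),
 *			    ib_sg_dma_len(dev, sg));
 *	ib_dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */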
1745static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1746				      struct scatterlist *sg, int nents,
1747				      enum dma_data_direction direction,
1748				      struct dma_attrs *attrs)
1749{
1750	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1751}
1752
1753static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1754					 struct scatterlist *sg, int nents,
1755					 enum dma_data_direction direction,
1756					 struct dma_attrs *attrs)
1757{
1758	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1759}
1760/**
1761 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1762 * @dev: The device for which the DMA addresses were created
1763 * @sg: The scatter/gather entry
1764 */
1765static inline u64 ib_sg_dma_address(struct ib_device *dev,
1766				    struct scatterlist *sg)
1767{
1768	if (dev->dma_ops)
1769		return dev->dma_ops->dma_address(dev, sg);
1770	return sg_dma_address(sg);
1771}
1772
1773/**
1774 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1775 * @dev: The device for which the DMA addresses were created
1776 * @sg: The scatter/gather entry
1777 */
1778static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1779					 struct scatterlist *sg)
1780{
1781	if (dev->dma_ops)
1782		return dev->dma_ops->dma_len(dev, sg);
1783	return sg_dma_len(sg);
1784}
1785
1786/**
1787 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1788 * @dev: The device for which the DMA address was created
1789 * @addr: The DMA address
1790 * @size: The size of the region in bytes
1791 * @dir: The direction of the DMA
1792 */
1793static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1794					      u64 addr,
1795					      size_t size,
1796					      enum dma_data_direction dir)
1797{
1798	if (dev->dma_ops)
1799		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1800	else
1801		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1802}
1803
1804/**
1805 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1806 * @dev: The device for which the DMA address was created
1807 * @addr: The DMA address
1808 * @size: The size of the region in bytes
1809 * @dir: The direction of the DMA
1810 */
1811static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1812						 u64 addr,
1813						 size_t size,
1814						 enum dma_data_direction dir)
1815{
1816	if (dev->dma_ops)
1817		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1818	else
1819		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1820}
1821
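/*
 * Example (editor's sketch, not part of the original header): a receive
 * buffer that stays mapped DMA_FROM_DEVICE across many receives has to be
 * handed back and forth explicitly before the CPU reads it and before the
 * buffer is reposted to the hardware.
 *
 *	ib_dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU inspects or copies the received data ...
 *	ib_dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */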
1822/**
1823 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1824 * @dev: The device for which the DMA address is requested
1825 * @size: The size of the region to allocate in bytes
1826 * @dma_handle: A pointer for returning the DMA address of the region
1827 * @flag: memory allocator flags
1828 */
1829static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1830					   size_t size,
1831					   u64 *dma_handle,
1832					   gfp_t flag)
1833{
1834	if (dev->dma_ops)
1835		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1836	else {
1837		dma_addr_t handle;
1838		void *ret;
1839
1840		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1841		*dma_handle = handle;
1842		return ret;
1843	}
1844}
1845
1846/**
1847 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1848 * @dev: The device for which the DMA addresses were allocated
1849 * @size: The size of the region
1850 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1851 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1852 */
1853static inline void ib_dma_free_coherent(struct ib_device *dev,
1854					size_t size, void *cpu_addr,
1855					u64 dma_handle)
1856{
1857	if (dev->dma_ops)
1858		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1859	else
1860		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1861}
1862
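/*
 * Example (editor's sketch, not part of the original header): allocate a
 * page of coherent memory for a descriptor ring shared with the HCA and
 * release it at teardown.
 *
 *	u64 dma;
 *	void *ring;
 *
 *	ring = ib_dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	ib_dma_free_coherent(dev, PAGE_SIZE, ring, dma);
 */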
1863/**
1864 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1865 *   by an HCA.
1866 * @pd: The protection domain associated with the registered region.
1867 * @phys_buf_array: Specifies a list of physical buffers to use in the
1868 *   memory region.
1869 * @num_phys_buf: Specifies the size of the phys_buf_array.
1870 * @mr_access_flags: Specifies the memory access rights.
1871 * @iova_start: The offset of the region's starting I/O virtual address.
1872 */
1873struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1874			     struct ib_phys_buf *phys_buf_array,
1875			     int num_phys_buf,
1876			     int mr_access_flags,
1877			     u64 *iova_start);
1878
1879/**
1880 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1881 *   Conceptually, this call performs a deregister memory region operation
1882 *   followed by a register physical memory region operation.  Where possible,
1883 *   resources are reused instead of deallocated and reallocated.
1884 * @mr: The memory region to modify.
1885 * @mr_rereg_mask: A bit-mask used to indicate which of the following
1886 *   properties of the memory region are being modified.
1887 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1888 *   the new protection domain to associate with the memory region,
1889 *   otherwise, this parameter is ignored.
1890 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1891 *   field specifies a list of physical buffers to use in the new
1892 *   translation, otherwise, this parameter is ignored.
1893 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1894 *   field specifies the size of the phys_buf_array, otherwise, this
1895 *   parameter is ignored.
1896 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1897 *   field specifies the new memory access rights, otherwise, this
1898 *   parameter is ignored.
1899 * @iova_start: The offset of the region's starting I/O virtual address.
1900 */
1901int ib_rereg_phys_mr(struct ib_mr *mr,
1902		     int mr_rereg_mask,
1903		     struct ib_pd *pd,
1904		     struct ib_phys_buf *phys_buf_array,
1905		     int num_phys_buf,
1906		     int mr_access_flags,
1907		     u64 *iova_start);
1908
1909/**
1910 * ib_query_mr - Retrieves information about a specific memory region.
1911 * @mr: The memory region to retrieve information about.
1912 * @mr_attr: The attributes of the specified memory region.
1913 */
1914int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1915
1916/**
1917 * ib_dereg_mr - Deregisters a memory region and removes it from the
1918 *   HCA translation table.
1919 * @mr: The memory region to deregister.
1920 */
1921int ib_dereg_mr(struct ib_mr *mr);
1922
1923/**
1924 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
1925 *   IB_WR_FAST_REG_MR send work request.
1926 * @pd: The protection domain associated with the region.
1927 * @max_page_list_len: requested max physical buffer list length to be
1928 *   used with fast register work requests for this MR.
1929 */
1930struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
1931
1932/**
1933 * ib_alloc_fast_reg_page_list - Allocates a page list array
1934 * @device - ib device pointer.
1935 * @page_list_len - size of the page list array to be allocated.
1936 *
1937 * This allocates and returns a struct ib_fast_reg_page_list * and a
1938 * page_list array that is at least page_list_len in size.  The actual
1939 * size is returned in max_page_list_len.  The caller is responsible
1940 * for initializing the contents of the page_list array before posting
1941 * a send work request with the IB_WR_FAST_REG_MR opcode.
1942 *
1943 * The page_list array entries must be translated using one of the
1944 * ib_dma_*() functions just like the addresses passed to
1945 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
1946 * ib_fast_reg_page_list must not be modified by the caller until the
1947 * IB_WC_FAST_REG_MR work request completes.
1948 */
1949struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
1950				struct ib_device *device, int page_list_len);
1951
1952/**
1953 * ib_free_fast_reg_page_list - Deallocates a previously allocated
1954 *   page list array.
1955 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
1956 */
1957void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
1958
1959/**
1960 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
1961 *   R_Key and L_Key.
1962 * @mr - struct ib_mr pointer to be updated.
1963 * @newkey - new key to be used.
1964 */
1965static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
1966{
1967	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
1968	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
1969}
1970
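/*
 * Example (editor's sketch, not part of the original header): the usual
 * fast-registration sequence - allocate the MR and page list once, then for
 * each registration fill the page list with ib_dma_*() addresses, bump the
 * key so the HCA sees a fresh rkey, and post an IB_WR_FAST_REG_MR work
 * request.  n, iova and key are driver state, and the wr.wr.fast_reg field
 * names are those used by kernels of this generation.
 *
 *	struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, 32);
 *	struct ib_fast_reg_page_list *pl =
 *		ib_alloc_fast_reg_page_list(device, 32);
 *	struct ib_send_wr wr, *bad_wr;
 *
 *	... fill pl->page_list[0..n-1] with DMA-mapped page addresses ...
 *	ib_update_fast_reg_key(mr, ++key);
 *	memset(&wr, 0, sizeof(wr));
 *	wr.opcode = IB_WR_FAST_REG_MR;
 *	wr.wr.fast_reg.iova_start = iova;
 *	wr.wr.fast_reg.page_list = pl;
 *	wr.wr.fast_reg.page_list_len = n;
 *	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 *	wr.wr.fast_reg.length = n << PAGE_SHIFT;
 *	wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_READ;
 *	wr.wr.fast_reg.rkey = mr->rkey;
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */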
1971/**
1972 * ib_alloc_mw - Allocates a memory window.
1973 * @pd: The protection domain associated with the memory window.
1974 */
1975struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1976
1977/**
1978 * ib_bind_mw - Posts a work request to the send queue of the specified
1979 *   QP, which binds the memory window to the given address range and
1980 *   remote access attributes.
1981 * @qp: QP to post the bind work request on.
1982 * @mw: The memory window to bind.
1983 * @mw_bind: Specifies information about the memory window, including
1984 *   its address range, remote access rights, and associated memory region.
1985 */
1986static inline int ib_bind_mw(struct ib_qp *qp,
1987			     struct ib_mw *mw,
1988			     struct ib_mw_bind *mw_bind)
1989{
1990	/* XXX reference counting in corresponding MR? */
1991	return mw->device->bind_mw ?
1992		mw->device->bind_mw(qp, mw, mw_bind) :
1993		-ENOSYS;
1994}
1995
1996/**
1997 * ib_dealloc_mw - Deallocates a memory window.
1998 * @mw: The memory window to deallocate.
1999 */
2000int ib_dealloc_mw(struct ib_mw *mw);
2001
2002/**
2003 * ib_alloc_fmr - Allocates an unmapped fast memory region.
2004 * @pd: The protection domain associated with the unmapped region.
2005 * @mr_access_flags: Specifies the memory access rights.
2006 * @fmr_attr: Attributes of the unmapped region.
2007 *
2008 * A fast memory region must be mapped before it can be used as part of
2009 * a work request.
2010 */
2011struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2012			    int mr_access_flags,
2013			    struct ib_fmr_attr *fmr_attr);
2014
2015/**
2016 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2017 * @fmr: The fast memory region to associate with the pages.
2018 * @page_list: An array of physical pages to map to the fast memory region.
2019 * @list_len: The number of pages in page_list.
2020 * @iova: The I/O virtual address to use with the mapped region.
2021 */
2022static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2023				  u64 *page_list, int list_len,
2024				  u64 iova)
2025{
2026	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2027}
2028
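/*
 * Example (editor's sketch, not part of the original header): the FMR
 * lifecycle - allocate once, map/unmap repeatedly, deallocate at teardown.
 * The fmr_attr sizing values and page_dma_list/npages/iova are illustrative
 * caller state.
 *
 *	struct ib_fmr_attr fmr_attr = {
 *		.max_pages	= 64,
 *		.max_maps	= 32,
 *		.page_shift	= PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr;
 *	LIST_HEAD(fmr_list);
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_REMOTE_READ, &fmr_attr);
 *	ret = ib_map_phys_fmr(fmr, page_dma_list, npages, iova);
 *	... I/O using fmr->lkey / fmr->rkey ...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */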
2029/**
2030 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2031 * @fmr_list: A linked list of fast memory regions to unmap.
2032 */
2033int ib_unmap_fmr(struct list_head *fmr_list);
2034
2035/**
2036 * ib_dealloc_fmr - Deallocates a fast memory region.
2037 * @fmr: The fast memory region to deallocate.
2038 */
2039int ib_dealloc_fmr(struct ib_fmr *fmr);
2040
2041/**
2042 * ib_attach_mcast - Attaches the specified QP to a multicast group.
2043 * @qp: QP to attach to the multicast group.  The QP must be type
2044 *   IB_QPT_UD.
2045 * @gid: Multicast group GID.
2046 * @lid: Multicast group LID in host byte order.
2047 *
2048 * In order to send and receive multicast packets, subnet
2049 * administration must have created the multicast group and configured
2050 * the fabric appropriately.  The port associated with the specified
2051 * QP must also be a member of the multicast group.
2052 */
2053int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2054
2055/**
2056 * ib_detach_mcast - Detaches the specified QP from a multicast group.
2057 * @qp: QP to detach from the multicast group.
2058 * @gid: Multicast group GID.
2059 * @lid: Multicast group LID in host byte order.
2060 */
2061int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
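/*
 * Example (editor's sketch, not part of the original header): a UD QP
 * joining and later leaving a multicast group whose GID and LID were
 * obtained from the SA (for instance via ib_sa_join_multicast()).
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	... send and receive on the group ...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */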
2062
2063#endif /* IB_VERBS_H */
v6.13.7
   1/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
   2/*
   3 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
   4 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
   5 * Copyright (c) 2004, 2020 Intel Corporation.  All rights reserved.
   6 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   7 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
   8 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   9 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
  10 */
  11
  12#ifndef IB_VERBS_H
  13#define IB_VERBS_H
  14
  15#include <linux/ethtool.h>
  16#include <linux/types.h>
  17#include <linux/device.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/kref.h>
  20#include <linux/list.h>
  21#include <linux/rwsem.h>
  22#include <linux/workqueue.h>
  23#include <linux/irq_poll.h>
  24#include <uapi/linux/if_ether.h>
  25#include <net/ipv6.h>
  26#include <net/ip.h>
  27#include <linux/string.h>
  28#include <linux/slab.h>
  29#include <linux/netdevice.h>
  30#include <linux/refcount.h>
  31#include <linux/if_link.h>
  32#include <linux/atomic.h>
  33#include <linux/mmu_notifier.h>
  34#include <linux/uaccess.h>
  35#include <linux/cgroup_rdma.h>
  36#include <linux/irqflags.h>
  37#include <linux/preempt.h>
  38#include <linux/dim.h>
  39#include <uapi/rdma/ib_user_verbs.h>
  40#include <rdma/rdma_counter.h>
  41#include <rdma/restrack.h>
  42#include <rdma/signature.h>
  43#include <uapi/rdma/rdma_user_ioctl.h>
  44#include <uapi/rdma/ib_user_ioctl_verbs.h>
  45
  46#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
  47
  48struct ib_umem_odp;
  49struct ib_uqp_object;
  50struct ib_usrq_object;
  51struct ib_uwq_object;
  52struct rdma_cm_id;
  53struct ib_port;
  54struct hw_stats_device_data;
  55
  56extern struct workqueue_struct *ib_wq;
  57extern struct workqueue_struct *ib_comp_wq;
  58extern struct workqueue_struct *ib_comp_unbound_wq;
  59
  60struct ib_ucq_object;
  61
  62__printf(3, 4) __cold
  63void ibdev_printk(const char *level, const struct ib_device *ibdev,
  64		  const char *format, ...);
  65__printf(2, 3) __cold
  66void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
  67__printf(2, 3) __cold
  68void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
  69__printf(2, 3) __cold
  70void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
  71__printf(2, 3) __cold
  72void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
  73__printf(2, 3) __cold
  74void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
  75__printf(2, 3) __cold
  76void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
  77__printf(2, 3) __cold
  78void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
  79
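/*
 * Example (editor's sketch, not part of the original header): these helpers
 * take the ib_device so that messages identify the RDMA device by name;
 * ratelimited variants are defined below.
 *
 *	ibdev_warn(ibdev, "port %u: link flapped %d times\n", port, cnt);
 *	ibdev_err_ratelimited(ibdev, "CQE error, syndrome 0x%x\n", syndrome);
 */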
  80#if defined(CONFIG_DYNAMIC_DEBUG) || \
  81	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
  82#define ibdev_dbg(__dev, format, args...)                       \
  83	dynamic_ibdev_dbg(__dev, format, ##args)
  84#else
  85__printf(2, 3) __cold
  86static inline
  87void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
  88#endif
  89
  90#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
  91do {                                                                    \
  92	static DEFINE_RATELIMIT_STATE(_rs,                              \
  93				      DEFAULT_RATELIMIT_INTERVAL,       \
  94				      DEFAULT_RATELIMIT_BURST);         \
  95	if (__ratelimit(&_rs))                                          \
  96		ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
  97} while (0)
  98
  99#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
 100	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
 101#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
 102	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
 103#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
 104	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
 105#define ibdev_err_ratelimited(ibdev, fmt, ...) \
 106	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
 107#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
 108	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
 109#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
 110	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
 111#define ibdev_info_ratelimited(ibdev, fmt, ...) \
 112	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
 113
 114#if defined(CONFIG_DYNAMIC_DEBUG) || \
 115	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
 116/* descriptor check is first to prevent flooding with "callbacks suppressed" */
 117#define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
 118do {                                                                    \
 119	static DEFINE_RATELIMIT_STATE(_rs,                              \
 120				      DEFAULT_RATELIMIT_INTERVAL,       \
 121				      DEFAULT_RATELIMIT_BURST);         \
 122	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
 123	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
 124		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
 125				    ##__VA_ARGS__);                     \
 126} while (0)
 127#else
 128__printf(2, 3) __cold
 129static inline
 130void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
 131#endif
 132
 133union ib_gid {
 134	u8	raw[16];
 135	struct {
 136		__be64	subnet_prefix;
 137		__be64	interface_id;
 138	} global;
 139};
 140
 141extern union ib_gid zgid;
 142
 143enum ib_gid_type {
 144	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
 145	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
 146	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
 147	IB_GID_TYPE_SIZE
 148};
 149
 150#define ROCE_V2_UDP_DPORT      4791
 151struct ib_gid_attr {
 152	struct net_device __rcu	*ndev;
 153	struct ib_device	*device;
 154	union ib_gid		gid;
 155	enum ib_gid_type	gid_type;
 156	u16			index;
 157	u32			port_num;
 158};
 159
 160enum {
 161	/* set the local administered indication */
 162	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
 163};
 164
 165enum rdma_transport_type {
 166	RDMA_TRANSPORT_IB,
 167	RDMA_TRANSPORT_IWARP,
 168	RDMA_TRANSPORT_USNIC,
 169	RDMA_TRANSPORT_USNIC_UDP,
 170	RDMA_TRANSPORT_UNSPECIFIED,
 171};
 172
 173enum rdma_protocol_type {
 174	RDMA_PROTOCOL_IB,
 175	RDMA_PROTOCOL_IBOE,
 176	RDMA_PROTOCOL_IWARP,
 177	RDMA_PROTOCOL_USNIC_UDP
 178};
 179
 180__attribute_const__ enum rdma_transport_type
 181rdma_node_get_transport(unsigned int node_type);
 182
 183enum rdma_network_type {
 184	RDMA_NETWORK_IB,
 185	RDMA_NETWORK_ROCE_V1,
 186	RDMA_NETWORK_IPV4,
 187	RDMA_NETWORK_IPV6
 188};
 189
 190static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
 191{
 192	if (network_type == RDMA_NETWORK_IPV4 ||
 193	    network_type == RDMA_NETWORK_IPV6)
 194		return IB_GID_TYPE_ROCE_UDP_ENCAP;
 195	else if (network_type == RDMA_NETWORK_ROCE_V1)
 196		return IB_GID_TYPE_ROCE;
 197	else
 198		return IB_GID_TYPE_IB;
 199}
 200
 201static inline enum rdma_network_type
 202rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
 203{
 204	if (attr->gid_type == IB_GID_TYPE_IB)
 205		return RDMA_NETWORK_IB;
 206
 207	if (attr->gid_type == IB_GID_TYPE_ROCE)
 208		return RDMA_NETWORK_ROCE_V1;
 209
 210	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
 211		return RDMA_NETWORK_IPV4;
 212	else
 213		return RDMA_NETWORK_IPV6;
 214}
 215
 216enum rdma_link_layer {
 217	IB_LINK_LAYER_UNSPECIFIED,
 218	IB_LINK_LAYER_INFINIBAND,
 219	IB_LINK_LAYER_ETHERNET,
 220};
 221
 222enum ib_device_cap_flags {
 223	IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
 224	IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
 225	IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
 226	IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
 227	IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
 228	IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
 229	IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
 230	IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
 231	IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
 232	/* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
 233	IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
 234	IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
 235	IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
 236	IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
 237	IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
 238
 239	/* Reserved, old SEND_W_INV = 1 << 16,*/
 240	IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
 241	/*
 242	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
 243	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
 244	 * messages and can verify the validity of the checksum for
 245	 * incoming messages.  Setting this flag implies that the
 246	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
 247	 */
 248	IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
 249	IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
 250
 251	/*
 252	 * This device supports the IB "base memory management extension",
 253	 * which includes support for fast registrations (IB_WR_REG_MR,
 254	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
 255	 * also be set by any iWarp device which must support FRs to comply
 256	 * with the iWarp verbs spec.  iWarp devices also support the
 257	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
 258	 * stag.
 259	 */
 260	IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
 261	IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
 262	IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
 263	IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
 264	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
 265	IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
 266	IB_DEVICE_MANAGED_FLOW_STEERING =
 267		IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
 268	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
 269	IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
 270	/* The device supports padding incoming writes to cacheline. */
 271	IB_DEVICE_PCI_WRITE_END_PADDING =
 272		IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
 273	/* Placement type attributes */
 274	IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
 275	IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
 276	IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
 277};
 278
 279enum ib_kernel_cap_flags {
 280	/*
 281	 * This device supports a per-device lkey or stag that can be
 282	 * used without performing a memory registration for the local
 283	 * memory.  Note that ULPs should never check this flag, but
 284	 * instead use the local_dma_lkey flag in the ib_pd structure,
 285	 * which will always contain a usable lkey.
 286	 */
 287	IBK_LOCAL_DMA_LKEY = 1 << 0,
 288	/* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
 289	IBK_INTEGRITY_HANDOVER = 1 << 1,
 290	/* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
 291	IBK_ON_DEMAND_PAGING = 1 << 2,
 292	/* IB_MR_TYPE_SG_GAPS is supported */
 293	IBK_SG_GAPS_REG = 1 << 3,
 294	/* Driver supports RDMA_NLDEV_CMD_DELLINK */
 295	IBK_ALLOW_USER_UNREG = 1 << 4,
 296
 297	/* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
 298	IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
 299	/* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
 300	IBK_UD_TSO = 1 << 6,
 301	/* ipoib will use the device ops:
 302	 *   get_vf_config
 303	 *   get_vf_guid
 304	 *   get_vf_stats
 305	 *   set_vf_guid
 306	 *   set_vf_link_state
 307	 */
 308	IBK_VIRTUAL_FUNCTION = 1 << 7,
 309	/* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
 310	IBK_RDMA_NETDEV_OPA = 1 << 8,
 311};
 312
 313enum ib_atomic_cap {
 314	IB_ATOMIC_NONE,
 315	IB_ATOMIC_HCA,
 316	IB_ATOMIC_GLOB
 317};
 318
 319enum ib_odp_general_cap_bits {
 320	IB_ODP_SUPPORT		= 1 << 0,
 321	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
 322};
 323
 324enum ib_odp_transport_cap_bits {
 325	IB_ODP_SUPPORT_SEND	= 1 << 0,
 326	IB_ODP_SUPPORT_RECV	= 1 << 1,
 327	IB_ODP_SUPPORT_WRITE	= 1 << 2,
 328	IB_ODP_SUPPORT_READ	= 1 << 3,
 329	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
 330	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
 331};
 332
 333struct ib_odp_caps {
 334	uint64_t general_caps;
 335	struct {
 336		uint32_t  rc_odp_caps;
 337		uint32_t  uc_odp_caps;
 338		uint32_t  ud_odp_caps;
 339		uint32_t  xrc_odp_caps;
 340	} per_transport_caps;
 341};
 342
 343struct ib_rss_caps {
 344	/* Corresponding bit will be set if qp type from
 345	 * 'enum ib_qp_type' is supported, e.g.
 346	 * supported_qpts |= 1 << IB_QPT_UD
 347	 */
 348	u32 supported_qpts;
 349	u32 max_rwq_indirection_tables;
 350	u32 max_rwq_indirection_table_size;
 351};
 352
 353enum ib_tm_cap_flags {
 354	/*  Support tag matching with rendezvous offload for RC transport */
 355	IB_TM_CAP_RNDV_RC = 1 << 0,
 356};
 357
 358struct ib_tm_caps {
 359	/* Max size of RNDV header */
 360	u32 max_rndv_hdr_size;
 361	/* Max number of entries in tag matching list */
 362	u32 max_num_tags;
 363	/* From enum ib_tm_cap_flags */
 364	u32 flags;
 365	/* Max number of outstanding list operations */
 366	u32 max_ops;
 367	/* Max number of SGE in tag matching entry */
 368	u32 max_sge;
 369};
 370
 371struct ib_cq_init_attr {
 372	unsigned int	cqe;
 373	u32		comp_vector;
 374	u32		flags;
 375};
 376
 377enum ib_cq_attr_mask {
 378	IB_CQ_MODERATE = 1 << 0,
 379};
 380
 381struct ib_cq_caps {
 382	u16     max_cq_moderation_count;
 383	u16     max_cq_moderation_period;
 384};
 385
 386struct ib_dm_mr_attr {
 387	u64		length;
 388	u64		offset;
 389	u32		access_flags;
 390};
 391
 392struct ib_dm_alloc_attr {
 393	u64	length;
 394	u32	alignment;
 395	u32	flags;
 396};
 397
 398struct ib_device_attr {
 399	u64			fw_ver;
 400	__be64			sys_image_guid;
 401	u64			max_mr_size;
 402	u64			page_size_cap;
 403	u32			vendor_id;
 404	u32			vendor_part_id;
 405	u32			hw_ver;
 406	int			max_qp;
 407	int			max_qp_wr;
 408	u64			device_cap_flags;
 409	u64			kernel_cap_flags;
 410	int			max_send_sge;
 411	int			max_recv_sge;
 412	int			max_sge_rd;
 413	int			max_cq;
 414	int			max_cqe;
 415	int			max_mr;
 416	int			max_pd;
 417	int			max_qp_rd_atom;
 418	int			max_ee_rd_atom;
 419	int			max_res_rd_atom;
 420	int			max_qp_init_rd_atom;
 421	int			max_ee_init_rd_atom;
 422	enum ib_atomic_cap	atomic_cap;
 423	enum ib_atomic_cap	masked_atomic_cap;
 424	int			max_ee;
 425	int			max_rdd;
 426	int			max_mw;
 427	int			max_raw_ipv6_qp;
 428	int			max_raw_ethy_qp;
 429	int			max_mcast_grp;
 430	int			max_mcast_qp_attach;
 431	int			max_total_mcast_qp_attach;
 432	int			max_ah;
 433	int			max_srq;
 434	int			max_srq_wr;
 435	int			max_srq_sge;
 436	unsigned int		max_fast_reg_page_list_len;
 437	unsigned int		max_pi_fast_reg_page_list_len;
 438	u16			max_pkeys;
 439	u8			local_ca_ack_delay;
 440	int			sig_prot_cap;
 441	int			sig_guard_cap;
 442	struct ib_odp_caps	odp_caps;
 443	uint64_t		timestamp_mask;
 444	uint64_t		hca_core_clock; /* in KHZ */
 445	struct ib_rss_caps	rss_caps;
 446	u32			max_wq_type_rq;
 447	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
 448	struct ib_tm_caps	tm_caps;
 449	struct ib_cq_caps       cq_caps;
 450	u64			max_dm_size;
 451	/* Max entries for sgl for optimized performance per READ */
 452	u32			max_sgl_rd;
 453};
 454
 455enum ib_mtu {
 456	IB_MTU_256  = 1,
 457	IB_MTU_512  = 2,
 458	IB_MTU_1024 = 3,
 459	IB_MTU_2048 = 4,
 460	IB_MTU_4096 = 5
 461};
 462
 463enum opa_mtu {
 464	OPA_MTU_8192 = 6,
 465	OPA_MTU_10240 = 7
 466};
 467
 468static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
 469{
 470	switch (mtu) {
 471	case IB_MTU_256:  return  256;
 472	case IB_MTU_512:  return  512;
 473	case IB_MTU_1024: return 1024;
 474	case IB_MTU_2048: return 2048;
 475	case IB_MTU_4096: return 4096;
 476	default: 	  return -1;
 477	}
 478}
 479
 480static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
 481{
 482	if (mtu >= 4096)
 483		return IB_MTU_4096;
 484	else if (mtu >= 2048)
 485		return IB_MTU_2048;
 486	else if (mtu >= 1024)
 487		return IB_MTU_1024;
 488	else if (mtu >= 512)
 489		return IB_MTU_512;
 490	else
 491		return IB_MTU_256;
 492}
 493
 494static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
 495{
 496	switch (mtu) {
 497	case OPA_MTU_8192:
 498		return 8192;
 499	case OPA_MTU_10240:
 500		return 10240;
 501	default:
 502		return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
 503	}
 504}
 505
 506static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
 507{
 508	if (mtu >= 10240)
 509		return OPA_MTU_10240;
 510	else if (mtu >= 8192)
 511		return OPA_MTU_8192;
 512	else
 513		return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
 514}
 515
 516enum ib_port_state {
 517	IB_PORT_NOP		= 0,
 518	IB_PORT_DOWN		= 1,
 519	IB_PORT_INIT		= 2,
 520	IB_PORT_ARMED		= 3,
 521	IB_PORT_ACTIVE		= 4,
 522	IB_PORT_ACTIVE_DEFER	= 5
 523};
 524
 525enum ib_port_phys_state {
 526	IB_PORT_PHYS_STATE_SLEEP = 1,
 527	IB_PORT_PHYS_STATE_POLLING = 2,
 528	IB_PORT_PHYS_STATE_DISABLED = 3,
 529	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
 530	IB_PORT_PHYS_STATE_LINK_UP = 5,
 531	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
 532	IB_PORT_PHYS_STATE_PHY_TEST = 7,
 533};
 534
 535enum ib_port_width {
 536	IB_WIDTH_1X	= 1,
 537	IB_WIDTH_2X	= 16,
 538	IB_WIDTH_4X	= 2,
 539	IB_WIDTH_8X	= 4,
 540	IB_WIDTH_12X	= 8
 541};
 542
 543static inline int ib_width_enum_to_int(enum ib_port_width width)
 544{
 545	switch (width) {
 546	case IB_WIDTH_1X:  return  1;
 547	case IB_WIDTH_2X:  return  2;
 548	case IB_WIDTH_4X:  return  4;
 549	case IB_WIDTH_8X:  return  8;
 550	case IB_WIDTH_12X: return 12;
 551	default: 	  return -1;
 552	}
 553}
 554
 555enum ib_port_speed {
 556	IB_SPEED_SDR	= 1,
 557	IB_SPEED_DDR	= 2,
 558	IB_SPEED_QDR	= 4,
 559	IB_SPEED_FDR10	= 8,
 560	IB_SPEED_FDR	= 16,
 561	IB_SPEED_EDR	= 32,
 562	IB_SPEED_HDR	= 64,
 563	IB_SPEED_NDR	= 128,
 564	IB_SPEED_XDR	= 256,
 565};
 566
 567enum ib_stat_flag {
 568	IB_STAT_FLAG_OPTIONAL = 1 << 0,
 569};
 570
 571/**
 572 * struct rdma_stat_desc
 573 * @name - The name of the counter
 574 * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
 575 * @priv - Driver private information; Core code should not use
 576 */
 577struct rdma_stat_desc {
 578	const char *name;
 579	unsigned int flags;
 580	const void *priv;
 581};
 582
 583/**
 584 * struct rdma_hw_stats
 585 * @lock - Mutex to protect parallel write access to lifespan and values
 586 *    of counters, which are 64bits and not guaranteed to be written
 587 *    atomically on 32-bit systems.
 588 * @timestamp - Used by the core code to track when the last update was
 589 * @lifespan - Used by the core code to determine how old the counters
 590 *   should be before being updated again.  Stored in jiffies, defaults
 591 *   to 10 milliseconds, drivers can override the default by specifying
 592 *   their own value during their allocation routine.
 593 * @descs - Array of pointers to static descriptors used for the counters
 594 *   in directory.
 595 * @is_disabled - A bitmap to indicate each counter is currently disabled
 596 *   or not.
 597 * @num_counters - How many hardware counters there are.  If descs is
 598 *   shorter than this number, a kernel oops will result.  Driver authors
 599 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@descs) < num_counters)
 600 *   in their code to prevent this.
 601 * @value - Array of u64 counters that are accessed by the sysfs code and
 602 *   filled in by the drivers get_stats routine
 603 */
 604struct rdma_hw_stats {
 605	struct mutex	lock; /* Protect lifespan and values[] */
 606	unsigned long	timestamp;
 607	unsigned long	lifespan;
 608	const struct rdma_stat_desc *descs;
 609	unsigned long	*is_disabled;
 610	int		num_counters;
 611	u64		value[] __counted_by(num_counters);
 612};
 613
 614#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
 615
 616struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
 617	const struct rdma_stat_desc *descs, int num_counters,
 618	unsigned long lifespan);
 619
 620void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
 621
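/*
 * Example (editor's sketch, not part of the original header): a driver's
 * alloc_hw_port_stats() hook typically wraps this with a static descriptor
 * table; the core then calls the driver's get_hw_stats() no more often than
 * the chosen lifespan.  The counter names here are hypothetical.
 *
 *	static const struct rdma_stat_desc hypothetical_descs[] = {
 *		[0] = { .name = "rx_pkts" },
 *		[1] = { .name = "rx_bytes" },
 *		[2] = { .name = "tx_discards", .flags = IB_STAT_FLAG_OPTIONAL },
 *	};
 *
 *	return rdma_alloc_hw_stats_struct(hypothetical_descs,
 *					  ARRAY_SIZE(hypothetical_descs),
 *					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */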
 622/* Define bits for the various functionality this port needs to be supported by
 623 * the core.
 624 */
 625/* Management                           0x00000FFF */
 626#define RDMA_CORE_CAP_IB_MAD            0x00000001
 627#define RDMA_CORE_CAP_IB_SMI            0x00000002
 628#define RDMA_CORE_CAP_IB_CM             0x00000004
 629#define RDMA_CORE_CAP_IW_CM             0x00000008
 630#define RDMA_CORE_CAP_IB_SA             0x00000010
 631#define RDMA_CORE_CAP_OPA_MAD           0x00000020
 632
 633/* Address format                       0x000FF000 */
 634#define RDMA_CORE_CAP_AF_IB             0x00001000
 635#define RDMA_CORE_CAP_ETH_AH            0x00002000
 636#define RDMA_CORE_CAP_OPA_AH            0x00004000
 637#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000
 638
 639/* Protocol                             0xFFF00000 */
 640#define RDMA_CORE_CAP_PROT_IB           0x00100000
 641#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
 642#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
 643#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
 644#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
 645#define RDMA_CORE_CAP_PROT_USNIC        0x02000000
 646
 647#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
 648					| RDMA_CORE_CAP_PROT_ROCE     \
 649					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
 650
 651#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
 652					| RDMA_CORE_CAP_IB_MAD \
 653					| RDMA_CORE_CAP_IB_SMI \
 654					| RDMA_CORE_CAP_IB_CM  \
 655					| RDMA_CORE_CAP_IB_SA  \
 656					| RDMA_CORE_CAP_AF_IB)
 657#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
 658					| RDMA_CORE_CAP_IB_MAD  \
 659					| RDMA_CORE_CAP_IB_CM   \
 660					| RDMA_CORE_CAP_AF_IB   \
 661					| RDMA_CORE_CAP_ETH_AH)
 662#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
 663					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
 664					| RDMA_CORE_CAP_IB_MAD  \
 665					| RDMA_CORE_CAP_IB_CM   \
 666					| RDMA_CORE_CAP_AF_IB   \
 667					| RDMA_CORE_CAP_ETH_AH)
 668#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
 669					| RDMA_CORE_CAP_IW_CM)
 670#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
 671					| RDMA_CORE_CAP_OPA_MAD)
 672
 673#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)
 674
 675#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
 676
 677struct ib_port_attr {
 678	u64			subnet_prefix;
 679	enum ib_port_state	state;
 680	enum ib_mtu		max_mtu;
 681	enum ib_mtu		active_mtu;
 682	u32                     phys_mtu;
 683	int			gid_tbl_len;
 684	unsigned int		ip_gids:1;
 685	/* This is the value from PortInfo CapabilityMask, defined by IBA */
 686	u32			port_cap_flags;
 687	u32			max_msg_sz;
 688	u32			bad_pkey_cntr;
 689	u32			qkey_viol_cntr;
 690	u16			pkey_tbl_len;
 691	u32			sm_lid;
 692	u32			lid;
 693	u8			lmc;
 694	u8			max_vl_num;
 695	u8			sm_sl;
 696	u8			subnet_timeout;
 697	u8			init_type_reply;
 698	u8			active_width;
 699	u16			active_speed;
 700	u8                      phys_state;
 701	u16			port_cap_flags2;
 702};
 703
 704enum ib_device_modify_flags {
 705	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
 706	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
 707};
 708
 709#define IB_DEVICE_NODE_DESC_MAX 64
 710
 711struct ib_device_modify {
 712	u64	sys_image_guid;
 713	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
 714};
 715
 716enum ib_port_modify_flags {
 717	IB_PORT_SHUTDOWN		= 1,
 718	IB_PORT_INIT_TYPE		= (1<<2),
 719	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
 720	IB_PORT_OPA_MASK_CHG		= (1<<4)
 721};
 722
 723struct ib_port_modify {
 724	u32	set_port_cap_mask;
 725	u32	clr_port_cap_mask;
 726	u8	init_type;
 727};
 728
 729enum ib_event_type {
 730	IB_EVENT_CQ_ERR,
 731	IB_EVENT_QP_FATAL,
 732	IB_EVENT_QP_REQ_ERR,
 733	IB_EVENT_QP_ACCESS_ERR,
 734	IB_EVENT_COMM_EST,
 735	IB_EVENT_SQ_DRAINED,
 736	IB_EVENT_PATH_MIG,
 737	IB_EVENT_PATH_MIG_ERR,
 738	IB_EVENT_DEVICE_FATAL,
 739	IB_EVENT_PORT_ACTIVE,
 740	IB_EVENT_PORT_ERR,
 741	IB_EVENT_LID_CHANGE,
 742	IB_EVENT_PKEY_CHANGE,
 743	IB_EVENT_SM_CHANGE,
 744	IB_EVENT_SRQ_ERR,
 745	IB_EVENT_SRQ_LIMIT_REACHED,
 746	IB_EVENT_QP_LAST_WQE_REACHED,
 747	IB_EVENT_CLIENT_REREGISTER,
 748	IB_EVENT_GID_CHANGE,
 749	IB_EVENT_WQ_FATAL,
 750};
 751
 752const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
 753
 754struct ib_event {
 755	struct ib_device	*device;
 756	union {
 757		struct ib_cq	*cq;
 758		struct ib_qp	*qp;
 759		struct ib_srq	*srq;
 760		struct ib_wq	*wq;
 761		u32		port_num;
 762	} element;
 763	enum ib_event_type	event;
 764};
 765
 766struct ib_event_handler {
 767	struct ib_device *device;
 768	void            (*handler)(struct ib_event_handler *, struct ib_event *);
 769	struct list_head  list;
 770};
 771
 772#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
 773	do {							\
 774		(_ptr)->device  = _device;			\
 775		(_ptr)->handler = _handler;			\
 776		INIT_LIST_HEAD(&(_ptr)->list);			\
 777	} while (0)
 778
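/*
 * Example (editor's sketch, not part of the original header): an RDMA
 * consumer watching for asynchronous port events.  The handler is
 * registered with ib_register_event_handler(), declared further down in
 * this header; priv is hypothetical driver state.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ERR)
 *			ibdev_warn(event->device, "port %u went down\n",
 *				   event->element.port_num);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device, my_event_handler);
 *	ib_register_event_handler(&priv->event_handler);
 */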
 779struct ib_global_route {
 780	const struct ib_gid_attr *sgid_attr;
 781	union ib_gid	dgid;
 782	u32		flow_label;
 783	u8		sgid_index;
 784	u8		hop_limit;
 785	u8		traffic_class;
 786};
 787
 788struct ib_grh {
 789	__be32		version_tclass_flow;
 790	__be16		paylen;
 791	u8		next_hdr;
 792	u8		hop_limit;
 793	union ib_gid	sgid;
 794	union ib_gid	dgid;
 795};
 796
 797union rdma_network_hdr {
 798	struct ib_grh ibgrh;
 799	struct {
 800		/* The IB spec states that if it's IPv4, the IPv4 header
 801		 * is located in the last 20 bytes of the GRH.
 802		 */
 803		u8		reserved[20];
 804		struct iphdr	roce4grh;
 805	};
 806};
 807
 808#define IB_QPN_MASK		0xFFFFFF
 809
 810enum {
 811	IB_MULTICAST_QPN = 0xffffff
 812};
 813
 814#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
 815#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
 816
 817enum ib_ah_flags {
 818	IB_AH_GRH	= 1
 819};
 820
 821enum ib_rate {
 822	IB_RATE_PORT_CURRENT = 0,
 823	IB_RATE_2_5_GBPS = 2,
 824	IB_RATE_5_GBPS   = 5,
 825	IB_RATE_10_GBPS  = 3,
 826	IB_RATE_20_GBPS  = 6,
 827	IB_RATE_30_GBPS  = 4,
 828	IB_RATE_40_GBPS  = 7,
 829	IB_RATE_60_GBPS  = 8,
 830	IB_RATE_80_GBPS  = 9,
 831	IB_RATE_120_GBPS = 10,
 832	IB_RATE_14_GBPS  = 11,
 833	IB_RATE_56_GBPS  = 12,
 834	IB_RATE_112_GBPS = 13,
 835	IB_RATE_168_GBPS = 14,
 836	IB_RATE_25_GBPS  = 15,
 837	IB_RATE_100_GBPS = 16,
 838	IB_RATE_200_GBPS = 17,
 839	IB_RATE_300_GBPS = 18,
 840	IB_RATE_28_GBPS  = 19,
 841	IB_RATE_50_GBPS  = 20,
 842	IB_RATE_400_GBPS = 21,
 843	IB_RATE_600_GBPS = 22,
 844	IB_RATE_800_GBPS = 23,
 845};
 846
 847/**
 848 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 849 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 850 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 851 * @rate: rate to convert.
 852 */
 853__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
 854
 855/**
 856 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 857 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 858 * @rate: rate to convert.
 859 */
 860__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
 861
 862
 863/**
 864 * enum ib_mr_type - memory region type
 865 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 866 *                            normal registration
 867 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
 868 *                            register any arbitrary sg lists (without
 869 *                            the normal mr constraints - see
 870 *                            ib_map_mr_sg)
 871 * @IB_MR_TYPE_DM:            memory region that is used for device
 872 *                            memory registration
 873 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 874 *                            application
 875 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 876 *                            without address translations (VA=PA)
 877 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 878 *                            data integrity operations
 879 */
 880enum ib_mr_type {
 881	IB_MR_TYPE_MEM_REG,
 882	IB_MR_TYPE_SG_GAPS,
 883	IB_MR_TYPE_DM,
 884	IB_MR_TYPE_USER,
 885	IB_MR_TYPE_DMA,
 886	IB_MR_TYPE_INTEGRITY,
 887};
 888
 889enum ib_mr_status_check {
 890	IB_MR_CHECK_SIG_STATUS = 1,
 891};
 892
 893/**
 894 * struct ib_mr_status - Memory region status container
 895 *
 896 * @fail_status: Bitmask of MR checks status. For each
 897 *     failed check a corresponding status bit is set.
 898 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 899 *     failure.
 900 */
 901struct ib_mr_status {
 902	u32		    fail_status;
 903	struct ib_sig_err   sig_err;
 904};
 905
 906/**
 907 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 908 * enum.
 909 * @mult: multiple to convert.
 910 */
 911__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
 912
 913struct rdma_ah_init_attr {
 914	struct rdma_ah_attr *ah_attr;
 915	u32 flags;
 916	struct net_device *xmit_slave;
 917};
 918
 919enum rdma_ah_attr_type {
 920	RDMA_AH_ATTR_TYPE_UNDEFINED,
 921	RDMA_AH_ATTR_TYPE_IB,
 922	RDMA_AH_ATTR_TYPE_ROCE,
 923	RDMA_AH_ATTR_TYPE_OPA,
 924};
 925
 926struct ib_ah_attr {
 927	u16			dlid;
 928	u8			src_path_bits;
 929};
 930
 931struct roce_ah_attr {
 932	u8			dmac[ETH_ALEN];
 933};
 934
 935struct opa_ah_attr {
 936	u32			dlid;
 937	u8			src_path_bits;
 938	bool			make_grd;
 939};
 940
 941struct rdma_ah_attr {
 942	struct ib_global_route	grh;
 943	u8			sl;
 944	u8			static_rate;
 945	u32			port_num;
 946	u8			ah_flags;
 947	enum rdma_ah_attr_type type;
 948	union {
 949		struct ib_ah_attr ib;
 950		struct roce_ah_attr roce;
 951		struct opa_ah_attr opa;
 952	};
 953};
 954
 955enum ib_wc_status {
 956	IB_WC_SUCCESS,
 957	IB_WC_LOC_LEN_ERR,
 958	IB_WC_LOC_QP_OP_ERR,
 959	IB_WC_LOC_EEC_OP_ERR,
 960	IB_WC_LOC_PROT_ERR,
 961	IB_WC_WR_FLUSH_ERR,
 962	IB_WC_MW_BIND_ERR,
 963	IB_WC_BAD_RESP_ERR,
 964	IB_WC_LOC_ACCESS_ERR,
 965	IB_WC_REM_INV_REQ_ERR,
 966	IB_WC_REM_ACCESS_ERR,
 967	IB_WC_REM_OP_ERR,
 968	IB_WC_RETRY_EXC_ERR,
 969	IB_WC_RNR_RETRY_EXC_ERR,
 970	IB_WC_LOC_RDD_VIOL_ERR,
 971	IB_WC_REM_INV_RD_REQ_ERR,
 972	IB_WC_REM_ABORT_ERR,
 973	IB_WC_INV_EECN_ERR,
 974	IB_WC_INV_EEC_STATE_ERR,
 975	IB_WC_FATAL_ERR,
 976	IB_WC_RESP_TIMEOUT_ERR,
 977	IB_WC_GENERAL_ERR
 978};
 979
 980const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
 981
 982enum ib_wc_opcode {
 983	IB_WC_SEND = IB_UVERBS_WC_SEND,
 984	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
 985	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
 986	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
 987	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
 988	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
 989	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
 990	IB_WC_LSO = IB_UVERBS_WC_TSO,
 991	IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE,
 992	IB_WC_REG_MR,
 993	IB_WC_MASKED_COMP_SWAP,
 994	IB_WC_MASKED_FETCH_ADD,
 995	IB_WC_FLUSH = IB_UVERBS_WC_FLUSH,
 996/*
 997 * Set value of IB_WC_RECV so consumers can test if a completion is a
 998 * receive by testing (opcode & IB_WC_RECV).
 999 */
1000	IB_WC_RECV			= 1 << 7,
1001	IB_WC_RECV_RDMA_WITH_IMM
1002};
1003
1004enum ib_wc_flags {
1005	IB_WC_GRH		= 1,
1006	IB_WC_WITH_IMM		= (1<<1),
1007	IB_WC_WITH_INVALIDATE	= (1<<2),
1008	IB_WC_IP_CSUM_OK	= (1<<3),
1009	IB_WC_WITH_SMAC		= (1<<4),
1010	IB_WC_WITH_VLAN		= (1<<5),
1011	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
1012};
1013
1014struct ib_wc {
1015	union {
1016		u64		wr_id;
1017		struct ib_cqe	*wr_cqe;
1018	};
1019	enum ib_wc_status	status;
1020	enum ib_wc_opcode	opcode;
1021	u32			vendor_err;
1022	u32			byte_len;
1023	struct ib_qp	       *qp;
1024	union {
1025		__be32		imm_data;
1026		u32		invalidate_rkey;
1027	} ex;
1028	u32			src_qp;
1029	u32			slid;
1030	int			wc_flags;
1031	u16			pkey_index;
1032	u8			sl;
1033	u8			dlid_path_bits;
1034	u32 port_num; /* valid only for DR SMPs on switches */
1035	u8			smac[ETH_ALEN];
1036	u16			vlan_id;
1037	u8			network_hdr_type;
1038};
1039
1040enum ib_cq_notify_flags {
1041	IB_CQ_SOLICITED			= 1 << 0,
1042	IB_CQ_NEXT_COMP			= 1 << 1,
1043	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1044	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
1045};
1046
1047enum ib_srq_type {
1048	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1049	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1050	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1051};
1052
1053static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1054{
1055	return srq_type == IB_SRQT_XRC ||
1056	       srq_type == IB_SRQT_TM;
1057}
1058
1059enum ib_srq_attr_mask {
1060	IB_SRQ_MAX_WR	= 1 << 0,
1061	IB_SRQ_LIMIT	= 1 << 1,
1062};
1063
1064struct ib_srq_attr {
1065	u32	max_wr;
1066	u32	max_sge;
1067	u32	srq_limit;
1068};
1069
1070struct ib_srq_init_attr {
1071	void		      (*event_handler)(struct ib_event *, void *);
1072	void		       *srq_context;
1073	struct ib_srq_attr	attr;
1074	enum ib_srq_type	srq_type;
1075
1076	struct {
1077		struct ib_cq   *cq;
1078		union {
1079			struct {
1080				struct ib_xrcd *xrcd;
1081			} xrc;
1082
1083			struct {
1084				u32		max_num_tags;
1085			} tag_matching;
1086		};
1087	} ext;
1088};
1089
1090struct ib_qp_cap {
1091	u32	max_send_wr;
1092	u32	max_recv_wr;
1093	u32	max_send_sge;
1094	u32	max_recv_sge;
1095	u32	max_inline_data;
1096
1097	/*
1098	 * Maximum number of rdma_rw_ctx structures in flight at a time.
1099	 * ib_create_qp() will calculate the right amount of needed WRs
1100	 * and MRs based on this.
1101	 */
1102	u32	max_rdma_ctxs;
1103};
1104
1105enum ib_sig_type {
1106	IB_SIGNAL_ALL_WR,
1107	IB_SIGNAL_REQ_WR
1108};
1109
1110enum ib_qp_type {
1111	/*
1112	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1113	 * here (and in that order) since the MAD layer uses them as
1114	 * indices into a 2-entry table.
1115	 */
1116	IB_QPT_SMI,
1117	IB_QPT_GSI,
1118
1119	IB_QPT_RC = IB_UVERBS_QPT_RC,
1120	IB_QPT_UC = IB_UVERBS_QPT_UC,
1121	IB_QPT_UD = IB_UVERBS_QPT_UD,
1122	IB_QPT_RAW_IPV6,
1123	IB_QPT_RAW_ETHERTYPE,
1124	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1125	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1126	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1127	IB_QPT_MAX,
1128	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
1129	/* Reserve a range for qp types internal to the low level driver.
1130	 * These qp types will not be visible at the IB core layer, so the
1131	 * IB_QPT_MAX usages should not be affected in the core layer
1132	 */
1133	IB_QPT_RESERVED1 = 0x1000,
1134	IB_QPT_RESERVED2,
1135	IB_QPT_RESERVED3,
1136	IB_QPT_RESERVED4,
1137	IB_QPT_RESERVED5,
1138	IB_QPT_RESERVED6,
1139	IB_QPT_RESERVED7,
1140	IB_QPT_RESERVED8,
1141	IB_QPT_RESERVED9,
1142	IB_QPT_RESERVED10,
1143};
1144
1145enum ib_qp_create_flags {
1146	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1147	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
1148		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1149	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1150	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1151	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1152	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1153	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
1154	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
1155	IB_QP_CREATE_SCATTER_FCS		=
1156		IB_UVERBS_QP_CREATE_SCATTER_FCS,
1157	IB_QP_CREATE_CVLAN_STRIPPING		=
1158		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1159	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
1160	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
1161		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1162	/* reserve bits 26-31 for low level drivers' internal use */
1163	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1164	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1165};
1166
1167/*
1168 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1169 * callback to destroy the passed in QP.
1170 */
1171
1172struct ib_qp_init_attr {
1173	/* This callback occurs in workqueue context */
1174	void                  (*event_handler)(struct ib_event *, void *);
1175
1176	void		       *qp_context;
1177	struct ib_cq	       *send_cq;
1178	struct ib_cq	       *recv_cq;
1179	struct ib_srq	       *srq;
1180	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1181	struct ib_qp_cap	cap;
1182	enum ib_sig_type	sq_sig_type;
1183	enum ib_qp_type		qp_type;
1184	u32			create_flags;
1185
1186	/*
1187	 * Only needed for special QP types, or when using the RW API.
1188	 */
1189	u32			port_num;
1190	struct ib_rwq_ind_table *rwq_ind_tbl;
1191	u32			source_qpn;
1192};
1193
1194struct ib_qp_open_attr {
1195	void                  (*event_handler)(struct ib_event *, void *);
1196	void		       *qp_context;
1197	u32			qp_num;
1198	enum ib_qp_type		qp_type;
1199};
1200
1201enum ib_rnr_timeout {
1202	IB_RNR_TIMER_655_36 =  0,
1203	IB_RNR_TIMER_000_01 =  1,
1204	IB_RNR_TIMER_000_02 =  2,
1205	IB_RNR_TIMER_000_03 =  3,
1206	IB_RNR_TIMER_000_04 =  4,
1207	IB_RNR_TIMER_000_06 =  5,
1208	IB_RNR_TIMER_000_08 =  6,
1209	IB_RNR_TIMER_000_12 =  7,
1210	IB_RNR_TIMER_000_16 =  8,
1211	IB_RNR_TIMER_000_24 =  9,
1212	IB_RNR_TIMER_000_32 = 10,
1213	IB_RNR_TIMER_000_48 = 11,
1214	IB_RNR_TIMER_000_64 = 12,
1215	IB_RNR_TIMER_000_96 = 13,
1216	IB_RNR_TIMER_001_28 = 14,
1217	IB_RNR_TIMER_001_92 = 15,
1218	IB_RNR_TIMER_002_56 = 16,
1219	IB_RNR_TIMER_003_84 = 17,
1220	IB_RNR_TIMER_005_12 = 18,
1221	IB_RNR_TIMER_007_68 = 19,
1222	IB_RNR_TIMER_010_24 = 20,
1223	IB_RNR_TIMER_015_36 = 21,
1224	IB_RNR_TIMER_020_48 = 22,
1225	IB_RNR_TIMER_030_72 = 23,
1226	IB_RNR_TIMER_040_96 = 24,
1227	IB_RNR_TIMER_061_44 = 25,
1228	IB_RNR_TIMER_081_92 = 26,
1229	IB_RNR_TIMER_122_88 = 27,
1230	IB_RNR_TIMER_163_84 = 28,
1231	IB_RNR_TIMER_245_76 = 29,
1232	IB_RNR_TIMER_327_68 = 30,
1233	IB_RNR_TIMER_491_52 = 31
1234};
1235
1236enum ib_qp_attr_mask {
1237	IB_QP_STATE			= 1,
1238	IB_QP_CUR_STATE			= (1<<1),
1239	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1240	IB_QP_ACCESS_FLAGS		= (1<<3),
1241	IB_QP_PKEY_INDEX		= (1<<4),
1242	IB_QP_PORT			= (1<<5),
1243	IB_QP_QKEY			= (1<<6),
1244	IB_QP_AV			= (1<<7),
1245	IB_QP_PATH_MTU			= (1<<8),
1246	IB_QP_TIMEOUT			= (1<<9),
1247	IB_QP_RETRY_CNT			= (1<<10),
1248	IB_QP_RNR_RETRY			= (1<<11),
1249	IB_QP_RQ_PSN			= (1<<12),
1250	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1251	IB_QP_ALT_PATH			= (1<<14),
1252	IB_QP_MIN_RNR_TIMER		= (1<<15),
1253	IB_QP_SQ_PSN			= (1<<16),
1254	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1255	IB_QP_PATH_MIG_STATE		= (1<<18),
1256	IB_QP_CAP			= (1<<19),
1257	IB_QP_DEST_QPN			= (1<<20),
1258	IB_QP_RESERVED1			= (1<<21),
1259	IB_QP_RESERVED2			= (1<<22),
1260	IB_QP_RESERVED3			= (1<<23),
1261	IB_QP_RESERVED4			= (1<<24),
1262	IB_QP_RATE_LIMIT		= (1<<25),
1263
1264	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1265};
1266
1267enum ib_qp_state {
1268	IB_QPS_RESET,
1269	IB_QPS_INIT,
1270	IB_QPS_RTR,
1271	IB_QPS_RTS,
1272	IB_QPS_SQD,
1273	IB_QPS_SQE,
1274	IB_QPS_ERR
1275};
1276
1277enum ib_mig_state {
1278	IB_MIG_MIGRATED,
1279	IB_MIG_REARM,
1280	IB_MIG_ARMED
1281};
1282
1283enum ib_mw_type {
1284	IB_MW_TYPE_1 = 1,
1285	IB_MW_TYPE_2 = 2
1286};
1287
1288struct ib_qp_attr {
1289	enum ib_qp_state	qp_state;
1290	enum ib_qp_state	cur_qp_state;
1291	enum ib_mtu		path_mtu;
1292	enum ib_mig_state	path_mig_state;
1293	u32			qkey;
1294	u32			rq_psn;
1295	u32			sq_psn;
1296	u32			dest_qp_num;
1297	int			qp_access_flags;
1298	struct ib_qp_cap	cap;
1299	struct rdma_ah_attr	ah_attr;
1300	struct rdma_ah_attr	alt_ah_attr;
1301	u16			pkey_index;
1302	u16			alt_pkey_index;
1303	u8			en_sqd_async_notify;
1304	u8			sq_draining;
1305	u8			max_rd_atomic;
1306	u8			max_dest_rd_atomic;
1307	u8			min_rnr_timer;
1308	u32			port_num;
1309	u8			timeout;
1310	u8			retry_cnt;
1311	u8			rnr_retry;
1312	u32			alt_port_num;
1313	u8			alt_timeout;
1314	u32			rate_limit;
1315	struct net_device	*xmit_slave;
1316};
1317
1318enum ib_wr_opcode {
1319	/* These are shared with userspace */
1320	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1321	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1322	IB_WR_SEND = IB_UVERBS_WR_SEND,
1323	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1324	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1325	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1326	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1327	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
1328	IB_WR_LSO = IB_UVERBS_WR_TSO,
1329	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1330	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1331	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1332	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1333		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1334	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1335		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1336	IB_WR_FLUSH = IB_UVERBS_WR_FLUSH,
1337	IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE,
1338
1339	/* These are kernel only and can not be issued by userspace */
1340	IB_WR_REG_MR = 0x20,
1341	IB_WR_REG_MR_INTEGRITY,
1342
1343	/* reserve values for low level drivers' internal use.
1344	 * These values will not be used at all in the ib core layer.
1345	 */
1346	IB_WR_RESERVED1 = 0xf0,
1347	IB_WR_RESERVED2,
1348	IB_WR_RESERVED3,
1349	IB_WR_RESERVED4,
1350	IB_WR_RESERVED5,
1351	IB_WR_RESERVED6,
1352	IB_WR_RESERVED7,
1353	IB_WR_RESERVED8,
1354	IB_WR_RESERVED9,
1355	IB_WR_RESERVED10,
1356};
1357
1358enum ib_send_flags {
1359	IB_SEND_FENCE		= 1,
1360	IB_SEND_SIGNALED	= (1<<1),
1361	IB_SEND_SOLICITED	= (1<<2),
1362	IB_SEND_INLINE		= (1<<3),
1363	IB_SEND_IP_CSUM		= (1<<4),
1364
1365	/* reserve bits 26-31 for low level drivers' internal use */
1366	IB_SEND_RESERVED_START	= (1 << 26),
1367	IB_SEND_RESERVED_END	= (1 << 31),
1368};
1369
1370struct ib_sge {
1371	u64	addr;
1372	u32	length;
1373	u32	lkey;
1374};
1375
1376struct ib_cqe {
1377	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1378};
1379
1380struct ib_send_wr {
1381	struct ib_send_wr      *next;
1382	union {
1383		u64		wr_id;
1384		struct ib_cqe	*wr_cqe;
1385	};
1386	struct ib_sge	       *sg_list;
1387	int			num_sge;
1388	enum ib_wr_opcode	opcode;
1389	int			send_flags;
1390	union {
1391		__be32		imm_data;
1392		u32		invalidate_rkey;
1393	} ex;
1394};
1395
1396struct ib_rdma_wr {
1397	struct ib_send_wr	wr;
1398	u64			remote_addr;
1399	u32			rkey;
1400};
1401
1402static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1403{
1404	return container_of(wr, struct ib_rdma_wr, wr);
1405}
1406
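/*
 * Illustrative sketch (not part of the original header): posting a single-SGE
 * RDMA WRITE using the embedded-ib_send_wr pattern above.  The local buffer,
 * lkey, remote address and rkey are values the caller is assumed to have set
 * up elsewhere; ib_post_send() is declared further down in this file.
 */
static inline int example_post_rdma_write(struct ib_qp *qp, u64 local_addr,
					  u32 lkey, u64 remote_addr, u32 rkey,
					  u32 len)
{
	struct ib_sge sge = {
		.addr	= local_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_rdma_wr wr = {
		.wr = {
			.opcode		= IB_WR_RDMA_WRITE,
			.send_flags	= IB_SEND_SIGNALED,
			.sg_list	= &sge,
			.num_sge	= 1,
		},
		.remote_addr	= remote_addr,
		.rkey		= rkey,
	};
	const struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}
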
1407struct ib_atomic_wr {
1408	struct ib_send_wr	wr;
1409	u64			remote_addr;
1410	u64			compare_add;
1411	u64			swap;
1412	u64			compare_add_mask;
1413	u64			swap_mask;
1414	u32			rkey;
1415};
1416
1417static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1418{
1419	return container_of(wr, struct ib_atomic_wr, wr);
1420}
1421
1422struct ib_ud_wr {
1423	struct ib_send_wr	wr;
1424	struct ib_ah		*ah;
1425	void			*header;
1426	int			hlen;
1427	int			mss;
1428	u32			remote_qpn;
1429	u32			remote_qkey;
1430	u16			pkey_index; /* valid for GSI only */
1431	u32			port_num; /* valid for DR SMPs on switch only */
1432};
1433
1434static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1435{
1436	return container_of(wr, struct ib_ud_wr, wr);
1437}
1438
1439struct ib_reg_wr {
1440	struct ib_send_wr	wr;
1441	struct ib_mr		*mr;
1442	u32			key;
1443	int			access;
1444};
1445
1446static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1447{
1448	return container_of(wr, struct ib_reg_wr, wr);
1449}
1450
1451struct ib_recv_wr {
1452	struct ib_recv_wr      *next;
1453	union {
1454		u64		wr_id;
1455		struct ib_cqe	*wr_cqe;
1456	};
1457	struct ib_sge	       *sg_list;
1458	int			num_sge;
1459};
1460
1461enum ib_access_flags {
1462	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1463	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1464	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1465	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1466	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1467	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1468	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1469	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1470	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
1471	IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL,
1472	IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT,
1473
1474	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
1475	IB_ACCESS_SUPPORTED =
1476		((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
1477};
1478
1479/*
1480 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1481 * are hidden here instead of a uapi header!
1482 */
1483enum ib_mr_rereg_flags {
1484	IB_MR_REREG_TRANS	= 1,
1485	IB_MR_REREG_PD		= (1<<1),
1486	IB_MR_REREG_ACCESS	= (1<<2),
1487	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1488};
1489
1490struct ib_umem;
1491
1492enum rdma_remove_reason {
1493	/*
1494	 * Userspace requested uobject deletion or initial try
1495	 * to remove uobject via cleanup. Call could fail
1496	 */
1497	RDMA_REMOVE_DESTROY,
1498	/* Context deletion. This call should delete the actual object itself */
1499	RDMA_REMOVE_CLOSE,
1500	/* Driver is being hot-unplugged. This call should delete the actual object itself */
1501	RDMA_REMOVE_DRIVER_REMOVE,
1502	/* uobj is being cleaned-up before being committed */
1503	RDMA_REMOVE_ABORT,
1504	/* The driver failed to destroy the uobject and is being disconnected */
1505	RDMA_REMOVE_DRIVER_FAILURE,
1506};
1507
1508struct ib_rdmacg_object {
1509#ifdef CONFIG_CGROUP_RDMA
1510	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
1511#endif
1512};
1513
1514struct ib_ucontext {
1515	struct ib_device       *device;
1516	struct ib_uverbs_file  *ufile;
1517
1518	struct ib_rdmacg_object	cg_obj;
1519	/*
1520	 * Implementation details of the RDMA core, don't use in drivers:
1521	 */
1522	struct rdma_restrack_entry res;
1523	struct xarray mmap_xa;
1524};
1525
1526struct ib_uobject {
1527	u64			user_handle;	/* handle given to us by userspace */
1528	/* ufile & ucontext owning this object */
1529	struct ib_uverbs_file  *ufile;
1530	/* FIXME, save memory: ufile->context == context */
1531	struct ib_ucontext     *context;	/* associated user context */
1532	void		       *object;		/* containing object */
1533	struct list_head	list;		/* link to context's list */
1534	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
1535	int			id;		/* index into kernel idr */
1536	struct kref		ref;
1537	atomic_t		usecnt;		/* protects exclusive access */
1538	struct rcu_head		rcu;		/* kfree_rcu() overhead */
1539
1540	const struct uverbs_api_object *uapi_object;
1541};
1542
1543struct ib_udata {
1544	const void __user *inbuf;
1545	void __user *outbuf;
1546	size_t       inlen;
1547	size_t       outlen;
1548};
1549
1550struct ib_pd {
1551	u32			local_dma_lkey;
1552	u32			flags;
1553	struct ib_device       *device;
1554	struct ib_uobject      *uobject;
1555	atomic_t          	usecnt; /* count all resources */
1556
1557	u32			unsafe_global_rkey;
1558
1559	/*
1560	 * Implementation details of the RDMA core, don't use in drivers:
1561	 */
1562	struct ib_mr	       *__internal_mr;
1563	struct rdma_restrack_entry res;
1564};
1565
1566struct ib_xrcd {
1567	struct ib_device       *device;
1568	atomic_t		usecnt; /* count all exposed resources */
1569	struct inode	       *inode;
1570	struct rw_semaphore	tgt_qps_rwsem;
1571	struct xarray		tgt_qps;
1572};
1573
1574struct ib_ah {
1575	struct ib_device	*device;
1576	struct ib_pd		*pd;
1577	struct ib_uobject	*uobject;
1578	const struct ib_gid_attr *sgid_attr;
1579	enum rdma_ah_attr_type	type;
1580};
1581
1582typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1583
1584enum ib_poll_context {
1585	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
1586	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
1587	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1588	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1589
1590	IB_POLL_DIRECT,		   /* caller context, no hw completions */
1591};
1592
1593struct ib_cq {
1594	struct ib_device       *device;
1595	struct ib_ucq_object   *uobject;
1596	ib_comp_handler   	comp_handler;
1597	void                  (*event_handler)(struct ib_event *, void *);
1598	void                   *cq_context;
1599	int               	cqe;
1600	unsigned int		cqe_used;
1601	atomic_t          	usecnt; /* count number of work queues */
1602	enum ib_poll_context	poll_ctx;
1603	struct ib_wc		*wc;
1604	struct list_head        pool_entry;
1605	union {
1606		struct irq_poll		iop;
1607		struct work_struct	work;
1608	};
1609	struct workqueue_struct *comp_wq;
1610	struct dim *dim;
1611
1612	/* updated only by trace points */
1613	ktime_t timestamp;
1614	u8 interrupt:1;
1615	u8 shared:1;
1616	unsigned int comp_vector;
1617
1618	/*
1619	 * Implementation details of the RDMA core, don't use in drivers:
1620	 */
1621	struct rdma_restrack_entry res;
1622};
1623
1624struct ib_srq {
1625	struct ib_device       *device;
1626	struct ib_pd	       *pd;
1627	struct ib_usrq_object  *uobject;
1628	void		      (*event_handler)(struct ib_event *, void *);
1629	void		       *srq_context;
1630	enum ib_srq_type	srq_type;
1631	atomic_t		usecnt;
1632
1633	struct {
1634		struct ib_cq   *cq;
1635		union {
1636			struct {
1637				struct ib_xrcd *xrcd;
1638				u32		srq_num;
1639			} xrc;
1640		};
1641	} ext;
1642
1643	/*
1644	 * Implementation details of the RDMA core, don't use in drivers:
1645	 */
1646	struct rdma_restrack_entry res;
1647};
1648
1649enum ib_raw_packet_caps {
1650	/*
1651	 * Stripping the cvlan from an incoming packet and reporting it in the
1652	 * matching work completion is supported.
1653	 */
1654	IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
1655		IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
1656	/*
1657	 * Scatter FCS field of an incoming packet to host memory is supported.
1658	 * Scattering the FCS field of an incoming packet to host memory is supported.
1659	IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
1660	/* Checksum offloads are supported (for both send and receive). */
1661	IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
1662	/*
1663	 * When a packet is received for an RQ with no receive WQEs, the
1664	 * packet processing is delayed.
1665	 */
1666	IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
1667};
1668
1669enum ib_wq_type {
1670	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1671};
1672
1673enum ib_wq_state {
1674	IB_WQS_RESET,
1675	IB_WQS_RDY,
1676	IB_WQS_ERR
1677};
1678
1679struct ib_wq {
1680	struct ib_device       *device;
1681	struct ib_uwq_object   *uobject;
1682	void		    *wq_context;
1683	void		    (*event_handler)(struct ib_event *, void *);
1684	struct ib_pd	       *pd;
1685	struct ib_cq	       *cq;
1686	u32		wq_num;
1687	enum ib_wq_state       state;
1688	enum ib_wq_type	wq_type;
1689	atomic_t		usecnt;
1690};
1691
1692enum ib_wq_flags {
1693	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1694	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1695	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1696	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1697				IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1698};
1699
1700struct ib_wq_init_attr {
1701	void		       *wq_context;
1702	enum ib_wq_type	wq_type;
1703	u32		max_wr;
1704	u32		max_sge;
1705	struct	ib_cq	       *cq;
1706	void		    (*event_handler)(struct ib_event *, void *);
1707	u32		create_flags; /* Use enum ib_wq_flags */
1708};
1709
1710enum ib_wq_attr_mask {
1711	IB_WQ_STATE		= 1 << 0,
1712	IB_WQ_CUR_STATE		= 1 << 1,
1713	IB_WQ_FLAGS		= 1 << 2,
1714};
1715
1716struct ib_wq_attr {
1717	enum	ib_wq_state	wq_state;
1718	enum	ib_wq_state	curr_wq_state;
1719	u32			flags; /* Use enum ib_wq_flags */
1720	u32			flags_mask; /* Use enum ib_wq_flags */
1721};
1722
1723struct ib_rwq_ind_table {
1724	struct ib_device	*device;
1725	struct ib_uobject      *uobject;
1726	atomic_t		usecnt;
1727	u32		ind_tbl_num;
1728	u32		log_ind_tbl_size;
1729	struct ib_wq	**ind_tbl;
1730};
1731
1732struct ib_rwq_ind_table_init_attr {
1733	u32		log_ind_tbl_size;
1734	/* Each entry is a pointer to Receive Work Queue */
1735	struct ib_wq	**ind_tbl;
1736};
1737
1738enum port_pkey_state {
1739	IB_PORT_PKEY_NOT_VALID = 0,
1740	IB_PORT_PKEY_VALID = 1,
1741	IB_PORT_PKEY_LISTED = 2,
1742};
1743
1744struct ib_qp_security;
1745
1746struct ib_port_pkey {
1747	enum port_pkey_state	state;
1748	u16			pkey_index;
1749	u32			port_num;
1750	struct list_head	qp_list;
1751	struct list_head	to_error_list;
1752	struct ib_qp_security  *sec;
1753};
1754
1755struct ib_ports_pkeys {
1756	struct ib_port_pkey	main;
1757	struct ib_port_pkey	alt;
1758};
1759
1760struct ib_qp_security {
1761	struct ib_qp	       *qp;
1762	struct ib_device       *dev;
1763	/* Hold this mutex when changing port and pkey settings. */
1764	struct mutex		mutex;
1765	struct ib_ports_pkeys  *ports_pkeys;
1766	/* A list of all open shared QP handles.  Required to enforce security
1767	 * properly for all users of a shared QP.
1768	 */
1769	struct list_head        shared_qp_list;
1770	void                   *security;
1771	bool			destroying;
1772	atomic_t		error_list_count;
1773	struct completion	error_complete;
1774	int			error_comps_pending;
1775};
1776
1777/*
1778 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1779 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1780 */
1781struct ib_qp {
1782	struct ib_device       *device;
1783	struct ib_pd	       *pd;
1784	struct ib_cq	       *send_cq;
1785	struct ib_cq	       *recv_cq;
1786	spinlock_t		mr_lock;
1787	int			mrs_used;
1788	struct list_head	rdma_mrs;
1789	struct list_head	sig_mrs;
1790	struct ib_srq	       *srq;
1791	struct completion	srq_completion;
1792	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1793	struct list_head	xrcd_list;
1794
1795	/* count times opened, mcast attaches, flow attaches */
1796	atomic_t		usecnt;
1797	struct list_head	open_list;
1798	struct ib_qp           *real_qp;
1799	struct ib_uqp_object   *uobject;
1800	void                  (*event_handler)(struct ib_event *, void *);
1801	void                  (*registered_event_handler)(struct ib_event *, void *);
1802	void		       *qp_context;
1803	/* sgid_attrs associated with the AV's */
1804	const struct ib_gid_attr *av_sgid_attr;
1805	const struct ib_gid_attr *alt_path_sgid_attr;
1806	u32			qp_num;
1807	u32			max_write_sge;
1808	u32			max_read_sge;
1809	enum ib_qp_type		qp_type;
1810	struct ib_rwq_ind_table *rwq_ind_tbl;
1811	struct ib_qp_security  *qp_sec;
1812	u32			port;
1813
1814	bool			integrity_en;
1815	/*
1816	 * Implementation details of the RDMA core, don't use in drivers:
1817	 */
1818	struct rdma_restrack_entry     res;
1819
1820	/* The counter the qp is bound to */
1821	struct rdma_counter    *counter;
1822};
1823
1824struct ib_dm {
1825	struct ib_device  *device;
1826	u32		   length;
1827	u32		   flags;
1828	struct ib_uobject *uobject;
1829	atomic_t	   usecnt;
1830};
1831
1832struct ib_mr {
1833	struct ib_device  *device;
1834	struct ib_pd	  *pd;
1835	u32		   lkey;
1836	u32		   rkey;
1837	u64		   iova;
1838	u64		   length;
1839	unsigned int	   page_size;
1840	enum ib_mr_type	   type;
1841	bool		   need_inval;
1842	union {
1843		struct ib_uobject	*uobject;	/* user */
1844		struct list_head	qp_entry;	/* FR */
1845	};
1846
1847	struct ib_dm      *dm;
1848	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1849	/*
1850	 * Implementation details of the RDMA core, don't use in drivers:
1851	 */
1852	struct rdma_restrack_entry res;
1853};
1854
1855struct ib_mw {
1856	struct ib_device	*device;
1857	struct ib_pd		*pd;
1858	struct ib_uobject	*uobject;
1859	u32			rkey;
1860	enum ib_mw_type         type;
1861};
1862
1863/* Supported steering options */
1864enum ib_flow_attr_type {
1865	/* steering according to rule specifications */
1866	IB_FLOW_ATTR_NORMAL		= 0x0,
1867	/* default unicast and multicast rule -
1868	 * receive all Eth traffic which isn't steered to any QP
1869	 */
1870	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1871	/* default multicast rule -
1872	 * receive all Eth multicast traffic which isn't steered to any QP
1873	 */
1874	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1875	/* sniffer rule - receive all port traffic */
1876	IB_FLOW_ATTR_SNIFFER		= 0x3
1877};
1878
1879/* Supported steering header types */
1880enum ib_flow_spec_type {
1881	/* L2 headers*/
1882	IB_FLOW_SPEC_ETH		= 0x20,
1883	IB_FLOW_SPEC_IB			= 0x22,
1884	/* L3 header*/
1885	IB_FLOW_SPEC_IPV4		= 0x30,
1886	IB_FLOW_SPEC_IPV6		= 0x31,
1887	IB_FLOW_SPEC_ESP                = 0x34,
1888	/* L4 headers*/
1889	IB_FLOW_SPEC_TCP		= 0x40,
1890	IB_FLOW_SPEC_UDP		= 0x41,
1891	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
1892	IB_FLOW_SPEC_GRE		= 0x51,
1893	IB_FLOW_SPEC_MPLS		= 0x60,
1894	IB_FLOW_SPEC_INNER		= 0x100,
1895	/* Actions */
1896	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1897	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1898	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
1899	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1900};
1901#define IB_FLOW_SPEC_LAYER_MASK	0xF0
1902#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1903
1904enum ib_flow_flags {
1905	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1906	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1907	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1908};
1909
1910struct ib_flow_eth_filter {
1911	u8	dst_mac[6];
1912	u8	src_mac[6];
1913	__be16	ether_type;
1914	__be16	vlan_tag;
1915};
1916
1917struct ib_flow_spec_eth {
1918	u32			  type;
1919	u16			  size;
1920	struct ib_flow_eth_filter val;
1921	struct ib_flow_eth_filter mask;
1922};
1923
1924struct ib_flow_ib_filter {
1925	__be16 dlid;
1926	__u8   sl;
1927};
1928
1929struct ib_flow_spec_ib {
1930	u32			 type;
1931	u16			 size;
1932	struct ib_flow_ib_filter val;
1933	struct ib_flow_ib_filter mask;
1934};
1935
1936/* IPv4 header flags */
1937enum ib_ipv4_flags {
1938	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1939	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1940				    last one have this flag set */
1941};
1942
1943struct ib_flow_ipv4_filter {
1944	__be32	src_ip;
1945	__be32	dst_ip;
1946	u8	proto;
1947	u8	tos;
1948	u8	ttl;
1949	u8	flags;
1950};
1951
1952struct ib_flow_spec_ipv4 {
1953	u32			   type;
1954	u16			   size;
1955	struct ib_flow_ipv4_filter val;
1956	struct ib_flow_ipv4_filter mask;
1957};
1958
1959struct ib_flow_ipv6_filter {
1960	u8	src_ip[16];
1961	u8	dst_ip[16];
1962	__be32	flow_label;
1963	u8	next_hdr;
1964	u8	traffic_class;
1965	u8	hop_limit;
1966} __packed;
1967
1968struct ib_flow_spec_ipv6 {
1969	u32			   type;
1970	u16			   size;
1971	struct ib_flow_ipv6_filter val;
1972	struct ib_flow_ipv6_filter mask;
1973};
1974
1975struct ib_flow_tcp_udp_filter {
1976	__be16	dst_port;
1977	__be16	src_port;
1978};
1979
1980struct ib_flow_spec_tcp_udp {
1981	u32			      type;
1982	u16			      size;
1983	struct ib_flow_tcp_udp_filter val;
1984	struct ib_flow_tcp_udp_filter mask;
1985};
1986
1987struct ib_flow_tunnel_filter {
1988	__be32	tunnel_id;
1989};
1990
1991/* ib_flow_spec_tunnel describes the VXLAN tunnel;
1992 * the tunnel_id in val holds the VNI value.
1993 */
1994struct ib_flow_spec_tunnel {
1995	u32			      type;
1996	u16			      size;
1997	struct ib_flow_tunnel_filter  val;
1998	struct ib_flow_tunnel_filter  mask;
1999};
2000
2001struct ib_flow_esp_filter {
2002	__be32	spi;
2003	__be32  seq;
2004};
2005
2006struct ib_flow_spec_esp {
2007	u32                           type;
2008	u16			      size;
2009	struct ib_flow_esp_filter     val;
2010	struct ib_flow_esp_filter     mask;
2011};
2012
2013struct ib_flow_gre_filter {
2014	__be16 c_ks_res0_ver;
2015	__be16 protocol;
2016	__be32 key;
2017};
2018
2019struct ib_flow_spec_gre {
2020	u32                           type;
2021	u16			      size;
2022	struct ib_flow_gre_filter     val;
2023	struct ib_flow_gre_filter     mask;
2024};
2025
2026struct ib_flow_mpls_filter {
2027	__be32 tag;
2028};
2029
2030struct ib_flow_spec_mpls {
2031	u32                           type;
2032	u16			      size;
2033	struct ib_flow_mpls_filter     val;
2034	struct ib_flow_mpls_filter     mask;
2035};
2036
2037struct ib_flow_spec_action_tag {
2038	enum ib_flow_spec_type	      type;
2039	u16			      size;
2040	u32                           tag_id;
2041};
2042
2043struct ib_flow_spec_action_drop {
2044	enum ib_flow_spec_type	      type;
2045	u16			      size;
2046};
2047
2048struct ib_flow_spec_action_handle {
2049	enum ib_flow_spec_type	      type;
2050	u16			      size;
2051	struct ib_flow_action	     *act;
2052};
2053
2054enum ib_counters_description {
2055	IB_COUNTER_PACKETS,
2056	IB_COUNTER_BYTES,
2057};
2058
2059struct ib_flow_spec_action_count {
2060	enum ib_flow_spec_type type;
2061	u16 size;
2062	struct ib_counters *counters;
2063};
2064
2065union ib_flow_spec {
2066	struct {
2067		u32			type;
2068		u16			size;
2069	};
2070	struct ib_flow_spec_eth		eth;
2071	struct ib_flow_spec_ib		ib;
2072	struct ib_flow_spec_ipv4        ipv4;
2073	struct ib_flow_spec_tcp_udp	tcp_udp;
2074	struct ib_flow_spec_ipv6        ipv6;
2075	struct ib_flow_spec_tunnel      tunnel;
2076	struct ib_flow_spec_esp		esp;
2077	struct ib_flow_spec_gre		gre;
2078	struct ib_flow_spec_mpls	mpls;
2079	struct ib_flow_spec_action_tag  flow_tag;
2080	struct ib_flow_spec_action_drop drop;
2081	struct ib_flow_spec_action_handle action;
2082	struct ib_flow_spec_action_count flow_count;
2083};
2084
2085struct ib_flow_attr {
2086	enum ib_flow_attr_type type;
2087	u16	     size;
2088	u16	     priority;
2089	u32	     flags;
2090	u8	     num_of_specs;
2091	u32	     port;
2092	union ib_flow_spec flows[];
2093};
2094
2095struct ib_flow {
2096	struct ib_qp		*qp;
2097	struct ib_device	*device;
2098	struct ib_uobject	*uobject;
2099};
2100
2101enum ib_flow_action_type {
2102	IB_FLOW_ACTION_UNSPECIFIED,
2103	IB_FLOW_ACTION_ESP = 1,
2104};
2105
2106struct ib_flow_action_attrs_esp_keymats {
2107	enum ib_uverbs_flow_action_esp_keymat			protocol;
2108	union {
2109		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2110	} keymat;
2111};
2112
2113struct ib_flow_action_attrs_esp_replays {
2114	enum ib_uverbs_flow_action_esp_replay			protocol;
2115	union {
2116		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
2117	} replay;
2118};
2119
2120enum ib_flow_action_attrs_esp_flags {
2121	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2122	 * This is done in order to share the same flags between user-space and
2123	 * kernel and avoid an unnecessary translation.
2124	 */
2125
2126	/* Kernel flags */
2127	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
2128	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
2129};
2130
2131struct ib_flow_spec_list {
2132	struct ib_flow_spec_list	*next;
2133	union ib_flow_spec		spec;
2134};
2135
2136struct ib_flow_action_attrs_esp {
2137	struct ib_flow_action_attrs_esp_keymats		*keymat;
2138	struct ib_flow_action_attrs_esp_replays		*replay;
2139	struct ib_flow_spec_list			*encap;
2140	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2141	 * A value of 0 is valid.
2142	 */
2143	u32						esn;
2144	u32						spi;
2145	u32						seq;
2146	u32						tfc_pad;
2147	/* Use enum ib_flow_action_attrs_esp_flags */
2148	u64						flags;
2149	u64						hard_limit_pkts;
2150};
2151
2152struct ib_flow_action {
2153	struct ib_device		*device;
2154	struct ib_uobject		*uobject;
2155	enum ib_flow_action_type	type;
2156	atomic_t			usecnt;
2157};
2158
2159struct ib_mad;
2160
2161enum ib_process_mad_flags {
2162	IB_MAD_IGNORE_MKEY	= 1,
2163	IB_MAD_IGNORE_BKEY	= 2,
2164	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2165};
2166
2167enum ib_mad_result {
2168	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2169	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2170	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2171	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2172};
2173
2174struct ib_port_cache {
2175	u64		      subnet_prefix;
2176	struct ib_pkey_cache  *pkey;
2177	struct ib_gid_table   *gid;
2178	u8                     lmc;
2179	enum ib_port_state     port_state;
2180};
2181
2182struct ib_port_immutable {
2183	int                           pkey_tbl_len;
2184	int                           gid_tbl_len;
2185	u32                           core_cap_flags;
2186	u32                           max_mad_size;
2187};
2188
2189struct ib_port_data {
2190	struct ib_device *ib_dev;
2191
2192	struct ib_port_immutable immutable;
2193
2194	spinlock_t pkey_list_lock;
2195
2196	spinlock_t netdev_lock;
2197
2198	struct list_head pkey_list;
2199
2200	struct ib_port_cache cache;
2201
2202	struct net_device __rcu *netdev;
2203	netdevice_tracker netdev_tracker;
2204	struct hlist_node ndev_hash_link;
2205	struct rdma_port_counter port_counter;
2206	struct ib_port *sysfs;
2207};
2208
2209/* rdma netdev type - specifies protocol type */
2210enum rdma_netdev_t {
2211	RDMA_NETDEV_OPA_VNIC,
2212	RDMA_NETDEV_IPOIB,
2213};
2214
2215/**
2216 * struct rdma_netdev - rdma netdev
2217 * For cases where netstack interfacing is required.
2218 */
2219struct rdma_netdev {
2220	void              *clnt_priv;
2221	struct ib_device  *hca;
2222	u32		   port_num;
2223	int                mtu;
2224
2225	/*
2226	 * cleanup function must be specified.
2227	 * FIXME: This is only used for OPA_VNIC and that usage should be
2228	 * removed too.
2229	 */
2230	void (*free_rdma_netdev)(struct net_device *netdev);
2231
2232	/* control functions */
2233	void (*set_id)(struct net_device *netdev, int id);
2234	/* send packet */
2235	int (*send)(struct net_device *dev, struct sk_buff *skb,
2236		    struct ib_ah *address, u32 dqpn);
2237	/* multicast */
2238	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2239			    union ib_gid *gid, u16 mlid,
2240			    int set_qkey, u32 qkey);
2241	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2242			    union ib_gid *gid, u16 mlid);
2243	/* timeout */
2244	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
2245};
2246
2247struct rdma_netdev_alloc_params {
2248	size_t sizeof_priv;
2249	unsigned int txqs;
2250	unsigned int rxqs;
2251	void *param;
2252
2253	int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
2254				      struct net_device *netdev, void *param);
2255};
2256
2257struct ib_odp_counters {
2258	atomic64_t faults;
2259	atomic64_t invalidations;
2260	atomic64_t prefetch;
2261};
2262
2263struct ib_counters {
2264	struct ib_device	*device;
2265	struct ib_uobject	*uobject;
2266	/* num of objects attached */
2267	atomic_t	usecnt;
2268};
2269
2270struct ib_counters_read_attr {
2271	u64	*counters_buff;
2272	u32	ncounters;
2273	u32	flags; /* use enum ib_read_counters_flags */
2274};
2275
2276struct uverbs_attr_bundle;
2277struct iw_cm_id;
2278struct iw_cm_conn_param;
2279
2280#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2281	.size_##ib_struct =                                                    \
2282		(sizeof(struct drv_struct) +                                   \
2283		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2284		 BUILD_BUG_ON_ZERO(                                            \
2285			 !__same_type(((struct drv_struct *)NULL)->member,     \
2286				      struct ib_struct)))
2287
2288#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
2289	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2290					   gfp, false))
2291
2292#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type)                              \
2293	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2294					   GFP_KERNEL, true))
2295
2296#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2297	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2298
2299#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2300
2301struct rdma_user_mmap_entry {
2302	struct kref ref;
2303	struct ib_ucontext *ucontext;
2304	unsigned long start_pgoff;
2305	size_t npages;
2306	bool driver_removed;
2307};
2308
2309/* Return the offset (in bytes) the user should pass to libc's mmap() */
2310static inline u64
2311rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2312{
2313	return (u64)entry->start_pgoff << PAGE_SHIFT;
2314}
2315
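/*
 * Illustrative sketch (not part of the original header): after a driver has
 * registered an mmap entry (see rdma_user_mmap_entry_insert(), declared
 * further down in this file), it reports the offset userspace must pass to
 * mmap() on the uverbs fd.  "example_uresp" is a hypothetical layout.
 */
struct example_uresp {
	u64 db_offset;
};

static inline void example_fill_mmap_offset(struct example_uresp *resp,
					    struct rdma_user_mmap_entry *entry)
{
	/* userspace later calls mmap(NULL, len, prot, flags, fd, db_offset) */
	resp->db_offset = rdma_user_mmap_get_offset(entry);
}
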
2316/**
2317 * struct ib_device_ops - InfiniBand device operations
2318 * This structure defines all the InfiniBand device operations. Providers
2319 * must define the operations they support; the rest remain set to NULL.
2320 */
2321struct ib_device_ops {
2322	struct module *owner;
2323	enum rdma_driver_id driver_id;
2324	u32 uverbs_abi_ver;
2325	unsigned int uverbs_no_driver_id_binding:1;
2326
2327	/*
2328	 * NOTE: New drivers should not make use of device_group; instead, new
2329	 * device parameters should be exposed via netlink commands. This
2330	 * mechanism exists only for existing drivers.
2331	 */
2332	const struct attribute_group *device_group;
2333	const struct attribute_group **port_groups;
2334
2335	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2336			 const struct ib_send_wr **bad_send_wr);
2337	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2338			 const struct ib_recv_wr **bad_recv_wr);
2339	void (*drain_rq)(struct ib_qp *qp);
2340	void (*drain_sq)(struct ib_qp *qp);
2341	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2342	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2343	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2344	int (*post_srq_recv)(struct ib_srq *srq,
2345			     const struct ib_recv_wr *recv_wr,
2346			     const struct ib_recv_wr **bad_recv_wr);
2347	int (*process_mad)(struct ib_device *device, int process_mad_flags,
2348			   u32 port_num, const struct ib_wc *in_wc,
2349			   const struct ib_grh *in_grh,
2350			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
2351			   size_t *out_mad_size, u16 *out_mad_pkey_index);
2352	int (*query_device)(struct ib_device *device,
2353			    struct ib_device_attr *device_attr,
2354			    struct ib_udata *udata);
2355	int (*modify_device)(struct ib_device *device, int device_modify_mask,
2356			     struct ib_device_modify *device_modify);
2357	void (*get_dev_fw_str)(struct ib_device *device, char *str);
2358	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2359						     int comp_vector);
2360	int (*query_port)(struct ib_device *device, u32 port_num,
2361			  struct ib_port_attr *port_attr);
2362	int (*modify_port)(struct ib_device *device, u32 port_num,
2363			   int port_modify_mask,
2364			   struct ib_port_modify *port_modify);
2365	/**
2366	 * The following mandatory functions are used only at device
2367	 * registration.  Keep functions such as these at the end of this
2368	 * structure to avoid cache line misses when accessing struct ib_device
2369	 * in fast paths.
2370	 */
2371	int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2372				  struct ib_port_immutable *immutable);
2373	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2374					       u32 port_num);
2375	/**
2376	 * When calling get_netdev, the HW vendor's driver should return the
2377	 * net device of device @device at port @port_num or NULL if such
2378	 * a net device doesn't exist. The vendor driver should call dev_hold
2379	 * on this net device. The HW vendor's device driver must guarantee
2380	 * that this function returns NULL before the net device has finished
2381	 * NETDEV_UNREGISTER state.
2382	 */
2383	struct net_device *(*get_netdev)(struct ib_device *device,
2384					 u32 port_num);
2385	/**
2386	 * rdma netdev operation
2387	 *
2388	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2389	 * must return -EOPNOTSUPP if it doesn't support the specified type.
2390	 */
2391	struct net_device *(*alloc_rdma_netdev)(
2392		struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2393		const char *name, unsigned char name_assign_type,
2394		void (*setup)(struct net_device *));
2395
2396	int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2397				      enum rdma_netdev_t type,
2398				      struct rdma_netdev_alloc_params *params);
2399	/**
2400	 * query_gid should return the GID value for @device when the @port_num
2401	 * link layer is either IB or iWARP. It is a no-op if the @port_num
2402	 * port uses the RoCE link layer.
2403	 */
2404	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2405			 union ib_gid *gid);
2406	/**
2407	 * When calling add_gid, the HW vendor's driver should add the gid
2408	 * of device of port at gid index available at @attr. Meta-info of
2409	 * that gid (for example, the network device related to this gid) is
2410	 * available at @attr. @context allows the HW vendor driver to store
2411	 * extra information together with a GID entry. The HW vendor driver may
2412	 * allocate memory to contain this information and store it in @context
2413	 * when a new GID entry is written to. Params are consistent until the
2414	 * next call of add_gid or delete_gid. The function should return 0 on
2415	 * success or error otherwise. The function could be called
2416	 * concurrently for different ports. This function is only called when
2417	 * roce_gid_table is used.
2418	 */
2419	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2420	/**
2421	 * When calling del_gid, the HW vendor's driver should delete the
2422	 * gid of device @device at gid index gid_index of port port_num
2423	 * available in @attr.
2424	 * Upon the deletion of a GID entry, the HW vendor must free any
2425	 * allocated memory. The caller will clear @context afterwards.
2426	 * This function is only called when roce_gid_table is used.
2427	 */
2428	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2429	int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2430			  u16 *pkey);
2431	int (*alloc_ucontext)(struct ib_ucontext *context,
2432			      struct ib_udata *udata);
2433	void (*dealloc_ucontext)(struct ib_ucontext *context);
2434	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2435	/**
2436	 * This will be called once the refcount of an entry in mmap_xa reaches
2437	 * zero. The type of the memory that was mapped may differ between
2438	 * entries and is opaque to the rdma_user_mmap interface.
2439	 * Therefore it needs to be implemented by the driver in mmap_free.
2440	 */
2441	void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2442	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2443	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2444	int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2445	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2446			 struct ib_udata *udata);
2447	int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2448			      struct ib_udata *udata);
2449	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2450	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2451	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2452	int (*create_srq)(struct ib_srq *srq,
2453			  struct ib_srq_init_attr *srq_init_attr,
2454			  struct ib_udata *udata);
2455	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2456			  enum ib_srq_attr_mask srq_attr_mask,
2457			  struct ib_udata *udata);
2458	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2459	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2460	int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
2461			 struct ib_udata *udata);
2462	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2463			 int qp_attr_mask, struct ib_udata *udata);
2464	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2465			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2466	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2467	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2468			 struct uverbs_attr_bundle *attrs);
2469	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2470	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2471	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2472	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2473	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2474				     u64 virt_addr, int mr_access_flags,
2475				     struct ib_udata *udata);
2476	struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
2477					    u64 length, u64 virt_addr, int fd,
2478					    int mr_access_flags,
2479					    struct uverbs_attr_bundle *attrs);
2480	struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
2481				       u64 length, u64 virt_addr,
2482				       int mr_access_flags, struct ib_pd *pd,
2483				       struct ib_udata *udata);
2484	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2485	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2486				  u32 max_num_sg);
2487	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2488					    u32 max_num_data_sg,
2489					    u32 max_num_meta_sg);
2490	int (*advise_mr)(struct ib_pd *pd,
2491			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2492			 struct ib_sge *sg_list, u32 num_sge,
2493			 struct uverbs_attr_bundle *attrs);
2494
2495	/*
2496	 * Kernel users should universally support relaxed ordering (RO), as
2497	 * they are designed to read data only after observing the CQE and use
2498	 * the DMA API correctly.
2499	 *
2500	 * Some drivers implicitly enable RO if the platform supports it.
2501	 */
2502	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2503			 unsigned int *sg_offset);
2504	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2505			       struct ib_mr_status *mr_status);
2506	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2507	int (*dealloc_mw)(struct ib_mw *mw);
2508	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2509	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2510	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2511	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2512	struct ib_flow *(*create_flow)(struct ib_qp *qp,
2513				       struct ib_flow_attr *flow_attr,
2514				       struct ib_udata *udata);
2515	int (*destroy_flow)(struct ib_flow *flow_id);
2516	int (*destroy_flow_action)(struct ib_flow_action *action);
2517	int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2518				 int state);
2519	int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2520			     struct ifla_vf_info *ivf);
2521	int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2522			    struct ifla_vf_stats *stats);
2523	int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2524			    struct ifla_vf_guid *node_guid,
2525			    struct ifla_vf_guid *port_guid);
2526	int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2527			   int type);
2528	struct ib_wq *(*create_wq)(struct ib_pd *pd,
2529				   struct ib_wq_init_attr *init_attr,
2530				   struct ib_udata *udata);
2531	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2532	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2533			 u32 wq_attr_mask, struct ib_udata *udata);
2534	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2535				    struct ib_rwq_ind_table_init_attr *init_attr,
2536				    struct ib_udata *udata);
2537	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2538	struct ib_dm *(*alloc_dm)(struct ib_device *device,
2539				  struct ib_ucontext *context,
2540				  struct ib_dm_alloc_attr *attr,
2541				  struct uverbs_attr_bundle *attrs);
2542	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2543	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2544				   struct ib_dm_mr_attr *attr,
2545				   struct uverbs_attr_bundle *attrs);
2546	int (*create_counters)(struct ib_counters *counters,
2547			       struct uverbs_attr_bundle *attrs);
2548	int (*destroy_counters)(struct ib_counters *counters);
2549	int (*read_counters)(struct ib_counters *counters,
2550			     struct ib_counters_read_attr *counters_read_attr,
2551			     struct uverbs_attr_bundle *attrs);
2552	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2553			    int data_sg_nents, unsigned int *data_sg_offset,
2554			    struct scatterlist *meta_sg, int meta_sg_nents,
2555			    unsigned int *meta_sg_offset);
2556
2557	/**
2558	 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
2559	 *   fill in the driver initialized data.  The struct is kfree()'ed by
2560	 *   the sysfs core when the device is removed.  A lifespan of -1 in the
2561	 *   return struct tells the core to set a default lifespan.
2562	 */
2563	struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
2564	struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
2565						     u32 port_num);
2566	/**
2567	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2568	 * @index - The index in the value array we wish to have updated, or
2569	 *   num_counters if we want all stats updated
2570	 * Return codes -
2571	 *   < 0 - Error, no counters updated
2572	 *   index - Updated the single counter pointed to by index
2573	 *   num_counters - Updated all counters (will reset the timestamp
2574	 *     and prevent further calls for lifespan milliseconds)
2575	 * Drivers are allowed to update all counters in lieu of just the
2576	 *   one given in index at their option
2577	 */
2578	int (*get_hw_stats)(struct ib_device *device,
2579			    struct rdma_hw_stats *stats, u32 port, int index);
2580
2581	/**
2582	 * modify_hw_stat - Modify the counter configuration
2583	 * @enable: true/false when enable/disable a counter
2584	 * Return codes - 0 on success or error code otherwise.
2585	 */
2586	int (*modify_hw_stat)(struct ib_device *device, u32 port,
2587			      unsigned int counter_index, bool enable);
2588	/**
2589	 * Allows rdma drivers to add their own restrack attributes.
2590	 */
2591	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2592	int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2593	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2594	int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2595	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2596	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2597	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2598	int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq);
2599	int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq);
2600
2601	/* Device lifecycle callbacks */
2602	/*
2603	 * Called after the device becomes registered, before clients are
2604	 * attached
2605	 */
2606	int (*enable_driver)(struct ib_device *dev);
2607	/*
2608	 * This is called as part of ib_dealloc_device().
2609	 */
2610	void (*dealloc_driver)(struct ib_device *dev);
2611
2612	/* iWarp CM callbacks */
2613	void (*iw_add_ref)(struct ib_qp *qp);
2614	void (*iw_rem_ref)(struct ib_qp *qp);
2615	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2616	int (*iw_connect)(struct iw_cm_id *cm_id,
2617			  struct iw_cm_conn_param *conn_param);
2618	int (*iw_accept)(struct iw_cm_id *cm_id,
2619			 struct iw_cm_conn_param *conn_param);
2620	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2621			 u8 pdata_len);
2622	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2623	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2624	/**
2625	 * counter_bind_qp - Bind a QP to a counter.
2626	 * @counter - The counter to be bound. If counter->id is zero then
2627	 *   the driver needs to allocate a new counter and set counter->id
2628	 */
2629	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2630	/**
2631	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2632	 *   counter and bind it onto the default one
2633	 */
2634	int (*counter_unbind_qp)(struct ib_qp *qp);
2635	/**
2636	 * counter_dealloc - De-allocate the hw counter
2637	 */
2638	int (*counter_dealloc)(struct rdma_counter *counter);
2639	/**
2640	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2641	 * the driver initialized data.
2642	 */
2643	struct rdma_hw_stats *(*counter_alloc_stats)(
2644		struct rdma_counter *counter);
2645	/**
2646	 * counter_update_stats - Query the stats value of this counter
2647	 */
2648	int (*counter_update_stats)(struct rdma_counter *counter);
2649
2650	/**
2651	 * Allows rdma drivers to add their own restrack attributes
2652	 * dumped via 'rdma stat' iproute2 command.
2653	 */
2654	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2655
2656	/* query driver for its ucontext properties */
2657	int (*query_ucontext)(struct ib_ucontext *context,
2658			      struct uverbs_attr_bundle *attrs);
2659
2660	/*
2661	 * Provide NUMA node. This API exists for rdmavt/hfi1 only.
2662	 * Everyone else relies on the Linux memory management model.
2663	 */
2664	int (*get_numa_node)(struct ib_device *dev);
2665
2666	/**
2667	 * add_sub_dev - Add a sub IB device
2668	 */
2669	struct ib_device *(*add_sub_dev)(struct ib_device *parent,
2670					 enum rdma_nl_dev_type type,
2671					 const char *name);
2672
2673	/**
2674	 * del_sub_dev - Delete a sub IB device
2675	 */
2676	void (*del_sub_dev)(struct ib_device *sub_dev);
2677
2678	/**
2679	 * ufile_hw_cleanup - Attempt to clean up uobjects' HW resources inside
2680	 * the ufile.
2681	 */
2682	void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile);
2683
2684	DECLARE_RDMA_OBJ_SIZE(ib_ah);
2685	DECLARE_RDMA_OBJ_SIZE(ib_counters);
2686	DECLARE_RDMA_OBJ_SIZE(ib_cq);
2687	DECLARE_RDMA_OBJ_SIZE(ib_mw);
2688	DECLARE_RDMA_OBJ_SIZE(ib_pd);
2689	DECLARE_RDMA_OBJ_SIZE(ib_qp);
2690	DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2691	DECLARE_RDMA_OBJ_SIZE(ib_srq);
2692	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2693	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2694};
2695
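/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * provider wraps struct ib_pd in its own container and advertises the
 * container size with INIT_RDMA_OBJ_SIZE, so the core can allocate it with
 * rdma_zalloc_drv_obj().  "example_pd" and "example_alloc_pd" are made up.
 */
struct example_pd {
	struct ib_pd	ibpd;	/* core object embedded in the container */
	u32		pdn;	/* hypothetical driver-private state */
};

static int example_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct example_pd *pd = container_of(ibpd, struct example_pd, ibpd);

	pd->pdn = 0;		/* a real driver would program HW here */
	return 0;
}

static const struct ib_device_ops example_dev_ops = {
	.alloc_pd = example_alloc_pd,
	/* lets the core size the allocation done by rdma_zalloc_drv_obj() */
	INIT_RDMA_OBJ_SIZE(ib_pd, example_pd, ibpd),
};
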
2696struct ib_core_device {
2697	/* device must be the first element in the structure until the
2698	 * union of ib_core_device and device exists in ib_device.
2699	 */
2700	struct device dev;
2701	possible_net_t rdma_net;
2702	struct kobject *ports_kobj;
2703	struct list_head port_list;
2704	struct ib_device *owner; /* reach back to owner ib_device */
2705};
2706
2707struct rdma_restrack_root;
2708struct ib_device {
2709	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2710	struct device                *dma_device;
2711	struct ib_device_ops	     ops;
2712	char                          name[IB_DEVICE_NAME_MAX];
2713	struct rcu_head rcu_head;
2714
2715	struct list_head              event_handler_list;
2716	/* Protects event_handler_list */
2717	struct rw_semaphore event_handler_rwsem;
2718
2719	/* Protects QP's event_handler calls and open_qp list */
2720	spinlock_t qp_open_list_lock;
2721
2722	struct rw_semaphore	      client_data_rwsem;
2723	struct xarray                 client_data;
2724	struct mutex                  unregistration_lock;
2725
2726	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2727	rwlock_t cache_lock;
2728	/**
2729	 * port_data is indexed by port number
2730	 */
2731	struct ib_port_data *port_data;
2732
2733	int			      num_comp_vectors;
2734
2735	union {
2736		struct device		dev;
2737		struct ib_core_device	coredev;
2738	};
2739
2740	/* First group is for device attributes,
2741	 * second group is for driver-provided attributes (optional),
2742	 * third group is for the hw_stats.
2743	 * It is a NULL-terminated array.
2744	 */
2745	const struct attribute_group	*groups[4];
2746
2747	u64			     uverbs_cmd_mask;
2748
2749	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2750	__be64			     node_guid;
2751	u32			     local_dma_lkey;
2752	u16                          is_switch:1;
2753	/* Indicates kernel verbs support, should not be used in drivers */
2754	u16                          kverbs_provider:1;
2755	/* CQ adaptive moderation (RDMA DIM) */
2756	u16                          use_cq_dim:1;
2757	u8                           node_type;
2758	u32			     phys_port_cnt;
2759	struct ib_device_attr        attrs;
2760	struct hw_stats_device_data *hw_stats_data;
2761
2762#ifdef CONFIG_CGROUP_RDMA
2763	struct rdmacg_device         cg_device;
2764#endif
2765
2766	u32                          index;
2767
2768	spinlock_t                   cq_pools_lock;
2769	struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2770
2771	struct rdma_restrack_root *res;
2772
2773	const struct uapi_definition   *driver_def;
2774
2775	/*
2776	 * Positive refcount indicates that the device is currently
2777	 * registered and cannot be unregistered.
2778	 */
2779	refcount_t refcount;
2780	struct completion unreg_completion;
2781	struct work_struct unregistration_work;
2782
2783	const struct rdma_link_ops *link_ops;
2784
2785	/* Protects compat_devs xarray modifications */
2786	struct mutex compat_devs_mutex;
2787	/* Maintains compat devices for each net namespace */
2788	struct xarray compat_devs;
2789
2790	/* Used by iWarp CM */
2791	char iw_ifname[IFNAMSIZ];
2792	u32 iw_driver_flags;
2793	u32 lag_flags;
2794
2795	/* A parent device has a list of sub-devices */
2796	struct mutex subdev_lock;
2797	struct list_head subdev_list_head;
2798
2799	/* A sub device has a type and a parent */
2800	enum rdma_nl_dev_type type;
2801	struct ib_device *parent;
2802	struct list_head subdev_list;
2803
2804	enum rdma_nl_name_assign_type name_assign_type;
2805};
2806
2807static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
2808				    gfp_t gfp, bool is_numa_aware)
2809{
2810	if (is_numa_aware && dev->ops.get_numa_node)
2811		return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
2812
2813	return kzalloc(size, gfp);
2814}
2815
2816struct ib_client_nl_info;
2817struct ib_client {
2818	const char *name;
2819	int (*add)(struct ib_device *ibdev);
2820	void (*remove)(struct ib_device *, void *client_data);
2821	void (*rename)(struct ib_device *dev, void *client_data);
2822	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2823			   struct ib_client_nl_info *res);
2824	int (*get_global_nl_info)(struct ib_client_nl_info *res);
2825
2826	/* Returns the net_dev belonging to this ib_client and matching the
2827	 * given parameters.
2828	 * @dev:	 An RDMA device that the net_dev uses for communication.
2829	 * @port:	 A physical port number on the RDMA device.
2830	 * @pkey:	 P_Key that the net_dev uses if applicable.
2831	 * @gid:	 A GID that the net_dev uses to communicate.
2832	 * @addr:	 An IP address the net_dev is configured with.
2833	 * @client_data: The device's client data set by ib_set_client_data().
2834	 *
2835	 * An ib_client that implements a net_dev on top of RDMA devices
2836	 * (such as IP over IB) should implement this callback, allowing the
2837	 * rdma_cm module to find the right net_dev for a given request.
2838	 *
2839	 * The caller is responsible for calling dev_put on the returned
2840	 * netdev. */
2841	struct net_device *(*get_net_dev_by_params)(
2842			struct ib_device *dev,
2843			u32 port,
2844			u16 pkey,
2845			const union ib_gid *gid,
2846			const struct sockaddr *addr,
2847			void *client_data);
2848
2849	refcount_t uses;
2850	struct completion uses_zero;
2851	u32 client_id;
2852
2853	/* kverbs are not required by the client */
2854	u8 no_kverbs_req:1;
2855};
2856
2857/*
2858 * IB block DMA iterator
2859 *
2860 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2861 * to a HW supported page size.
2862 */
2863struct ib_block_iter {
2864	/* internal states */
2865	struct scatterlist *__sg;	/* sg holding the current aligned block */
2866	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
2867	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
2868	unsigned int __sg_nents;	/* number of SG entries */
2869	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
2870	unsigned int __pg_bit;		/* alignment of current block */
2871};
2872
2873struct ib_device *_ib_alloc_device(size_t size);
2874#define ib_alloc_device(drv_struct, member)                                    \
2875	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2876				      BUILD_BUG_ON_ZERO(offsetof(              \
2877					      struct drv_struct, member))),    \
2878		     struct drv_struct, member)
2879
2880void ib_dealloc_device(struct ib_device *device);
2881
2882void ib_get_device_fw_str(struct ib_device *device, char *str);
2883
2884int ib_register_device(struct ib_device *device, const char *name,
2885		       struct device *dma_device);
2886void ib_unregister_device(struct ib_device *device);
2887void ib_unregister_driver(enum rdma_driver_id driver_id);
2888void ib_unregister_device_and_put(struct ib_device *device);
2889void ib_unregister_device_queued(struct ib_device *ib_dev);
2890
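/*
 * Illustrative sketch (not part of the original header): a typical provider
 * probe flow using the helpers above.  "example_dev" is hypothetical and
 * error handling is reduced to the minimum; a real driver would also fill in
 * its ops with ib_set_device_ops() (declared further down) before registering.
 */
struct example_dev {
	struct ib_device ibdev;		/* must be embedded; see ib_alloc_device() */
	int		 instance;	/* hypothetical driver-private state */
};

static inline struct example_dev *example_probe_one(struct device *dma_device)
{
	struct example_dev *dev = ib_alloc_device(example_dev, ibdev);

	if (!dev)
		return NULL;

	if (ib_register_device(&dev->ibdev, "example%d", dma_device)) {
		ib_dealloc_device(&dev->ibdev);
		return NULL;
	}
	return dev;
}
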
2891int ib_register_client   (struct ib_client *client);
2892void ib_unregister_client(struct ib_client *client);
2893
2894void __rdma_block_iter_start(struct ib_block_iter *biter,
2895			     struct scatterlist *sglist,
2896			     unsigned int nents,
2897			     unsigned long pgsz);
2898bool __rdma_block_iter_next(struct ib_block_iter *biter);
2899
2900/**
2901 * rdma_block_iter_dma_address - get the aligned dma address of the current
2902 * block held by the block iterator.
2903 * @biter: block iterator holding the memory block
2904 */
2905static inline dma_addr_t
2906rdma_block_iter_dma_address(struct ib_block_iter *biter)
2907{
2908	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2909}
2910
2911/**
2912 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2913 * @sglist: sglist to iterate over
2914 * @biter: block iterator holding the memory block
2915 * @nents: maximum number of sg entries to iterate over
2916 * @pgsz: best HW supported page size to use
2917 *
2918 * Callers may use rdma_block_iter_dma_address() to get each
2919 * block's aligned DMA address.
2920 */
2921#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
2922	for (__rdma_block_iter_start(biter, sglist, nents,	\
2923				     pgsz);			\
2924	     __rdma_block_iter_next(biter);)
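/*
 * Editorial usage sketch (not from the original header): a driver that has
 * DMA-mapped a scatterlist could walk it in HW-page-sized blocks like this,
 * assuming "sgt" and "pgsz" were obtained elsewhere and "hw_write_pte()" is
 * a hypothetical driver helper:
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sgt->sgl, &biter, sgt->nents, pgsz)
 *		hw_write_pte(priv, rdma_block_iter_dma_address(&biter));
 */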
2925
2926/**
2927 * ib_get_client_data - Get IB client context
2928 * @device:Device to get context for
2929 * @client:Client to get context for
2930 *
2931 * ib_get_client_data() returns the client context data set with
2932 * ib_set_client_data(). This can only be called while the client is
2933 * registered to the device; once the ib_client remove() callback returns this
2934 * cannot be called.
2935 */
2936static inline void *ib_get_client_data(struct ib_device *device,
2937				       struct ib_client *client)
2938{
2939	return xa_load(&device->client_data, client->client_id);
2940}
2941void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2942			 void *data);
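/*
 * Editorial usage sketch (not from the original header): a minimal ib_client
 * that keeps per-device state via the client-data hooks above; "struct
 * my_state" and the my_* names are hypothetical and error handling is
 * trimmed:
 *
 *	static struct ib_client my_client;
 *
 *	static int my_add(struct ib_device *ibdev)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &my_client, st);
 *		return 0;
 *	}
 *
 *	static void my_remove(struct ib_device *ibdev, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	err = ib_register_client(&my_client);
 */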
2943void ib_set_device_ops(struct ib_device *device,
2944		       const struct ib_device_ops *ops);
2945
2946int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2947		      unsigned long pfn, unsigned long size, pgprot_t prot,
2948		      struct rdma_user_mmap_entry *entry);
2949int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2950				struct rdma_user_mmap_entry *entry,
2951				size_t length);
2952int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2953				      struct rdma_user_mmap_entry *entry,
2954				      size_t length, u32 min_pgoff,
2955				      u32 max_pgoff);
2956
2957#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
2958void rdma_user_mmap_disassociate(struct ib_device *device);
2959#else
2960static inline void rdma_user_mmap_disassociate(struct ib_device *device)
2961{
2962}
2963#endif
2964
2965static inline int
2966rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
2967				  struct rdma_user_mmap_entry *entry,
2968				  size_t length, u32 pgoff)
2969{
2970	return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
2971						 pgoff);
2972}
2973
2974struct rdma_user_mmap_entry *
2975rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2976			       unsigned long pgoff);
2977struct rdma_user_mmap_entry *
2978rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2979			 struct vm_area_struct *vma);
2980void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2981
2982void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
2983
2984static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2985{
2986	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2987}
2988
2989static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2990{
2991	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2992}
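/*
 * Editorial usage sketch (not from the original header): a provider's verb
 * handler typically mirrors a request/response pair through udata; "struct
 * my_req" and "struct my_resp" stand in for a hypothetical driver ABI:
 *
 *	struct my_req req;
 *	struct my_resp resp = {};
 *	int err;
 *
 *	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
 *	if (err)
 *		return err;
 *	... program the hardware based on req ...
 *	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
 */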
2993
2994static inline bool ib_is_buffer_cleared(const void __user *p,
2995					size_t len)
2996{
2997	bool ret;
2998	u8 *buf;
2999
3000	if (len > USHRT_MAX)
3001		return false;
3002
3003	buf = memdup_user(p, len);
3004	if (IS_ERR(buf))
3005		return false;
3006
3007	ret = !memchr_inv(buf, 0, len);
3008	kfree(buf);
3009	return ret;
3010}
3011
3012static inline bool ib_is_udata_cleared(struct ib_udata *udata,
3013				       size_t offset,
3014				       size_t len)
3015{
3016	return ib_is_buffer_cleared(udata->inbuf + offset, len);
3017}
3018
3019/**
3020 * ib_modify_qp_is_ok - Check that the supplied attribute mask
3021 * contains all required attributes and no attributes not allowed for
3022 * the given QP state transition.
3023 * @cur_state: Current QP state
3024 * @next_state: Next QP state
3025 * @type: QP type
3026 * @mask: Mask of supplied QP attributes
3027 *
3028 * This function is a helper function that a low-level driver's
3029 * modify_qp method can use to validate the consumer's input.  It
3030 * checks that cur_state and next_state are valid QP states, that a
3031 * transition from cur_state to next_state is allowed by the IB spec,
3032 * and that the attribute mask supplied is allowed for the transition.
3033 */
3034bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
3035			enum ib_qp_type type, enum ib_qp_attr_mask mask);
3036
3037void ib_register_event_handler(struct ib_event_handler *event_handler);
3038void ib_unregister_event_handler(struct ib_event_handler *event_handler);
3039void ib_dispatch_event(const struct ib_event *event);
3040
3041int ib_query_port(struct ib_device *device,
3042		  u32 port_num, struct ib_port_attr *port_attr);
3043
3044enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
3045					       u32 port_num);
3046
3047/**
3048 * rdma_cap_ib_switch - Check if the device is IB switch
3049 * @device: Device to check
3050 *
3051 * The device driver is responsible for setting the is_switch bit
3052 * in the ib_device structure at init time.
3053 *
3054 * Return: true if the device is IB switch.
3055 */
3056static inline bool rdma_cap_ib_switch(const struct ib_device *device)
3057{
3058	return device->is_switch;
3059}
3060
3061/**
3062 * rdma_start_port - Return the first valid port number for the device
3063 * specified
3064 *
3065 * @device: Device to be checked
3066 *
3067 * Return start port number
3068 */
3069static inline u32 rdma_start_port(const struct ib_device *device)
3070{
3071	return rdma_cap_ib_switch(device) ? 0 : 1;
3072}
3073
3074/**
3075 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3076 * @device - The struct ib_device * to iterate over
3077 * @iter - The unsigned int to store the port number
3078 */
3079#define rdma_for_each_port(device, iter)                                       \
3080	for (iter = rdma_start_port(device +				       \
3081				    BUILD_BUG_ON_ZERO(!__same_type(u32,	       \
3082								   iter)));    \
3083	     iter <= rdma_end_port(device); iter++)
3084
3085/**
3086 * rdma_end_port - Return the last valid port number for the device
3087 * specified
3088 *
3089 * @device: Device to be checked
3090 *
3091 * Return last port number
3092 */
3093static inline u32 rdma_end_port(const struct ib_device *device)
3094{
3095	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3096}
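/*
 * Editorial usage sketch (not from the original header): rdma_for_each_port()
 * requires a u32 iterator (enforced by the BUILD_BUG_ON_ZERO() above); for
 * example, counting the InfiniBand ports of a device:
 *
 *	u32 port;
 *	unsigned int ib_ports = 0;
 *
 *	rdma_for_each_port(ibdev, port)
 *		if (rdma_protocol_ib(ibdev, port))
 *			ib_ports++;
 */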
3097
3098static inline int rdma_is_port_valid(const struct ib_device *device,
3099				     unsigned int port)
3100{
3101	return (port >= rdma_start_port(device) &&
3102		port <= rdma_end_port(device));
3103}
3104
3105static inline bool rdma_is_grh_required(const struct ib_device *device,
3106					u32 port_num)
3107{
3108	return device->port_data[port_num].immutable.core_cap_flags &
3109	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
3110}
3111
3112static inline bool rdma_protocol_ib(const struct ib_device *device,
3113				    u32 port_num)
3114{
3115	return device->port_data[port_num].immutable.core_cap_flags &
3116	       RDMA_CORE_CAP_PROT_IB;
3117}
3118
3119static inline bool rdma_protocol_roce(const struct ib_device *device,
3120				      u32 port_num)
3121{
3122	return device->port_data[port_num].immutable.core_cap_flags &
3123	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3124}
3125
3126static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
3127						u32 port_num)
3128{
3129	return device->port_data[port_num].immutable.core_cap_flags &
3130	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3131}
3132
3133static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
3134						u32 port_num)
3135{
3136	return device->port_data[port_num].immutable.core_cap_flags &
3137	       RDMA_CORE_CAP_PROT_ROCE;
3138}
3139
3140static inline bool rdma_protocol_iwarp(const struct ib_device *device,
3141				       u32 port_num)
3142{
3143	return device->port_data[port_num].immutable.core_cap_flags &
3144	       RDMA_CORE_CAP_PROT_IWARP;
3145}
3146
3147static inline bool rdma_ib_or_roce(const struct ib_device *device,
3148				   u32 port_num)
3149{
3150	return rdma_protocol_ib(device, port_num) ||
3151		rdma_protocol_roce(device, port_num);
3152}
3153
3154static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
3155					    u32 port_num)
3156{
3157	return device->port_data[port_num].immutable.core_cap_flags &
3158	       RDMA_CORE_CAP_PROT_RAW_PACKET;
3159}
3160
3161static inline bool rdma_protocol_usnic(const struct ib_device *device,
3162				       u32 port_num)
3163{
3164	return device->port_data[port_num].immutable.core_cap_flags &
3165	       RDMA_CORE_CAP_PROT_USNIC;
3166}
3167
3168/**
3169 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
3170 * Management Datagrams.
3171 * @device: Device to check
3172 * @port_num: Port number to check
3173 *
3174 * Management Datagrams (MAD) are a required part of the InfiniBand
3175 * specification and are supported on all InfiniBand devices.  A slightly
3176 * extended version is also supported on OPA interfaces.
3177 *
3178 * Return: true if the port supports sending/receiving of MAD packets.
3179 */
3180static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3181{
3182	return device->port_data[port_num].immutable.core_cap_flags &
3183	       RDMA_CORE_CAP_IB_MAD;
3184}
3185
3186/**
3187 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3188 * Management Datagrams.
3189 * @device: Device to check
3190 * @port_num: Port number to check
3191 *
3192 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3193 * datagrams with their own versions.  These OPA MADs share many but not all of
3194 * the characteristics of InfiniBand MADs.
3195 *
3196 * OPA MADs differ in the following ways:
3197 *
3198 *    1) MADs are variable size up to 2K
3199 *       IBTA defined MADs remain fixed at 256 bytes
3200 *    2) OPA SMPs must carry valid PKeys
3201 *    3) OPA SMP packets are a different format
3202 *
3203 * Return: true if the port supports OPA MAD packet formats.
3204 */
3205static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
3206{
3207	return device->port_data[port_num].immutable.core_cap_flags &
3208		RDMA_CORE_CAP_OPA_MAD;
3209}
3210
3211/**
3212 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3213 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3214 * @device: Device to check
3215 * @port_num: Port number to check
3216 *
3217 * Each InfiniBand node is required to provide a Subnet Management Agent
3218 * that the subnet manager can access.  Prior to the fabric being fully
3219 * configured by the subnet manager, the SMA is accessed via a well known
3220 * interface called the Subnet Management Interface (SMI).  This interface
3221 * uses directed route packets to communicate with the SM to get around the
3222 * chicken and egg problem of the SM needing to know what's on the fabric
3223 * in order to configure the fabric, and needing to configure the fabric in
3224 * order to send packets to the devices on the fabric.  These directed
3225 * route packets do not need the fabric fully configured in order to reach
3226 * their destination.  The SMI is the only method allowed to send
3227 * directed route packets on an InfiniBand fabric.
3228 *
3229 * Return: true if the port provides an SMI.
3230 */
3231static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
3232{
3233	return device->port_data[port_num].immutable.core_cap_flags &
3234	       RDMA_CORE_CAP_IB_SMI;
3235}
3236
3237/**
3238 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3239 * Communication Manager.
3240 * @device: Device to check
3241 * @port_num: Port number to check
3242 *
3243 * The InfiniBand Communication Manager is one of many pre-defined General
3244 * Service Agents (GSA) that are accessed via the General Service
3245 * Interface (GSI).  Its role is to facilitate the establishment of connections
3246 * between nodes as well as other management related tasks for established
3247 * connections.
3248 *
3249 * Return: true if the port supports an IB CM (this does not guarantee that
3250 * a CM is actually running however).
3251 */
3252static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
3253{
3254	return device->port_data[port_num].immutable.core_cap_flags &
3255	       RDMA_CORE_CAP_IB_CM;
3256}
3257
3258/**
3259 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3260 * Communication Manager.
3261 * @device: Device to check
3262 * @port_num: Port number to check
3263 *
3264 * Similar to above, but specific to iWARP connections which have a different
3265 * management protocol than InfiniBand.
3266 *
3267 * Return: true if the port supports an iWARP CM (this does not guarantee that
3268 * a CM is actually running however).
3269 */
3270static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
3271{
3272	return device->port_data[port_num].immutable.core_cap_flags &
3273	       RDMA_CORE_CAP_IW_CM;
3274}
3275
3276/**
3277 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3278 * Subnet Administration.
3279 * @device: Device to check
3280 * @port_num: Port number to check
3281 *
3282 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3283 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3284 * fabrics, devices should resolve routes to other hosts by contacting the
3285 * SA to query the proper route.
3286 *
3287 * Return: true if the port should act as a client to the fabric Subnet
3288 * Administration interface.  This does not imply that the SA service is
3289 * running locally.
3290 */
3291static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3292{
3293	return device->port_data[port_num].immutable.core_cap_flags &
3294	       RDMA_CORE_CAP_IB_SA;
3295}
3296
3297/**
3298 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3299 * Multicast.
3300 * @device: Device to check
3301 * @port_num: Port number to check
3302 *
3303 * InfiniBand multicast registration is more complex than normal IPv4 or
3304 * IPv6 multicast registration.  Each Host Channel Adapter must register
3305 * with the Subnet Manager when it wishes to join a multicast group.  It
3306 * should do so only once regardless of how many queue pairs it subscribes
3307 * to this group.  And it should leave the group only after all queue pairs
3308 * attached to the group have been detached.
3309 *
3310 * Return: true if the port must undertake the additional administrative
3311 * overhead of registering/unregistering with the SM and tracking of the
3312 * total number of queue pairs attached to the multicast group.
3313 */
3314static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
3315				     u32 port_num)
3316{
3317	return rdma_cap_ib_sa(device, port_num);
3318}
3319
3320/**
3321 * rdma_cap_af_ib - Check if the port of device has the capability
3322 * Native Infiniband Address.
3323 * @device: Device to check
3324 * @port_num: Port number to check
3325 *
3326 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3327 * GID.  RoCE uses a different mechanism, but still generates a GID via
3328 * a prescribed mechanism and port specific data.
3329 *
3330 * Return: true if the port uses a GID address to identify devices on the
3331 * network.
3332 */
3333static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
3334{
3335	return device->port_data[port_num].immutable.core_cap_flags &
3336	       RDMA_CORE_CAP_AF_IB;
3337}
3338
3339/**
3340 * rdma_cap_eth_ah - Check if the port of device has the capability
3341 * Ethernet Address Handle.
3342 * @device: Device to check
3343 * @port_num: Port number to check
3344 *
3345 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3346 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3347 * port.  Normally, packet headers are generated by the sending host
3348 * adapter, but when sending connectionless datagrams, we must manually
3349 * inject the proper headers for the fabric we are communicating over.
3350 *
3351 * Return: true if we are running as a RoCE port and must force the
3352 * addition of a Global Route Header built from our Ethernet Address
3353 * Handle into our header list for connectionless packets.
3354 */
3355static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3356{
3357	return device->port_data[port_num].immutable.core_cap_flags &
3358	       RDMA_CORE_CAP_ETH_AH;
3359}
3360
3361/**
3362 * rdma_cap_opa_ah - Check if the port of device supports
3363 * OPA Address handles
3364 * @device: Device to check
3365 * @port_num: Port number to check
3366 *
3367 * Return: true if we are running on an OPA device which supports
3368 * the extended OPA addressing.
3369 */
3370static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
3371{
3372	return (device->port_data[port_num].immutable.core_cap_flags &
3373		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3374}
3375
3376/**
3377 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3378 *
3379 * @device: Device
3380 * @port_num: Port number
3381 *
3382 * This MAD size includes the MAD headers and MAD payload.  No other headers
3383 * are included.
3384 *
3385 * Return the max MAD size required by the Port.  Will return 0 if the port
3386 * does not support MADs
3387 */
3388static inline size_t rdma_max_mad_size(const struct ib_device *device,
3389				       u32 port_num)
3390{
3391	return device->port_data[port_num].immutable.max_mad_size;
3392}
3393
3394/**
3395 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3396 * @device: Device to check
3397 * @port_num: Port number to check
3398 *
3399 * RoCE GID table mechanism manages the various GIDs for a device.
3400 *
3401 * NOTE: if allocating the port's GID table has failed, this call will still
3402 * return true, but any RoCE GID table API will fail.
3403 *
3404 * Return: true if the port uses RoCE GID table mechanism in order to manage
3405 * its GIDs.
3406 */
3407static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3408					   u32 port_num)
3409{
3410	return rdma_protocol_roce(device, port_num) &&
3411		device->ops.add_gid && device->ops.del_gid;
3412}
3413
3414/*
3415 * Check if the device supports READ W/ INVALIDATE.
3416 */
3417static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3418{
3419	/*
3420	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3421	 * has support for it yet.
3422	 */
3423	return rdma_protocol_iwarp(dev, port_num);
3424}
3425
3426/**
3427 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3428 * @device: Device
3429 * @port_num: 1 based Port number
3430 *
3431 * Return: true if the port is an Intel OPA port, false if not.
3432 */
3433static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3434					  u32 port_num)
3435{
3436	return (device->port_data[port_num].immutable.core_cap_flags &
3437		RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3438}
3439
3440/**
3441 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3442 * @device: Device
3443 * @port_num: Port number
3444 * @mtu: enum value of MTU
3445 *
3446 * Return the MTU size supported by the port as an integer value. Will return
3447 * -1 if enum value of mtu is not supported.
3448 */
3449static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3450				       int mtu)
3451{
3452	if (rdma_core_cap_opa_port(device, port))
3453		return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3454	else
3455		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3456}
3457
3458/**
3459 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3460 * @device: Device
3461 * @port_num: Port number
3462 * @attr: port attribute
3463 *
3464 * Return the MTU size supported by the port as an integer value.
3465 */
3466static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3467				     struct ib_port_attr *attr)
3468{
3469	if (rdma_core_cap_opa_port(device, port))
3470		return attr->phys_mtu;
3471	else
3472		return ib_mtu_enum_to_int(attr->max_mtu);
3473}
3474
3475int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3476			 int state);
3477int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3478		     struct ifla_vf_info *info);
3479int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3480		    struct ifla_vf_stats *stats);
3481int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3482		    struct ifla_vf_guid *node_guid,
3483		    struct ifla_vf_guid *port_guid);
3484int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3485		   int type);
3486
3487int ib_query_pkey(struct ib_device *device,
3488		  u32 port_num, u16 index, u16 *pkey);
3489
3490int ib_modify_device(struct ib_device *device,
3491		     int device_modify_mask,
3492		     struct ib_device_modify *device_modify);
3493
3494int ib_modify_port(struct ib_device *device,
3495		   u32 port_num, int port_modify_mask,
3496		   struct ib_port_modify *port_modify);
3497
3498int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3499		u32 *port_num, u16 *index);
3500
3501int ib_find_pkey(struct ib_device *device,
3502		 u32 port_num, u16 pkey, u16 *index);
3503
3504enum ib_pd_flags {
3505	/*
3506	 * Create a memory registration for all memory in the system and place
3507	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3508	 * ULPs to avoid the overhead of dynamic MRs.
3509	 *
3510	 * This flag is generally considered unsafe and must only be used in
3511	 * extremely trusted environments.  Every use of it will log a warning
3512	 * in the kernel log.
3513	 */
3514	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3515};
3516
3517struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3518		const char *caller);
3519
3520/**
3521 * ib_alloc_pd - Allocates an unused protection domain.
3522 * @device: The device on which to allocate the protection domain.
3523 * @flags: protection domain flags
3524 *
3525 * A protection domain object provides an association between QPs, shared
3526 * receive queues, address handles, memory regions, and memory windows.
3527 *
3528 * Every PD has a local_dma_lkey which can be used as the lkey value for local
3529 * memory operations.
3530 */
3531#define ib_alloc_pd(device, flags) \
3532	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3533
3534int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3535
3536/**
3537 * ib_dealloc_pd - Deallocate kernel PD
3538 * @pd: The protection domain
3539 *
3540 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3541 */
3542static inline void ib_dealloc_pd(struct ib_pd *pd)
3543{
3544	int ret = ib_dealloc_pd_user(pd, NULL);
3545
3546	WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3547}
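/*
 * Editorial usage sketch (not from the original header): typical kernel ULP
 * setup and teardown of a PD, assuming "ibdev" is a registered device:
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(ibdev, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */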
3548
3549enum rdma_create_ah_flags {
3550	/* In a sleepable context */
3551	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3552};
3553
3554/**
3555 * rdma_create_ah - Creates an address handle for the given address vector.
3556 * @pd: The protection domain associated with the address handle.
3557 * @ah_attr: The attributes of the address vector.
3558 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3559 *
3560 * The address handle is used to reference a local or global destination
3561 * in all UD QP post sends.
3562 */
3563struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3564			     u32 flags);
3565
3566/**
3567 * rdma_create_user_ah - Creates an address handle for the given address vector.
3568 * It resolves destination mac address for ah attribute of RoCE type.
3569 * @pd: The protection domain associated with the address handle.
3570 * @ah_attr: The attributes of the address vector.
3571 * @udata: pointer to user's input output buffer information needed by the
3572 *         provider driver.
3573 *
3574 * It returns a valid address handle on success and an ERR_PTR on failure.
3575 * The address handle is used to reference a local or global destination
3576 * in all UD QP post sends.
3577 */
3578struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3579				  struct rdma_ah_attr *ah_attr,
3580				  struct ib_udata *udata);
3581/**
3582 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from a GRH or IPv4
3583 *   header.
3584 * @hdr: the L3 header to parse
3585 * @net_type: type of header to parse
3586 * @sgid: place to store source gid
3587 * @dgid: place to store destination gid
3588 */
3589int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3590			      enum rdma_network_type net_type,
3591			      union ib_gid *sgid, union ib_gid *dgid);
3592
3593/**
3594 * ib_get_rdma_header_version - Get the header version
3595 * @hdr: the L3 header to parse
3596 */
3597int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3598
3599/**
3600 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3601 *   work completion.
3602 * @device: Device on which the received message arrived.
3603 * @port_num: Port on which the received message arrived.
3604 * @wc: Work completion associated with the received message.
3605 * @grh: References the received global route header.  This parameter is
3606 *   ignored unless the work completion indicates that the GRH is valid.
3607 * @ah_attr: Returned attributes that can be used when creating an address
3608 *   handle for replying to the message.
3609 * When ib_init_ah_attr_from_wc() returns success,
3610 * (a) for the IB link layer it optionally contains a reference to the SGID
3611 * attribute when a GRH is present.
3612 * (b) for RoCE link layer it contains a reference to SGID attribute.
3613 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3614 * attributes which are initialized using ib_init_ah_attr_from_wc().
3615 *
3616 */
3617int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3618			    const struct ib_wc *wc, const struct ib_grh *grh,
3619			    struct rdma_ah_attr *ah_attr);
3620
3621/**
3622 * ib_create_ah_from_wc - Creates an address handle associated with the
3623 *   sender of the specified work completion.
3624 * @pd: The protection domain associated with the address handle.
3625 * @wc: Work completion information associated with a received message.
3626 * @grh: References the received global route header.  This parameter is
3627 *   ignored unless the work completion indicates that the GRH is valid.
3628 * @port_num: The outbound port number to associate with the address.
3629 *
3630 * The address handle is used to reference a local or global destination
3631 * in all UD QP post sends.
3632 */
3633struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3634				   const struct ib_grh *grh, u32 port_num);
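/*
 * Editorial usage sketch (not from the original header): a UD service that
 * wants to answer a received datagram can derive the reply AH straight from
 * the work completion, assuming "wc" and the received GRH are at hand:
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply on a UD QP using this AH ...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */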
3635
3636/**
3637 * rdma_modify_ah - Modifies the address vector associated with an address
3638 *   handle.
3639 * @ah: The address handle to modify.
3640 * @ah_attr: The new address vector attributes to associate with the
3641 *   address handle.
3642 */
3643int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3644
3645/**
3646 * rdma_query_ah - Queries the address vector associated with an address
3647 *   handle.
3648 * @ah: The address handle to query.
3649 * @ah_attr: The address vector attributes associated with the address
3650 *   handle.
3651 */
3652int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3653
3654enum rdma_destroy_ah_flags {
3655	/* In a sleepable context */
3656	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3657};
3658
3659/**
3660 * rdma_destroy_ah_user - Destroys an address handle.
3661 * @ah: The address handle to destroy.
3662 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3663 * @udata: Valid user data or NULL for kernel objects
3664 */
3665int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3666
3667/**
3668 * rdma_destroy_ah - Destroys a kernel address handle.
3669 * @ah: The address handle to destroy.
3670 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3671 *
3672 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3673 */
3674static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3675{
3676	int ret = rdma_destroy_ah_user(ah, flags, NULL);
3677
3678	WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3679}
3680
3681struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3682				  struct ib_srq_init_attr *srq_init_attr,
3683				  struct ib_usrq_object *uobject,
3684				  struct ib_udata *udata);
3685static inline struct ib_srq *
3686ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3687{
3688	if (!pd->device->ops.create_srq)
3689		return ERR_PTR(-EOPNOTSUPP);
3690
3691	return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3692}
3693
3694/**
3695 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3696 * @srq: The SRQ to modify.
3697 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3698 *   the current values of selected SRQ attributes are returned.
3699 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3700 *   are being modified.
3701 *
3702 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3703 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3704 * the number of receives queued drops below the limit.
3705 */
3706int ib_modify_srq(struct ib_srq *srq,
3707		  struct ib_srq_attr *srq_attr,
3708		  enum ib_srq_attr_mask srq_attr_mask);
3709
3710/**
3711 * ib_query_srq - Returns the attribute list and current values for the
3712 *   specified SRQ.
3713 * @srq: The SRQ to query.
3714 * @srq_attr: The attributes of the specified SRQ.
3715 */
3716int ib_query_srq(struct ib_srq *srq,
3717		 struct ib_srq_attr *srq_attr);
3718
3719/**
3720 * ib_destroy_srq_user - Destroys the specified SRQ.
3721 * @srq: The SRQ to destroy.
3722 * @udata: Valid user data or NULL for kernel objects
3723 */
3724int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3725
3726/**
3727 * ib_destroy_srq - Destroys the specified kernel SRQ.
3728 * @srq: The SRQ to destroy.
3729 *
3730 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3731 */
3732static inline void ib_destroy_srq(struct ib_srq *srq)
3733{
3734	int ret = ib_destroy_srq_user(srq, NULL);
3735
3736	WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3737}
3738
3739/**
3740 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3741 * @srq: The SRQ to post the work request on.
3742 * @recv_wr: A list of work requests to post on the receive queue.
3743 * @bad_recv_wr: On an immediate failure, this parameter will reference
3744 *   the work request that failed to be posted on the QP.
3745 */
3746static inline int ib_post_srq_recv(struct ib_srq *srq,
3747				   const struct ib_recv_wr *recv_wr,
3748				   const struct ib_recv_wr **bad_recv_wr)
3749{
3750	const struct ib_recv_wr *dummy;
3751
3752	return srq->device->ops.post_srq_recv(srq, recv_wr,
3753					      bad_recv_wr ? : &dummy);
3754}
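/*
 * Editorial usage sketch (not from the original header): replenishing an SRQ
 * with a single-SGE receive buffer, assuming "dma_addr", "len" and "lkey"
 * describe an already DMA-mapped buffer and "my_ctx" is a hypothetical
 * per-buffer cookie:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = (uintptr_t)my_ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *
 *	err = ib_post_srq_recv(srq, &wr, NULL);
 */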
3755
3756struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
3757				  struct ib_qp_init_attr *qp_init_attr,
3758				  const char *caller);
3759/**
3760 * ib_create_qp - Creates a kernel QP associated with the specific protection
3761 * domain.
3762 * @pd: The protection domain associated with the QP.
3763 * @init_attr: A list of initial attributes required to create the
3764 *   QP.  If QP creation succeeds, then the attributes are updated to
3765 *   the actual capabilities of the created QP.
3766 */
3767static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3768					 struct ib_qp_init_attr *init_attr)
3769{
3770	return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
3771}
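/*
 * Editorial usage sketch (not from the original header): creating a kernel
 * RC QP bound to previously allocated CQs, with illustrative queue depths:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */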
3772
3773/**
3774 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3775 * @qp: The QP to modify.
3776 * @attr: On input, specifies the QP attributes to modify.  On output,
3777 *   the current values of selected QP attributes are returned.
3778 * @attr_mask: A bit-mask used to specify which attributes of the QP
3779 *   are being modified.
3780 * @udata: pointer to user's input output buffer information
3781 *   needed by the provider driver.
3782 * It returns 0 on success and returns appropriate error code on error.
3783 */
3784int ib_modify_qp_with_udata(struct ib_qp *qp,
3785			    struct ib_qp_attr *attr,
3786			    int attr_mask,
3787			    struct ib_udata *udata);
3788
3789/**
3790 * ib_modify_qp - Modifies the attributes for the specified QP and then
3791 *   transitions the QP to the given state.
3792 * @qp: The QP to modify.
3793 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3794 *   the current values of selected QP attributes are returned.
3795 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3796 *   are being modified.
3797 */
3798int ib_modify_qp(struct ib_qp *qp,
3799		 struct ib_qp_attr *qp_attr,
3800		 int qp_attr_mask);
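/*
 * Editorial usage sketch (not from the original header): the first step of
 * bringing up an RC QP is the RESET->INIT transition, which must supply the
 * P_Key index, port and access flags along with the new state:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = port,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	err = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */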
3801
3802/**
3803 * ib_query_qp - Returns the attribute list and current values for the
3804 *   specified QP.
3805 * @qp: The QP to query.
3806 * @qp_attr: The attributes of the specified QP.
3807 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3808 * @qp_init_attr: Additional attributes of the selected QP.
3809 *
3810 * The qp_attr_mask may be used to limit the query to gathering only the
3811 * selected attributes.
3812 */
3813int ib_query_qp(struct ib_qp *qp,
3814		struct ib_qp_attr *qp_attr,
3815		int qp_attr_mask,
3816		struct ib_qp_init_attr *qp_init_attr);
3817
3818/**
3819 * ib_destroy_qp_user - Destroys the specified QP.
3820 * @qp: The QP to destroy.
3821 * @udata: Valid udata or NULL for kernel objects
3822 */
3823int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3824
3825/**
3826 * ib_destroy_qp - Destroys the specified kernel QP.
3827 * @qp: The QP to destroy.
3828 *
3829 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3830 */
3831static inline int ib_destroy_qp(struct ib_qp *qp)
3832{
3833	return ib_destroy_qp_user(qp, NULL);
3834}
3835
3836/**
3837 * ib_open_qp - Obtain a reference to an existing sharable QP.
3838 * @xrcd - XRC domain
3839 * @qp_open_attr: Attributes identifying the QP to open.
3840 *
3841 * Returns a reference to a sharable QP.
3842 */
3843struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3844			 struct ib_qp_open_attr *qp_open_attr);
3845
3846/**
3847 * ib_close_qp - Release an external reference to a QP.
3848 * @qp: The QP handle to release
3849 *
3850 * The opened QP handle is released by the caller.  The underlying
3851 * shared QP is not destroyed until all internal references are released.
3852 */
3853int ib_close_qp(struct ib_qp *qp);
3854
3855/**
3856 * ib_post_send - Posts a list of work requests to the send queue of
3857 *   the specified QP.
3858 * @qp: The QP to post the work request on.
3859 * @send_wr: A list of work requests to post on the send queue.
3860 * @bad_send_wr: On an immediate failure, this parameter will reference
3861 *   the work request that failed to be posted on the QP.
3862 *
3863 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3864 * error is returned, the QP state shall not be affected,
3865 * ib_post_send() will return an immediate error after queueing any
3866 * earlier work requests in the list.
3867 */
3868static inline int ib_post_send(struct ib_qp *qp,
3869			       const struct ib_send_wr *send_wr,
3870			       const struct ib_send_wr **bad_send_wr)
3871{
3872	const struct ib_send_wr *dummy;
3873
3874	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3875}
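/*
 * Editorial usage sketch (not from the original header): posting a single
 * signalled SEND of an already DMA-mapped buffer, assuming "dma_addr", "len"
 * and "lkey" were set up via the ib_dma_* helpers later in this file:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = (uintptr_t)my_ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *
 *	err = ib_post_send(qp, &wr, NULL);
 */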
3876
3877/**
3878 * ib_post_recv - Posts a list of work requests to the receive queue of
3879 *   the specified QP.
3880 * @qp: The QP to post the work request on.
3881 * @recv_wr: A list of work requests to post on the receive queue.
3882 * @bad_recv_wr: On an immediate failure, this parameter will reference
3883 *   the work request that failed to be posted on the QP.
3884 */
3885static inline int ib_post_recv(struct ib_qp *qp,
3886			       const struct ib_recv_wr *recv_wr,
3887			       const struct ib_recv_wr **bad_recv_wr)
3888{
3889	const struct ib_recv_wr *dummy;
3890
3891	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3892}
3893
3894struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3895			    int comp_vector, enum ib_poll_context poll_ctx,
3896			    const char *caller);
3897static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3898					int nr_cqe, int comp_vector,
3899					enum ib_poll_context poll_ctx)
3900{
3901	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3902			     KBUILD_MODNAME);
3903}
3904
3905struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3906				int nr_cqe, enum ib_poll_context poll_ctx,
3907				const char *caller);
3908
3909/**
3910 * ib_alloc_cq_any: Allocate kernel CQ
3911 * @dev: The IB device
3912 * @private: Private data attached to the CQE
3913 * @nr_cqe: Number of CQEs in the CQ
3914 * @poll_ctx: Context used for polling the CQ
3915 */
3916static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3917					    void *private, int nr_cqe,
3918					    enum ib_poll_context poll_ctx)
3919{
3920	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3921				 KBUILD_MODNAME);
3922}
3923
3924void ib_free_cq(struct ib_cq *cq);
3925int ib_process_cq_direct(struct ib_cq *cq, int budget);
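/*
 * Editorial usage sketch (not from the original header): allocating a CQ on
 * any completion vector and releasing it again, assuming "my_ctx" is the
 * caller's private cookie:
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq_any(ibdev, my_ctx, 128, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */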
3926
3927/**
3928 * ib_create_cq - Creates a CQ on the specified device.
3929 * @device: The device on which to create the CQ.
3930 * @comp_handler: A user-specified callback that is invoked when a
3931 *   completion event occurs on the CQ.
3932 * @event_handler: A user-specified callback that is invoked when an
3933 *   asynchronous event not associated with a completion occurs on the CQ.
3934 * @cq_context: Context associated with the CQ returned to the user via
3935 *   the associated completion and event handlers.
3936 * @cq_attr: The attributes the CQ should be created upon.
3937 *
3938 * Users can examine the cq structure to determine the actual CQ size.
3939 */
3940struct ib_cq *__ib_create_cq(struct ib_device *device,
3941			     ib_comp_handler comp_handler,
3942			     void (*event_handler)(struct ib_event *, void *),
3943			     void *cq_context,
3944			     const struct ib_cq_init_attr *cq_attr,
3945			     const char *caller);
3946#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3947	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3948
3949/**
3950 * ib_resize_cq - Modifies the capacity of the CQ.
3951 * @cq: The CQ to resize.
3952 * @cqe: The minimum size of the CQ.
3953 *
3954 * Users can examine the cq structure to determine the actual CQ size.
3955 */
3956int ib_resize_cq(struct ib_cq *cq, int cqe);
3957
3958/**
3959 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3960 * @cq: The CQ to modify.
3961 * @cq_count: number of CQEs that will trigger an event
3962 * @cq_period: max period of time in usec before triggering an event
3963 *
3964 */
3965int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3966
3967/**
3968 * ib_destroy_cq_user - Destroys the specified CQ.
3969 * @cq: The CQ to destroy.
3970 * @udata: Valid user data or NULL for kernel objects
3971 */
3972int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3973
3974/**
3975 * ib_destroy_cq - Destroys the specified kernel CQ.
3976 * @cq: The CQ to destroy.
3977 *
3978 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3979 */
3980static inline void ib_destroy_cq(struct ib_cq *cq)
3981{
3982	int ret = ib_destroy_cq_user(cq, NULL);
3983
3984	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3985}
3986
3987/**
3988 * ib_poll_cq - poll a CQ for completion(s)
3989 * @cq: the CQ being polled
3990 * @num_entries: maximum number of completions to return
3991 * @wc: array of at least @num_entries &struct ib_wc where completions
3992 *   will be returned
3993 *
3994 * Poll a CQ for (possibly multiple) completions.  If the return value
3995 * is < 0, an error occurred.  If the return value is >= 0, it is the
3996 * number of completions returned.  If the return value is
3997 * non-negative and < num_entries, then the CQ was emptied.
3998 */
3999static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
4000			     struct ib_wc *wc)
4001{
4002	return cq->device->ops.poll_cq(cq, num_entries, wc);
4003}
4004
4005/**
4006 * ib_req_notify_cq - Request completion notification on a CQ.
4007 * @cq: The CQ to generate an event for.
4008 * @flags:
4009 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
4010 *   to request an event on the next solicited event or next work
4011 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
4012 *   may also be |ed in to request a hint about missed events, as
4013 *   described below.
4014 *
4015 * Return Value:
4016 *    < 0 means an error occurred while requesting notification
4017 *   == 0 means notification was requested successfully, and if
4018 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
4019 *        were missed and it is safe to wait for another event.  In
4020 *        this case it is guaranteed that any work completions added
4021 *        to the CQ since the last CQ poll will trigger a completion
4022 *        notification event.
4023 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
4024 *        in.  It means that the consumer must poll the CQ again to
4025 *        make sure it is empty to avoid missing an event because of a
4026 *        race between requesting notification and an entry being
4027 *        added to the CQ.  This return value means it is possible
4028 *        (but not guaranteed) that a work completion has been added
4029 *        to the CQ since the last poll without triggering a
4030 *        completion notification event.
4031 */
4032static inline int ib_req_notify_cq(struct ib_cq *cq,
4033				   enum ib_cq_notify_flags flags)
4034{
4035	return cq->device->ops.req_notify_cq(cq, flags);
4036}
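/*
 * Editorial usage sketch (not from the original header): the classic
 * poll/re-arm loop built from ib_poll_cq() and ib_req_notify_cq() as
 * described above; "handle_completion()" is a hypothetical consumer helper:
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */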
4037
4038struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4039			     int comp_vector_hint,
4040			     enum ib_poll_context poll_ctx);
4041
4042void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4043
4044/*
4045 * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to
4046 * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
4047 * address into the dma address.
4048 */
4049static inline bool ib_uses_virt_dma(struct ib_device *dev)
4050{
4051	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
4052}
4053
4054/*
4055 * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
4056 */
4057static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
4058{
4059	if (ib_uses_virt_dma(dev))
4060		return false;
4061
4062	return dma_pci_p2pdma_supported(dev->dma_device);
4063}
4064
4065/**
4066 * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
4067 * @dma_addr: The DMA address
4068 *
4069 * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
4070 * going through the dma_addr marshalling.
4071 */
4072static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
4073{
4074	/* virt_dma mode maps kernel virtual addresses directly into the dma addr */
4075	return (void *)(uintptr_t)dma_addr;
4076}
4077
4078/**
4079 * ib_virt_dma_to_page - Convert a dma_addr to a struct page
4080 * @dma_addr: The DMA address
4081 *
4082 * Used by ib_uses_virt_dma() devices to get back to the struct page after going
4083 * through the dma_addr marshalling.
4084 */
4085static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
4086{
4087	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
4088}
4089
4090/**
4091 * ib_dma_mapping_error - check a DMA addr for error
4092 * @dev: The device for which the dma_addr was created
4093 * @dma_addr: The DMA address to check
4094 */
4095static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
4096{
4097	if (ib_uses_virt_dma(dev))
4098		return 0;
4099	return dma_mapping_error(dev->dma_device, dma_addr);
4100}
4101
4102/**
4103 * ib_dma_map_single - Map a kernel virtual address to DMA address
4104 * @dev: The device for which the dma_addr is to be created
4105 * @cpu_addr: The kernel virtual address
4106 * @size: The size of the region in bytes
4107 * @direction: The direction of the DMA
4108 */
4109static inline u64 ib_dma_map_single(struct ib_device *dev,
4110				    void *cpu_addr, size_t size,
4111				    enum dma_data_direction direction)
4112{
4113	if (ib_uses_virt_dma(dev))
4114		return (uintptr_t)cpu_addr;
4115	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
4116}
4117
4118/**
4119 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4120 * @dev: The device for which the DMA address was created
4121 * @addr: The DMA address
4122 * @size: The size of the region in bytes
4123 * @direction: The direction of the DMA
4124 */
4125static inline void ib_dma_unmap_single(struct ib_device *dev,
4126				       u64 addr, size_t size,
4127				       enum dma_data_direction direction)
4128{
4129	if (!ib_uses_virt_dma(dev))
4130		dma_unmap_single(dev->dma_device, addr, size, direction);
4131}
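/*
 * Editorial usage sketch (not from the original header): mapping a kernel
 * buffer for a send and checking the result with ib_dma_mapping_error():
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(ibdev, dma_addr))
 *		return -ENOMEM;
 *	... post work requests referencing dma_addr ...
 *	ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);
 */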
4132
4133/**
4134 * ib_dma_map_page - Map a physical page to DMA address
4135 * @dev: The device for which the dma_addr is to be created
4136 * @page: The page to be mapped
4137 * @offset: The offset within the page
4138 * @size: The size of the region in bytes
4139 * @direction: The direction of the DMA
4140 */
4141static inline u64 ib_dma_map_page(struct ib_device *dev,
4142				  struct page *page,
4143				  unsigned long offset,
4144				  size_t size,
4145					 enum dma_data_direction direction)
4146{
4147	if (ib_uses_virt_dma(dev))
4148		return (uintptr_t)(page_address(page) + offset);
4149	return dma_map_page(dev->dma_device, page, offset, size, direction);
4150}
4151
4152/**
4153 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4154 * @dev: The device for which the DMA address was created
4155 * @addr: The DMA address
4156 * @size: The size of the region in bytes
4157 * @direction: The direction of the DMA
4158 */
4159static inline void ib_dma_unmap_page(struct ib_device *dev,
4160				     u64 addr, size_t size,
4161				     enum dma_data_direction direction)
4162{
4163	if (!ib_uses_virt_dma(dev))
4164		dma_unmap_page(dev->dma_device, addr, size, direction);
4165}
4166
4167int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
4168static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4169				      struct scatterlist *sg, int nents,
4170				      enum dma_data_direction direction,
4171				      unsigned long dma_attrs)
4172{
4173	if (ib_uses_virt_dma(dev))
4174		return ib_dma_virt_map_sg(dev, sg, nents);
4175	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4176				dma_attrs);
4177}
4178
4179static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4180					 struct scatterlist *sg, int nents,
4181					 enum dma_data_direction direction,
4182					 unsigned long dma_attrs)
4183{
4184	if (!ib_uses_virt_dma(dev))
4185		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
4186				   dma_attrs);
4187}
4188
4189/**
4190 * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
4191 * @dev: The device for which the DMA addresses are to be created
4192 * @sgt: The sg_table object describing the buffer
4193 * @direction: The direction of the DMA
4194 * @dma_attrs: Optional DMA attributes for the map operation
4195 */
4196static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
4197					   struct sg_table *sgt,
4198					   enum dma_data_direction direction,
4199					   unsigned long dma_attrs)
4200{
4201	int nents;
4202
4203	if (ib_uses_virt_dma(dev)) {
4204		nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
4205		if (!nents)
4206			return -EIO;
4207		sgt->nents = nents;
4208		return 0;
4209	}
4210	return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4211}
4212
4213static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
4214					      struct sg_table *sgt,
4215					      enum dma_data_direction direction,
4216					      unsigned long dma_attrs)
4217{
4218	if (!ib_uses_virt_dma(dev))
4219		dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4220}
4221
4222/**
4223 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4224 * @dev: The device for which the DMA addresses are to be created
4225 * @sg: The array of scatter/gather entries
4226 * @nents: The number of scatter/gather entries
4227 * @direction: The direction of the DMA
4228 */
4229static inline int ib_dma_map_sg(struct ib_device *dev,
4230				struct scatterlist *sg, int nents,
4231				enum dma_data_direction direction)
4232{
4233	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
4234}
4235
4236/**
4237 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4238 * @dev: The device for which the DMA addresses were created
4239 * @sg: The array of scatter/gather entries
4240 * @nents: The number of scatter/gather entries
4241 * @direction: The direction of the DMA
4242 */
4243static inline void ib_dma_unmap_sg(struct ib_device *dev,
4244				   struct scatterlist *sg, int nents,
4245				   enum dma_data_direction direction)
4246{
4247	ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4248}
4249
4250/**
4251 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4252 * @dev: The device to query
4253 *
4254 * The returned value represents a size in bytes.
4255 */
4256static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4257{
4258	if (ib_uses_virt_dma(dev))
4259		return UINT_MAX;
4260	return dma_get_max_seg_size(dev->dma_device);
4261}
4262
4263/**
4264 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4265 * @dev: The device for which the DMA address was created
4266 * @addr: The DMA address
4267 * @size: The size of the region in bytes
4268 * @dir: The direction of the DMA
4269 */
4270static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4271					      u64 addr,
4272					      size_t size,
4273					      enum dma_data_direction dir)
4274{
4275	if (!ib_uses_virt_dma(dev))
4276		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4277}
4278
4279/**
4280 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4281 * @dev: The device for which the DMA address was created
4282 * @addr: The DMA address
4283 * @size: The size of the region in bytes
4284 * @dir: The direction of the DMA
4285 */
4286static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4287						 u64 addr,
4288						 size_t size,
4289						 enum dma_data_direction dir)
4290{
4291	if (!ib_uses_virt_dma(dev))
4292		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4293}
4294
4295/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4296 * space. This function should be called when 'current' is the owning MM.
4297 */
4298struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4299			     u64 virt_addr, int mr_access_flags);
4300
4301/* ib_advise_mr -  give an advice about an address range in a memory region */
4302int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4303		 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4304/**
4305 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4306 *   HCA translation table.
4307 * @mr: The memory region to deregister.
4308 * @udata: Valid user data or NULL for kernel object
4309 *
4310 * This function can fail, if the memory region has memory windows bound to it.
4311 */
4312int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4313
4314/**
4315 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4316 *   HCA translation table.
4317 * @mr: The memory region to deregister.
4318 *
4319 * This function can fail, if the memory region has memory windows bound to it.
4320 *
4321 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4322 */
4323static inline int ib_dereg_mr(struct ib_mr *mr)
4324{
4325	return ib_dereg_mr_user(mr, NULL);
4326}
4327
4328struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4329			  u32 max_num_sg);
4330
4331struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4332				    u32 max_num_data_sg,
4333				    u32 max_num_meta_sg);
4334
4335/**
4336 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4337 *   R_Key and L_Key.
4338 * @mr - struct ib_mr pointer to be updated.
4339 * @newkey - new key to be used.
4340 */
4341static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4342{
4343	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4344	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4345}
4346
4347/**
4348 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4349 * for calculating a new rkey for type 2 memory windows.
4350 * @rkey - the rkey to increment.
4351 */
4352static inline u32 ib_inc_rkey(u32 rkey)
4353{
4354	const u32 mask = 0x000000ff;
4355	return ((rkey + 1) & mask) | (rkey & ~mask);
4356}
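/*
 * Editorial usage sketch (not from the original header): before reusing a
 * fast-registration MR, ULPs commonly bump the key byte so that stale remote
 * references cannot match the new registration:
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */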
4357
4358/**
4359 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4360 * @qp: QP to attach to the multicast group.  The QP must be type
4361 *   IB_QPT_UD.
4362 * @gid: Multicast group GID.
4363 * @lid: Multicast group LID in host byte order.
4364 *
4365 * In order to send and receive multicast packets, subnet
4366 * administration must have created the multicast group and configured
4367 * the fabric appropriately.  The port associated with the specified
4368 * QP must also be a member of the multicast group.
4369 */
4370int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4371
4372/**
4373 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4374 * @qp: QP to detach from the multicast group.
4375 * @gid: Multicast group GID.
4376 * @lid: Multicast group LID in host byte order.
4377 */
4378int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4379
4380struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4381				   struct inode *inode, struct ib_udata *udata);
4382int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4383
4384static inline int ib_check_mr_access(struct ib_device *ib_dev,
4385				     unsigned int flags)
4386{
4387	u64 device_cap = ib_dev->attrs.device_cap_flags;
4388
4389	/*
4390	 * Local write permission is required if remote write or
4391	 * remote atomic permission is also requested.
4392	 */
4393	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4394	    !(flags & IB_ACCESS_LOCAL_WRITE))
4395		return -EINVAL;
4396
4397	if (flags & ~IB_ACCESS_SUPPORTED)
4398		return -EINVAL;
4399
4400	if (flags & IB_ACCESS_ON_DEMAND &&
4401	    !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
4402		return -EOPNOTSUPP;
4403
4404	if ((flags & IB_ACCESS_FLUSH_GLOBAL &&
4405	    !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) ||
4406	    (flags & IB_ACCESS_FLUSH_PERSISTENT &&
4407	    !(device_cap & IB_DEVICE_FLUSH_PERSISTENT)))
4408		return -EOPNOTSUPP;
4409
4410	return 0;
4411}
4412
4413static inline bool ib_access_writable(int access_flags)
4414{
4415	/*
4416	 * We have writable memory backing the MR if any of the following
4417	 * access flags are set.  "Local write" and "remote write" obviously
4418	 * require write access.  "Remote atomic" can do things like fetch and
4419	 * add, which will modify memory, and "MW bind" can change permissions
4420	 * by binding a window.
4421	 */
4422	return access_flags &
4423		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4424		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4425}
4426
4427/**
4428 * ib_check_mr_status - lightweight check of MR status.
4429 *     This routine may provide status checks on a selected
4430 *     ib_mr; the first use is for signature status checks.
4431 *
4432 * @mr: A memory region.
4433 * @check_mask: Bitmask of which checks to perform from
4434 *     ib_mr_status_check enumeration.
4435 * @mr_status: The container of relevant status checks.
4436 *     Failed checks are indicated in the status bitmask
4437 *     and the relevant info is placed in the error item.
4438 */
4439int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4440		       struct ib_mr_status *mr_status);
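
/*
 * Illustrative example (sketch): after I/O on a signature enabled MR
 * completes, a ULP can ask whether the HCA flagged an integrity error:
 *
 *	struct ib_mr_status mr_status;
 *
 *	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		... inspect mr_status.sig_err ...
 *
 * "sig_mr" is assumed to be an MR allocated with ib_alloc_mr_integrity().
 */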
4441
4442/**
4443 * ib_device_try_get: Hold a registration lock
4444 * @dev: The device to lock
4445 *
4446 * A device under an active registration lock cannot become unregistered. It
4447 * is only possible to obtain a registration lock on a device that is fully
4448 * registered, otherwise this function returns false.
4449 *
4450 * The registration lock is only necessary for actions which require the
4451 * device to still be registered. Uses that only require the device pointer to
4452 * be valid should use get_device(&ibdev->dev) to hold the memory.
4453 *
4454 */
4455static inline bool ib_device_try_get(struct ib_device *dev)
4456{
4457	return refcount_inc_not_zero(&dev->refcount);
4458}
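
/*
 * Illustrative example (sketch): callers that must act on a device which is
 * guaranteed to still be registered pair this with ib_device_put(),
 * declared below:
 *
 *	if (!ib_device_try_get(ibdev))
 *		return -ENODEV;
 *	... use ibdev knowing it cannot become unregistered ...
 *	ib_device_put(ibdev);
 */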
4459
4460void ib_device_put(struct ib_device *device);
4461struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4462					  enum rdma_driver_id driver_id);
4463struct ib_device *ib_device_get_by_name(const char *name,
4464					enum rdma_driver_id driver_id);
4465struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4466					    u16 pkey, const union ib_gid *gid,
4467					    const struct sockaddr *addr);
4468int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4469			 unsigned int port);
4470struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
4471					u32 port);
4472struct ib_wq *ib_create_wq(struct ib_pd *pd,
4473			   struct ib_wq_init_attr *init_attr);
4474int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4475
4476int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4477		 unsigned int *sg_offset, unsigned int page_size);
4478int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4479		    int data_sg_nents, unsigned int *data_sg_offset,
4480		    struct scatterlist *meta_sg, int meta_sg_nents,
4481		    unsigned int *meta_sg_offset, unsigned int page_size);
4482
4483static inline int
4484ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4485		  unsigned int *sg_offset, unsigned int page_size)
4486{
4487	int n;
4488
4489	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4490	mr->iova = 0;
4491
4492	return n;
4493}
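
/*
 * Illustrative example (sketch of the usual fast registration flow, error
 * handling abbreviated): map a DMA-mapped scatterlist into the MR, then
 * register it with an IB_WR_REG_MR work request; the QP/posting context is
 * the caller's:
 *
 *	struct ib_reg_wr reg_wr = {};
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (n < sg_nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	... post reg_wr.wr via ib_post_send() and use mr->rkey remotely ...
 */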
4494
4495int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4496		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4497
4498void ib_drain_rq(struct ib_qp *qp);
4499void ib_drain_sq(struct ib_qp *qp);
4500void ib_drain_qp(struct ib_qp *qp);
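
/*
 * Illustrative example (sketch): ib_drain_qp() moves the QP to the error
 * state and blocks until all posted work requests have completed, so it is
 * typically called right before tearing the QP down:
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */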
4501
4502int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4503		     u8 *width);
4504
4505static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4506{
4507	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4508		return attr->roce.dmac;
4509	return NULL;
4510}
4511
4512static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4513{
4514	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4515		attr->ib.dlid = (u16)dlid;
4516	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4517		attr->opa.dlid = dlid;
4518}
4519
4520static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4521{
4522	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4523		return attr->ib.dlid;
4524	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4525		return attr->opa.dlid;
4526	return 0;
4527}
4528
4529static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4530{
4531	attr->sl = sl;
4532}
4533
4534static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4535{
4536	return attr->sl;
4537}
4538
4539static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4540					 u8 src_path_bits)
4541{
4542	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4543		attr->ib.src_path_bits = src_path_bits;
4544	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4545		attr->opa.src_path_bits = src_path_bits;
4546}
4547
4548static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4549{
4550	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4551		return attr->ib.src_path_bits;
4552	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4553		return attr->opa.src_path_bits;
4554	return 0;
4555}
4556
4557static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4558					bool make_grd)
4559{
4560	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4561		attr->opa.make_grd = make_grd;
4562}
4563
4564static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4565{
4566	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4567		return attr->opa.make_grd;
4568	return false;
4569}
4570
4571static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
4572{
4573	attr->port_num = port_num;
4574}
4575
4576static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4577{
4578	return attr->port_num;
4579}
4580
4581static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4582					   u8 static_rate)
4583{
4584	attr->static_rate = static_rate;
4585}
4586
4587static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4588{
4589	return attr->static_rate;
4590}
4591
4592static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4593					enum ib_ah_flags flag)
4594{
4595	attr->ah_flags = flag;
4596}
4597
4598static inline enum ib_ah_flags
4599		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4600{
4601	return attr->ah_flags;
4602}
4603
4604static inline const struct ib_global_route
4605		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4606{
4607	return &attr->grh;
4608}
4609
4610/* To retrieve and modify the GRH */
4611static inline struct ib_global_route
4612		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4613{
4614	return &attr->grh;
4615}
4616
4617static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4618{
4619	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4620
4621	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4622}
4623
4624static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4625					     __be64 prefix)
4626{
4627	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4628
4629	grh->dgid.global.subnet_prefix = prefix;
4630}
4631
4632static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4633					    __be64 if_id)
4634{
4635	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4636
4637	grh->dgid.global.interface_id = if_id;
4638}
4639
4640static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4641				   union ib_gid *dgid, u32 flow_label,
4642				   u8 sgid_index, u8 hop_limit,
4643				   u8 traffic_class)
4644{
4645	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4646
4647	attr->ah_flags = IB_AH_GRH;
4648	if (dgid)
4649		grh->dgid = *dgid;
4650	grh->flow_label = flow_label;
4651	grh->sgid_index = sgid_index;
4652	grh->hop_limit = hop_limit;
4653	grh->traffic_class = traffic_class;
4654	grh->sgid_attr = NULL;
4655}
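
/*
 * Illustrative example (sketch): filling an address handle attribute for a
 * path with a GRH using the accessors above; dgid, sgid_index and the other
 * values are assumed to come from path/route resolution:
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(ibdev, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_sl(&attr, sl);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_grh(&attr, &dgid, flow_label, sgid_index,
 *			hop_limit, traffic_class);
 */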
4656
4657void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4658void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4659			     u32 flow_label, u8 hop_limit, u8 traffic_class,
4660			     const struct ib_gid_attr *sgid_attr);
4661void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4662		       const struct rdma_ah_attr *src);
4663void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4664			  const struct rdma_ah_attr *new);
4665void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4666
4667/**
4668 * rdma_ah_find_type - Return address handle type.
4669 *
4670 * @dev: Device to be checked
4671 * @port_num: Port number
4672 */
4673static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4674						       u32 port_num)
4675{
4676	if (rdma_protocol_roce(dev, port_num))
4677		return RDMA_AH_ATTR_TYPE_ROCE;
4678	if (rdma_protocol_ib(dev, port_num)) {
4679		if (rdma_cap_opa_ah(dev, port_num))
4680			return RDMA_AH_ATTR_TYPE_OPA;
4681		return RDMA_AH_ATTR_TYPE_IB;
4682	}
4683	if (dev->type == RDMA_DEVICE_TYPE_SMI)
4684		return RDMA_AH_ATTR_TYPE_IB;
4685
4686	return RDMA_AH_ATTR_TYPE_UNDEFINED;
4687}
4688
4689/**
4690 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4691 *     In the current implementation, the only way to get
4692 *     a 32bit LID is from other sources, and only for OPA.
4693 *     For IB, LIDs are always 16 bits, so the value is
4694 *     simply cast down.
4695 *
4696 * @lid: A 32bit LID
4697 */
4698static inline u16 ib_lid_cpu16(u32 lid)
4699{
4700	WARN_ON_ONCE(lid & 0xFFFF0000);
4701	return (u16)lid;
4702}
4703
4704/**
4705 * ib_lid_be16 - Return lid in 16bit BE encoding.
4706 *
4707 * @lid: A 32bit LID
4708 */
4709static inline __be16 ib_lid_be16(u32 lid)
4710{
4711	WARN_ON_ONCE(lid & 0xFFFF0000);
4712	return cpu_to_be16((u16)lid);
4713}
4714
4715/**
4716 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4717 *   vector
4718 * @device:         the rdma device
4719 * @comp_vector:    index of completion vector
4720 *
4721 * Returns NULL if @comp_vector is out of range or the device driver does
4722 * not implement get_vector_affinity, otherwise a corresponding cpu map
4723 * of the completion vector.
4724 */
4725static inline const struct cpumask *
4726ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4727{
4728	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4729	    !device->ops.get_vector_affinity)
4730		return NULL;
4731
4732	return device->ops.get_vector_affinity(device, comp_vector);
4734}
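
/*
 * Illustrative example (sketch): a ULP spreading its queues across
 * completion vectors can fall back to all online CPUs when no affinity
 * hint is available:
 *
 *	const struct cpumask *mask;
 *
 *	mask = ib_get_vector_affinity(ibdev, comp_vector);
 *	if (!mask)
 *		mask = cpu_online_mask;
 */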
4735
4736/**
4737 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4738 * and add their gids, as needed, to the relevant RoCE devices.
4739 *
4740 * @ibdev:          the rdma device
4741 */
4742void rdma_roce_rescan_device(struct ib_device *ibdev);
4743void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port);
4744void roce_del_all_netdev_gids(struct ib_device *ib_dev,
4745			      u32 port, struct net_device *ndev);
4746
4747struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4748
4749int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4750
4751struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4752				     enum rdma_netdev_t type, const char *name,
4753				     unsigned char name_assign_type,
4754				     void (*setup)(struct net_device *));
4755
4756int rdma_init_netdev(struct ib_device *device, u32 port_num,
4757		     enum rdma_netdev_t type, const char *name,
4758		     unsigned char name_assign_type,
4759		     void (*setup)(struct net_device *),
4760		     struct net_device *netdev);
4761
4762/**
4763 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4764 *
4765 * @device:	device pointer for which the ib_device pointer is to be retrieved
4766 *
4767 * rdma_device_to_ibdev() retrieves the ib_device pointer from the device.
4768 *
4769 */
4770static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4771{
4772	struct ib_core_device *coredev =
4773		container_of(device, struct ib_core_device, dev);
4774
4775	return coredev->owner;
4776}
4777
4778/**
4779 * ibdev_to_node - return the NUMA node for a given ib_device
4780 * @dev:	device to get the NUMA node for.
4781 */
4782static inline int ibdev_to_node(struct ib_device *ibdev)
4783{
4784	struct device *parent = ibdev->dev.parent;
4785
4786	if (!parent)
4787		return NUMA_NO_NODE;
4788	return dev_to_node(parent);
4789}
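
/*
 * Illustrative example (sketch): allocating driver-private state on the
 * NUMA node closest to the HCA; "ctx" is a hypothetical structure:
 *
 *	ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, ibdev_to_node(ibdev));
 */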
4790
4791/**
4792 * rdma_device_to_drv_device - Helper macro to reach back to driver's
4793 *			       ib_device holder structure from device pointer.
4794 *
4795 * NOTE: New drivers should not make use of this API; this API is only for
4796 * existing drivers that have exposed sysfs entries using
4797 * ops->device_group.
4798 */
4799#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
4800	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
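
/*
 * Illustrative example (sketch): a legacy sysfs show callback reaching back
 * to its driver-private structure; "struct my_ib_dev" and its "ibdev"
 * member are hypothetical:
 *
 *	static ssize_t hw_rev_show(struct device *device,
 *				   struct device_attribute *attr, char *buf)
 *	{
 *		struct my_ib_dev *dev =
 *			rdma_device_to_drv_device(device, struct my_ib_dev,
 *						  ibdev);
 *
 *		return sysfs_emit(buf, "%d\n", dev->hw_rev);
 *	}
 */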
4801
4802bool rdma_dev_access_netns(const struct ib_device *device,
4803			   const struct net *net);
4804
4805#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4806#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4807#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4808
4809/**
4810 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4811 *                               on the flow_label
4812 *
4813 * This function converts the 20 bit flow_label input to a valid 14 bit
4814 * RoCE v2 UDP src port value. All RoCE v2 drivers should use this same
4815 * convention.
4816 */
4817static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4818{
4819	u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4820
4821	fl_low ^= fl_high >> 14;
4822	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4823}
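
/*
 * Worked example (values chosen for illustration): fl = 0x12345 gives
 * fl_low = 0x2345 and fl_high = 0x10000, so fl_low ^= (0x10000 >> 14)
 * yields 0x2341, and the returned source port is 0x2341 | 0xC000 = 0xE341,
 * which lies inside the valid RoCE v2 UDP port range [0xC000, 0xFFFF].
 */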
4824
4825/**
4826 * rdma_calc_flow_label - generate an RDMA symmetric flow label value based on
4827 *                        local and remote qpn values
4828 *
4829 * This function folds the result of multiplying the two qpns (24 bits
4830 * each) down to a 20 bit value.
4831 *
4832 * It creates a symmetric flow_label value based on the local and remote
4833 * qpn values, which allows both the requester and the responder to
4834 * calculate the same flow_label for a given connection.
4835 *
4836 * This helper function should be used by drivers when the upper layer
4837 * provides a zero flow_label value, to improve the entropy of RDMA
4838 * traffic in the network.
4839 */
4840static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4841{
4842	u64 v = (u64)lqpn * rqpn;
4843
4844	v ^= v >> 20;
4845	v ^= v >> 40;
4846
4847	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4848}
4849
4850/**
4851 * rdma_get_udp_sport - Calculate and set UDP source port based on the flow
4852 *                      label. If flow label is not defined in GRH then
4853 *                      calculate it based on lqpn/rqpn.
4854 *
4855 * @fl:                 flow label from GRH
4856 * @lqpn:               local qp number
4857 * @rqpn:               remote qp number
4858 */
4859static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
4860{
4861	if (!fl)
4862		fl = rdma_calc_flow_label(lqpn, rqpn);
4863
4864	return rdma_flow_label_to_udp_sport(fl);
4865}
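
/*
 * Illustrative example (sketch): a RoCE v2 driver building packet headers
 * for a connected QP would derive the UDP source port as follows; the
 * result is the same on both ends because rdma_calc_flow_label() is
 * symmetric in its arguments:
 *
 *	u16 sport = rdma_get_udp_sport(grh->flow_label, qp->qp_num,
 *				       remote_qpn);
 *
 * "remote_qpn" stands for the destination QP number known to the driver.
 */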
4866
4867const struct ib_port_immutable*
4868ib_port_immutable_read(struct ib_device *dev, unsigned int port);
4869
4870/** ib_add_sub_device - Add a sub IB device on an existing one
4871 *
4872 * @parent: The IB device that needs to add a sub device
4873 * @type: The type of the new sub device
4874 * @name: The name of the new sub device
4875 *
4877 * Return 0 on success, an error code otherwise
4878 */
4879int ib_add_sub_device(struct ib_device *parent,
4880		      enum rdma_nl_dev_type type,
4881		      const char *name);
4882
4884/** ib_del_sub_device_and_put - Delete an IB sub device while holding a 'get'
4885 *
4886 * @sub: The sub device that is going to be deleted
4887 *
4888 * Return 0 on success, an error code otherwise
4889 */
4890int ib_del_sub_device_and_put(struct ib_device *sub);
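
/*
 * Illustrative example (sketch, error handling elided): adding an SMI-type
 * sub device to a parent and later deleting it; the name is arbitrary and
 * "sub" is the sub device looked up while holding a 'get':
 *
 *	ib_add_sub_device(parent, RDMA_DEVICE_TYPE_SMI, "mydev_smi");
 *	...
 *	ib_del_sub_device_and_put(sub);
 */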
4891
4892static inline void ib_mark_name_assigned_by_user(struct ib_device *ibdev)
4893{
4894	ibdev->name_assign_type = RDMA_NAME_ASSIGN_TYPE_USER;
4895}
4896
4897#endif /* IB_VERBS_H */