1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
37 */
38
39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
42#include <linux/types.h>
43#include <linux/device.h>
44#include <linux/mm.h>
45#include <linux/dma-mapping.h>
46#include <linux/kref.h>
47#include <linux/list.h>
48#include <linux/rwsem.h>
49#include <linux/scatterlist.h>
50#include <linux/workqueue.h>
51
52#include <linux/atomic.h>
53#include <asm/uaccess.h>
54
55extern struct workqueue_struct *ib_wq;
56
57union ib_gid {
58 u8 raw[16];
59 struct {
60 __be64 subnet_prefix;
61 __be64 interface_id;
62 } global;
63};
64
65enum rdma_node_type {
66 /* IB values map to NodeInfo:NodeType. */
67 RDMA_NODE_IB_CA = 1,
68 RDMA_NODE_IB_SWITCH,
69 RDMA_NODE_IB_ROUTER,
70 RDMA_NODE_RNIC
71};
72
73enum rdma_transport_type {
74 RDMA_TRANSPORT_IB,
75 RDMA_TRANSPORT_IWARP
76};
77
78enum rdma_transport_type
79rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
80
81enum rdma_link_layer {
82 IB_LINK_LAYER_UNSPECIFIED,
83 IB_LINK_LAYER_INFINIBAND,
84 IB_LINK_LAYER_ETHERNET,
85};
86
87enum ib_device_cap_flags {
88 IB_DEVICE_RESIZE_MAX_WR = 1,
89 IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
90 IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
91 IB_DEVICE_RAW_MULTI = (1<<3),
92 IB_DEVICE_AUTO_PATH_MIG = (1<<4),
93 IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
94 IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
95 IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
96 IB_DEVICE_SHUTDOWN_PORT = (1<<8),
97 IB_DEVICE_INIT_TYPE = (1<<9),
98 IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
99 IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
100 IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
101 IB_DEVICE_SRQ_RESIZE = (1<<13),
102 IB_DEVICE_N_NOTIFY_CQ = (1<<14),
103 IB_DEVICE_LOCAL_DMA_LKEY = (1<<15),
104 IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */
105 IB_DEVICE_MEM_WINDOW = (1<<17),
106 /*
107 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
108 * insertion of UDP and TCP checksum on outgoing UD IPoIB
109 * messages and can verify the validity of checksum for
110 * incoming messages. Setting this flag implies that the
111 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
112 */
113 IB_DEVICE_UD_IP_CSUM = (1<<18),
114 IB_DEVICE_UD_TSO = (1<<19),
115 IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
116 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
117};
118
119enum ib_atomic_cap {
120 IB_ATOMIC_NONE,
121 IB_ATOMIC_HCA,
122 IB_ATOMIC_GLOB
123};
124
125struct ib_device_attr {
126 u64 fw_ver;
127 __be64 sys_image_guid;
128 u64 max_mr_size;
129 u64 page_size_cap;
130 u32 vendor_id;
131 u32 vendor_part_id;
132 u32 hw_ver;
133 int max_qp;
134 int max_qp_wr;
135 int device_cap_flags;
136 int max_sge;
137 int max_sge_rd;
138 int max_cq;
139 int max_cqe;
140 int max_mr;
141 int max_pd;
142 int max_qp_rd_atom;
143 int max_ee_rd_atom;
144 int max_res_rd_atom;
145 int max_qp_init_rd_atom;
146 int max_ee_init_rd_atom;
147 enum ib_atomic_cap atomic_cap;
148 enum ib_atomic_cap masked_atomic_cap;
149 int max_ee;
150 int max_rdd;
151 int max_mw;
152 int max_raw_ipv6_qp;
153 int max_raw_ethy_qp;
154 int max_mcast_grp;
155 int max_mcast_qp_attach;
156 int max_total_mcast_qp_attach;
157 int max_ah;
158 int max_fmr;
159 int max_map_per_fmr;
160 int max_srq;
161 int max_srq_wr;
162 int max_srq_sge;
163 unsigned int max_fast_reg_page_list_len;
164 u16 max_pkeys;
165 u8 local_ca_ack_delay;
166};
167
168enum ib_mtu {
169 IB_MTU_256 = 1,
170 IB_MTU_512 = 2,
171 IB_MTU_1024 = 3,
172 IB_MTU_2048 = 4,
173 IB_MTU_4096 = 5
174};
175
176static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
177{
178 switch (mtu) {
179 case IB_MTU_256: return 256;
180 case IB_MTU_512: return 512;
181 case IB_MTU_1024: return 1024;
182 case IB_MTU_2048: return 2048;
183 case IB_MTU_4096: return 4096;
184 default: return -1;
185 }
186}
187
188enum ib_port_state {
189 IB_PORT_NOP = 0,
190 IB_PORT_DOWN = 1,
191 IB_PORT_INIT = 2,
192 IB_PORT_ARMED = 3,
193 IB_PORT_ACTIVE = 4,
194 IB_PORT_ACTIVE_DEFER = 5
195};
196
197enum ib_port_cap_flags {
198 IB_PORT_SM = 1 << 1,
199 IB_PORT_NOTICE_SUP = 1 << 2,
200 IB_PORT_TRAP_SUP = 1 << 3,
201 IB_PORT_OPT_IPD_SUP = 1 << 4,
202 IB_PORT_AUTO_MIGR_SUP = 1 << 5,
203 IB_PORT_SL_MAP_SUP = 1 << 6,
204 IB_PORT_MKEY_NVRAM = 1 << 7,
205 IB_PORT_PKEY_NVRAM = 1 << 8,
206 IB_PORT_LED_INFO_SUP = 1 << 9,
207 IB_PORT_SM_DISABLED = 1 << 10,
208 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
209 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
210 IB_PORT_CM_SUP = 1 << 16,
211 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
212 IB_PORT_REINIT_SUP = 1 << 18,
213 IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
214 IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
215 IB_PORT_DR_NOTICE_SUP = 1 << 21,
216 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
217 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
218 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
219 IB_PORT_CLIENT_REG_SUP = 1 << 25
220};
221
222enum ib_port_width {
223 IB_WIDTH_1X = 1,
224 IB_WIDTH_4X = 2,
225 IB_WIDTH_8X = 4,
226 IB_WIDTH_12X = 8
227};
228
229static inline int ib_width_enum_to_int(enum ib_port_width width)
230{
231 switch (width) {
232 case IB_WIDTH_1X: return 1;
233 case IB_WIDTH_4X: return 4;
234 case IB_WIDTH_8X: return 8;
235 case IB_WIDTH_12X: return 12;
236 default: return -1;
237 }
238}
239
240struct ib_protocol_stats {
241 /* TBD... */
242};
243
244struct iw_protocol_stats {
245 u64 ipInReceives;
246 u64 ipInHdrErrors;
247 u64 ipInTooBigErrors;
248 u64 ipInNoRoutes;
249 u64 ipInAddrErrors;
250 u64 ipInUnknownProtos;
251 u64 ipInTruncatedPkts;
252 u64 ipInDiscards;
253 u64 ipInDelivers;
254 u64 ipOutForwDatagrams;
255 u64 ipOutRequests;
256 u64 ipOutDiscards;
257 u64 ipOutNoRoutes;
258 u64 ipReasmTimeout;
259 u64 ipReasmReqds;
260 u64 ipReasmOKs;
261 u64 ipReasmFails;
262 u64 ipFragOKs;
263 u64 ipFragFails;
264 u64 ipFragCreates;
265 u64 ipInMcastPkts;
266 u64 ipOutMcastPkts;
267 u64 ipInBcastPkts;
268 u64 ipOutBcastPkts;
269
270 u64 tcpRtoAlgorithm;
271 u64 tcpRtoMin;
272 u64 tcpRtoMax;
273 u64 tcpMaxConn;
274 u64 tcpActiveOpens;
275 u64 tcpPassiveOpens;
276 u64 tcpAttemptFails;
277 u64 tcpEstabResets;
278 u64 tcpCurrEstab;
279 u64 tcpInSegs;
280 u64 tcpOutSegs;
281 u64 tcpRetransSegs;
282 u64 tcpInErrs;
283 u64 tcpOutRsts;
284};
285
286union rdma_protocol_stats {
287 struct ib_protocol_stats ib;
288 struct iw_protocol_stats iw;
289};
290
291struct ib_port_attr {
292 enum ib_port_state state;
293 enum ib_mtu max_mtu;
294 enum ib_mtu active_mtu;
295 int gid_tbl_len;
296 u32 port_cap_flags;
297 u32 max_msg_sz;
298 u32 bad_pkey_cntr;
299 u32 qkey_viol_cntr;
300 u16 pkey_tbl_len;
301 u16 lid;
302 u16 sm_lid;
303 u8 lmc;
304 u8 max_vl_num;
305 u8 sm_sl;
306 u8 subnet_timeout;
307 u8 init_type_reply;
308 u8 active_width;
309 u8 active_speed;
310 u8 phys_state;
311};
312
313enum ib_device_modify_flags {
314 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
315 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
316};
317
318struct ib_device_modify {
319 u64 sys_image_guid;
320 char node_desc[64];
321};
322
323enum ib_port_modify_flags {
324 IB_PORT_SHUTDOWN = 1,
325 IB_PORT_INIT_TYPE = (1<<2),
326 IB_PORT_RESET_QKEY_CNTR = (1<<3)
327};
328
329struct ib_port_modify {
330 u32 set_port_cap_mask;
331 u32 clr_port_cap_mask;
332 u8 init_type;
333};
334
335enum ib_event_type {
336 IB_EVENT_CQ_ERR,
337 IB_EVENT_QP_FATAL,
338 IB_EVENT_QP_REQ_ERR,
339 IB_EVENT_QP_ACCESS_ERR,
340 IB_EVENT_COMM_EST,
341 IB_EVENT_SQ_DRAINED,
342 IB_EVENT_PATH_MIG,
343 IB_EVENT_PATH_MIG_ERR,
344 IB_EVENT_DEVICE_FATAL,
345 IB_EVENT_PORT_ACTIVE,
346 IB_EVENT_PORT_ERR,
347 IB_EVENT_LID_CHANGE,
348 IB_EVENT_PKEY_CHANGE,
349 IB_EVENT_SM_CHANGE,
350 IB_EVENT_SRQ_ERR,
351 IB_EVENT_SRQ_LIMIT_REACHED,
352 IB_EVENT_QP_LAST_WQE_REACHED,
353 IB_EVENT_CLIENT_REREGISTER,
354 IB_EVENT_GID_CHANGE,
355};
356
357struct ib_event {
358 struct ib_device *device;
359 union {
360 struct ib_cq *cq;
361 struct ib_qp *qp;
362 struct ib_srq *srq;
363 u8 port_num;
364 } element;
365 enum ib_event_type event;
366};
367
368struct ib_event_handler {
369 struct ib_device *device;
370 void (*handler)(struct ib_event_handler *, struct ib_event *);
371 struct list_head list;
372};
373
374#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
375 do { \
376 (_ptr)->device = _device; \
377 (_ptr)->handler = _handler; \
378 INIT_LIST_HEAD(&(_ptr)->list); \
379 } while (0)
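/*
 * Illustrative sketch only (not part of this header): a consumer might
 * hook asynchronous device events as follows; "my_device", "my_handler"
 * and the handler body are hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("%s: port %d is active\n",
 *				event->device->name, event->element.port_num);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, my_device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */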
380
381struct ib_global_route {
382 union ib_gid dgid;
383 u32 flow_label;
384 u8 sgid_index;
385 u8 hop_limit;
386 u8 traffic_class;
387};
388
389struct ib_grh {
390 __be32 version_tclass_flow;
391 __be16 paylen;
392 u8 next_hdr;
393 u8 hop_limit;
394 union ib_gid sgid;
395 union ib_gid dgid;
396};
397
398enum {
399 IB_MULTICAST_QPN = 0xffffff
400};
401
402#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
403
404enum ib_ah_flags {
405 IB_AH_GRH = 1
406};
407
408enum ib_rate {
409 IB_RATE_PORT_CURRENT = 0,
410 IB_RATE_2_5_GBPS = 2,
411 IB_RATE_5_GBPS = 5,
412 IB_RATE_10_GBPS = 3,
413 IB_RATE_20_GBPS = 6,
414 IB_RATE_30_GBPS = 4,
415 IB_RATE_40_GBPS = 7,
416 IB_RATE_60_GBPS = 8,
417 IB_RATE_80_GBPS = 9,
418 IB_RATE_120_GBPS = 10
419};
420
421/**
422 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
423 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
424 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
425 * @rate: rate to convert.
426 */
427int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
428
429/**
430 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
431 * enum.
432 * @mult: multiple to convert.
433 */
434enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
435
436struct ib_ah_attr {
437 struct ib_global_route grh;
438 u16 dlid;
439 u8 sl;
440 u8 src_path_bits;
441 u8 static_rate;
442 u8 ah_flags;
443 u8 port_num;
444};
445
446enum ib_wc_status {
447 IB_WC_SUCCESS,
448 IB_WC_LOC_LEN_ERR,
449 IB_WC_LOC_QP_OP_ERR,
450 IB_WC_LOC_EEC_OP_ERR,
451 IB_WC_LOC_PROT_ERR,
452 IB_WC_WR_FLUSH_ERR,
453 IB_WC_MW_BIND_ERR,
454 IB_WC_BAD_RESP_ERR,
455 IB_WC_LOC_ACCESS_ERR,
456 IB_WC_REM_INV_REQ_ERR,
457 IB_WC_REM_ACCESS_ERR,
458 IB_WC_REM_OP_ERR,
459 IB_WC_RETRY_EXC_ERR,
460 IB_WC_RNR_RETRY_EXC_ERR,
461 IB_WC_LOC_RDD_VIOL_ERR,
462 IB_WC_REM_INV_RD_REQ_ERR,
463 IB_WC_REM_ABORT_ERR,
464 IB_WC_INV_EECN_ERR,
465 IB_WC_INV_EEC_STATE_ERR,
466 IB_WC_FATAL_ERR,
467 IB_WC_RESP_TIMEOUT_ERR,
468 IB_WC_GENERAL_ERR
469};
470
471enum ib_wc_opcode {
472 IB_WC_SEND,
473 IB_WC_RDMA_WRITE,
474 IB_WC_RDMA_READ,
475 IB_WC_COMP_SWAP,
476 IB_WC_FETCH_ADD,
477 IB_WC_BIND_MW,
478 IB_WC_LSO,
479 IB_WC_LOCAL_INV,
480 IB_WC_FAST_REG_MR,
481 IB_WC_MASKED_COMP_SWAP,
482 IB_WC_MASKED_FETCH_ADD,
483/*
484 * Set value of IB_WC_RECV so consumers can test if a completion is a
485 * receive by testing (opcode & IB_WC_RECV).
486 */
487 IB_WC_RECV = 1 << 7,
488 IB_WC_RECV_RDMA_WITH_IMM
489};
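/*
 * For example (sketch), a completion dispatcher can use the bit test
 * described above to recognize any receive completion:
 *
 *	static inline bool opcode_is_recv(enum ib_wc_opcode opcode)
 *	{
 *		return opcode & IB_WC_RECV;
 *	}
 */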
490
491enum ib_wc_flags {
492 IB_WC_GRH = 1,
493 IB_WC_WITH_IMM = (1<<1),
494 IB_WC_WITH_INVALIDATE = (1<<2),
495};
496
497struct ib_wc {
498 u64 wr_id;
499 enum ib_wc_status status;
500 enum ib_wc_opcode opcode;
501 u32 vendor_err;
502 u32 byte_len;
503 struct ib_qp *qp;
504 union {
505 __be32 imm_data;
506 u32 invalidate_rkey;
507 } ex;
508 u32 src_qp;
509 int wc_flags;
510 u16 pkey_index;
511 u16 slid;
512 u8 sl;
513 u8 dlid_path_bits;
514 u8 port_num; /* valid only for DR SMPs on switches */
515 int csum_ok;
516};
517
518enum ib_cq_notify_flags {
519 IB_CQ_SOLICITED = 1 << 0,
520 IB_CQ_NEXT_COMP = 1 << 1,
521 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
522 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
523};
524
525enum ib_srq_attr_mask {
526 IB_SRQ_MAX_WR = 1 << 0,
527 IB_SRQ_LIMIT = 1 << 1,
528};
529
530struct ib_srq_attr {
531 u32 max_wr;
532 u32 max_sge;
533 u32 srq_limit;
534};
535
536struct ib_srq_init_attr {
537 void (*event_handler)(struct ib_event *, void *);
538 void *srq_context;
539 struct ib_srq_attr attr;
540};
541
542struct ib_qp_cap {
543 u32 max_send_wr;
544 u32 max_recv_wr;
545 u32 max_send_sge;
546 u32 max_recv_sge;
547 u32 max_inline_data;
548};
549
550enum ib_sig_type {
551 IB_SIGNAL_ALL_WR,
552 IB_SIGNAL_REQ_WR
553};
554
555enum ib_qp_type {
556 /*
557 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
558 * here (and in that order) since the MAD layer uses them as
559 * indices into a 2-entry table.
560 */
561 IB_QPT_SMI,
562 IB_QPT_GSI,
563
564 IB_QPT_RC,
565 IB_QPT_UC,
566 IB_QPT_UD,
567 IB_QPT_RAW_IPV6,
568 IB_QPT_RAW_ETHERTYPE
569};
570
571enum ib_qp_create_flags {
572 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
573 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
574};
575
576struct ib_qp_init_attr {
577 void (*event_handler)(struct ib_event *, void *);
578 void *qp_context;
579 struct ib_cq *send_cq;
580 struct ib_cq *recv_cq;
581 struct ib_srq *srq;
582 struct ib_qp_cap cap;
583 enum ib_sig_type sq_sig_type;
584 enum ib_qp_type qp_type;
585 enum ib_qp_create_flags create_flags;
586 u8 port_num; /* special QP types only */
587};
588
589enum ib_rnr_timeout {
590 IB_RNR_TIMER_655_36 = 0,
591 IB_RNR_TIMER_000_01 = 1,
592 IB_RNR_TIMER_000_02 = 2,
593 IB_RNR_TIMER_000_03 = 3,
594 IB_RNR_TIMER_000_04 = 4,
595 IB_RNR_TIMER_000_06 = 5,
596 IB_RNR_TIMER_000_08 = 6,
597 IB_RNR_TIMER_000_12 = 7,
598 IB_RNR_TIMER_000_16 = 8,
599 IB_RNR_TIMER_000_24 = 9,
600 IB_RNR_TIMER_000_32 = 10,
601 IB_RNR_TIMER_000_48 = 11,
602 IB_RNR_TIMER_000_64 = 12,
603 IB_RNR_TIMER_000_96 = 13,
604 IB_RNR_TIMER_001_28 = 14,
605 IB_RNR_TIMER_001_92 = 15,
606 IB_RNR_TIMER_002_56 = 16,
607 IB_RNR_TIMER_003_84 = 17,
608 IB_RNR_TIMER_005_12 = 18,
609 IB_RNR_TIMER_007_68 = 19,
610 IB_RNR_TIMER_010_24 = 20,
611 IB_RNR_TIMER_015_36 = 21,
612 IB_RNR_TIMER_020_48 = 22,
613 IB_RNR_TIMER_030_72 = 23,
614 IB_RNR_TIMER_040_96 = 24,
615 IB_RNR_TIMER_061_44 = 25,
616 IB_RNR_TIMER_081_92 = 26,
617 IB_RNR_TIMER_122_88 = 27,
618 IB_RNR_TIMER_163_84 = 28,
619 IB_RNR_TIMER_245_76 = 29,
620 IB_RNR_TIMER_327_68 = 30,
621 IB_RNR_TIMER_491_52 = 31
622};
623
624enum ib_qp_attr_mask {
625 IB_QP_STATE = 1,
626 IB_QP_CUR_STATE = (1<<1),
627 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
628 IB_QP_ACCESS_FLAGS = (1<<3),
629 IB_QP_PKEY_INDEX = (1<<4),
630 IB_QP_PORT = (1<<5),
631 IB_QP_QKEY = (1<<6),
632 IB_QP_AV = (1<<7),
633 IB_QP_PATH_MTU = (1<<8),
634 IB_QP_TIMEOUT = (1<<9),
635 IB_QP_RETRY_CNT = (1<<10),
636 IB_QP_RNR_RETRY = (1<<11),
637 IB_QP_RQ_PSN = (1<<12),
638 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
639 IB_QP_ALT_PATH = (1<<14),
640 IB_QP_MIN_RNR_TIMER = (1<<15),
641 IB_QP_SQ_PSN = (1<<16),
642 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
643 IB_QP_PATH_MIG_STATE = (1<<18),
644 IB_QP_CAP = (1<<19),
645 IB_QP_DEST_QPN = (1<<20)
646};
647
648enum ib_qp_state {
649 IB_QPS_RESET,
650 IB_QPS_INIT,
651 IB_QPS_RTR,
652 IB_QPS_RTS,
653 IB_QPS_SQD,
654 IB_QPS_SQE,
655 IB_QPS_ERR
656};
657
658enum ib_mig_state {
659 IB_MIG_MIGRATED,
660 IB_MIG_REARM,
661 IB_MIG_ARMED
662};
663
664struct ib_qp_attr {
665 enum ib_qp_state qp_state;
666 enum ib_qp_state cur_qp_state;
667 enum ib_mtu path_mtu;
668 enum ib_mig_state path_mig_state;
669 u32 qkey;
670 u32 rq_psn;
671 u32 sq_psn;
672 u32 dest_qp_num;
673 int qp_access_flags;
674 struct ib_qp_cap cap;
675 struct ib_ah_attr ah_attr;
676 struct ib_ah_attr alt_ah_attr;
677 u16 pkey_index;
678 u16 alt_pkey_index;
679 u8 en_sqd_async_notify;
680 u8 sq_draining;
681 u8 max_rd_atomic;
682 u8 max_dest_rd_atomic;
683 u8 min_rnr_timer;
684 u8 port_num;
685 u8 timeout;
686 u8 retry_cnt;
687 u8 rnr_retry;
688 u8 alt_port_num;
689 u8 alt_timeout;
690};
691
692enum ib_wr_opcode {
693 IB_WR_RDMA_WRITE,
694 IB_WR_RDMA_WRITE_WITH_IMM,
695 IB_WR_SEND,
696 IB_WR_SEND_WITH_IMM,
697 IB_WR_RDMA_READ,
698 IB_WR_ATOMIC_CMP_AND_SWP,
699 IB_WR_ATOMIC_FETCH_AND_ADD,
700 IB_WR_LSO,
701 IB_WR_SEND_WITH_INV,
702 IB_WR_RDMA_READ_WITH_INV,
703 IB_WR_LOCAL_INV,
704 IB_WR_FAST_REG_MR,
705 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
706 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
707};
708
709enum ib_send_flags {
710 IB_SEND_FENCE = 1,
711 IB_SEND_SIGNALED = (1<<1),
712 IB_SEND_SOLICITED = (1<<2),
713 IB_SEND_INLINE = (1<<3),
714 IB_SEND_IP_CSUM = (1<<4)
715};
716
717struct ib_sge {
718 u64 addr;
719 u32 length;
720 u32 lkey;
721};
722
723struct ib_fast_reg_page_list {
724 struct ib_device *device;
725 u64 *page_list;
726 unsigned int max_page_list_len;
727};
728
729struct ib_send_wr {
730 struct ib_send_wr *next;
731 u64 wr_id;
732 struct ib_sge *sg_list;
733 int num_sge;
734 enum ib_wr_opcode opcode;
735 int send_flags;
736 union {
737 __be32 imm_data;
738 u32 invalidate_rkey;
739 } ex;
740 union {
741 struct {
742 u64 remote_addr;
743 u32 rkey;
744 } rdma;
745 struct {
746 u64 remote_addr;
747 u64 compare_add;
748 u64 swap;
749 u64 compare_add_mask;
750 u64 swap_mask;
751 u32 rkey;
752 } atomic;
753 struct {
754 struct ib_ah *ah;
755 void *header;
756 int hlen;
757 int mss;
758 u32 remote_qpn;
759 u32 remote_qkey;
760 u16 pkey_index; /* valid for GSI only */
761 u8 port_num; /* valid for DR SMPs on switch only */
762 } ud;
763 struct {
764 u64 iova_start;
765 struct ib_fast_reg_page_list *page_list;
766 unsigned int page_shift;
767 unsigned int page_list_len;
768 u32 length;
769 int access_flags;
770 u32 rkey;
771 } fast_reg;
772 } wr;
773};
774
775struct ib_recv_wr {
776 struct ib_recv_wr *next;
777 u64 wr_id;
778 struct ib_sge *sg_list;
779 int num_sge;
780};
781
782enum ib_access_flags {
783 IB_ACCESS_LOCAL_WRITE = 1,
784 IB_ACCESS_REMOTE_WRITE = (1<<1),
785 IB_ACCESS_REMOTE_READ = (1<<2),
786 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
787 IB_ACCESS_MW_BIND = (1<<4)
788};
789
790struct ib_phys_buf {
791 u64 addr;
792 u64 size;
793};
794
795struct ib_mr_attr {
796 struct ib_pd *pd;
797 u64 device_virt_addr;
798 u64 size;
799 int mr_access_flags;
800 u32 lkey;
801 u32 rkey;
802};
803
804enum ib_mr_rereg_flags {
805 IB_MR_REREG_TRANS = 1,
806 IB_MR_REREG_PD = (1<<1),
807 IB_MR_REREG_ACCESS = (1<<2)
808};
809
810struct ib_mw_bind {
811 struct ib_mr *mr;
812 u64 wr_id;
813 u64 addr;
814 u32 length;
815 int send_flags;
816 int mw_access_flags;
817};
818
819struct ib_fmr_attr {
820 int max_pages;
821 int max_maps;
822 u8 page_shift;
823};
824
825struct ib_ucontext {
826 struct ib_device *device;
827 struct list_head pd_list;
828 struct list_head mr_list;
829 struct list_head mw_list;
830 struct list_head cq_list;
831 struct list_head qp_list;
832 struct list_head srq_list;
833 struct list_head ah_list;
834 int closing;
835};
836
837struct ib_uobject {
838 u64 user_handle; /* handle given to us by userspace */
839 struct ib_ucontext *context; /* associated user context */
840 void *object; /* containing object */
841 struct list_head list; /* link to context's list */
842 int id; /* index into kernel idr */
843 struct kref ref;
844 struct rw_semaphore mutex; /* protects .live */
845 int live;
846};
847
848struct ib_udata {
849 void __user *inbuf;
850 void __user *outbuf;
851 size_t inlen;
852 size_t outlen;
853};
854
855struct ib_pd {
856 struct ib_device *device;
857 struct ib_uobject *uobject;
858 atomic_t usecnt; /* count all resources */
859};
860
861struct ib_ah {
862 struct ib_device *device;
863 struct ib_pd *pd;
864 struct ib_uobject *uobject;
865};
866
867typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
868
869struct ib_cq {
870 struct ib_device *device;
871 struct ib_uobject *uobject;
872 ib_comp_handler comp_handler;
873 void (*event_handler)(struct ib_event *, void *);
874 void *cq_context;
875 int cqe;
876 atomic_t usecnt; /* count number of work queues */
877};
878
879struct ib_srq {
880 struct ib_device *device;
881 struct ib_pd *pd;
882 struct ib_uobject *uobject;
883 void (*event_handler)(struct ib_event *, void *);
884 void *srq_context;
885 atomic_t usecnt;
886};
887
888struct ib_qp {
889 struct ib_device *device;
890 struct ib_pd *pd;
891 struct ib_cq *send_cq;
892 struct ib_cq *recv_cq;
893 struct ib_srq *srq;
894 struct ib_uobject *uobject;
895 void (*event_handler)(struct ib_event *, void *);
896 void *qp_context;
897 u32 qp_num;
898 enum ib_qp_type qp_type;
899};
900
901struct ib_mr {
902 struct ib_device *device;
903 struct ib_pd *pd;
904 struct ib_uobject *uobject;
905 u32 lkey;
906 u32 rkey;
907 atomic_t usecnt; /* count number of MWs */
908};
909
910struct ib_mw {
911 struct ib_device *device;
912 struct ib_pd *pd;
913 struct ib_uobject *uobject;
914 u32 rkey;
915};
916
917struct ib_fmr {
918 struct ib_device *device;
919 struct ib_pd *pd;
920 struct list_head list;
921 u32 lkey;
922 u32 rkey;
923};
924
925struct ib_mad;
926struct ib_grh;
927
928enum ib_process_mad_flags {
929 IB_MAD_IGNORE_MKEY = 1,
930 IB_MAD_IGNORE_BKEY = 2,
931 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
932};
933
934enum ib_mad_result {
935 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
936 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
937 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
938 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
939};
940
941#define IB_DEVICE_NAME_MAX 64
942
943struct ib_cache {
944 rwlock_t lock;
945 struct ib_event_handler event_handler;
946 struct ib_pkey_cache **pkey_cache;
947 struct ib_gid_cache **gid_cache;
948 u8 *lmc_cache;
949};
950
951struct ib_dma_mapping_ops {
952 int (*mapping_error)(struct ib_device *dev,
953 u64 dma_addr);
954 u64 (*map_single)(struct ib_device *dev,
955 void *ptr, size_t size,
956 enum dma_data_direction direction);
957 void (*unmap_single)(struct ib_device *dev,
958 u64 addr, size_t size,
959 enum dma_data_direction direction);
960 u64 (*map_page)(struct ib_device *dev,
961 struct page *page, unsigned long offset,
962 size_t size,
963 enum dma_data_direction direction);
964 void (*unmap_page)(struct ib_device *dev,
965 u64 addr, size_t size,
966 enum dma_data_direction direction);
967 int (*map_sg)(struct ib_device *dev,
968 struct scatterlist *sg, int nents,
969 enum dma_data_direction direction);
970 void (*unmap_sg)(struct ib_device *dev,
971 struct scatterlist *sg, int nents,
972 enum dma_data_direction direction);
973 u64 (*dma_address)(struct ib_device *dev,
974 struct scatterlist *sg);
975 unsigned int (*dma_len)(struct ib_device *dev,
976 struct scatterlist *sg);
977 void (*sync_single_for_cpu)(struct ib_device *dev,
978 u64 dma_handle,
979 size_t size,
980 enum dma_data_direction dir);
981 void (*sync_single_for_device)(struct ib_device *dev,
982 u64 dma_handle,
983 size_t size,
984 enum dma_data_direction dir);
985 void *(*alloc_coherent)(struct ib_device *dev,
986 size_t size,
987 u64 *dma_handle,
988 gfp_t flag);
989 void (*free_coherent)(struct ib_device *dev,
990 size_t size, void *cpu_addr,
991 u64 dma_handle);
992};
993
994struct iw_cm_verbs;
995
996struct ib_device {
997 struct device *dma_device;
998
999 char name[IB_DEVICE_NAME_MAX];
1000
1001 struct list_head event_handler_list;
1002 spinlock_t event_handler_lock;
1003
1004 spinlock_t client_data_lock;
1005 struct list_head core_list;
1006 struct list_head client_data_list;
1007
1008 struct ib_cache cache;
1009 int *pkey_tbl_len;
1010 int *gid_tbl_len;
1011
1012 int num_comp_vectors;
1013
1014 struct iw_cm_verbs *iwcm;
1015
1016 int (*get_protocol_stats)(struct ib_device *device,
1017 union rdma_protocol_stats *stats);
1018 int (*query_device)(struct ib_device *device,
1019 struct ib_device_attr *device_attr);
1020 int (*query_port)(struct ib_device *device,
1021 u8 port_num,
1022 struct ib_port_attr *port_attr);
1023 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
1024 u8 port_num);
1025 int (*query_gid)(struct ib_device *device,
1026 u8 port_num, int index,
1027 union ib_gid *gid);
1028 int (*query_pkey)(struct ib_device *device,
1029 u8 port_num, u16 index, u16 *pkey);
1030 int (*modify_device)(struct ib_device *device,
1031 int device_modify_mask,
1032 struct ib_device_modify *device_modify);
1033 int (*modify_port)(struct ib_device *device,
1034 u8 port_num, int port_modify_mask,
1035 struct ib_port_modify *port_modify);
1036 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
1037 struct ib_udata *udata);
1038 int (*dealloc_ucontext)(struct ib_ucontext *context);
1039 int (*mmap)(struct ib_ucontext *context,
1040 struct vm_area_struct *vma);
1041 struct ib_pd * (*alloc_pd)(struct ib_device *device,
1042 struct ib_ucontext *context,
1043 struct ib_udata *udata);
1044 int (*dealloc_pd)(struct ib_pd *pd);
1045 struct ib_ah * (*create_ah)(struct ib_pd *pd,
1046 struct ib_ah_attr *ah_attr);
1047 int (*modify_ah)(struct ib_ah *ah,
1048 struct ib_ah_attr *ah_attr);
1049 int (*query_ah)(struct ib_ah *ah,
1050 struct ib_ah_attr *ah_attr);
1051 int (*destroy_ah)(struct ib_ah *ah);
1052 struct ib_srq * (*create_srq)(struct ib_pd *pd,
1053 struct ib_srq_init_attr *srq_init_attr,
1054 struct ib_udata *udata);
1055 int (*modify_srq)(struct ib_srq *srq,
1056 struct ib_srq_attr *srq_attr,
1057 enum ib_srq_attr_mask srq_attr_mask,
1058 struct ib_udata *udata);
1059 int (*query_srq)(struct ib_srq *srq,
1060 struct ib_srq_attr *srq_attr);
1061 int (*destroy_srq)(struct ib_srq *srq);
1062 int (*post_srq_recv)(struct ib_srq *srq,
1063 struct ib_recv_wr *recv_wr,
1064 struct ib_recv_wr **bad_recv_wr);
1065 struct ib_qp * (*create_qp)(struct ib_pd *pd,
1066 struct ib_qp_init_attr *qp_init_attr,
1067 struct ib_udata *udata);
1068 int (*modify_qp)(struct ib_qp *qp,
1069 struct ib_qp_attr *qp_attr,
1070 int qp_attr_mask,
1071 struct ib_udata *udata);
1072 int (*query_qp)(struct ib_qp *qp,
1073 struct ib_qp_attr *qp_attr,
1074 int qp_attr_mask,
1075 struct ib_qp_init_attr *qp_init_attr);
1076 int (*destroy_qp)(struct ib_qp *qp);
1077 int (*post_send)(struct ib_qp *qp,
1078 struct ib_send_wr *send_wr,
1079 struct ib_send_wr **bad_send_wr);
1080 int (*post_recv)(struct ib_qp *qp,
1081 struct ib_recv_wr *recv_wr,
1082 struct ib_recv_wr **bad_recv_wr);
1083 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
1084 int comp_vector,
1085 struct ib_ucontext *context,
1086 struct ib_udata *udata);
1087 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1088 u16 cq_period);
1089 int (*destroy_cq)(struct ib_cq *cq);
1090 int (*resize_cq)(struct ib_cq *cq, int cqe,
1091 struct ib_udata *udata);
1092 int (*poll_cq)(struct ib_cq *cq, int num_entries,
1093 struct ib_wc *wc);
1094 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1095 int (*req_notify_cq)(struct ib_cq *cq,
1096 enum ib_cq_notify_flags flags);
1097 int (*req_ncomp_notif)(struct ib_cq *cq,
1098 int wc_cnt);
1099 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
1100 int mr_access_flags);
1101 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
1102 struct ib_phys_buf *phys_buf_array,
1103 int num_phys_buf,
1104 int mr_access_flags,
1105 u64 *iova_start);
1106 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
1107 u64 start, u64 length,
1108 u64 virt_addr,
1109 int mr_access_flags,
1110 struct ib_udata *udata);
1111 int (*query_mr)(struct ib_mr *mr,
1112 struct ib_mr_attr *mr_attr);
1113 int (*dereg_mr)(struct ib_mr *mr);
1114 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
1115 int max_page_list_len);
1116 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1117 int page_list_len);
1118 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1119 int (*rereg_phys_mr)(struct ib_mr *mr,
1120 int mr_rereg_mask,
1121 struct ib_pd *pd,
1122 struct ib_phys_buf *phys_buf_array,
1123 int num_phys_buf,
1124 int mr_access_flags,
1125 u64 *iova_start);
1126 struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
1127 int (*bind_mw)(struct ib_qp *qp,
1128 struct ib_mw *mw,
1129 struct ib_mw_bind *mw_bind);
1130 int (*dealloc_mw)(struct ib_mw *mw);
1131 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1132 int mr_access_flags,
1133 struct ib_fmr_attr *fmr_attr);
1134 int (*map_phys_fmr)(struct ib_fmr *fmr,
1135 u64 *page_list, int list_len,
1136 u64 iova);
1137 int (*unmap_fmr)(struct list_head *fmr_list);
1138 int (*dealloc_fmr)(struct ib_fmr *fmr);
1139 int (*attach_mcast)(struct ib_qp *qp,
1140 union ib_gid *gid,
1141 u16 lid);
1142 int (*detach_mcast)(struct ib_qp *qp,
1143 union ib_gid *gid,
1144 u16 lid);
1145 int (*process_mad)(struct ib_device *device,
1146 int process_mad_flags,
1147 u8 port_num,
1148 struct ib_wc *in_wc,
1149 struct ib_grh *in_grh,
1150 struct ib_mad *in_mad,
1151 struct ib_mad *out_mad);
1152
1153 struct ib_dma_mapping_ops *dma_ops;
1154
1155 struct module *owner;
1156 struct device dev;
1157 struct kobject *ports_parent;
1158 struct list_head port_list;
1159
1160 enum {
1161 IB_DEV_UNINITIALIZED,
1162 IB_DEV_REGISTERED,
1163 IB_DEV_UNREGISTERED
1164 } reg_state;
1165
1166 int uverbs_abi_ver;
1167 u64 uverbs_cmd_mask;
1168
1169 char node_desc[64];
1170 __be64 node_guid;
1171 u32 local_dma_lkey;
1172 u8 node_type;
1173 u8 phys_port_cnt;
1174};
1175
1176struct ib_client {
1177 char *name;
1178 void (*add) (struct ib_device *);
1179 void (*remove)(struct ib_device *);
1180
1181 struct list_head list;
1182};
1183
1184struct ib_device *ib_alloc_device(size_t size);
1185void ib_dealloc_device(struct ib_device *device);
1186
1187int ib_register_device(struct ib_device *device,
1188 int (*port_callback)(struct ib_device *,
1189 u8, struct kobject *));
1190void ib_unregister_device(struct ib_device *device);
1191
1192int ib_register_client (struct ib_client *client);
1193void ib_unregister_client(struct ib_client *client);
1194
1195void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1196void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1197 void *data);
1198
1199static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1200{
1201 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1202}
1203
1204static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1205{
1206 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1207}
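/*
 * Sketch of how a low-level driver's verbs entry point might use these
 * helpers to exchange private data with userspace; "struct my_resp" and
 * its contents are hypothetical:
 *
 *	struct my_resp resp = { .cqn = cqn };
 *
 *	if (udata && ib_copy_to_udata(udata, &resp, sizeof resp)) {
 *		ret = -EFAULT;
 *		goto err_free;
 *	}
 */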
1208
1209/**
1210 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1211 * contains all required attributes and no attributes not allowed for
1212 * the given QP state transition.
1213 * @cur_state: Current QP state
1214 * @next_state: Next QP state
1215 * @type: QP type
1216 * @mask: Mask of supplied QP attributes
1217 *
1218 * This function is a helper function that a low-level driver's
1219 * modify_qp method can use to validate the consumer's input. It
1220 * checks that cur_state and next_state are valid QP states, that a
1221 * transition from cur_state to next_state is allowed by the IB spec,
1222 * and that the attribute mask supplied is allowed for the transition.
1223 */
1224int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1225 enum ib_qp_type type, enum ib_qp_attr_mask mask);
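/*
 * A driver's modify_qp method typically validates its input with this
 * helper before touching hardware, roughly as follows (sketch; "my_qp"
 * is the driver's private QP state):
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state :
 *						   my_qp->state;
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */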
1226
1227int ib_register_event_handler (struct ib_event_handler *event_handler);
1228int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1229void ib_dispatch_event(struct ib_event *event);
1230
1231int ib_query_device(struct ib_device *device,
1232 struct ib_device_attr *device_attr);
1233
1234int ib_query_port(struct ib_device *device,
1235 u8 port_num, struct ib_port_attr *port_attr);
1236
1237enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1238 u8 port_num);
1239
1240int ib_query_gid(struct ib_device *device,
1241 u8 port_num, int index, union ib_gid *gid);
1242
1243int ib_query_pkey(struct ib_device *device,
1244 u8 port_num, u16 index, u16 *pkey);
1245
1246int ib_modify_device(struct ib_device *device,
1247 int device_modify_mask,
1248 struct ib_device_modify *device_modify);
1249
1250int ib_modify_port(struct ib_device *device,
1251 u8 port_num, int port_modify_mask,
1252 struct ib_port_modify *port_modify);
1253
1254int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1255 u8 *port_num, u16 *index);
1256
1257int ib_find_pkey(struct ib_device *device,
1258 u8 port_num, u16 pkey, u16 *index);
1259
1260/**
1261 * ib_alloc_pd - Allocates an unused protection domain.
1262 * @device: The device on which to allocate the protection domain.
1263 *
1264 * A protection domain object provides an association between QPs, shared
1265 * receive queues, address handles, memory regions, and memory windows.
1266 */
1267struct ib_pd *ib_alloc_pd(struct ib_device *device);
1268
1269/**
1270 * ib_dealloc_pd - Deallocates a protection domain.
1271 * @pd: The protection domain to deallocate.
1272 */
1273int ib_dealloc_pd(struct ib_pd *pd);
1274
1275/**
1276 * ib_create_ah - Creates an address handle for the given address vector.
1277 * @pd: The protection domain associated with the address handle.
1278 * @ah_attr: The attributes of the address vector.
1279 *
1280 * The address handle is used to reference a local or global destination
1281 * in all UD QP post sends.
1282 */
1283struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1284
1285/**
1286 * ib_init_ah_from_wc - Initializes address handle attributes from a
1287 * work completion.
1288 * @device: Device on which the received message arrived.
1289 * @port_num: Port on which the received message arrived.
1290 * @wc: Work completion associated with the received message.
1291 * @grh: References the received global route header. This parameter is
1292 * ignored unless the work completion indicates that the GRH is valid.
1293 * @ah_attr: Returned attributes that can be used when creating an address
1294 * handle for replying to the message.
1295 */
1296int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1297 struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1298
1299/**
1300 * ib_create_ah_from_wc - Creates an address handle associated with the
1301 * sender of the specified work completion.
1302 * @pd: The protection domain associated with the address handle.
1303 * @wc: Work completion information associated with a received message.
1304 * @grh: References the received global route header. This parameter is
1305 * ignored unless the work completion indicates that the GRH is valid.
1306 * @port_num: The outbound port number to associate with the address.
1307 *
1308 * The address handle is used to reference a local or global destination
1309 * in all UD QP post sends.
1310 */
1311struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1312 struct ib_grh *grh, u8 port_num);
1313
1314/**
1315 * ib_modify_ah - Modifies the address vector associated with an address
1316 * handle.
1317 * @ah: The address handle to modify.
1318 * @ah_attr: The new address vector attributes to associate with the
1319 * address handle.
1320 */
1321int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1322
1323/**
1324 * ib_query_ah - Queries the address vector associated with an address
1325 * handle.
1326 * @ah: The address handle to query.
1327 * @ah_attr: The address vector attributes associated with the address
1328 * handle.
1329 */
1330int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1331
1332/**
1333 * ib_destroy_ah - Destroys an address handle.
1334 * @ah: The address handle to destroy.
1335 */
1336int ib_destroy_ah(struct ib_ah *ah);
1337
1338/**
1339 * ib_create_srq - Creates a SRQ associated with the specified protection
1340 * domain.
1341 * @pd: The protection domain associated with the SRQ.
1342 * @srq_init_attr: A list of initial attributes required to create the
1343 * SRQ. If SRQ creation succeeds, then the attributes are updated to
1344 * the actual capabilities of the created SRQ.
1345 *
1346 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1347 * requested size of the SRQ, and set to the actual values allocated
1348 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
1349 * will always be at least as large as the requested values.
1350 */
1351struct ib_srq *ib_create_srq(struct ib_pd *pd,
1352 struct ib_srq_init_attr *srq_init_attr);
1353
1354/**
1355 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1356 * @srq: The SRQ to modify.
1357 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
1358 * the current values of selected SRQ attributes are returned.
1359 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1360 * are being modified.
1361 *
1362 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1363 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1364 * the number of receives queued drops below the limit.
1365 */
1366int ib_modify_srq(struct ib_srq *srq,
1367 struct ib_srq_attr *srq_attr,
1368 enum ib_srq_attr_mask srq_attr_mask);
1369
1370/**
1371 * ib_query_srq - Returns the attribute list and current values for the
1372 * specified SRQ.
1373 * @srq: The SRQ to query.
1374 * @srq_attr: The attributes of the specified SRQ.
1375 */
1376int ib_query_srq(struct ib_srq *srq,
1377 struct ib_srq_attr *srq_attr);
1378
1379/**
1380 * ib_destroy_srq - Destroys the specified SRQ.
1381 * @srq: The SRQ to destroy.
1382 */
1383int ib_destroy_srq(struct ib_srq *srq);
1384
1385/**
1386 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1387 * @srq: The SRQ to post the work request on.
1388 * @recv_wr: A list of work requests to post on the receive queue.
1389 * @bad_recv_wr: On an immediate failure, this parameter will reference
1390 * the work request that failed to be posted on the QP.
1391 */
1392static inline int ib_post_srq_recv(struct ib_srq *srq,
1393 struct ib_recv_wr *recv_wr,
1394 struct ib_recv_wr **bad_recv_wr)
1395{
1396 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1397}
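/*
 * Minimal usage sketch (memory registration and error unwinding omitted;
 * "buf_dma", "buf_size", "mr" and "ctx" are assumed to be set up by the
 * caller):
 *
 *	struct ib_sge sge = {
 *		.addr   = buf_dma,
 *		.length = buf_size,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = (u64) (unsigned long) ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */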
1398
1399/**
1400 * ib_create_qp - Creates a QP associated with the specified protection
1401 * domain.
1402 * @pd: The protection domain associated with the QP.
1403 * @qp_init_attr: A list of initial attributes required to create the
1404 * QP. If QP creation succeeds, then the attributes are updated to
1405 * the actual capabilities of the created QP.
1406 */
1407struct ib_qp *ib_create_qp(struct ib_pd *pd,
1408 struct ib_qp_init_attr *qp_init_attr);
1409
1410/**
1411 * ib_modify_qp - Modifies the attributes for the specified QP and then
1412 * transitions the QP to the given state.
1413 * @qp: The QP to modify.
1414 * @qp_attr: On input, specifies the QP attributes to modify. On output,
1415 * the current values of selected QP attributes are returned.
1416 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1417 * are being modified.
1418 */
1419int ib_modify_qp(struct ib_qp *qp,
1420 struct ib_qp_attr *qp_attr,
1421 int qp_attr_mask);
1422
1423/**
1424 * ib_query_qp - Returns the attribute list and current values for the
1425 * specified QP.
1426 * @qp: The QP to query.
1427 * @qp_attr: The attributes of the specified QP.
1428 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1429 * @qp_init_attr: Additional attributes of the selected QP.
1430 *
1431 * The qp_attr_mask may be used to limit the query to gathering only the
1432 * selected attributes.
1433 */
1434int ib_query_qp(struct ib_qp *qp,
1435 struct ib_qp_attr *qp_attr,
1436 int qp_attr_mask,
1437 struct ib_qp_init_attr *qp_init_attr);
1438
1439/**
1440 * ib_destroy_qp - Destroys the specified QP.
1441 * @qp: The QP to destroy.
1442 */
1443int ib_destroy_qp(struct ib_qp *qp);
1444
1445/**
1446 * ib_post_send - Posts a list of work requests to the send queue of
1447 * the specified QP.
1448 * @qp: The QP to post the work request on.
1449 * @send_wr: A list of work requests to post on the send queue.
1450 * @bad_send_wr: On an immediate failure, this parameter will reference
1451 * the work request that failed to be posted on the QP.
1452 *
1453 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
1454 * error is returned, the QP state shall not be affected,
1455 * ib_post_send() will return an immediate error after queueing any
1456 * earlier work requests in the list.
1457 */
1458static inline int ib_post_send(struct ib_qp *qp,
1459 struct ib_send_wr *send_wr,
1460 struct ib_send_wr **bad_send_wr)
1461{
1462 return qp->device->post_send(qp, send_wr, bad_send_wr);
1463}
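/*
 * Sketch of posting a single signaled SEND; the DMA address, length, lkey
 * and completion context below are assumed to exist in the caller:
 *
 *	struct ib_sge sge = {
 *		.addr   = buf_dma,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = (u64) (unsigned long) ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */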
1464
1465/**
1466 * ib_post_recv - Posts a list of work requests to the receive queue of
1467 * the specified QP.
1468 * @qp: The QP to post the work request on.
1469 * @recv_wr: A list of work requests to post on the receive queue.
1470 * @bad_recv_wr: On an immediate failure, this parameter will reference
1471 * the work request that failed to be posted on the QP.
1472 */
1473static inline int ib_post_recv(struct ib_qp *qp,
1474 struct ib_recv_wr *recv_wr,
1475 struct ib_recv_wr **bad_recv_wr)
1476{
1477 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1478}
1479
1480/**
1481 * ib_create_cq - Creates a CQ on the specified device.
1482 * @device: The device on which to create the CQ.
1483 * @comp_handler: A user-specified callback that is invoked when a
1484 * completion event occurs on the CQ.
1485 * @event_handler: A user-specified callback that is invoked when an
1486 * asynchronous event not associated with a completion occurs on the CQ.
1487 * @cq_context: Context associated with the CQ returned to the user via
1488 * the associated completion and event handlers.
1489 * @cqe: The minimum size of the CQ.
1490 * @comp_vector: Completion vector used to signal completion events.
1491 * Must be >= 0 and < context->num_comp_vectors.
1492 *
1493 * Users can examine the cq structure to determine the actual CQ size.
1494 */
1495struct ib_cq *ib_create_cq(struct ib_device *device,
1496 ib_comp_handler comp_handler,
1497 void (*event_handler)(struct ib_event *, void *),
1498 void *cq_context, int cqe, int comp_vector);
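/*
 * For example (sketch; the handler, context and sizes are illustrative),
 * a consumer wanting a 256-entry CQ on completion vector 0 might call:
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */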
1499
1500/**
1501 * ib_resize_cq - Modifies the capacity of the CQ.
1502 * @cq: The CQ to resize.
1503 * @cqe: The minimum size of the CQ.
1504 *
1505 * Users can examine the cq structure to determine the actual CQ size.
1506 */
1507int ib_resize_cq(struct ib_cq *cq, int cqe);
1508
1509/**
1510 * ib_modify_cq - Modifies moderation params of the CQ
1511 * @cq: The CQ to modify.
1512 * @cq_count: number of CQEs that will trigger an event
1513 * @cq_period: max period of time in usec before triggering an event
1514 *
1515 */
1516int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1517
1518/**
1519 * ib_destroy_cq - Destroys the specified CQ.
1520 * @cq: The CQ to destroy.
1521 */
1522int ib_destroy_cq(struct ib_cq *cq);
1523
1524/**
1525 * ib_poll_cq - poll a CQ for completion(s)
1526 * @cq:the CQ being polled
1527 * @num_entries:maximum number of completions to return
1528 * @wc:array of at least @num_entries &struct ib_wc where completions
1529 * will be returned
1530 *
1531 * Poll a CQ for (possibly multiple) completions. If the return value
1532 * is < 0, an error occurred. If the return value is >= 0, it is the
1533 * number of completions returned. If the return value is
1534 * non-negative and < num_entries, then the CQ was emptied.
1535 */
1536static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1537 struct ib_wc *wc)
1538{
1539 return cq->device->poll_cq(cq, num_entries, wc);
1540}
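/*
 * Typical polling loop (sketch), draining up to ARRAY_SIZE(wc) completions
 * per call until the CQ is empty; handle_completion() is caller-supplied:
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; ++i)
 *			handle_completion(&wc[i]);
 */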
1541
1542/**
1543 * ib_peek_cq - Returns the number of unreaped completions currently
1544 * on the specified CQ.
1545 * @cq: The CQ to peek.
1546 * @wc_cnt: A minimum number of unreaped completions to check for.
1547 *
1548 * If the number of unreaped completions is greater than or equal to wc_cnt,
1549 * this function returns wc_cnt, otherwise, it returns the actual number of
1550 * unreaped completions.
1551 */
1552int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1553
1554/**
1555 * ib_req_notify_cq - Request completion notification on a CQ.
1556 * @cq: The CQ to generate an event for.
1557 * @flags:
1558 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1559 * to request an event on the next solicited event or next work
1560 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1561 * may also be |ed in to request a hint about missed events, as
1562 * described below.
1563 *
1564 * Return Value:
1565 * < 0 means an error occurred while requesting notification
1566 * == 0 means notification was requested successfully, and if
1567 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1568 * were missed and it is safe to wait for another event. In
1569 * this case it is guaranteed that any work completions added
1570 * to the CQ since the last CQ poll will trigger a completion
1571 * notification event.
1572 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1573 * in. It means that the consumer must poll the CQ again to
1574 * make sure it is empty to avoid missing an event because of a
1575 * race between requesting notification and an entry being
1576 * added to the CQ. This return value means it is possible
1577 * (but not guaranteed) that a work completion has been added
1578 * to the CQ since the last poll without triggering a
1579 * completion notification event.
1580 */
1581static inline int ib_req_notify_cq(struct ib_cq *cq,
1582 enum ib_cq_notify_flags flags)
1583{
1584 return cq->device->req_notify_cq(cq, flags);
1585}
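/*
 * The race described above is normally closed with a re-arm-then-re-poll
 * loop along these lines (sketch; poll_and_process() stands in for the
 * caller's drain loop built on ib_poll_cq()):
 *
 *	do {
 *		poll_and_process(cq);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */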
1586
1587/**
1588 * ib_req_ncomp_notif - Request completion notification when there are
1589 * at least the specified number of unreaped completions on the CQ.
1590 * @cq: The CQ to generate an event for.
1591 * @wc_cnt: The number of unreaped completions that should be on the
1592 * CQ before an event is generated.
1593 */
1594static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1595{
1596 return cq->device->req_ncomp_notif ?
1597 cq->device->req_ncomp_notif(cq, wc_cnt) :
1598 -ENOSYS;
1599}
1600
1601/**
1602 * ib_get_dma_mr - Returns a memory region for system memory that is
1603 * usable for DMA.
1604 * @pd: The protection domain associated with the memory region.
1605 * @mr_access_flags: Specifies the memory access rights.
1606 *
1607 * Note that the ib_dma_*() functions defined below must be used
1608 * to create/destroy addresses used with the Lkey or Rkey returned
1609 * by ib_get_dma_mr().
1610 */
1611struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1612
1613/**
1614 * ib_dma_mapping_error - check a DMA addr for error
1615 * @dev: The device for which the dma_addr was created
1616 * @dma_addr: The DMA address to check
1617 */
1618static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1619{
1620 if (dev->dma_ops)
1621 return dev->dma_ops->mapping_error(dev, dma_addr);
1622 return dma_mapping_error(dev->dma_device, dma_addr);
1623}
1624
1625/**
1626 * ib_dma_map_single - Map a kernel virtual address to DMA address
1627 * @dev: The device for which the dma_addr is to be created
1628 * @cpu_addr: The kernel virtual address
1629 * @size: The size of the region in bytes
1630 * @direction: The direction of the DMA
1631 */
1632static inline u64 ib_dma_map_single(struct ib_device *dev,
1633 void *cpu_addr, size_t size,
1634 enum dma_data_direction direction)
1635{
1636 if (dev->dma_ops)
1637 return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1638 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1639}
1640
1641/**
1642 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1643 * @dev: The device for which the DMA address was created
1644 * @addr: The DMA address
1645 * @size: The size of the region in bytes
1646 * @direction: The direction of the DMA
1647 */
1648static inline void ib_dma_unmap_single(struct ib_device *dev,
1649 u64 addr, size_t size,
1650 enum dma_data_direction direction)
1651{
1652 if (dev->dma_ops)
1653 dev->dma_ops->unmap_single(dev, addr, size, direction);
1654 else
1655 dma_unmap_single(dev->dma_device, addr, size, direction);
1656}
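/*
 * Sketch of the usual map/check/unmap pattern; "buf" and "len" belong to
 * the caller:
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */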
1657
1658static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
1659 void *cpu_addr, size_t size,
1660 enum dma_data_direction direction,
1661 struct dma_attrs *attrs)
1662{
1663 return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
1664 direction, attrs);
1665}
1666
1667static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1668 u64 addr, size_t size,
1669 enum dma_data_direction direction,
1670 struct dma_attrs *attrs)
1671{
1672 return dma_unmap_single_attrs(dev->dma_device, addr, size,
1673 direction, attrs);
1674}
1675
1676/**
1677 * ib_dma_map_page - Map a physical page to DMA address
1678 * @dev: The device for which the dma_addr is to be created
1679 * @page: The page to be mapped
1680 * @offset: The offset within the page
1681 * @size: The size of the region in bytes
1682 * @direction: The direction of the DMA
1683 */
1684static inline u64 ib_dma_map_page(struct ib_device *dev,
1685 struct page *page,
1686 unsigned long offset,
1687 size_t size,
1688 enum dma_data_direction direction)
1689{
1690 if (dev->dma_ops)
1691 return dev->dma_ops->map_page(dev, page, offset, size, direction);
1692 return dma_map_page(dev->dma_device, page, offset, size, direction);
1693}
1694
1695/**
1696 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1697 * @dev: The device for which the DMA address was created
1698 * @addr: The DMA address
1699 * @size: The size of the region in bytes
1700 * @direction: The direction of the DMA
1701 */
1702static inline void ib_dma_unmap_page(struct ib_device *dev,
1703 u64 addr, size_t size,
1704 enum dma_data_direction direction)
1705{
1706 if (dev->dma_ops)
1707 dev->dma_ops->unmap_page(dev, addr, size, direction);
1708 else
1709 dma_unmap_page(dev->dma_device, addr, size, direction);
1710}
1711
1712/**
1713 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1714 * @dev: The device for which the DMA addresses are to be created
1715 * @sg: The array of scatter/gather entries
1716 * @nents: The number of scatter/gather entries
1717 * @direction: The direction of the DMA
1718 */
1719static inline int ib_dma_map_sg(struct ib_device *dev,
1720 struct scatterlist *sg, int nents,
1721 enum dma_data_direction direction)
1722{
1723 if (dev->dma_ops)
1724 return dev->dma_ops->map_sg(dev, sg, nents, direction);
1725 return dma_map_sg(dev->dma_device, sg, nents, direction);
1726}
1727
1728/**
1729 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1730 * @dev: The device for which the DMA addresses were created
1731 * @sg: The array of scatter/gather entries
1732 * @nents: The number of scatter/gather entries
1733 * @direction: The direction of the DMA
1734 */
1735static inline void ib_dma_unmap_sg(struct ib_device *dev,
1736 struct scatterlist *sg, int nents,
1737 enum dma_data_direction direction)
1738{
1739 if (dev->dma_ops)
1740 dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1741 else
1742 dma_unmap_sg(dev->dma_device, sg, nents, direction);
1743}
1744
1745static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1746 struct scatterlist *sg, int nents,
1747 enum dma_data_direction direction,
1748 struct dma_attrs *attrs)
1749{
1750 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1751}
1752
1753static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1754 struct scatterlist *sg, int nents,
1755 enum dma_data_direction direction,
1756 struct dma_attrs *attrs)
1757{
1758 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1759}
1760/**
1761 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1762 * @dev: The device for which the DMA addresses were created
1763 * @sg: The scatter/gather entry
1764 */
1765static inline u64 ib_sg_dma_address(struct ib_device *dev,
1766 struct scatterlist *sg)
1767{
1768 if (dev->dma_ops)
1769 return dev->dma_ops->dma_address(dev, sg);
1770 return sg_dma_address(sg);
1771}
1772
1773/**
1774 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1775 * @dev: The device for which the DMA addresses were created
1776 * @sg: The scatter/gather entry
1777 */
1778static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1779 struct scatterlist *sg)
1780{
1781 if (dev->dma_ops)
1782 return dev->dma_ops->dma_len(dev, sg);
1783 return sg_dma_len(sg);
1784}
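/*
 * Putting the scatter/gather helpers together (sketch; "sgl", "nents",
 * the destination "sge" array and the registered "mr" all come from the
 * caller):
 *
 *	int i, mapped;
 *
 *	mapped = ib_dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for (i = 0; i < mapped; ++i) {
 *		sge[i].addr   = ib_sg_dma_address(dev, &sgl[i]);
 *		sge[i].length = ib_sg_dma_len(dev, &sgl[i]);
 *		sge[i].lkey   = mr->lkey;
 *	}
 */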
1785
1786/**
1787 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1788 * @dev: The device for which the DMA address was created
1789 * @addr: The DMA address
1790 * @size: The size of the region in bytes
1791 * @dir: The direction of the DMA
1792 */
1793static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1794 u64 addr,
1795 size_t size,
1796 enum dma_data_direction dir)
1797{
1798 if (dev->dma_ops)
1799 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1800 else
1801 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1802}
1803
1804/**
1805 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1806 * @dev: The device for which the DMA address was created
1807 * @addr: The DMA address
1808 * @size: The size of the region in bytes
1809 * @dir: The direction of the DMA
1810 */
1811static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1812 u64 addr,
1813 size_t size,
1814 enum dma_data_direction dir)
1815{
1816 if (dev->dma_ops)
1817 dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1818 else
1819 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1820}
1821
1822/**
1823 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1824 * @dev: The device for which the DMA address is requested
1825 * @size: The size of the region to allocate in bytes
1826 * @dma_handle: A pointer for returning the DMA address of the region
1827 * @flag: memory allocator flags
1828 */
1829static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1830 size_t size,
1831 u64 *dma_handle,
1832 gfp_t flag)
1833{
1834 if (dev->dma_ops)
1835 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1836 else {
1837 dma_addr_t handle;
1838 void *ret;
1839
1840 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1841 *dma_handle = handle;
1842 return ret;
1843 }
1844}
1845
1846/**
1847 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1848 * @dev: The device for which the DMA addresses were allocated
1849 * @size: The size of the region
1850 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1851 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1852 */
1853static inline void ib_dma_free_coherent(struct ib_device *dev,
1854 size_t size, void *cpu_addr,
1855 u64 dma_handle)
1856{
1857 if (dev->dma_ops)
1858 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1859 else
1860 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1861}
1862
1863/**
1864 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1865 * by an HCA.
1866 * @pd: The protection domain associated with the registered region.
1867 * @phys_buf_array: Specifies a list of physical buffers to use in the
1868 * memory region.
1869 * @num_phys_buf: Specifies the size of the phys_buf_array.
1870 * @mr_access_flags: Specifies the memory access rights.
1871 * @iova_start: The offset of the region's starting I/O virtual address.
1872 */
1873struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1874 struct ib_phys_buf *phys_buf_array,
1875 int num_phys_buf,
1876 int mr_access_flags,
1877 u64 *iova_start);
1878
1879/**
1880 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1881 * Conceptually, this call performs the functions of a deregister memory
1882 * region followed by a register physical memory region. Where possible,
1883 * resources are reused instead of deallocated and reallocated.
1884 * @mr: The memory region to modify.
1885 * @mr_rereg_mask: A bit-mask used to indicate which of the following
1886 * properties of the memory region are being modified.
1887 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1888 * the new protection domain to associate with the memory region,
1889 * otherwise, this parameter is ignored.
1890 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1891 * field specifies a list of physical buffers to use in the new
1892 * translation, otherwise, this parameter is ignored.
1893 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1894 * field specifies the size of the phys_buf_array, otherwise, this
1895 * parameter is ignored.
1896 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1897 * field specifies the new memory access rights, otherwise, this
1898 * parameter is ignored.
1899 * @iova_start: The offset of the region's starting I/O virtual address.
1900 */
1901int ib_rereg_phys_mr(struct ib_mr *mr,
1902 int mr_rereg_mask,
1903 struct ib_pd *pd,
1904 struct ib_phys_buf *phys_buf_array,
1905 int num_phys_buf,
1906 int mr_access_flags,
1907 u64 *iova_start);
1908
1909/**
1910 * ib_query_mr - Retrieves information about a specific memory region.
1911 * @mr: The memory region to retrieve information about.
1912 * @mr_attr: The attributes of the specified memory region.
1913 */
1914int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1915
1916/**
1917 * ib_dereg_mr - Deregisters a memory region and removes it from the
1918 * HCA translation table.
1919 * @mr: The memory region to deregister.
1920 */
1921int ib_dereg_mr(struct ib_mr *mr);
1922
1923/**
1924 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
1925 * IB_WR_FAST_REG_MR send work request.
1926 * @pd: The protection domain associated with the region.
1927 * @max_page_list_len: requested max physical buffer list length to be
1928 * used with fast register work requests for this MR.
1929 */
1930struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
1931
1932/**
1933 * ib_alloc_fast_reg_page_list - Allocates a page list array
1934 * @device - ib device pointer.
1935 * @page_list_len - size of the page list array to be allocated.
1936 *
1937 * This allocates and returns a struct ib_fast_reg_page_list * and a
1938 * page_list array that is at least page_list_len in size. The actual
1939 * size is returned in max_page_list_len. The caller is responsible
1940 * for initializing the contents of the page_list array before posting
1941 * a send work request with the IB_WR_FAST_REG_MR opcode.
1942 *
1943 * The page_list array entries must be translated using one of the
1944 * ib_dma_*() functions just like the addresses passed to
1945 * ib_map_phys_fmr(). Once the ib_post_send() is issued, the struct
1946 * ib_fast_reg_page_list must not be modified by the caller until the
1947 * IB_WC_FAST_REG_MR work request completes.
1948 */
1949struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
1950 struct ib_device *device, int page_list_len);
1951
1952/**
1953 * ib_free_fast_reg_page_list - Deallocates a previously allocated
1954 * page list array.
1955 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
1956 */
1957void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
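
/*
 * Illustrative sketch (nents and the dma_addrs array are hypothetical):
 * allocate a page list sized for the fast_reg MR, fill it with DMA
 * addresses, and free it once the corresponding work requests have
 * completed:
 *
 *	struct ib_fast_reg_page_list *pl;
 *	int i;
 *
 *	pl = ib_alloc_fast_reg_page_list(device, nents);
 *	if (IS_ERR(pl))
 *		return PTR_ERR(pl);
 *	for (i = 0; i < nents; i++)
 *		pl->page_list[i] = dma_addrs[i];
 *	...
 *	ib_free_fast_reg_page_list(pl);
 */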
1958
1959/**
1960 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
1961 * R_Key and L_Key.
1962 * @mr - struct ib_mr pointer to be updated.
1963 * @newkey - new key to be used.
1964 */
1965static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
1966{
1967 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
1968 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
1969}
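
/*
 * Illustrative sketch: ULPs commonly bump the 8-bit key before reusing a
 * fast_reg MR so that stale remote references to the old R_Key fail:
 *
 *	u8 key = mr->rkey & 0xff;
 *
 *	ib_update_fast_reg_key(mr, ++key);
 */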
1970
1971/**
1972 * ib_alloc_mw - Allocates a memory window.
1973 * @pd: The protection domain associated with the memory window.
1974 */
1975struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1976
1977/**
1978 * ib_bind_mw - Posts a work request to the send queue of the specified
1979 * QP, which binds the memory window to the given address range and
1980 * remote access attributes.
1981 * @qp: QP to post the bind work request on.
1982 * @mw: The memory window to bind.
1983 * @mw_bind: Specifies information about the memory window, including
1984 * its address range, remote access rights, and associated memory region.
1985 */
1986static inline int ib_bind_mw(struct ib_qp *qp,
1987 struct ib_mw *mw,
1988 struct ib_mw_bind *mw_bind)
1989{
1990 /* XXX reference counting in corresponding MR? */
1991 return mw->device->bind_mw ?
1992 mw->device->bind_mw(qp, mw, mw_bind) :
1993 -ENOSYS;
1994}
1995
1996/**
1997 * ib_dealloc_mw - Deallocates a memory window.
1998 * @mw: The memory window to deallocate.
1999 */
2000int ib_dealloc_mw(struct ib_mw *mw);
2001
2002/**
2003 * ib_alloc_fmr - Allocates an unmapped fast memory region.
2004 * @pd: The protection domain associated with the unmapped region.
2005 * @mr_access_flags: Specifies the memory access rights.
2006 * @fmr_attr: Attributes of the unmapped region.
2007 *
2008 * A fast memory region must be mapped before it can be used as part of
2009 * a work request.
2010 */
2011struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2012 int mr_access_flags,
2013 struct ib_fmr_attr *fmr_attr);
2014
2015/**
2016 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2017 * @fmr: The fast memory region to associate with the pages.
2018 * @page_list: An array of physical pages to map to the fast memory region.
2019 * @list_len: The number of pages in page_list.
2020 * @iova: The I/O virtual address to use with the mapped region.
2021 */
2022static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2023 u64 *page_list, int list_len,
2024 u64 iova)
2025{
2026 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2027}
2028
2029/**
2030 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2031 * @fmr_list: A linked list of fast memory regions to unmap.
2032 */
2033int ib_unmap_fmr(struct list_head *fmr_list);
2034
2035/**
2036 * ib_dealloc_fmr - Deallocates a fast memory region.
2037 * @fmr: The fast memory region to deallocate.
2038 */
2039int ib_dealloc_fmr(struct ib_fmr *fmr);
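
/*
 * Illustrative sketch of an FMR map/unmap cycle (the page array, its
 * length and the iova are hypothetical; the page addresses must already
 * be DMA-mapped):
 *
 *	LIST_HEAD(fmr_list);
 *	int ret;
 *
 *	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	if (ret)
 *		return ret;
 *	...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ret = ib_unmap_fmr(&fmr_list);
 */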
2040
2041/**
2042 * ib_attach_mcast - Attaches the specified QP to a multicast group.
2043 * @qp: QP to attach to the multicast group. The QP must be type
2044 * IB_QPT_UD.
2045 * @gid: Multicast group GID.
2046 * @lid: Multicast group LID in host byte order.
2047 *
2048 * In order to send and receive multicast packets, subnet
2049 * administration must have created the multicast group and configured
2050 * the fabric appropriately. The port associated with the specified
2051 * QP must also be a member of the multicast group.
2052 */
2053int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2054
2055/**
2056 * ib_detach_mcast - Detaches the specified QP from a multicast group.
2057 * @qp: QP to detach from the multicast group.
2058 * @gid: Multicast group GID.
2059 * @lid: Multicast group LID in host byte order.
2060 */
2061int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
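
/*
 * Illustrative sketch (the MGID and MLID are hypothetical values obtained
 * from a multicast join): attach a UD QP for the lifetime of the group
 * membership and detach it on teardown:
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */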
2062
2063#endif /* IB_VERBS_H */
1/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2/*
3 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
5 * Copyright (c) 2004, 2020 Intel Corporation. All rights reserved.
6 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
7 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
8 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
9 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
10 */
11
12#ifndef IB_VERBS_H
13#define IB_VERBS_H
14
15#include <linux/ethtool.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/kref.h>
20#include <linux/list.h>
21#include <linux/rwsem.h>
22#include <linux/workqueue.h>
23#include <linux/irq_poll.h>
24#include <uapi/linux/if_ether.h>
25#include <net/ipv6.h>
26#include <net/ip.h>
27#include <linux/string.h>
28#include <linux/slab.h>
29#include <linux/netdevice.h>
30#include <linux/refcount.h>
31#include <linux/if_link.h>
32#include <linux/atomic.h>
33#include <linux/mmu_notifier.h>
34#include <linux/uaccess.h>
35#include <linux/cgroup_rdma.h>
36#include <linux/irqflags.h>
37#include <linux/preempt.h>
38#include <linux/dim.h>
39#include <uapi/rdma/ib_user_verbs.h>
40#include <rdma/rdma_counter.h>
41#include <rdma/restrack.h>
42#include <rdma/signature.h>
43#include <uapi/rdma/rdma_user_ioctl.h>
44#include <uapi/rdma/ib_user_ioctl_verbs.h>
45
46#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
47
48struct ib_umem_odp;
49struct ib_uqp_object;
50struct ib_usrq_object;
51struct ib_uwq_object;
52struct rdma_cm_id;
53struct ib_port;
54struct hw_stats_device_data;
55
56extern struct workqueue_struct *ib_wq;
57extern struct workqueue_struct *ib_comp_wq;
58extern struct workqueue_struct *ib_comp_unbound_wq;
59
60struct ib_ucq_object;
61
62__printf(3, 4) __cold
63void ibdev_printk(const char *level, const struct ib_device *ibdev,
64 const char *format, ...);
65__printf(2, 3) __cold
66void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
67__printf(2, 3) __cold
68void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
69__printf(2, 3) __cold
70void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
71__printf(2, 3) __cold
72void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
73__printf(2, 3) __cold
74void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
75__printf(2, 3) __cold
76void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
77__printf(2, 3) __cold
78void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
79
80#if defined(CONFIG_DYNAMIC_DEBUG) || \
81 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
82#define ibdev_dbg(__dev, format, args...) \
83 dynamic_ibdev_dbg(__dev, format, ##args)
84#else
85__printf(2, 3) __cold
86static inline
87void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
88#endif
89
90#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
91do { \
92 static DEFINE_RATELIMIT_STATE(_rs, \
93 DEFAULT_RATELIMIT_INTERVAL, \
94 DEFAULT_RATELIMIT_BURST); \
95 if (__ratelimit(&_rs)) \
96 ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
97} while (0)
98
99#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
100 ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
101#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
102 ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
103#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
104 ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
105#define ibdev_err_ratelimited(ibdev, fmt, ...) \
106 ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
107#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
108 ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
109#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
110 ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
111#define ibdev_info_ratelimited(ibdev, fmt, ...) \
112 ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
113
114#if defined(CONFIG_DYNAMIC_DEBUG) || \
115 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
116/* descriptor check is first to prevent flooding with "callbacks suppressed" */
117#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
118do { \
119 static DEFINE_RATELIMIT_STATE(_rs, \
120 DEFAULT_RATELIMIT_INTERVAL, \
121 DEFAULT_RATELIMIT_BURST); \
122 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
123 if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
124 __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
125 ##__VA_ARGS__); \
126} while (0)
127#else
128__printf(2, 3) __cold
129static inline
130void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
131#endif
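
/*
 * Illustrative usage sketch (the format strings and arguments are
 * hypothetical):
 *
 *	ibdev_err(ibdev, "failed to create CQ: %d\n", ret);
 *	ibdev_warn_ratelimited(ibdev, "dropping packet on QP %u\n", qpn);
 *	ibdev_dbg(ibdev, "posted %d receive WRs\n", nposted);
 */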
132
133union ib_gid {
134 u8 raw[16];
135 struct {
136 __be64 subnet_prefix;
137 __be64 interface_id;
138 } global;
139};
140
141extern union ib_gid zgid;
142
143enum ib_gid_type {
144 IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
145 IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
146 IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
147 IB_GID_TYPE_SIZE
148};
149
150#define ROCE_V2_UDP_DPORT 4791
151struct ib_gid_attr {
152 struct net_device __rcu *ndev;
153 struct ib_device *device;
154 union ib_gid gid;
155 enum ib_gid_type gid_type;
156 u16 index;
157 u32 port_num;
158};
159
160enum {
161 /* set the local administered indication */
162 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
163};
164
165enum rdma_transport_type {
166 RDMA_TRANSPORT_IB,
167 RDMA_TRANSPORT_IWARP,
168 RDMA_TRANSPORT_USNIC,
169 RDMA_TRANSPORT_USNIC_UDP,
170 RDMA_TRANSPORT_UNSPECIFIED,
171};
172
173enum rdma_protocol_type {
174 RDMA_PROTOCOL_IB,
175 RDMA_PROTOCOL_IBOE,
176 RDMA_PROTOCOL_IWARP,
177 RDMA_PROTOCOL_USNIC_UDP
178};
179
180__attribute_const__ enum rdma_transport_type
181rdma_node_get_transport(unsigned int node_type);
182
183enum rdma_network_type {
184 RDMA_NETWORK_IB,
185 RDMA_NETWORK_ROCE_V1,
186 RDMA_NETWORK_IPV4,
187 RDMA_NETWORK_IPV6
188};
189
190static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
191{
192 if (network_type == RDMA_NETWORK_IPV4 ||
193 network_type == RDMA_NETWORK_IPV6)
194 return IB_GID_TYPE_ROCE_UDP_ENCAP;
195 else if (network_type == RDMA_NETWORK_ROCE_V1)
196 return IB_GID_TYPE_ROCE;
197 else
198 return IB_GID_TYPE_IB;
199}
200
201static inline enum rdma_network_type
202rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
203{
204 if (attr->gid_type == IB_GID_TYPE_IB)
205 return RDMA_NETWORK_IB;
206
207 if (attr->gid_type == IB_GID_TYPE_ROCE)
208 return RDMA_NETWORK_ROCE_V1;
209
210 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
211 return RDMA_NETWORK_IPV4;
212 else
213 return RDMA_NETWORK_IPV6;
214}
215
216enum rdma_link_layer {
217 IB_LINK_LAYER_UNSPECIFIED,
218 IB_LINK_LAYER_INFINIBAND,
219 IB_LINK_LAYER_ETHERNET,
220};
221
222enum ib_device_cap_flags {
223 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
224 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
225 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
226 IB_DEVICE_RAW_MULTI = (1 << 3),
227 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
228 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
229 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
230 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
231 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
232 /* Not in use, former INIT_TYPE = (1 << 9),*/
233 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
234 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
235 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
236 IB_DEVICE_SRQ_RESIZE = (1 << 13),
237 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
238
239 /*
240 * This device supports a per-device lkey or stag that can be
241 * used without performing a memory registration for the local
242 * memory. Note that ULPs should never check this flag, but
243 * instead use the local_dma_lkey flag in the ib_pd structure,
244 * which will always contain a usable lkey.
245 */
246 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
247 /* Reserved, old SEND_W_INV = (1 << 16),*/
248 IB_DEVICE_MEM_WINDOW = (1 << 17),
249 /*
250 * Devices should set IB_DEVICE_UD_IP_SUM if they support
251 * insertion of UDP and TCP checksum on outgoing UD IPoIB
252 * messages and can verify the validity of checksum for
253 * incoming messages. Setting this flag implies that the
254 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
255 */
256 IB_DEVICE_UD_IP_CSUM = (1 << 18),
257 IB_DEVICE_UD_TSO = (1 << 19),
258 IB_DEVICE_XRC = (1 << 20),
259
260 /*
261 * This device supports the IB "base memory management extension",
262 * which includes support for fast registrations (IB_WR_REG_MR,
263 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
264 * also be set by any iWarp device which must support FRs to comply
265 * with the iWarp verbs spec. iWarp devices also support the
266 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
267 * stag.
268 */
269 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
270 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
271 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
272 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
273 IB_DEVICE_RC_IP_CSUM = (1 << 25),
274 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
275 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
276 /*
277 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
278 * support execution of WQEs that involve synchronization
279 * of I/O operations with single completion queue managed
280 * by hardware.
281 */
282 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
283 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
284 IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
285 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
286 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
287 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
288 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
289 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
290 IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),
291 /* The device supports padding incoming writes to cacheline. */
292 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
293 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
294};
295
296enum ib_atomic_cap {
297 IB_ATOMIC_NONE,
298 IB_ATOMIC_HCA,
299 IB_ATOMIC_GLOB
300};
301
302enum ib_odp_general_cap_bits {
303 IB_ODP_SUPPORT = 1 << 0,
304 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
305};
306
307enum ib_odp_transport_cap_bits {
308 IB_ODP_SUPPORT_SEND = 1 << 0,
309 IB_ODP_SUPPORT_RECV = 1 << 1,
310 IB_ODP_SUPPORT_WRITE = 1 << 2,
311 IB_ODP_SUPPORT_READ = 1 << 3,
312 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
313 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
314};
315
316struct ib_odp_caps {
317 uint64_t general_caps;
318 struct {
319 uint32_t rc_odp_caps;
320 uint32_t uc_odp_caps;
321 uint32_t ud_odp_caps;
322 uint32_t xrc_odp_caps;
323 } per_transport_caps;
324};
325
326struct ib_rss_caps {
327 /* Corresponding bit will be set if qp type from
328 * 'enum ib_qp_type' is supported, e.g.
329 * supported_qpts |= 1 << IB_QPT_UD
330 */
331 u32 supported_qpts;
332 u32 max_rwq_indirection_tables;
333 u32 max_rwq_indirection_table_size;
334};
335
336enum ib_tm_cap_flags {
337 /* Support tag matching with rendezvous offload for RC transport */
338 IB_TM_CAP_RNDV_RC = 1 << 0,
339};
340
341struct ib_tm_caps {
342 /* Max size of RNDV header */
343 u32 max_rndv_hdr_size;
344 /* Max number of entries in tag matching list */
345 u32 max_num_tags;
346 /* From enum ib_tm_cap_flags */
347 u32 flags;
348 /* Max number of outstanding list operations */
349 u32 max_ops;
350 /* Max number of SGE in tag matching entry */
351 u32 max_sge;
352};
353
354struct ib_cq_init_attr {
355 unsigned int cqe;
356 u32 comp_vector;
357 u32 flags;
358};
359
360enum ib_cq_attr_mask {
361 IB_CQ_MODERATE = 1 << 0,
362};
363
364struct ib_cq_caps {
365 u16 max_cq_moderation_count;
366 u16 max_cq_moderation_period;
367};
368
369struct ib_dm_mr_attr {
370 u64 length;
371 u64 offset;
372 u32 access_flags;
373};
374
375struct ib_dm_alloc_attr {
376 u64 length;
377 u32 alignment;
378 u32 flags;
379};
380
381struct ib_device_attr {
382 u64 fw_ver;
383 __be64 sys_image_guid;
384 u64 max_mr_size;
385 u64 page_size_cap;
386 u32 vendor_id;
387 u32 vendor_part_id;
388 u32 hw_ver;
389 int max_qp;
390 int max_qp_wr;
391 u64 device_cap_flags;
392 int max_send_sge;
393 int max_recv_sge;
394 int max_sge_rd;
395 int max_cq;
396 int max_cqe;
397 int max_mr;
398 int max_pd;
399 int max_qp_rd_atom;
400 int max_ee_rd_atom;
401 int max_res_rd_atom;
402 int max_qp_init_rd_atom;
403 int max_ee_init_rd_atom;
404 enum ib_atomic_cap atomic_cap;
405 enum ib_atomic_cap masked_atomic_cap;
406 int max_ee;
407 int max_rdd;
408 int max_mw;
409 int max_raw_ipv6_qp;
410 int max_raw_ethy_qp;
411 int max_mcast_grp;
412 int max_mcast_qp_attach;
413 int max_total_mcast_qp_attach;
414 int max_ah;
415 int max_srq;
416 int max_srq_wr;
417 int max_srq_sge;
418 unsigned int max_fast_reg_page_list_len;
419 unsigned int max_pi_fast_reg_page_list_len;
420 u16 max_pkeys;
421 u8 local_ca_ack_delay;
422 int sig_prot_cap;
423 int sig_guard_cap;
424 struct ib_odp_caps odp_caps;
425 uint64_t timestamp_mask;
426 uint64_t hca_core_clock; /* in kHz */
427 struct ib_rss_caps rss_caps;
428 u32 max_wq_type_rq;
429 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
430 struct ib_tm_caps tm_caps;
431 struct ib_cq_caps cq_caps;
432 u64 max_dm_size;
433 /* Max entries for sgl for optimized performance per READ */
434 u32 max_sgl_rd;
435};
436
437enum ib_mtu {
438 IB_MTU_256 = 1,
439 IB_MTU_512 = 2,
440 IB_MTU_1024 = 3,
441 IB_MTU_2048 = 4,
442 IB_MTU_4096 = 5
443};
444
445enum opa_mtu {
446 OPA_MTU_8192 = 6,
447 OPA_MTU_10240 = 7
448};
449
450static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
451{
452 switch (mtu) {
453 case IB_MTU_256: return 256;
454 case IB_MTU_512: return 512;
455 case IB_MTU_1024: return 1024;
456 case IB_MTU_2048: return 2048;
457 case IB_MTU_4096: return 4096;
458 default: return -1;
459 }
460}
461
462static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
463{
464 if (mtu >= 4096)
465 return IB_MTU_4096;
466 else if (mtu >= 2048)
467 return IB_MTU_2048;
468 else if (mtu >= 1024)
469 return IB_MTU_1024;
470 else if (mtu >= 512)
471 return IB_MTU_512;
472 else
473 return IB_MTU_256;
474}
475
476static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
477{
478 switch (mtu) {
479 case OPA_MTU_8192:
480 return 8192;
481 case OPA_MTU_10240:
482 return 10240;
483 default:
484 return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
485 }
486}
487
488static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
489{
490 if (mtu >= 10240)
491 return OPA_MTU_10240;
492 else if (mtu >= 8192)
493 return OPA_MTU_8192;
494 else
495 return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
496}
497
498enum ib_port_state {
499 IB_PORT_NOP = 0,
500 IB_PORT_DOWN = 1,
501 IB_PORT_INIT = 2,
502 IB_PORT_ARMED = 3,
503 IB_PORT_ACTIVE = 4,
504 IB_PORT_ACTIVE_DEFER = 5
505};
506
507enum ib_port_phys_state {
508 IB_PORT_PHYS_STATE_SLEEP = 1,
509 IB_PORT_PHYS_STATE_POLLING = 2,
510 IB_PORT_PHYS_STATE_DISABLED = 3,
511 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
512 IB_PORT_PHYS_STATE_LINK_UP = 5,
513 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
514 IB_PORT_PHYS_STATE_PHY_TEST = 7,
515};
516
517enum ib_port_width {
518 IB_WIDTH_1X = 1,
519 IB_WIDTH_2X = 16,
520 IB_WIDTH_4X = 2,
521 IB_WIDTH_8X = 4,
522 IB_WIDTH_12X = 8
523};
524
525static inline int ib_width_enum_to_int(enum ib_port_width width)
526{
527 switch (width) {
528 case IB_WIDTH_1X: return 1;
529 case IB_WIDTH_2X: return 2;
530 case IB_WIDTH_4X: return 4;
531 case IB_WIDTH_8X: return 8;
532 case IB_WIDTH_12X: return 12;
533 default: return -1;
534 }
535}
536
537enum ib_port_speed {
538 IB_SPEED_SDR = 1,
539 IB_SPEED_DDR = 2,
540 IB_SPEED_QDR = 4,
541 IB_SPEED_FDR10 = 8,
542 IB_SPEED_FDR = 16,
543 IB_SPEED_EDR = 32,
544 IB_SPEED_HDR = 64,
545 IB_SPEED_NDR = 128,
546};
547
548/**
549 * struct rdma_hw_stats
550 * @lock - Mutex to protect parallel write access to lifespan and values
551 * of counters, which are 64 bits and not guaranteed to be written
552 * atomically on 32-bit systems.
553 * @timestamp - Used by the core code to track when the last update was
554 * @lifespan - Used by the core code to determine how old the counters
555 * should be before being updated again. Stored in jiffies, defaults
556 * to 10 milliseconds, drivers can override the default by specifying
557 * their own value during their allocation routine.
558 * @names - Array of pointers to static names used for the counters in the
559 * sysfs directory.
560 * @num_counters - How many hardware counters there are. If names is
561 * shorter than this number, a kernel oops will result. Driver authors
562 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
563 * in their code to prevent this.
564 * @value - Array of u64 counters that are accessed by the sysfs code and
565 * filled in by the drivers get_stats routine
566 */
567struct rdma_hw_stats {
568 struct mutex lock; /* Protect lifespan and values[] */
569 unsigned long timestamp;
570 unsigned long lifespan;
571 const char * const *names;
572 int num_counters;
573 u64 value[];
574};
575
576#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
577/**
578 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
579 * for drivers.
580 * @names - Array of static const char *
581 * @num_counters - How many elements in array
582 * @lifespan - How many milliseconds between updates
583 */
584static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
585 const char * const *names, int num_counters,
586 unsigned long lifespan)
587{
588 struct rdma_hw_stats *stats;
589
590 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
591 GFP_KERNEL);
592 if (!stats)
593 return NULL;
594 stats->names = names;
595 stats->num_counters = num_counters;
596 stats->lifespan = msecs_to_jiffies(lifespan);
597
598 return stats;
599}
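
/*
 * Illustrative driver-side sketch (the counter names are hypothetical):
 *
 *	static const char * const hw_counter_names[] = {
 *		"rx_packets", "tx_packets", "rx_errors",
 *	};
 *
 *	stats = rdma_alloc_hw_stats_struct(hw_counter_names,
 *					   ARRAY_SIZE(hw_counter_names),
 *					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	if (!stats)
 *		return NULL;
 */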
600
601
602/* Define bits for the various functionality this port needs to be supported by
603 * the core.
604 */
605/* Management 0x00000FFF */
606#define RDMA_CORE_CAP_IB_MAD 0x00000001
607#define RDMA_CORE_CAP_IB_SMI 0x00000002
608#define RDMA_CORE_CAP_IB_CM 0x00000004
609#define RDMA_CORE_CAP_IW_CM 0x00000008
610#define RDMA_CORE_CAP_IB_SA 0x00000010
611#define RDMA_CORE_CAP_OPA_MAD 0x00000020
612
613/* Address format 0x000FF000 */
614#define RDMA_CORE_CAP_AF_IB 0x00001000
615#define RDMA_CORE_CAP_ETH_AH 0x00002000
616#define RDMA_CORE_CAP_OPA_AH 0x00004000
617#define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
618
619/* Protocol 0xFFF00000 */
620#define RDMA_CORE_CAP_PROT_IB 0x00100000
621#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
622#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
623#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
624#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
625#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
626
627#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
628 | RDMA_CORE_CAP_PROT_ROCE \
629 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
630
631#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
632 | RDMA_CORE_CAP_IB_MAD \
633 | RDMA_CORE_CAP_IB_SMI \
634 | RDMA_CORE_CAP_IB_CM \
635 | RDMA_CORE_CAP_IB_SA \
636 | RDMA_CORE_CAP_AF_IB)
637#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
638 | RDMA_CORE_CAP_IB_MAD \
639 | RDMA_CORE_CAP_IB_CM \
640 | RDMA_CORE_CAP_AF_IB \
641 | RDMA_CORE_CAP_ETH_AH)
642#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
643 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
644 | RDMA_CORE_CAP_IB_MAD \
645 | RDMA_CORE_CAP_IB_CM \
646 | RDMA_CORE_CAP_AF_IB \
647 | RDMA_CORE_CAP_ETH_AH)
648#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
649 | RDMA_CORE_CAP_IW_CM)
650#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
651 | RDMA_CORE_CAP_OPA_MAD)
652
653#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
654
655#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
656
657struct ib_port_attr {
658 u64 subnet_prefix;
659 enum ib_port_state state;
660 enum ib_mtu max_mtu;
661 enum ib_mtu active_mtu;
662 u32 phys_mtu;
663 int gid_tbl_len;
664 unsigned int ip_gids:1;
665 /* This is the value from PortInfo CapabilityMask, defined by IBA */
666 u32 port_cap_flags;
667 u32 max_msg_sz;
668 u32 bad_pkey_cntr;
669 u32 qkey_viol_cntr;
670 u16 pkey_tbl_len;
671 u32 sm_lid;
672 u32 lid;
673 u8 lmc;
674 u8 max_vl_num;
675 u8 sm_sl;
676 u8 subnet_timeout;
677 u8 init_type_reply;
678 u8 active_width;
679 u16 active_speed;
680 u8 phys_state;
681 u16 port_cap_flags2;
682};
683
684enum ib_device_modify_flags {
685 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
686 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
687};
688
689#define IB_DEVICE_NODE_DESC_MAX 64
690
691struct ib_device_modify {
692 u64 sys_image_guid;
693 char node_desc[IB_DEVICE_NODE_DESC_MAX];
694};
695
696enum ib_port_modify_flags {
697 IB_PORT_SHUTDOWN = 1,
698 IB_PORT_INIT_TYPE = (1<<2),
699 IB_PORT_RESET_QKEY_CNTR = (1<<3),
700 IB_PORT_OPA_MASK_CHG = (1<<4)
701};
702
703struct ib_port_modify {
704 u32 set_port_cap_mask;
705 u32 clr_port_cap_mask;
706 u8 init_type;
707};
708
709enum ib_event_type {
710 IB_EVENT_CQ_ERR,
711 IB_EVENT_QP_FATAL,
712 IB_EVENT_QP_REQ_ERR,
713 IB_EVENT_QP_ACCESS_ERR,
714 IB_EVENT_COMM_EST,
715 IB_EVENT_SQ_DRAINED,
716 IB_EVENT_PATH_MIG,
717 IB_EVENT_PATH_MIG_ERR,
718 IB_EVENT_DEVICE_FATAL,
719 IB_EVENT_PORT_ACTIVE,
720 IB_EVENT_PORT_ERR,
721 IB_EVENT_LID_CHANGE,
722 IB_EVENT_PKEY_CHANGE,
723 IB_EVENT_SM_CHANGE,
724 IB_EVENT_SRQ_ERR,
725 IB_EVENT_SRQ_LIMIT_REACHED,
726 IB_EVENT_QP_LAST_WQE_REACHED,
727 IB_EVENT_CLIENT_REREGISTER,
728 IB_EVENT_GID_CHANGE,
729 IB_EVENT_WQ_FATAL,
730};
731
732const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
733
734struct ib_event {
735 struct ib_device *device;
736 union {
737 struct ib_cq *cq;
738 struct ib_qp *qp;
739 struct ib_srq *srq;
740 struct ib_wq *wq;
741 u32 port_num;
742 } element;
743 enum ib_event_type event;
744};
745
746struct ib_event_handler {
747 struct ib_device *device;
748 void (*handler)(struct ib_event_handler *, struct ib_event *);
749 struct list_head list;
750};
751
752#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
753 do { \
754 (_ptr)->device = _device; \
755 (_ptr)->handler = _handler; \
756 INIT_LIST_HEAD(&(_ptr)->list); \
757 } while (0)
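
/*
 * Illustrative sketch (my_event_handler and priv are hypothetical):
 * initialize the handler and register it with the core via
 * ib_register_event_handler(), declared later in this file:
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device, my_event_handler);
 *	ib_register_event_handler(&priv->event_handler);
 */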
758
759struct ib_global_route {
760 const struct ib_gid_attr *sgid_attr;
761 union ib_gid dgid;
762 u32 flow_label;
763 u8 sgid_index;
764 u8 hop_limit;
765 u8 traffic_class;
766};
767
768struct ib_grh {
769 __be32 version_tclass_flow;
770 __be16 paylen;
771 u8 next_hdr;
772 u8 hop_limit;
773 union ib_gid sgid;
774 union ib_gid dgid;
775};
776
777union rdma_network_hdr {
778 struct ib_grh ibgrh;
779 struct {
780 * The IB spec states that if it's IPv4, the IPv4 header
781 * is located in the last 20 bytes of the GRH.
782 */
783 u8 reserved[20];
784 struct iphdr roce4grh;
785 };
786};
787
788#define IB_QPN_MASK 0xFFFFFF
789
790enum {
791 IB_MULTICAST_QPN = 0xffffff
792};
793
794#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
795#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
796
797enum ib_ah_flags {
798 IB_AH_GRH = 1
799};
800
801enum ib_rate {
802 IB_RATE_PORT_CURRENT = 0,
803 IB_RATE_2_5_GBPS = 2,
804 IB_RATE_5_GBPS = 5,
805 IB_RATE_10_GBPS = 3,
806 IB_RATE_20_GBPS = 6,
807 IB_RATE_30_GBPS = 4,
808 IB_RATE_40_GBPS = 7,
809 IB_RATE_60_GBPS = 8,
810 IB_RATE_80_GBPS = 9,
811 IB_RATE_120_GBPS = 10,
812 IB_RATE_14_GBPS = 11,
813 IB_RATE_56_GBPS = 12,
814 IB_RATE_112_GBPS = 13,
815 IB_RATE_168_GBPS = 14,
816 IB_RATE_25_GBPS = 15,
817 IB_RATE_100_GBPS = 16,
818 IB_RATE_200_GBPS = 17,
819 IB_RATE_300_GBPS = 18,
820 IB_RATE_28_GBPS = 19,
821 IB_RATE_50_GBPS = 20,
822 IB_RATE_400_GBPS = 21,
823 IB_RATE_600_GBPS = 22,
824};
825
826/**
827 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
828 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
829 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
830 * @rate: rate to convert.
831 */
832__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
833
834/**
835 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
836 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
837 * @rate: rate to convert.
838 */
839__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
840
841
842/**
843 * enum ib_mr_type - memory region type
844 * @IB_MR_TYPE_MEM_REG: memory region that is used for
845 * normal registration
846 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to
847 * register any arbitrary sg lists (without
848 * the normal mr constraints - see
849 * ib_map_mr_sg)
850 * @IB_MR_TYPE_DM: memory region that is used for device
851 * memory registration
852 * @IB_MR_TYPE_USER: memory region that is used for the user-space
853 * application
854 * @IB_MR_TYPE_DMA: memory region that is used for DMA operations
855 * without address translations (VA=PA)
856 * @IB_MR_TYPE_INTEGRITY: memory region that is used for
857 * data integrity operations
858 */
859enum ib_mr_type {
860 IB_MR_TYPE_MEM_REG,
861 IB_MR_TYPE_SG_GAPS,
862 IB_MR_TYPE_DM,
863 IB_MR_TYPE_USER,
864 IB_MR_TYPE_DMA,
865 IB_MR_TYPE_INTEGRITY,
866};
867
868enum ib_mr_status_check {
869 IB_MR_CHECK_SIG_STATUS = 1,
870};
871
872/**
873 * struct ib_mr_status - Memory region status container
874 *
875 * @fail_status: Bitmask of MR checks status. For each
876 * failed check a corresponding status bit is set.
877 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
878 * failure.
879 */
880struct ib_mr_status {
881 u32 fail_status;
882 struct ib_sig_err sig_err;
883};
884
885/**
886 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
887 * enum.
888 * @mult: multiple to convert.
889 */
890__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
891
892struct rdma_ah_init_attr {
893 struct rdma_ah_attr *ah_attr;
894 u32 flags;
895 struct net_device *xmit_slave;
896};
897
898enum rdma_ah_attr_type {
899 RDMA_AH_ATTR_TYPE_UNDEFINED,
900 RDMA_AH_ATTR_TYPE_IB,
901 RDMA_AH_ATTR_TYPE_ROCE,
902 RDMA_AH_ATTR_TYPE_OPA,
903};
904
905struct ib_ah_attr {
906 u16 dlid;
907 u8 src_path_bits;
908};
909
910struct roce_ah_attr {
911 u8 dmac[ETH_ALEN];
912};
913
914struct opa_ah_attr {
915 u32 dlid;
916 u8 src_path_bits;
917 bool make_grd;
918};
919
920struct rdma_ah_attr {
921 struct ib_global_route grh;
922 u8 sl;
923 u8 static_rate;
924 u32 port_num;
925 u8 ah_flags;
926 enum rdma_ah_attr_type type;
927 union {
928 struct ib_ah_attr ib;
929 struct roce_ah_attr roce;
930 struct opa_ah_attr opa;
931 };
932};
933
934enum ib_wc_status {
935 IB_WC_SUCCESS,
936 IB_WC_LOC_LEN_ERR,
937 IB_WC_LOC_QP_OP_ERR,
938 IB_WC_LOC_EEC_OP_ERR,
939 IB_WC_LOC_PROT_ERR,
940 IB_WC_WR_FLUSH_ERR,
941 IB_WC_MW_BIND_ERR,
942 IB_WC_BAD_RESP_ERR,
943 IB_WC_LOC_ACCESS_ERR,
944 IB_WC_REM_INV_REQ_ERR,
945 IB_WC_REM_ACCESS_ERR,
946 IB_WC_REM_OP_ERR,
947 IB_WC_RETRY_EXC_ERR,
948 IB_WC_RNR_RETRY_EXC_ERR,
949 IB_WC_LOC_RDD_VIOL_ERR,
950 IB_WC_REM_INV_RD_REQ_ERR,
951 IB_WC_REM_ABORT_ERR,
952 IB_WC_INV_EECN_ERR,
953 IB_WC_INV_EEC_STATE_ERR,
954 IB_WC_FATAL_ERR,
955 IB_WC_RESP_TIMEOUT_ERR,
956 IB_WC_GENERAL_ERR
957};
958
959const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
960
961enum ib_wc_opcode {
962 IB_WC_SEND = IB_UVERBS_WC_SEND,
963 IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
964 IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
965 IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
966 IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
967 IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
968 IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
969 IB_WC_LSO = IB_UVERBS_WC_TSO,
970 IB_WC_REG_MR,
971 IB_WC_MASKED_COMP_SWAP,
972 IB_WC_MASKED_FETCH_ADD,
973/*
974 * Set value of IB_WC_RECV so consumers can test if a completion is a
975 * receive by testing (opcode & IB_WC_RECV).
976 */
977 IB_WC_RECV = 1 << 7,
978 IB_WC_RECV_RDMA_WITH_IMM
979};
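
/*
 * Illustrative sketch of the receive test described above (the handler
 * names are hypothetical):
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		handle_recv(cq, wc);
 *	else
 *		handle_send_completion(cq, wc);
 */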
980
981enum ib_wc_flags {
982 IB_WC_GRH = 1,
983 IB_WC_WITH_IMM = (1<<1),
984 IB_WC_WITH_INVALIDATE = (1<<2),
985 IB_WC_IP_CSUM_OK = (1<<3),
986 IB_WC_WITH_SMAC = (1<<4),
987 IB_WC_WITH_VLAN = (1<<5),
988 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
989};
990
991struct ib_wc {
992 union {
993 u64 wr_id;
994 struct ib_cqe *wr_cqe;
995 };
996 enum ib_wc_status status;
997 enum ib_wc_opcode opcode;
998 u32 vendor_err;
999 u32 byte_len;
1000 struct ib_qp *qp;
1001 union {
1002 __be32 imm_data;
1003 u32 invalidate_rkey;
1004 } ex;
1005 u32 src_qp;
1006 u32 slid;
1007 int wc_flags;
1008 u16 pkey_index;
1009 u8 sl;
1010 u8 dlid_path_bits;
1011 u32 port_num; /* valid only for DR SMPs on switches */
1012 u8 smac[ETH_ALEN];
1013 u16 vlan_id;
1014 u8 network_hdr_type;
1015};
1016
1017enum ib_cq_notify_flags {
1018 IB_CQ_SOLICITED = 1 << 0,
1019 IB_CQ_NEXT_COMP = 1 << 1,
1020 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1021 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
1022};
1023
1024enum ib_srq_type {
1025 IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1026 IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1027 IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1028};
1029
1030static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1031{
1032 return srq_type == IB_SRQT_XRC ||
1033 srq_type == IB_SRQT_TM;
1034}
1035
1036enum ib_srq_attr_mask {
1037 IB_SRQ_MAX_WR = 1 << 0,
1038 IB_SRQ_LIMIT = 1 << 1,
1039};
1040
1041struct ib_srq_attr {
1042 u32 max_wr;
1043 u32 max_sge;
1044 u32 srq_limit;
1045};
1046
1047struct ib_srq_init_attr {
1048 void (*event_handler)(struct ib_event *, void *);
1049 void *srq_context;
1050 struct ib_srq_attr attr;
1051 enum ib_srq_type srq_type;
1052
1053 struct {
1054 struct ib_cq *cq;
1055 union {
1056 struct {
1057 struct ib_xrcd *xrcd;
1058 } xrc;
1059
1060 struct {
1061 u32 max_num_tags;
1062 } tag_matching;
1063 };
1064 } ext;
1065};
1066
1067struct ib_qp_cap {
1068 u32 max_send_wr;
1069 u32 max_recv_wr;
1070 u32 max_send_sge;
1071 u32 max_recv_sge;
1072 u32 max_inline_data;
1073
1074 /*
1075 * Maximum number of rdma_rw_ctx structures in flight at a time.
1076 * ib_create_qp() will calculate the right amount of needed WRs
1077 * and MRs based on this.
1078 */
1079 u32 max_rdma_ctxs;
1080};
1081
1082enum ib_sig_type {
1083 IB_SIGNAL_ALL_WR,
1084 IB_SIGNAL_REQ_WR
1085};
1086
1087enum ib_qp_type {
1088 /*
1089 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1090 * here (and in that order) since the MAD layer uses them as
1091 * indices into a 2-entry table.
1092 */
1093 IB_QPT_SMI,
1094 IB_QPT_GSI,
1095
1096 IB_QPT_RC = IB_UVERBS_QPT_RC,
1097 IB_QPT_UC = IB_UVERBS_QPT_UC,
1098 IB_QPT_UD = IB_UVERBS_QPT_UD,
1099 IB_QPT_RAW_IPV6,
1100 IB_QPT_RAW_ETHERTYPE,
1101 IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1102 IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1103 IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1104 IB_QPT_MAX,
1105 IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
1106 /* Reserve a range for qp types internal to the low level driver.
1107 * These qp types will not be visible at the IB core layer, so the
1108 * IB_QPT_MAX usages should not be affected in the core layer
1109 */
1110 IB_QPT_RESERVED1 = 0x1000,
1111 IB_QPT_RESERVED2,
1112 IB_QPT_RESERVED3,
1113 IB_QPT_RESERVED4,
1114 IB_QPT_RESERVED5,
1115 IB_QPT_RESERVED6,
1116 IB_QPT_RESERVED7,
1117 IB_QPT_RESERVED8,
1118 IB_QPT_RESERVED9,
1119 IB_QPT_RESERVED10,
1120};
1121
1122enum ib_qp_create_flags {
1123 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1124 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
1125 IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1126 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1127 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1128 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
1129 IB_QP_CREATE_NETIF_QP = 1 << 5,
1130 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
1131 IB_QP_CREATE_NETDEV_USE = 1 << 7,
1132 IB_QP_CREATE_SCATTER_FCS =
1133 IB_UVERBS_QP_CREATE_SCATTER_FCS,
1134 IB_QP_CREATE_CVLAN_STRIPPING =
1135 IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1136 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
1137 IB_QP_CREATE_PCI_WRITE_END_PADDING =
1138 IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1139 /* reserve bits 26-31 for low level drivers' internal use */
1140 IB_QP_CREATE_RESERVED_START = 1 << 26,
1141 IB_QP_CREATE_RESERVED_END = 1 << 31,
1142};
1143
1144/*
1145 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1146 * callback to destroy the passed in QP.
1147 */
1148
1149struct ib_qp_init_attr {
1150 /* Consumer's event_handler callback must not block */
1151 void (*event_handler)(struct ib_event *, void *);
1152
1153 void *qp_context;
1154 struct ib_cq *send_cq;
1155 struct ib_cq *recv_cq;
1156 struct ib_srq *srq;
1157 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1158 struct ib_qp_cap cap;
1159 enum ib_sig_type sq_sig_type;
1160 enum ib_qp_type qp_type;
1161 u32 create_flags;
1162
1163 /*
1164 * Only needed for special QP types, or when using the RW API.
1165 */
1166 u32 port_num;
1167 struct ib_rwq_ind_table *rwq_ind_tbl;
1168 u32 source_qpn;
1169};
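
/*
 * Illustrative sketch of a minimal RC QP setup (the capacities and the
 * event handler are hypothetical; ib_create_qp() is declared later in
 * this file):
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler = my_qp_event,
 *		.send_cq       = send_cq,
 *		.recv_cq       = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type   = IB_SIGNAL_REQ_WR,
 *		.qp_type       = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */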
1170
1171struct ib_qp_open_attr {
1172 void (*event_handler)(struct ib_event *, void *);
1173 void *qp_context;
1174 u32 qp_num;
1175 enum ib_qp_type qp_type;
1176};
1177
1178enum ib_rnr_timeout {
1179 IB_RNR_TIMER_655_36 = 0,
1180 IB_RNR_TIMER_000_01 = 1,
1181 IB_RNR_TIMER_000_02 = 2,
1182 IB_RNR_TIMER_000_03 = 3,
1183 IB_RNR_TIMER_000_04 = 4,
1184 IB_RNR_TIMER_000_06 = 5,
1185 IB_RNR_TIMER_000_08 = 6,
1186 IB_RNR_TIMER_000_12 = 7,
1187 IB_RNR_TIMER_000_16 = 8,
1188 IB_RNR_TIMER_000_24 = 9,
1189 IB_RNR_TIMER_000_32 = 10,
1190 IB_RNR_TIMER_000_48 = 11,
1191 IB_RNR_TIMER_000_64 = 12,
1192 IB_RNR_TIMER_000_96 = 13,
1193 IB_RNR_TIMER_001_28 = 14,
1194 IB_RNR_TIMER_001_92 = 15,
1195 IB_RNR_TIMER_002_56 = 16,
1196 IB_RNR_TIMER_003_84 = 17,
1197 IB_RNR_TIMER_005_12 = 18,
1198 IB_RNR_TIMER_007_68 = 19,
1199 IB_RNR_TIMER_010_24 = 20,
1200 IB_RNR_TIMER_015_36 = 21,
1201 IB_RNR_TIMER_020_48 = 22,
1202 IB_RNR_TIMER_030_72 = 23,
1203 IB_RNR_TIMER_040_96 = 24,
1204 IB_RNR_TIMER_061_44 = 25,
1205 IB_RNR_TIMER_081_92 = 26,
1206 IB_RNR_TIMER_122_88 = 27,
1207 IB_RNR_TIMER_163_84 = 28,
1208 IB_RNR_TIMER_245_76 = 29,
1209 IB_RNR_TIMER_327_68 = 30,
1210 IB_RNR_TIMER_491_52 = 31
1211};
1212
1213enum ib_qp_attr_mask {
1214 IB_QP_STATE = 1,
1215 IB_QP_CUR_STATE = (1<<1),
1216 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1217 IB_QP_ACCESS_FLAGS = (1<<3),
1218 IB_QP_PKEY_INDEX = (1<<4),
1219 IB_QP_PORT = (1<<5),
1220 IB_QP_QKEY = (1<<6),
1221 IB_QP_AV = (1<<7),
1222 IB_QP_PATH_MTU = (1<<8),
1223 IB_QP_TIMEOUT = (1<<9),
1224 IB_QP_RETRY_CNT = (1<<10),
1225 IB_QP_RNR_RETRY = (1<<11),
1226 IB_QP_RQ_PSN = (1<<12),
1227 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1228 IB_QP_ALT_PATH = (1<<14),
1229 IB_QP_MIN_RNR_TIMER = (1<<15),
1230 IB_QP_SQ_PSN = (1<<16),
1231 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1232 IB_QP_PATH_MIG_STATE = (1<<18),
1233 IB_QP_CAP = (1<<19),
1234 IB_QP_DEST_QPN = (1<<20),
1235 IB_QP_RESERVED1 = (1<<21),
1236 IB_QP_RESERVED2 = (1<<22),
1237 IB_QP_RESERVED3 = (1<<23),
1238 IB_QP_RESERVED4 = (1<<24),
1239 IB_QP_RATE_LIMIT = (1<<25),
1240
1241 IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1242};
1243
1244enum ib_qp_state {
1245 IB_QPS_RESET,
1246 IB_QPS_INIT,
1247 IB_QPS_RTR,
1248 IB_QPS_RTS,
1249 IB_QPS_SQD,
1250 IB_QPS_SQE,
1251 IB_QPS_ERR
1252};
1253
1254enum ib_mig_state {
1255 IB_MIG_MIGRATED,
1256 IB_MIG_REARM,
1257 IB_MIG_ARMED
1258};
1259
1260enum ib_mw_type {
1261 IB_MW_TYPE_1 = 1,
1262 IB_MW_TYPE_2 = 2
1263};
1264
1265struct ib_qp_attr {
1266 enum ib_qp_state qp_state;
1267 enum ib_qp_state cur_qp_state;
1268 enum ib_mtu path_mtu;
1269 enum ib_mig_state path_mig_state;
1270 u32 qkey;
1271 u32 rq_psn;
1272 u32 sq_psn;
1273 u32 dest_qp_num;
1274 int qp_access_flags;
1275 struct ib_qp_cap cap;
1276 struct rdma_ah_attr ah_attr;
1277 struct rdma_ah_attr alt_ah_attr;
1278 u16 pkey_index;
1279 u16 alt_pkey_index;
1280 u8 en_sqd_async_notify;
1281 u8 sq_draining;
1282 u8 max_rd_atomic;
1283 u8 max_dest_rd_atomic;
1284 u8 min_rnr_timer;
1285 u32 port_num;
1286 u8 timeout;
1287 u8 retry_cnt;
1288 u8 rnr_retry;
1289 u32 alt_port_num;
1290 u8 alt_timeout;
1291 u32 rate_limit;
1292 struct net_device *xmit_slave;
1293};
1294
1295enum ib_wr_opcode {
1296 /* These are shared with userspace */
1297 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1298 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1299 IB_WR_SEND = IB_UVERBS_WR_SEND,
1300 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1301 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1302 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1303 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1304 IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
1305 IB_WR_LSO = IB_UVERBS_WR_TSO,
1306 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1307 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1308 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1309 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1310 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1311 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1312 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1313
1314 /* These are kernel only and can not be issued by userspace */
1315 IB_WR_REG_MR = 0x20,
1316 IB_WR_REG_MR_INTEGRITY,
1317
1318 /* reserve values for low level drivers' internal use.
1319 * These values will not be used at all in the ib core layer.
1320 */
1321 IB_WR_RESERVED1 = 0xf0,
1322 IB_WR_RESERVED2,
1323 IB_WR_RESERVED3,
1324 IB_WR_RESERVED4,
1325 IB_WR_RESERVED5,
1326 IB_WR_RESERVED6,
1327 IB_WR_RESERVED7,
1328 IB_WR_RESERVED8,
1329 IB_WR_RESERVED9,
1330 IB_WR_RESERVED10,
1331};
1332
1333enum ib_send_flags {
1334 IB_SEND_FENCE = 1,
1335 IB_SEND_SIGNALED = (1<<1),
1336 IB_SEND_SOLICITED = (1<<2),
1337 IB_SEND_INLINE = (1<<3),
1338 IB_SEND_IP_CSUM = (1<<4),
1339
1340 /* reserve bits 26-31 for low level drivers' internal use */
1341 IB_SEND_RESERVED_START = (1 << 26),
1342 IB_SEND_RESERVED_END = (1 << 31),
1343};
1344
1345struct ib_sge {
1346 u64 addr;
1347 u32 length;
1348 u32 lkey;
1349};
1350
1351struct ib_cqe {
1352 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1353};
1354
1355struct ib_send_wr {
1356 struct ib_send_wr *next;
1357 union {
1358 u64 wr_id;
1359 struct ib_cqe *wr_cqe;
1360 };
1361 struct ib_sge *sg_list;
1362 int num_sge;
1363 enum ib_wr_opcode opcode;
1364 int send_flags;
1365 union {
1366 __be32 imm_data;
1367 u32 invalidate_rkey;
1368 } ex;
1369};
1370
1371struct ib_rdma_wr {
1372 struct ib_send_wr wr;
1373 u64 remote_addr;
1374 u32 rkey;
1375};
1376
1377static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1378{
1379 return container_of(wr, struct ib_rdma_wr, wr);
1380}
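
/*
 * Illustrative sketch of posting an RDMA WRITE (the SGE, remote address
 * and rkey are hypothetical; ib_post_send() is declared later in this
 * file). Drivers implementing post_send use rdma_wr() on the chained
 * ib_send_wr to recover the containing structure:
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *		},
 *		.remote_addr = remote_addr,
 *		.rkey        = rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */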
1381
1382struct ib_atomic_wr {
1383 struct ib_send_wr wr;
1384 u64 remote_addr;
1385 u64 compare_add;
1386 u64 swap;
1387 u64 compare_add_mask;
1388 u64 swap_mask;
1389 u32 rkey;
1390};
1391
1392static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1393{
1394 return container_of(wr, struct ib_atomic_wr, wr);
1395}
1396
1397struct ib_ud_wr {
1398 struct ib_send_wr wr;
1399 struct ib_ah *ah;
1400 void *header;
1401 int hlen;
1402 int mss;
1403 u32 remote_qpn;
1404 u32 remote_qkey;
1405 u16 pkey_index; /* valid for GSI only */
1406 u32 port_num; /* valid for DR SMPs on switch only */
1407};
1408
1409static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1410{
1411 return container_of(wr, struct ib_ud_wr, wr);
1412}
1413
1414struct ib_reg_wr {
1415 struct ib_send_wr wr;
1416 struct ib_mr *mr;
1417 u32 key;
1418 int access;
1419};
1420
1421static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1422{
1423 return container_of(wr, struct ib_reg_wr, wr);
1424}
1425
1426struct ib_recv_wr {
1427 struct ib_recv_wr *next;
1428 union {
1429 u64 wr_id;
1430 struct ib_cqe *wr_cqe;
1431 };
1432 struct ib_sge *sg_list;
1433 int num_sge;
1434};
1435
1436enum ib_access_flags {
1437 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1438 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1439 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1440 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1441 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1442 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1443 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1444 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1445 IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
1446
1447 IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
1448 IB_ACCESS_SUPPORTED =
1449 ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
1450};
1451
1452/*
1453 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1454 * are hidden here instead of a uapi header!
1455 */
1456enum ib_mr_rereg_flags {
1457 IB_MR_REREG_TRANS = 1,
1458 IB_MR_REREG_PD = (1<<1),
1459 IB_MR_REREG_ACCESS = (1<<2),
1460 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1461};
1462
1463struct ib_umem;
1464
1465enum rdma_remove_reason {
1466 /*
1467 * Userspace requested uobject deletion or initial try
1468 * to remove uobject via cleanup. Call could fail
1469 */
1470 RDMA_REMOVE_DESTROY,
1471 /* Context deletion. This call should delete the actual object itself */
1472 RDMA_REMOVE_CLOSE,
1473 /* Driver is being hot-unplugged. This call should delete the actual object itself */
1474 RDMA_REMOVE_DRIVER_REMOVE,
1475 /* uobj is being cleaned-up before being committed */
1476 RDMA_REMOVE_ABORT,
1477 /* The driver failed to destroy the uobject and is being disconnected */
1478 RDMA_REMOVE_DRIVER_FAILURE,
1479};
1480
1481struct ib_rdmacg_object {
1482#ifdef CONFIG_CGROUP_RDMA
1483 struct rdma_cgroup *cg; /* owner rdma cgroup */
1484#endif
1485};
1486
1487struct ib_ucontext {
1488 struct ib_device *device;
1489 struct ib_uverbs_file *ufile;
1490
1491 struct ib_rdmacg_object cg_obj;
1492 /*
1493 * Implementation details of the RDMA core, don't use in drivers:
1494 */
1495 struct rdma_restrack_entry res;
1496 struct xarray mmap_xa;
1497};
1498
1499struct ib_uobject {
1500 u64 user_handle; /* handle given to us by userspace */
1501 /* ufile & ucontext owning this object */
1502 struct ib_uverbs_file *ufile;
1503 /* FIXME, save memory: ufile->context == context */
1504 struct ib_ucontext *context; /* associated user context */
1505 void *object; /* containing object */
1506 struct list_head list; /* link to context's list */
1507 struct ib_rdmacg_object cg_obj; /* rdmacg object */
1508 int id; /* index into kernel idr */
1509 struct kref ref;
1510 atomic_t usecnt; /* protects exclusive access */
1511 struct rcu_head rcu; /* kfree_rcu() overhead */
1512
1513 const struct uverbs_api_object *uapi_object;
1514};
1515
1516struct ib_udata {
1517 const void __user *inbuf;
1518 void __user *outbuf;
1519 size_t inlen;
1520 size_t outlen;
1521};
1522
1523struct ib_pd {
1524 u32 local_dma_lkey;
1525 u32 flags;
1526 struct ib_device *device;
1527 struct ib_uobject *uobject;
1528 atomic_t usecnt; /* count all resources */
1529
1530 u32 unsafe_global_rkey;
1531
1532 /*
1533 * Implementation details of the RDMA core, don't use in drivers:
1534 */
1535 struct ib_mr *__internal_mr;
1536 struct rdma_restrack_entry res;
1537};
1538
1539struct ib_xrcd {
1540 struct ib_device *device;
1541 atomic_t usecnt; /* count all exposed resources */
1542 struct inode *inode;
1543 struct rw_semaphore tgt_qps_rwsem;
1544 struct xarray tgt_qps;
1545};
1546
1547struct ib_ah {
1548 struct ib_device *device;
1549 struct ib_pd *pd;
1550 struct ib_uobject *uobject;
1551 const struct ib_gid_attr *sgid_attr;
1552 enum rdma_ah_attr_type type;
1553};
1554
1555typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1556
1557enum ib_poll_context {
1558 IB_POLL_SOFTIRQ, /* poll from softirq context */
1559 IB_POLL_WORKQUEUE, /* poll from workqueue */
1560 IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1561 IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1562
1563 IB_POLL_DIRECT, /* caller context, no hw completions */
1564};
1565
1566struct ib_cq {
1567 struct ib_device *device;
1568 struct ib_ucq_object *uobject;
1569 ib_comp_handler comp_handler;
1570 void (*event_handler)(struct ib_event *, void *);
1571 void *cq_context;
1572 int cqe;
1573 unsigned int cqe_used;
1574 atomic_t usecnt; /* count number of work queues */
1575 enum ib_poll_context poll_ctx;
1576 struct ib_wc *wc;
1577 struct list_head pool_entry;
1578 union {
1579 struct irq_poll iop;
1580 struct work_struct work;
1581 };
1582 struct workqueue_struct *comp_wq;
1583 struct dim *dim;
1584
1585 /* updated only by trace points */
1586 ktime_t timestamp;
1587 u8 interrupt:1;
1588 u8 shared:1;
1589 unsigned int comp_vector;
1590
1591 /*
1592 * Implementation details of the RDMA core, don't use in drivers:
1593 */
1594 struct rdma_restrack_entry res;
1595};
1596
1597struct ib_srq {
1598 struct ib_device *device;
1599 struct ib_pd *pd;
1600 struct ib_usrq_object *uobject;
1601 void (*event_handler)(struct ib_event *, void *);
1602 void *srq_context;
1603 enum ib_srq_type srq_type;
1604 atomic_t usecnt;
1605
1606 struct {
1607 struct ib_cq *cq;
1608 union {
1609 struct {
1610 struct ib_xrcd *xrcd;
1611 u32 srq_num;
1612 } xrc;
1613 };
1614 } ext;
1615
1616 /*
1617 * Implementation details of the RDMA core, don't use in drivers:
1618 */
1619 struct rdma_restrack_entry res;
1620};
1621
1622enum ib_raw_packet_caps {
1623 /* Stripping the cvlan from an incoming packet and reporting it in the
1624 * matching work completion is supported.
1625 */
1626 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
1627 /* Scattering an incoming packet's FCS field to host memory is supported.
1628 */
1629 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
1630 /* Checksum offloads are supported (for both send and receive). */
1631 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
1632 /* When a packet is received for an RQ with no receive WQEs, the
1633 * packet processing is delayed.
1634 */
1635 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
1636};
1637
1638enum ib_wq_type {
1639 IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1640};
1641
1642enum ib_wq_state {
1643 IB_WQS_RESET,
1644 IB_WQS_RDY,
1645 IB_WQS_ERR
1646};
1647
1648struct ib_wq {
1649 struct ib_device *device;
1650 struct ib_uwq_object *uobject;
1651 void *wq_context;
1652 void (*event_handler)(struct ib_event *, void *);
1653 struct ib_pd *pd;
1654 struct ib_cq *cq;
1655 u32 wq_num;
1656 enum ib_wq_state state;
1657 enum ib_wq_type wq_type;
1658 atomic_t usecnt;
1659};
1660
1661enum ib_wq_flags {
1662 IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1663 IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1664 IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1665 IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1666 IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1667};
1668
1669struct ib_wq_init_attr {
1670 void *wq_context;
1671 enum ib_wq_type wq_type;
1672 u32 max_wr;
1673 u32 max_sge;
1674 struct ib_cq *cq;
1675 void (*event_handler)(struct ib_event *, void *);
1676 u32 create_flags; /* Use enum ib_wq_flags */
1677};
1678
1679enum ib_wq_attr_mask {
1680 IB_WQ_STATE = 1 << 0,
1681 IB_WQ_CUR_STATE = 1 << 1,
1682 IB_WQ_FLAGS = 1 << 2,
1683};
1684
1685struct ib_wq_attr {
1686 enum ib_wq_state wq_state;
1687 enum ib_wq_state curr_wq_state;
1688 u32 flags; /* Use enum ib_wq_flags */
1689 u32 flags_mask; /* Use enum ib_wq_flags */
1690};
1691
1692struct ib_rwq_ind_table {
1693 struct ib_device *device;
1694 struct ib_uobject *uobject;
1695 atomic_t usecnt;
1696 u32 ind_tbl_num;
1697 u32 log_ind_tbl_size;
1698 struct ib_wq **ind_tbl;
1699};
1700
1701struct ib_rwq_ind_table_init_attr {
1702 u32 log_ind_tbl_size;
1703 /* Each entry is a pointer to Receive Work Queue */
1704 struct ib_wq **ind_tbl;
1705};
1706
1707enum port_pkey_state {
1708 IB_PORT_PKEY_NOT_VALID = 0,
1709 IB_PORT_PKEY_VALID = 1,
1710 IB_PORT_PKEY_LISTED = 2,
1711};
1712
1713struct ib_qp_security;
1714
1715struct ib_port_pkey {
1716 enum port_pkey_state state;
1717 u16 pkey_index;
1718 u32 port_num;
1719 struct list_head qp_list;
1720 struct list_head to_error_list;
1721 struct ib_qp_security *sec;
1722};
1723
1724struct ib_ports_pkeys {
1725 struct ib_port_pkey main;
1726 struct ib_port_pkey alt;
1727};
1728
1729struct ib_qp_security {
1730 struct ib_qp *qp;
1731 struct ib_device *dev;
1732 /* Hold this mutex when changing port and pkey settings. */
1733 struct mutex mutex;
1734 struct ib_ports_pkeys *ports_pkeys;
1735 /* A list of all open shared QP handles. Required to enforce security
1736 * properly for all users of a shared QP.
1737 */
1738 struct list_head shared_qp_list;
1739 void *security;
1740 bool destroying;
1741 atomic_t error_list_count;
1742 struct completion error_complete;
1743 int error_comps_pending;
1744};
1745
1746/*
1747 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1748 * @max_read_sge: Maximum SGE elements per RDMA READ request.
1749 */
1750struct ib_qp {
1751 struct ib_device *device;
1752 struct ib_pd *pd;
1753 struct ib_cq *send_cq;
1754 struct ib_cq *recv_cq;
1755 spinlock_t mr_lock;
1756 int mrs_used;
1757 struct list_head rdma_mrs;
1758 struct list_head sig_mrs;
1759 struct ib_srq *srq;
1760 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1761 struct list_head xrcd_list;
1762
1763 /* count times opened, mcast attaches, flow attaches */
1764 atomic_t usecnt;
1765 struct list_head open_list;
1766 struct ib_qp *real_qp;
1767 struct ib_uqp_object *uobject;
1768 void (*event_handler)(struct ib_event *, void *);
1769 void *qp_context;
1770 /* sgid_attrs associated with the AV's */
1771 const struct ib_gid_attr *av_sgid_attr;
1772 const struct ib_gid_attr *alt_path_sgid_attr;
1773 u32 qp_num;
1774 u32 max_write_sge;
1775 u32 max_read_sge;
1776 enum ib_qp_type qp_type;
1777 struct ib_rwq_ind_table *rwq_ind_tbl;
1778 struct ib_qp_security *qp_sec;
1779 u32 port;
1780
1781 bool integrity_en;
1782 /*
1783 * Implementation details of the RDMA core, don't use in drivers:
1784 */
1785 struct rdma_restrack_entry res;
1786
1787	/* The counter the QP is bound to */
1788 struct rdma_counter *counter;
1789};
1790
1791struct ib_dm {
1792 struct ib_device *device;
1793 u32 length;
1794 u32 flags;
1795 struct ib_uobject *uobject;
1796 atomic_t usecnt;
1797};
1798
1799struct ib_mr {
1800 struct ib_device *device;
1801 struct ib_pd *pd;
1802 u32 lkey;
1803 u32 rkey;
1804 u64 iova;
1805 u64 length;
1806 unsigned int page_size;
1807 enum ib_mr_type type;
1808 bool need_inval;
1809 union {
1810 struct ib_uobject *uobject; /* user */
1811 struct list_head qp_entry; /* FR */
1812 };
1813
1814 struct ib_dm *dm;
1815 struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1816 /*
1817 * Implementation details of the RDMA core, don't use in drivers:
1818 */
1819 struct rdma_restrack_entry res;
1820};
1821
1822struct ib_mw {
1823 struct ib_device *device;
1824 struct ib_pd *pd;
1825 struct ib_uobject *uobject;
1826 u32 rkey;
1827 enum ib_mw_type type;
1828};
1829
1830/* Supported steering options */
1831enum ib_flow_attr_type {
1832 /* steering according to rule specifications */
1833 IB_FLOW_ATTR_NORMAL = 0x0,
1834 /* default unicast and multicast rule -
1835 * receive all Eth traffic which isn't steered to any QP
1836 */
1837 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1838 /* default multicast rule -
1839 * receive all Eth multicast traffic which isn't steered to any QP
1840 */
1841 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1842 /* sniffer rule - receive all port traffic */
1843 IB_FLOW_ATTR_SNIFFER = 0x3
1844};
1845
1846/* Supported steering header types */
1847enum ib_flow_spec_type {
1848 /* L2 headers*/
1849 IB_FLOW_SPEC_ETH = 0x20,
1850 IB_FLOW_SPEC_IB = 0x22,
1851 /* L3 header*/
1852 IB_FLOW_SPEC_IPV4 = 0x30,
1853 IB_FLOW_SPEC_IPV6 = 0x31,
1854 IB_FLOW_SPEC_ESP = 0x34,
1855 /* L4 headers*/
1856 IB_FLOW_SPEC_TCP = 0x40,
1857 IB_FLOW_SPEC_UDP = 0x41,
1858 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
1859 IB_FLOW_SPEC_GRE = 0x51,
1860 IB_FLOW_SPEC_MPLS = 0x60,
1861 IB_FLOW_SPEC_INNER = 0x100,
1862 /* Actions */
1863 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
1864 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
1865 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
1866 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
1867};
1868#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1869#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1870
1871enum ib_flow_flags {
1872 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1873 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1874 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
1875};
1876
1877struct ib_flow_eth_filter {
1878 u8 dst_mac[6];
1879 u8 src_mac[6];
1880 __be16 ether_type;
1881 __be16 vlan_tag;
1882 /* Must be last */
1883 u8 real_sz[];
1884};
1885
1886struct ib_flow_spec_eth {
1887 u32 type;
1888 u16 size;
1889 struct ib_flow_eth_filter val;
1890 struct ib_flow_eth_filter mask;
1891};
1892
1893struct ib_flow_ib_filter {
1894 __be16 dlid;
1895 __u8 sl;
1896 /* Must be last */
1897 u8 real_sz[];
1898};
1899
1900struct ib_flow_spec_ib {
1901 u32 type;
1902 u16 size;
1903 struct ib_flow_ib_filter val;
1904 struct ib_flow_ib_filter mask;
1905};
1906
1907/* IPv4 header flags */
1908enum ib_ipv4_flags {
1909	IB_IPV4_DONT_FRAG = 0x2, /* Don't fragment this packet */
1910	IB_IPV4_MORE_FRAG = 0x4	 /* Set on all fragments of a packet
1911				  * except the last one */
1912};
1913
1914struct ib_flow_ipv4_filter {
1915 __be32 src_ip;
1916 __be32 dst_ip;
1917 u8 proto;
1918 u8 tos;
1919 u8 ttl;
1920 u8 flags;
1921 /* Must be last */
1922 u8 real_sz[];
1923};
1924
1925struct ib_flow_spec_ipv4 {
1926 u32 type;
1927 u16 size;
1928 struct ib_flow_ipv4_filter val;
1929 struct ib_flow_ipv4_filter mask;
1930};
1931
1932struct ib_flow_ipv6_filter {
1933 u8 src_ip[16];
1934 u8 dst_ip[16];
1935 __be32 flow_label;
1936 u8 next_hdr;
1937 u8 traffic_class;
1938 u8 hop_limit;
1939 /* Must be last */
1940 u8 real_sz[];
1941};
1942
1943struct ib_flow_spec_ipv6 {
1944 u32 type;
1945 u16 size;
1946 struct ib_flow_ipv6_filter val;
1947 struct ib_flow_ipv6_filter mask;
1948};
1949
1950struct ib_flow_tcp_udp_filter {
1951 __be16 dst_port;
1952 __be16 src_port;
1953 /* Must be last */
1954 u8 real_sz[];
1955};
1956
1957struct ib_flow_spec_tcp_udp {
1958 u32 type;
1959 u16 size;
1960 struct ib_flow_tcp_udp_filter val;
1961 struct ib_flow_tcp_udp_filter mask;
1962};
1963
1964struct ib_flow_tunnel_filter {
1965 __be32 tunnel_id;
1966 u8 real_sz[];
1967};
1968
1969/* ib_flow_spec_tunnel describes a VXLAN tunnel;
1970 * the tunnel_id in val holds the VNI value.
1971 */
1972struct ib_flow_spec_tunnel {
1973 u32 type;
1974 u16 size;
1975 struct ib_flow_tunnel_filter val;
1976 struct ib_flow_tunnel_filter mask;
1977};
1978
1979struct ib_flow_esp_filter {
1980 __be32 spi;
1981 __be32 seq;
1982 /* Must be last */
1983 u8 real_sz[];
1984};
1985
1986struct ib_flow_spec_esp {
1987 u32 type;
1988 u16 size;
1989 struct ib_flow_esp_filter val;
1990 struct ib_flow_esp_filter mask;
1991};
1992
1993struct ib_flow_gre_filter {
1994 __be16 c_ks_res0_ver;
1995 __be16 protocol;
1996 __be32 key;
1997 /* Must be last */
1998 u8 real_sz[];
1999};
2000
2001struct ib_flow_spec_gre {
2002 u32 type;
2003 u16 size;
2004 struct ib_flow_gre_filter val;
2005 struct ib_flow_gre_filter mask;
2006};
2007
2008struct ib_flow_mpls_filter {
2009 __be32 tag;
2010 /* Must be last */
2011 u8 real_sz[];
2012};
2013
2014struct ib_flow_spec_mpls {
2015 u32 type;
2016 u16 size;
2017 struct ib_flow_mpls_filter val;
2018 struct ib_flow_mpls_filter mask;
2019};
2020
2021struct ib_flow_spec_action_tag {
2022 enum ib_flow_spec_type type;
2023 u16 size;
2024 u32 tag_id;
2025};
2026
2027struct ib_flow_spec_action_drop {
2028 enum ib_flow_spec_type type;
2029 u16 size;
2030};
2031
2032struct ib_flow_spec_action_handle {
2033 enum ib_flow_spec_type type;
2034 u16 size;
2035 struct ib_flow_action *act;
2036};
2037
2038enum ib_counters_description {
2039 IB_COUNTER_PACKETS,
2040 IB_COUNTER_BYTES,
2041};
2042
2043struct ib_flow_spec_action_count {
2044 enum ib_flow_spec_type type;
2045 u16 size;
2046 struct ib_counters *counters;
2047};
2048
2049union ib_flow_spec {
2050 struct {
2051 u32 type;
2052 u16 size;
2053 };
2054 struct ib_flow_spec_eth eth;
2055 struct ib_flow_spec_ib ib;
2056 struct ib_flow_spec_ipv4 ipv4;
2057 struct ib_flow_spec_tcp_udp tcp_udp;
2058 struct ib_flow_spec_ipv6 ipv6;
2059 struct ib_flow_spec_tunnel tunnel;
2060 struct ib_flow_spec_esp esp;
2061 struct ib_flow_spec_gre gre;
2062 struct ib_flow_spec_mpls mpls;
2063 struct ib_flow_spec_action_tag flow_tag;
2064 struct ib_flow_spec_action_drop drop;
2065 struct ib_flow_spec_action_handle action;
2066 struct ib_flow_spec_action_count flow_count;
2067};
2068
2069struct ib_flow_attr {
2070 enum ib_flow_attr_type type;
2071 u16 size;
2072 u16 priority;
2073 u32 flags;
2074 u8 num_of_specs;
2075 u32 port;
2076 union ib_flow_spec flows[];
2077};
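
/*
 * Minimal sketch (illustrative only, allocation-failure handling omitted) of
 * building a one-spec flow attribute that steers TCP traffic with a given
 * destination port (80 is just an example) to a QP; a filled attribute like
 * this is what ultimately reaches the driver's ->create_flow() op.
 *
 *	struct ib_flow_attr *attr;
 *	struct ib_flow_spec_tcp_udp *tcp;
 *
 *	attr = kzalloc(struct_size(attr, flows, 1), GFP_KERNEL);
 *	attr->type = IB_FLOW_ATTR_NORMAL;
 *	attr->num_of_specs = 1;
 *	attr->port = 1;
 *	tcp = &attr->flows[0].tcp_udp;
 *	tcp->type = IB_FLOW_SPEC_TCP;
 *	tcp->size = sizeof(*tcp);
 *	tcp->val.dst_port = cpu_to_be16(80);
 *	tcp->mask.dst_port = cpu_to_be16(0xffff);
 */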
2078
2079struct ib_flow {
2080 struct ib_qp *qp;
2081 struct ib_device *device;
2082 struct ib_uobject *uobject;
2083};
2084
2085enum ib_flow_action_type {
2086 IB_FLOW_ACTION_UNSPECIFIED,
2087 IB_FLOW_ACTION_ESP = 1,
2088};
2089
2090struct ib_flow_action_attrs_esp_keymats {
2091 enum ib_uverbs_flow_action_esp_keymat protocol;
2092 union {
2093 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2094 } keymat;
2095};
2096
2097struct ib_flow_action_attrs_esp_replays {
2098 enum ib_uverbs_flow_action_esp_replay protocol;
2099 union {
2100 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2101 } replay;
2102};
2103
2104enum ib_flow_action_attrs_esp_flags {
2105 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2106 * This is done in order to share the same flags between user-space and
2107 * kernel and spare an unnecessary translation.
2108 */
2109
2110 /* Kernel flags */
2111 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
2112 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
2113};
2114
2115struct ib_flow_spec_list {
2116 struct ib_flow_spec_list *next;
2117 union ib_flow_spec spec;
2118};
2119
2120struct ib_flow_action_attrs_esp {
2121 struct ib_flow_action_attrs_esp_keymats *keymat;
2122 struct ib_flow_action_attrs_esp_replays *replay;
2123 struct ib_flow_spec_list *encap;
2124 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2125 * Value of 0 is a valid value.
2126 */
2127 u32 esn;
2128 u32 spi;
2129 u32 seq;
2130 u32 tfc_pad;
2131 /* Use enum ib_flow_action_attrs_esp_flags */
2132 u64 flags;
2133 u64 hard_limit_pkts;
2134};
2135
2136struct ib_flow_action {
2137 struct ib_device *device;
2138 struct ib_uobject *uobject;
2139 enum ib_flow_action_type type;
2140 atomic_t usecnt;
2141};
2142
2143struct ib_mad;
2144
2145enum ib_process_mad_flags {
2146 IB_MAD_IGNORE_MKEY = 1,
2147 IB_MAD_IGNORE_BKEY = 2,
2148 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2149};
2150
2151enum ib_mad_result {
2152 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
2153 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
2154 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
2155 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
2156};
2157
2158struct ib_port_cache {
2159 u64 subnet_prefix;
2160 struct ib_pkey_cache *pkey;
2161 struct ib_gid_table *gid;
2162 u8 lmc;
2163 enum ib_port_state port_state;
2164};
2165
2166struct ib_port_immutable {
2167 int pkey_tbl_len;
2168 int gid_tbl_len;
2169 u32 core_cap_flags;
2170 u32 max_mad_size;
2171};
2172
2173struct ib_port_data {
2174 struct ib_device *ib_dev;
2175
2176 struct ib_port_immutable immutable;
2177
2178 spinlock_t pkey_list_lock;
2179
2180 spinlock_t netdev_lock;
2181
2182 struct list_head pkey_list;
2183
2184 struct ib_port_cache cache;
2185
2186 struct net_device __rcu *netdev;
2187 struct hlist_node ndev_hash_link;
2188 struct rdma_port_counter port_counter;
2189 struct ib_port *sysfs;
2190};
2191
2192/* rdma netdev type - specifies protocol type */
2193enum rdma_netdev_t {
2194 RDMA_NETDEV_OPA_VNIC,
2195 RDMA_NETDEV_IPOIB,
2196};
2197
2198/**
2199 * struct rdma_netdev - rdma netdev
2200 * For cases where netstack interfacing is required.
2201 */
2202struct rdma_netdev {
2203 void *clnt_priv;
2204 struct ib_device *hca;
2205 u32 port_num;
2206 int mtu;
2207
2208 /*
2209 * cleanup function must be specified.
2210 * FIXME: This is only used for OPA_VNIC and that usage should be
2211 * removed too.
2212 */
2213 void (*free_rdma_netdev)(struct net_device *netdev);
2214
2215 /* control functions */
2216 void (*set_id)(struct net_device *netdev, int id);
2217 /* send packet */
2218 int (*send)(struct net_device *dev, struct sk_buff *skb,
2219 struct ib_ah *address, u32 dqpn);
2220 /* multicast */
2221 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2222 union ib_gid *gid, u16 mlid,
2223 int set_qkey, u32 qkey);
2224 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2225 union ib_gid *gid, u16 mlid);
2226 /* timeout */
2227 void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
2228};
2229
2230struct rdma_netdev_alloc_params {
2231 size_t sizeof_priv;
2232 unsigned int txqs;
2233 unsigned int rxqs;
2234 void *param;
2235
2236 int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
2237 struct net_device *netdev, void *param);
2238};
2239
2240struct ib_odp_counters {
2241 atomic64_t faults;
2242 atomic64_t invalidations;
2243 atomic64_t prefetch;
2244};
2245
2246struct ib_counters {
2247 struct ib_device *device;
2248 struct ib_uobject *uobject;
2249 /* num of objects attached */
2250 atomic_t usecnt;
2251};
2252
2253struct ib_counters_read_attr {
2254 u64 *counters_buff;
2255 u32 ncounters;
2256 u32 flags; /* use enum ib_read_counters_flags */
2257};
2258
2259struct uverbs_attr_bundle;
2260struct iw_cm_id;
2261struct iw_cm_conn_param;
2262
2263#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
2264 .size_##ib_struct = \
2265 (sizeof(struct drv_struct) + \
2266 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
2267 BUILD_BUG_ON_ZERO( \
2268 !__same_type(((struct drv_struct *)NULL)->member, \
2269 struct ib_struct)))
2270
2271#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
2272 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2273
2274#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
2275 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2276
2277#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
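
/*
 * Minimal sketch (illustrative only; "mydrv_pd" and its "ibpd" member are
 * hypothetical) of how a provider wires up the object-size machinery so the
 * core can allocate driver-private containers:
 *
 *	struct mydrv_pd {
 *		struct ib_pd ibpd;	// must be the first member (offset 0)
 *		u32 pdn;
 *	};
 *
 *	static const struct ib_device_ops mydrv_ops = {
 *		...
 *		INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
 *	};
 *
 * The core then allocates with rdma_zalloc_drv_obj(ibdev, ib_pd), and the
 * driver recovers its container via container_of(ibpd, struct mydrv_pd, ibpd).
 */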
2278
2279struct rdma_user_mmap_entry {
2280 struct kref ref;
2281 struct ib_ucontext *ucontext;
2282 unsigned long start_pgoff;
2283 size_t npages;
2284 bool driver_removed;
2285};
2286
2287/* Return the offset (in bytes) the user should pass to libc's mmap() */
2288static inline u64
2289rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2290{
2291 return (u64)entry->start_pgoff << PAGE_SHIFT;
2292}
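
/*
 * Minimal sketch (illustrative only; "ment" and "resp" are hypothetical) of
 * how a driver publishes a mappable region: insert an embedded entry and
 * return the resulting offset to userspace for use with mmap().
 * rdma_user_mmap_entry_insert() and friends are declared further down in
 * this header.
 *
 *	ret = rdma_user_mmap_entry_insert(ucontext, &ment->rdma_entry,
 *					  PAGE_SIZE);
 *	if (ret)
 *		return ret;
 *	resp.mmap_offset = rdma_user_mmap_get_offset(&ment->rdma_entry);
 */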
2293
2294/**
2295 * struct ib_device_ops - InfiniBand device operations
2296 * This structure defines all the InfiniBand device operations. Providers
2297 * need to define the operations they support; unsupported ones are left NULL.
2298 */
2299struct ib_device_ops {
2300 struct module *owner;
2301 enum rdma_driver_id driver_id;
2302 u32 uverbs_abi_ver;
2303 unsigned int uverbs_no_driver_id_binding:1;
2304
2305 /*
2306 * NOTE: New drivers should not make use of device_group; instead new
2307 * device parameter should be exposed via netlink command. This
2308 * mechanism exists only for existing drivers.
2309 */
2310 const struct attribute_group *device_group;
2311 const struct attribute_group **port_groups;
2312
2313 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2314 const struct ib_send_wr **bad_send_wr);
2315 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2316 const struct ib_recv_wr **bad_recv_wr);
2317 void (*drain_rq)(struct ib_qp *qp);
2318 void (*drain_sq)(struct ib_qp *qp);
2319 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2320 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2321 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2322 int (*post_srq_recv)(struct ib_srq *srq,
2323 const struct ib_recv_wr *recv_wr,
2324 const struct ib_recv_wr **bad_recv_wr);
2325 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2326 u32 port_num, const struct ib_wc *in_wc,
2327 const struct ib_grh *in_grh,
2328 const struct ib_mad *in_mad, struct ib_mad *out_mad,
2329 size_t *out_mad_size, u16 *out_mad_pkey_index);
2330 int (*query_device)(struct ib_device *device,
2331 struct ib_device_attr *device_attr,
2332 struct ib_udata *udata);
2333 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2334 struct ib_device_modify *device_modify);
2335 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2336 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2337 int comp_vector);
2338 int (*query_port)(struct ib_device *device, u32 port_num,
2339 struct ib_port_attr *port_attr);
2340 int (*modify_port)(struct ib_device *device, u32 port_num,
2341 int port_modify_mask,
2342 struct ib_port_modify *port_modify);
2343 /**
2344 * The following mandatory functions are used only at device
2345 * registration. Keep functions such as these at the end of this
2346 * structure to avoid cache line misses when accessing struct ib_device
2347 * in fast paths.
2348 */
2349 int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2350 struct ib_port_immutable *immutable);
2351 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2352 u32 port_num);
2353 /**
2354 * When calling get_netdev, the HW vendor's driver should return the
2355 * net device of device @device at port @port_num or NULL if such
2356 * a net device doesn't exist. The vendor driver should call dev_hold
2357 * on this net device. The HW vendor's device driver must guarantee
2358	 * that this function returns NULL before the net device has completed
2359	 * the NETDEV_UNREGISTER state.
2360 */
2361 struct net_device *(*get_netdev)(struct ib_device *device,
2362 u32 port_num);
2363 /**
2364 * rdma netdev operation
2365 *
2366 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2367 * must return -EOPNOTSUPP if it doesn't support the specified type.
2368 */
2369 struct net_device *(*alloc_rdma_netdev)(
2370 struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2371 const char *name, unsigned char name_assign_type,
2372 void (*setup)(struct net_device *));
2373
2374 int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2375 enum rdma_netdev_t type,
2376 struct rdma_netdev_alloc_params *params);
2377 /**
2378	 * query_gid should return the GID value for @device when the link
2379	 * layer of @port_num is either IB or iWARP. It is a no-op if the
2380	 * @port_num port uses the RoCE link layer.
2381 */
2382 int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2383 union ib_gid *gid);
2384 /**
2385 * When calling add_gid, the HW vendor's driver should add the gid
2386 * of device of port at gid index available at @attr. Meta-info of
2387 * that gid (for example, the network device related to this gid) is
2388 * available at @attr. @context allows the HW vendor driver to store
2389 * extra information together with a GID entry. The HW vendor driver may
2390 * allocate memory to contain this information and store it in @context
2391	 * when a new GID entry is written. Params are consistent until the
2392 * next call of add_gid or delete_gid. The function should return 0 on
2393 * success or error otherwise. The function could be called
2394 * concurrently for different ports. This function is only called when
2395 * roce_gid_table is used.
2396 */
2397 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2398 /**
2399 * When calling del_gid, the HW vendor's driver should delete the
2400 * gid of device @device at gid index gid_index of port port_num
2401 * available in @attr.
2402 * Upon the deletion of a GID entry, the HW vendor must free any
2403 * allocated memory. The caller will clear @context afterwards.
2404 * This function is only called when roce_gid_table is used.
2405 */
2406 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2407 int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2408 u16 *pkey);
2409 int (*alloc_ucontext)(struct ib_ucontext *context,
2410 struct ib_udata *udata);
2411 void (*dealloc_ucontext)(struct ib_ucontext *context);
2412 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2413 /**
2414	 * This will be called once the refcount of an entry in mmap_xa reaches
2415 * zero. The type of the memory that was mapped may differ between
2416 * entries and is opaque to the rdma_user_mmap interface.
2417	 * Therefore freeing it must be implemented by the driver in mmap_free.
2418 */
2419 void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2420 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2421 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2422 int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2423 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2424 struct ib_udata *udata);
2425 int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2426 struct ib_udata *udata);
2427 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2428 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2429 int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2430 int (*create_srq)(struct ib_srq *srq,
2431 struct ib_srq_init_attr *srq_init_attr,
2432 struct ib_udata *udata);
2433 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2434 enum ib_srq_attr_mask srq_attr_mask,
2435 struct ib_udata *udata);
2436 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2437 int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2438 struct ib_qp *(*create_qp)(struct ib_pd *pd,
2439 struct ib_qp_init_attr *qp_init_attr,
2440 struct ib_udata *udata);
2441 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2442 int qp_attr_mask, struct ib_udata *udata);
2443 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2444 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2445 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2446 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2447 struct ib_udata *udata);
2448 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2449 int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2450 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2451 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2452 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2453 u64 virt_addr, int mr_access_flags,
2454 struct ib_udata *udata);
2455 struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
2456 u64 length, u64 virt_addr, int fd,
2457 int mr_access_flags,
2458 struct ib_udata *udata);
2459 struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
2460 u64 length, u64 virt_addr,
2461 int mr_access_flags, struct ib_pd *pd,
2462 struct ib_udata *udata);
2463 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2464 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2465 u32 max_num_sg);
2466 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2467 u32 max_num_data_sg,
2468 u32 max_num_meta_sg);
2469 int (*advise_mr)(struct ib_pd *pd,
2470 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2471 struct ib_sge *sg_list, u32 num_sge,
2472 struct uverbs_attr_bundle *attrs);
2473
2474 /*
2475 * Kernel users should universally support relaxed ordering (RO), as
2476 * they are designed to read data only after observing the CQE and use
2477 * the DMA API correctly.
2478 *
2479 * Some drivers implicitly enable RO if platform supports it.
2480 */
2481 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2482 unsigned int *sg_offset);
2483 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2484 struct ib_mr_status *mr_status);
2485 int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2486 int (*dealloc_mw)(struct ib_mw *mw);
2487 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2488 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2489 int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2490 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2491 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2492 struct ib_flow_attr *flow_attr,
2493 struct ib_udata *udata);
2494 int (*destroy_flow)(struct ib_flow *flow_id);
2495 struct ib_flow_action *(*create_flow_action_esp)(
2496 struct ib_device *device,
2497 const struct ib_flow_action_attrs_esp *attr,
2498 struct uverbs_attr_bundle *attrs);
2499 int (*destroy_flow_action)(struct ib_flow_action *action);
2500 int (*modify_flow_action_esp)(
2501 struct ib_flow_action *action,
2502 const struct ib_flow_action_attrs_esp *attr,
2503 struct uverbs_attr_bundle *attrs);
2504 int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2505 int state);
2506 int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2507 struct ifla_vf_info *ivf);
2508 int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2509 struct ifla_vf_stats *stats);
2510 int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2511 struct ifla_vf_guid *node_guid,
2512 struct ifla_vf_guid *port_guid);
2513 int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2514 int type);
2515 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2516 struct ib_wq_init_attr *init_attr,
2517 struct ib_udata *udata);
2518 int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2519 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2520 u32 wq_attr_mask, struct ib_udata *udata);
2521 int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2522 struct ib_rwq_ind_table_init_attr *init_attr,
2523 struct ib_udata *udata);
2524 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2525 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2526 struct ib_ucontext *context,
2527 struct ib_dm_alloc_attr *attr,
2528 struct uverbs_attr_bundle *attrs);
2529 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2530 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2531 struct ib_dm_mr_attr *attr,
2532 struct uverbs_attr_bundle *attrs);
2533 int (*create_counters)(struct ib_counters *counters,
2534 struct uverbs_attr_bundle *attrs);
2535 int (*destroy_counters)(struct ib_counters *counters);
2536 int (*read_counters)(struct ib_counters *counters,
2537 struct ib_counters_read_attr *counters_read_attr,
2538 struct uverbs_attr_bundle *attrs);
2539 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2540 int data_sg_nents, unsigned int *data_sg_offset,
2541 struct scatterlist *meta_sg, int meta_sg_nents,
2542 unsigned int *meta_sg_offset);
2543
2544 /**
2545 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
2546 * fill in the driver initialized data. The struct is kfree()'ed by
2547 * the sysfs core when the device is removed. A lifespan of -1 in the
2548 * return struct tells the core to set a default lifespan.
2549 */
2550 struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
2551 struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
2552 u32 port_num);
2553 /**
2554 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2555 * @index - The index in the value array we wish to have updated, or
2556 * num_counters if we want all stats updated
2557 * Return codes -
2558 * < 0 - Error, no counters updated
2559 * index - Updated the single counter pointed to by index
2560 * num_counters - Updated all counters (will reset the timestamp
2561 * and prevent further calls for lifespan milliseconds)
2562	 * Drivers are allowed to update all counters in lieu of just the
2563	 * one given in index, at their option.
2564 */
2565 int (*get_hw_stats)(struct ib_device *device,
2566 struct rdma_hw_stats *stats, u32 port, int index);
2567
2568 /**
2569 * Allows rdma drivers to add their own restrack attributes.
2570 */
2571 int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2572 int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2573 int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2574 int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2575 int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2576 int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2577 int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2578
2579 /* Device lifecycle callbacks */
2580 /*
2581 * Called after the device becomes registered, before clients are
2582 * attached
2583 */
2584 int (*enable_driver)(struct ib_device *dev);
2585 /*
2586 * This is called as part of ib_dealloc_device().
2587 */
2588 void (*dealloc_driver)(struct ib_device *dev);
2589
2590 /* iWarp CM callbacks */
2591 void (*iw_add_ref)(struct ib_qp *qp);
2592 void (*iw_rem_ref)(struct ib_qp *qp);
2593 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2594 int (*iw_connect)(struct iw_cm_id *cm_id,
2595 struct iw_cm_conn_param *conn_param);
2596 int (*iw_accept)(struct iw_cm_id *cm_id,
2597 struct iw_cm_conn_param *conn_param);
2598 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2599 u8 pdata_len);
2600 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2601 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2602 /**
2603 * counter_bind_qp - Bind a QP to a counter.
2604 * @counter - The counter to be bound. If counter->id is zero then
2605 * the driver needs to allocate a new counter and set counter->id
2606 */
2607 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2608 /**
2609 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2610 * counter and bind it onto the default one
2611 */
2612 int (*counter_unbind_qp)(struct ib_qp *qp);
2613 /**
2614	 * counter_dealloc - De-allocate the HW counter
2615 */
2616 int (*counter_dealloc)(struct rdma_counter *counter);
2617 /**
2618 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2619 * the driver initialized data.
2620 */
2621 struct rdma_hw_stats *(*counter_alloc_stats)(
2622 struct rdma_counter *counter);
2623 /**
2624 * counter_update_stats - Query the stats value of this counter
2625 */
2626 int (*counter_update_stats)(struct rdma_counter *counter);
2627
2628 /**
2629 * Allows rdma drivers to add their own restrack attributes
2630 * dumped via 'rdma stat' iproute2 command.
2631 */
2632 int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2633
2634 /* query driver for its ucontext properties */
2635 int (*query_ucontext)(struct ib_ucontext *context,
2636 struct uverbs_attr_bundle *attrs);
2637
2638 DECLARE_RDMA_OBJ_SIZE(ib_ah);
2639 DECLARE_RDMA_OBJ_SIZE(ib_counters);
2640 DECLARE_RDMA_OBJ_SIZE(ib_cq);
2641 DECLARE_RDMA_OBJ_SIZE(ib_mw);
2642 DECLARE_RDMA_OBJ_SIZE(ib_pd);
2643 DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2644 DECLARE_RDMA_OBJ_SIZE(ib_srq);
2645 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2646 DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2647};
2648
2649struct ib_core_device {
2650	/* device must be the first element in the structure for as long as the
2651	 * union of ib_core_device and device exists in ib_device.
2652 */
2653 struct device dev;
2654 possible_net_t rdma_net;
2655 struct kobject *ports_kobj;
2656 struct list_head port_list;
2657 struct ib_device *owner; /* reach back to owner ib_device */
2658};
2659
2660struct rdma_restrack_root;
2661struct ib_device {
2662 /* Do not access @dma_device directly from ULP nor from HW drivers. */
2663 struct device *dma_device;
2664 struct ib_device_ops ops;
2665 char name[IB_DEVICE_NAME_MAX];
2666 struct rcu_head rcu_head;
2667
2668 struct list_head event_handler_list;
2669 /* Protects event_handler_list */
2670 struct rw_semaphore event_handler_rwsem;
2671
2672 /* Protects QP's event_handler calls and open_qp list */
2673 spinlock_t qp_open_list_lock;
2674
2675 struct rw_semaphore client_data_rwsem;
2676 struct xarray client_data;
2677 struct mutex unregistration_lock;
2678
2679 /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2680 rwlock_t cache_lock;
2681 /**
2682 * port_data is indexed by port number
2683 */
2684 struct ib_port_data *port_data;
2685
2686 int num_comp_vectors;
2687
2688 union {
2689 struct device dev;
2690 struct ib_core_device coredev;
2691 };
2692
2693	/* First group is for device attributes.
2694	 * Second group is for driver-provided attributes (optional).
2695	 * Third group is for the hw_stats.
2696	 * It is a NULL-terminated array.
2697 */
2698 const struct attribute_group *groups[4];
2699
2700 u64 uverbs_cmd_mask;
2701
2702 char node_desc[IB_DEVICE_NODE_DESC_MAX];
2703 __be64 node_guid;
2704 u32 local_dma_lkey;
2705 u16 is_switch:1;
2706 /* Indicates kernel verbs support, should not be used in drivers */
2707 u16 kverbs_provider:1;
2708 /* CQ adaptive moderation (RDMA DIM) */
2709 u16 use_cq_dim:1;
2710 u8 node_type;
2711 u32 phys_port_cnt;
2712 struct ib_device_attr attrs;
2713 struct hw_stats_device_data *hw_stats_data;
2714
2715#ifdef CONFIG_CGROUP_RDMA
2716 struct rdmacg_device cg_device;
2717#endif
2718
2719 u32 index;
2720
2721 spinlock_t cq_pools_lock;
2722 struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2723
2724 struct rdma_restrack_root *res;
2725
2726 const struct uapi_definition *driver_def;
2727
2728 /*
2729 * Positive refcount indicates that the device is currently
2730 * registered and cannot be unregistered.
2731 */
2732 refcount_t refcount;
2733 struct completion unreg_completion;
2734 struct work_struct unregistration_work;
2735
2736 const struct rdma_link_ops *link_ops;
2737
2738 /* Protects compat_devs xarray modifications */
2739 struct mutex compat_devs_mutex;
2740 /* Maintains compat devices for each net namespace */
2741 struct xarray compat_devs;
2742
2743 /* Used by iWarp CM */
2744 char iw_ifname[IFNAMSIZ];
2745 u32 iw_driver_flags;
2746 u32 lag_flags;
2747};
2748
2749struct ib_client_nl_info;
2750struct ib_client {
2751 const char *name;
2752 int (*add)(struct ib_device *ibdev);
2753 void (*remove)(struct ib_device *, void *client_data);
2754 void (*rename)(struct ib_device *dev, void *client_data);
2755 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2756 struct ib_client_nl_info *res);
2757 int (*get_global_nl_info)(struct ib_client_nl_info *res);
2758
2759 /* Returns the net_dev belonging to this ib_client and matching the
2760 * given parameters.
2761	 * @dev: An RDMA device that the net_dev uses for communication.
2762 * @port: A physical port number on the RDMA device.
2763 * @pkey: P_Key that the net_dev uses if applicable.
2764 * @gid: A GID that the net_dev uses to communicate.
2765 * @addr: An IP address the net_dev is configured with.
2766 * @client_data: The device's client data set by ib_set_client_data().
2767 *
2768 * An ib_client that implements a net_dev on top of RDMA devices
2769 * (such as IP over IB) should implement this callback, allowing the
2770 * rdma_cm module to find the right net_dev for a given request.
2771 *
2772 * The caller is responsible for calling dev_put on the returned
2773 * netdev. */
2774 struct net_device *(*get_net_dev_by_params)(
2775 struct ib_device *dev,
2776 u32 port,
2777 u16 pkey,
2778 const union ib_gid *gid,
2779 const struct sockaddr *addr,
2780 void *client_data);
2781
2782 refcount_t uses;
2783 struct completion uses_zero;
2784 u32 client_id;
2785
2786 /* kverbs are not required by the client */
2787 u8 no_kverbs_req:1;
2788};
2789
2790/*
2791 * IB block DMA iterator
2792 *
2793 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2794 * to a HW supported page size.
2795 */
2796struct ib_block_iter {
2797 /* internal states */
2798 struct scatterlist *__sg; /* sg holding the current aligned block */
2799 dma_addr_t __dma_addr; /* unaligned DMA address of this block */
2800 unsigned int __sg_nents; /* number of SG entries */
2801 unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
2802 unsigned int __pg_bit; /* alignment of current block */
2803};
2804
2805struct ib_device *_ib_alloc_device(size_t size);
2806#define ib_alloc_device(drv_struct, member) \
2807 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2808 BUILD_BUG_ON_ZERO(offsetof( \
2809 struct drv_struct, member))), \
2810 struct drv_struct, member)
2811
2812void ib_dealloc_device(struct ib_device *device);
2813
2814void ib_get_device_fw_str(struct ib_device *device, char *str);
2815
2816int ib_register_device(struct ib_device *device, const char *name,
2817 struct device *dma_device);
2818void ib_unregister_device(struct ib_device *device);
2819void ib_unregister_driver(enum rdma_driver_id driver_id);
2820void ib_unregister_device_and_put(struct ib_device *device);
2821void ib_unregister_device_queued(struct ib_device *ib_dev);
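
/*
 * Minimal registration sketch (illustrative only; "mydrv_dev", its "ibdev"
 * member, "mydrv_ops" and "pdev" are hypothetical):
 *
 *	struct mydrv_dev *dev = ib_alloc_device(mydrv_dev, ibdev);
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	ib_set_device_ops(&dev->ibdev, &mydrv_ops);
 *	ret = ib_register_device(&dev->ibdev, "mydrv%d", &pdev->dev);
 *	if (ret)
 *		ib_dealloc_device(&dev->ibdev);
 */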
2822
2823int ib_register_client(struct ib_client *client);
2824void ib_unregister_client(struct ib_client *client);
2825
2826void __rdma_block_iter_start(struct ib_block_iter *biter,
2827 struct scatterlist *sglist,
2828 unsigned int nents,
2829 unsigned long pgsz);
2830bool __rdma_block_iter_next(struct ib_block_iter *biter);
2831
2832/**
2833 * rdma_block_iter_dma_address - get the aligned dma address of the current
2834 * block held by the block iterator.
2835 * @biter: block iterator holding the memory block
2836 */
2837static inline dma_addr_t
2838rdma_block_iter_dma_address(struct ib_block_iter *biter)
2839{
2840 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2841}
2842
2843/**
2844 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2845 * @sglist: sglist to iterate over
2846 * @biter: block iterator holding the memory block
2847 * @nents: maximum number of sg entries to iterate over
2848 * @pgsz: best HW supported page size to use
2849 *
2850 * Callers may use rdma_block_iter_dma_address() to get each
2851 * block's aligned DMA address.
2852 */
2853#define rdma_for_each_block(sglist, biter, nents, pgsz) \
2854 for (__rdma_block_iter_start(biter, sglist, nents, \
2855 pgsz); \
2856 __rdma_block_iter_next(biter);)
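
/*
 * Usage sketch (illustrative only; "pas" and "i" are hypothetical): walk a
 * DMA-mapped SGL in aligned blocks of the chosen page size and record each
 * block's starting DMA address.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, PAGE_SIZE)
 *		pas[i++] = rdma_block_iter_dma_address(&biter);
 */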
2857
2858/**
2859 * ib_get_client_data - Get IB client context
2860 * @device:Device to get context for
2861 * @client:Client to get context for
2862 *
2863 * ib_get_client_data() returns the client context data set with
2864 * ib_set_client_data(). This can only be called while the client is
2865 * registered to the device, once the ib_client remove() callback returns this
2866 * cannot be called.
2867 */
2868static inline void *ib_get_client_data(struct ib_device *device,
2869 struct ib_client *client)
2870{
2871 return xa_load(&device->client_data, client->client_id);
2872}
2873void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2874 void *data);
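
/*
 * Typical client pattern (illustrative sketch; "myclient" names are
 * hypothetical): stash per-device state from the add() callback and look it
 * up later with ib_get_client_data().
 *
 *	static int myclient_add(struct ib_device *ibdev)
 *	{
 *		struct myclient_dev *cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
 *
 *		if (!cdev)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &myclient, cdev);
 *		return 0;
 *	}
 */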
2875void ib_set_device_ops(struct ib_device *device,
2876 const struct ib_device_ops *ops);
2877
2878int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2879 unsigned long pfn, unsigned long size, pgprot_t prot,
2880 struct rdma_user_mmap_entry *entry);
2881int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2882 struct rdma_user_mmap_entry *entry,
2883 size_t length);
2884int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2885 struct rdma_user_mmap_entry *entry,
2886 size_t length, u32 min_pgoff,
2887 u32 max_pgoff);
2888
2889struct rdma_user_mmap_entry *
2890rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2891 unsigned long pgoff);
2892struct rdma_user_mmap_entry *
2893rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2894 struct vm_area_struct *vma);
2895void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2896
2897void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
2898
2899static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2900{
2901 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2902}
2903
2904static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2905{
2906 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2907}
2908
2909static inline bool ib_is_buffer_cleared(const void __user *p,
2910 size_t len)
2911{
2912 bool ret;
2913 u8 *buf;
2914
2915 if (len > USHRT_MAX)
2916 return false;
2917
2918 buf = memdup_user(p, len);
2919 if (IS_ERR(buf))
2920 return false;
2921
2922 ret = !memchr_inv(buf, 0, len);
2923 kfree(buf);
2924 return ret;
2925}
2926
2927static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2928 size_t offset,
2929 size_t len)
2930{
2931 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2932}
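
/*
 * Typical driver pattern (illustrative sketch, error handling abbreviated):
 * reject requests whose unknown trailing bytes are non-zero, then copy the
 * known parts of the command and response.
 *
 *	if (udata->inlen > sizeof(cmd) &&
 *	    !ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 *	ret = ib_copy_from_udata(&cmd, udata, min(sizeof(cmd), udata->inlen));
 *	...
 *	ret = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
 */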
2933
2934/**
2935 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2936 * contains all required attributes and no attributes not allowed for
2937 * the given QP state transition.
2938 * @cur_state: Current QP state
2939 * @next_state: Next QP state
2940 * @type: QP type
2941 * @mask: Mask of supplied QP attributes
2942 *
2943 * This function is a helper function that a low-level driver's
2944 * modify_qp method can use to validate the consumer's input. It
2945 * checks that cur_state and next_state are valid QP states, that a
2946 * transition from cur_state to next_state is allowed by the IB spec,
2947 * and that the attribute mask supplied is allowed for the transition.
2948 */
2949bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2950 enum ib_qp_type type, enum ib_qp_attr_mask mask);
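
/*
 * Typical use in a driver's ->modify_qp() (illustrative sketch only):
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */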
2951
2952void ib_register_event_handler(struct ib_event_handler *event_handler);
2953void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2954void ib_dispatch_event(const struct ib_event *event);
2955
2956int ib_query_port(struct ib_device *device,
2957 u32 port_num, struct ib_port_attr *port_attr);
2958
2959enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2960 u32 port_num);
2961
2962/**
2963 * rdma_cap_ib_switch - Check if the device is IB switch
2964 * @device: Device to check
2965 *
2966 * The device driver is responsible for setting the is_switch bit
2967 * in the ib_device structure at init time.
2968 *
2969 * Return: true if the device is IB switch.
2970 */
2971static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2972{
2973 return device->is_switch;
2974}
2975
2976/**
2977 * rdma_start_port - Return the first valid port number for the device
2978 * specified
2979 *
2980 * @device: Device to be checked
2981 *
2982 * Return start port number
2983 */
2984static inline u32 rdma_start_port(const struct ib_device *device)
2985{
2986 return rdma_cap_ib_switch(device) ? 0 : 1;
2987}
2988
2989/**
2990 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
2991 * @device - The struct ib_device * to iterate over
2992 * @iter - The unsigned int to store the port number
2993 */
2994#define rdma_for_each_port(device, iter) \
2995 for (iter = rdma_start_port(device + \
2996 BUILD_BUG_ON_ZERO(!__same_type(u32, \
2997 iter))); \
2998 iter <= rdma_end_port(device); iter++)
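
/*
 * Usage sketch (illustrative only; "setup_roce_port" is a hypothetical
 * helper): visit every valid port of a device.
 *
 *	u32 port;
 *
 *	rdma_for_each_port(ibdev, port)
 *		if (rdma_protocol_roce(ibdev, port))
 *			setup_roce_port(ibdev, port);
 */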
2999
3000/**
3001 * rdma_end_port - Return the last valid port number for the device
3002 * specified
3003 *
3004 * @device: Device to be checked
3005 *
3006 * Return last port number
3007 */
3008static inline u32 rdma_end_port(const struct ib_device *device)
3009{
3010 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3011}
3012
3013static inline int rdma_is_port_valid(const struct ib_device *device,
3014 unsigned int port)
3015{
3016 return (port >= rdma_start_port(device) &&
3017 port <= rdma_end_port(device));
3018}
3019
3020static inline bool rdma_is_grh_required(const struct ib_device *device,
3021 u32 port_num)
3022{
3023 return device->port_data[port_num].immutable.core_cap_flags &
3024 RDMA_CORE_PORT_IB_GRH_REQUIRED;
3025}
3026
3027static inline bool rdma_protocol_ib(const struct ib_device *device,
3028 u32 port_num)
3029{
3030 return device->port_data[port_num].immutable.core_cap_flags &
3031 RDMA_CORE_CAP_PROT_IB;
3032}
3033
3034static inline bool rdma_protocol_roce(const struct ib_device *device,
3035 u32 port_num)
3036{
3037 return device->port_data[port_num].immutable.core_cap_flags &
3038 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3039}
3040
3041static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
3042 u32 port_num)
3043{
3044 return device->port_data[port_num].immutable.core_cap_flags &
3045 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3046}
3047
3048static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
3049 u32 port_num)
3050{
3051 return device->port_data[port_num].immutable.core_cap_flags &
3052 RDMA_CORE_CAP_PROT_ROCE;
3053}
3054
3055static inline bool rdma_protocol_iwarp(const struct ib_device *device,
3056 u32 port_num)
3057{
3058 return device->port_data[port_num].immutable.core_cap_flags &
3059 RDMA_CORE_CAP_PROT_IWARP;
3060}
3061
3062static inline bool rdma_ib_or_roce(const struct ib_device *device,
3063 u32 port_num)
3064{
3065 return rdma_protocol_ib(device, port_num) ||
3066 rdma_protocol_roce(device, port_num);
3067}
3068
3069static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
3070 u32 port_num)
3071{
3072 return device->port_data[port_num].immutable.core_cap_flags &
3073 RDMA_CORE_CAP_PROT_RAW_PACKET;
3074}
3075
3076static inline bool rdma_protocol_usnic(const struct ib_device *device,
3077 u32 port_num)
3078{
3079 return device->port_data[port_num].immutable.core_cap_flags &
3080 RDMA_CORE_CAP_PROT_USNIC;
3081}
3082
3083/**
3084 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
3085 * Management Datagrams.
3086 * @device: Device to check
3087 * @port_num: Port number to check
3088 *
3089 * Management Datagrams (MAD) are a required part of the InfiniBand
3090 * specification and are supported on all InfiniBand devices. A slightly
3091 * extended version is also supported on OPA interfaces.
3092 *
3093 * Return: true if the port supports sending/receiving of MAD packets.
3094 */
3095static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3096{
3097 return device->port_data[port_num].immutable.core_cap_flags &
3098 RDMA_CORE_CAP_IB_MAD;
3099}
3100
3101/**
3102 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3103 * Management Datagrams.
3104 * @device: Device to check
3105 * @port_num: Port number to check
3106 *
3107 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3108 * datagrams with their own versions. These OPA MADs share many but not all of
3109 * the characteristics of InfiniBand MADs.
3110 *
3111 * OPA MADs differ in the following ways:
3112 *
3113 * 1) MADs are variable size up to 2K
3114 * IBTA defined MADs remain fixed at 256 bytes
3115 * 2) OPA SMPs must carry valid PKeys
3116 * 3) OPA SMP packets are a different format
3117 *
3118 * Return: true if the port supports OPA MAD packet formats.
3119 */
3120static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
3121{
3122 return device->port_data[port_num].immutable.core_cap_flags &
3123 RDMA_CORE_CAP_OPA_MAD;
3124}
3125
3126/**
3127 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3128 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3129 * @device: Device to check
3130 * @port_num: Port number to check
3131 *
3132 * Each InfiniBand node is required to provide a Subnet Management Agent
3133 * that the subnet manager can access. Prior to the fabric being fully
3134 * configured by the subnet manager, the SMA is accessed via a well known
3135 * interface called the Subnet Management Interface (SMI). This interface
3136 * uses directed route packets to communicate with the SM to get around the
3137 * chicken and egg problem of the SM needing to know what's on the fabric
3138 * in order to configure the fabric, and needing to configure the fabric in
3139 * order to send packets to the devices on the fabric. These directed
3140 * route packets do not need the fabric fully configured in order to reach
3141 * their destination. The SMI is the only method allowed to send
3142 * directed route packets on an InfiniBand fabric.
3143 *
3144 * Return: true if the port provides an SMI.
3145 */
3146static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
3147{
3148 return device->port_data[port_num].immutable.core_cap_flags &
3149 RDMA_CORE_CAP_IB_SMI;
3150}
3151
3152/**
3153 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3154 * Communication Manager.
3155 * @device: Device to check
3156 * @port_num: Port number to check
3157 *
3158 * The InfiniBand Communication Manager is one of many pre-defined General
3159 * Service Agents (GSA) that are accessed via the General Service
3160 * Interface (GSI). Its role is to facilitate the establishment of connections
3161 * between nodes as well as other management related tasks for established
3162 * connections.
3163 *
3164 * Return: true if the port supports an IB CM (this does not guarantee that
3165 * a CM is actually running however).
3166 */
3167static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
3168{
3169 return device->port_data[port_num].immutable.core_cap_flags &
3170 RDMA_CORE_CAP_IB_CM;
3171}
3172
3173/**
3174 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3175 * Communication Manager.
3176 * @device: Device to check
3177 * @port_num: Port number to check
3178 *
3179 * Similar to above, but specific to iWARP connections which have a different
3180 * management protocol than InfiniBand.
3181 *
3182 * Return: true if the port supports an iWARP CM (this does not guarantee that
3183 * a CM is actually running however).
3184 */
3185static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
3186{
3187 return device->port_data[port_num].immutable.core_cap_flags &
3188 RDMA_CORE_CAP_IW_CM;
3189}
3190
3191/**
3192 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3193 * Subnet Administration.
3194 * @device: Device to check
3195 * @port_num: Port number to check
3196 *
3197 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3198 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
3199 * fabrics, devices should resolve routes to other hosts by contacting the
3200 * SA to query the proper route.
3201 *
3202 * Return: true if the port should act as a client to the fabric Subnet
3203 * Administration interface. This does not imply that the SA service is
3204 * running locally.
3205 */
3206static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3207{
3208 return device->port_data[port_num].immutable.core_cap_flags &
3209 RDMA_CORE_CAP_IB_SA;
3210}
3211
3212/**
3213 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3214 * Multicast.
3215 * @device: Device to check
3216 * @port_num: Port number to check
3217 *
3218 * InfiniBand multicast registration is more complex than normal IPv4 or
3219 * IPv6 multicast registration. Each Host Channel Adapter must register
3220 * with the Subnet Manager when it wishes to join a multicast group. It
3221 * should do so only once regardless of how many queue pairs it subscribes
3222 * to this group. And it should leave the group only after all queue pairs
3223 * attached to the group have been detached.
3224 *
3225 * Return: true if the port must undertake the additional administrative
3226 * overhead of registering/unregistering with the SM and tracking of the
3227 * total number of queue pairs attached to the multicast group.
3228 */
3229static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
3230 u32 port_num)
3231{
3232 return rdma_cap_ib_sa(device, port_num);
3233}
3234
3235/**
3236 * rdma_cap_af_ib - Check if the port of device has the capability
3237 * Native Infiniband Address.
3238 * @device: Device to check
3239 * @port_num: Port number to check
3240 *
3241 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3242 * GID. RoCE uses a different mechanism, but still generates a GID via
3243 * a prescribed mechanism and port specific data.
3244 *
3245 * Return: true if the port uses a GID address to identify devices on the
3246 * network.
3247 */
3248static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
3249{
3250 return device->port_data[port_num].immutable.core_cap_flags &
3251 RDMA_CORE_CAP_AF_IB;
3252}
3253
3254/**
3255 * rdma_cap_eth_ah - Check if the port of device has the capability
3256 * Ethernet Address Handle.
3257 * @device: Device to check
3258 * @port_num: Port number to check
3259 *
3260 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3261 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3262 * port. Normally, packet headers are generated by the sending host
3263 * adapter, but when sending connectionless datagrams, we must manually
3264 * inject the proper headers for the fabric we are communicating over.
3265 *
3266 * Return: true if we are running as a RoCE port and must force the
3267 * addition of a Global Route Header built from our Ethernet Address
3268 * Handle into our header list for connectionless packets.
3269 */
3270static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3271{
3272 return device->port_data[port_num].immutable.core_cap_flags &
3273 RDMA_CORE_CAP_ETH_AH;
3274}
3275
3276/**
3277 * rdma_cap_opa_ah - Check if the port of device supports
3278 * OPA Address handles
3279 * @device: Device to check
3280 * @port_num: Port number to check
3281 *
3282 * Return: true if we are running on an OPA device which supports
3283 * the extended OPA addressing.
3284 */
3285static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
3286{
3287 return (device->port_data[port_num].immutable.core_cap_flags &
3288 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3289}
3290
3291/**
3292 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3293 *
3294 * @device: Device
3295 * @port_num: Port number
3296 *
3297 * This MAD size includes the MAD headers and MAD payload. No other headers
3298 * are included.
3299 *
3300 * Return the max MAD size required by the Port. Will return 0 if the port
3301 * does not support MADs
3302 */
3303static inline size_t rdma_max_mad_size(const struct ib_device *device,
3304 u32 port_num)
3305{
3306 return device->port_data[port_num].immutable.max_mad_size;
3307}
3308
3309/**
3310 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3311 * @device: Device to check
3312 * @port_num: Port number to check
3313 *
3314 * RoCE GID table mechanism manages the various GIDs for a device.
3315 *
3316 * NOTE: if allocating the port's GID table has failed, this call will still
3317 * return true, but any RoCE GID table API will fail.
3318 *
3319 * Return: true if the port uses RoCE GID table mechanism in order to manage
3320 * its GIDs.
3321 */
3322static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3323 u32 port_num)
3324{
3325 return rdma_protocol_roce(device, port_num) &&
3326 device->ops.add_gid && device->ops.del_gid;
3327}
3328
3329/*
3330 * Check if the device supports READ W/ INVALIDATE.
3331 */
3332static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3333{
3334 /*
3335 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
3336 * has support for it yet.
3337 */
3338 return rdma_protocol_iwarp(dev, port_num);
3339}
3340
3341/**
3342 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3343 * @device: Device
3344 * @port_num: 1 based Port number
3345 *
3346 * Return: true if the port is an Intel OPA port, false if not.
3347 */
3348static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3349 u32 port_num)
3350{
3351 return (device->port_data[port_num].immutable.core_cap_flags &
3352 RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3353}
3354
3355/**
3356 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3357 * @device: Device
3358 * @port_num: Port number
3359 * @mtu: enum value of MTU
3360 *
3361 * Return the MTU size supported by the port as an integer value. Will return
3362 * -1 if enum value of mtu is not supported.
3363 */
3364static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3365 int mtu)
3366{
3367 if (rdma_core_cap_opa_port(device, port))
3368 return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3369 else
3370 return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3371}
3372
3373/**
3374 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3375 * @device: Device
3376 * @port_num: Port number
3377 * @attr: port attribute
3378 *
3379 * Return the MTU size supported by the port as an integer value.
3380 */
3381static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3382 struct ib_port_attr *attr)
3383{
3384 if (rdma_core_cap_opa_port(device, port))
3385 return attr->phys_mtu;
3386 else
3387 return ib_mtu_enum_to_int(attr->max_mtu);
3388}
3389
3390int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3391 int state);
3392int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3393 struct ifla_vf_info *info);
3394int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3395 struct ifla_vf_stats *stats);
3396int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3397 struct ifla_vf_guid *node_guid,
3398 struct ifla_vf_guid *port_guid);
3399int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3400 int type);
3401
3402int ib_query_pkey(struct ib_device *device,
3403 u32 port_num, u16 index, u16 *pkey);
3404
3405int ib_modify_device(struct ib_device *device,
3406 int device_modify_mask,
3407 struct ib_device_modify *device_modify);
3408
3409int ib_modify_port(struct ib_device *device,
3410 u32 port_num, int port_modify_mask,
3411 struct ib_port_modify *port_modify);
3412
3413int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3414 u32 *port_num, u16 *index);
3415
3416int ib_find_pkey(struct ib_device *device,
3417 u32 port_num, u16 pkey, u16 *index);
3418
3419enum ib_pd_flags {
3420 /*
3421 * Create a memory registration for all memory in the system and place
3422 * the rkey for it into pd->unsafe_global_rkey. This can be used by
3423 * ULPs to avoid the overhead of dynamic MRs.
3424 *
3425 * This flag is generally considered unsafe and must only be used in
3426 * extremely trusted environments. Every use of it will log a warning
3427 * in the kernel log.
3428 */
3429 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3430};
3431
3432struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3433 const char *caller);
3434
3435/**
3436 * ib_alloc_pd - Allocates an unused protection domain.
3437 * @device: The device on which to allocate the protection domain.
3438 * @flags: protection domain flags
3439 *
3440 * A protection domain object provides an association between QPs, shared
3441 * receive queues, address handles, memory regions, and memory windows.
3442 *
3443 * Every PD has a local_dma_lkey which can be used as the lkey value for local
3444 * memory operations.
3445 */
3446#define ib_alloc_pd(device, flags) \
3447 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3448
3449int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3450
3451/**
3452 * ib_dealloc_pd - Deallocate kernel PD
3453 * @pd: The protection domain
3454 *
3455 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3456 */
3457static inline void ib_dealloc_pd(struct ib_pd *pd)
3458{
3459 int ret = ib_dealloc_pd_user(pd, NULL);
3460
3461 WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3462}
3463
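/*
 * A minimal usage sketch, assuming a kernel ULP that already holds a valid
 * ib_device pointer (the "dev" name below is hypothetical):
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(dev, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	(create QPs, CQs and MRs under this PD, using pd->local_dma_lkey
 *	 for local buffers)
 *
 *	ib_dealloc_pd(pd);
 */
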
3464enum rdma_create_ah_flags {
3465 /* In a sleepable context */
3466 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3467};
3468
3469/**
3470 * rdma_create_ah - Creates an address handle for the given address vector.
3471 * @pd: The protection domain associated with the address handle.
3472 * @ah_attr: The attributes of the address vector.
3473 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3474 *
3475 * The address handle is used to reference a local or global destination
3476 * in all UD QP post sends.
3477 */
3478struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3479 u32 flags);
3480
3481/**
3482 * rdma_create_user_ah - Creates an address handle for the given address vector.
3483 * It resolves the destination MAC address for an AH attribute of RoCE type.
3484 * @pd: The protection domain associated with the address handle.
3485 * @ah_attr: The attributes of the address vector.
3486 * @udata: pointer to user's input output buffer information needed by
3487 * the provider driver.
3488 *
3489 * It returns a valid address handle on success and an ERR_PTR on error.
3490 * The address handle is used to reference a local or global destination
3491 * in all UD QP post sends.
3492 */
3493struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3494 struct rdma_ah_attr *ah_attr,
3495 struct ib_udata *udata);
3496/**
3497 * ib_get_gids_from_rdma_hdr - Get the sgid and dgid from a GRH or IPv4
3498 * header.
3499 * @hdr: the L3 header to parse
3500 * @net_type: type of header to parse
3501 * @sgid: place to store source gid
3502 * @dgid: place to store destination gid
3503 */
3504int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3505 enum rdma_network_type net_type,
3506 union ib_gid *sgid, union ib_gid *dgid);
3507
3508/**
3509 * ib_get_rdma_header_version - Get the header version
3510 * @hdr: the L3 header to parse
3511 */
3512int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3513
3514/**
3515 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3516 * work completion.
3517 * @device: Device on which the received message arrived.
3518 * @port_num: Port on which the received message arrived.
3519 * @wc: Work completion associated with the received message.
3520 * @grh: References the received global route header. This parameter is
3521 * ignored unless the work completion indicates that the GRH is valid.
3522 * @ah_attr: Returned attributes that can be used when creating an address
3523 * handle for replying to the message.
3524 * When ib_init_ah_attr_from_wc() returns success,
3525 * (a) for the IB link layer it optionally contains a reference to the SGID
3526 * attribute when a GRH is present.
3527 * (b) for the RoCE link layer it contains a reference to the SGID attribute.
3528 * User must invoke rdma_destroy_ah_attr() to release the reference to SGID
3529 * attributes which are initialized using ib_init_ah_attr_from_wc().
3530 *
3531 */
3532int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3533 const struct ib_wc *wc, const struct ib_grh *grh,
3534 struct rdma_ah_attr *ah_attr);
3535
3536/**
3537 * ib_create_ah_from_wc - Creates an address handle associated with the
3538 * sender of the specified work completion.
3539 * @pd: The protection domain associated with the address handle.
3540 * @wc: Work completion information associated with a received message.
3541 * @grh: References the received global route header. This parameter is
3542 * ignored unless the work completion indicates that the GRH is valid.
3543 * @port_num: The outbound port number to associate with the address.
3544 *
3545 * The address handle is used to reference a local or global destination
3546 * in all UD QP post sends.
3547 */
3548struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3549 const struct ib_grh *grh, u32 port_num);
3550
3551/**
3552 * rdma_modify_ah - Modifies the address vector associated with an address
3553 * handle.
3554 * @ah: The address handle to modify.
3555 * @ah_attr: The new address vector attributes to associate with the
3556 * address handle.
3557 */
3558int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3559
3560/**
3561 * rdma_query_ah - Queries the address vector associated with an address
3562 * handle.
3563 * @ah: The address handle to query.
3564 * @ah_attr: The address vector attributes associated with the address
3565 * handle.
3566 */
3567int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3568
3569enum rdma_destroy_ah_flags {
3570 /* In a sleepable context */
3571 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3572};
3573
3574/**
3575 * rdma_destroy_ah_user - Destroys an address handle.
3576 * @ah: The address handle to destroy.
3577 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3578 * @udata: Valid user data or NULL for kernel objects
3579 */
3580int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3581
3582/**
3583 * rdma_destroy_ah - Destroys a kernel address handle.
3584 * @ah: The address handle to destroy.
3585 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3586 *
3587 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3588 */
3589static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3590{
3591 int ret = rdma_destroy_ah_user(ah, flags, NULL);
3592
3593 WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3594}
3595
3596struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3597 struct ib_srq_init_attr *srq_init_attr,
3598 struct ib_usrq_object *uobject,
3599 struct ib_udata *udata);
3600static inline struct ib_srq *
3601ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3602{
3603 if (!pd->device->ops.create_srq)
3604 return ERR_PTR(-EOPNOTSUPP);
3605
3606 return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3607}
3608
3609/**
3610 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3611 * @srq: The SRQ to modify.
3612 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
3613 * the current values of selected SRQ attributes are returned.
3614 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3615 * are being modified.
3616 *
3617 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3618 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3619 * the number of receives queued drops below the limit.
3620 */
3621int ib_modify_srq(struct ib_srq *srq,
3622 struct ib_srq_attr *srq_attr,
3623 enum ib_srq_attr_mask srq_attr_mask);
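
/*
 * For example, arming the SRQ limit event amounts to setting srq_limit and
 * passing IB_SRQ_LIMIT in the mask (a sketch; "srq" is assumed to be a valid
 * SRQ created on a device that supports the limit event):
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *	int ret;
 *
 *	ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 */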
3624
3625/**
3626 * ib_query_srq - Returns the attribute list and current values for the
3627 * specified SRQ.
3628 * @srq: The SRQ to query.
3629 * @srq_attr: The attributes of the specified SRQ.
3630 */
3631int ib_query_srq(struct ib_srq *srq,
3632 struct ib_srq_attr *srq_attr);
3633
3634/**
3635 * ib_destroy_srq_user - Destroys the specified SRQ.
3636 * @srq: The SRQ to destroy.
3637 * @udata: Valid user data or NULL for kernel objects
3638 */
3639int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3640
3641/**
3642 * ib_destroy_srq - Destroys the specified kernel SRQ.
3643 * @srq: The SRQ to destroy.
3644 *
3645 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3646 */
3647static inline void ib_destroy_srq(struct ib_srq *srq)
3648{
3649 int ret = ib_destroy_srq_user(srq, NULL);
3650
3651 WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3652}
3653
3654/**
3655 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3656 * @srq: The SRQ to post the work request on.
3657 * @recv_wr: A list of work requests to post on the receive queue.
3658 * @bad_recv_wr: On an immediate failure, this parameter will reference
3659 * the work request that failed to be posted on the SRQ.
3660 */
3661static inline int ib_post_srq_recv(struct ib_srq *srq,
3662 const struct ib_recv_wr *recv_wr,
3663 const struct ib_recv_wr **bad_recv_wr)
3664{
3665 const struct ib_recv_wr *dummy;
3666
3667 return srq->device->ops.post_srq_recv(srq, recv_wr,
3668 bad_recv_wr ? : &dummy);
3669}
3670
3671struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
3672 struct ib_qp_init_attr *qp_init_attr,
3673 const char *caller);
3674static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3675 struct ib_qp_init_attr *init_attr)
3676{
3677 return ib_create_named_qp(pd, init_attr, KBUILD_MODNAME);
3678}
3679
3680/**
3681 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3682 * @qp: The QP to modify.
3683 * @attr: On input, specifies the QP attributes to modify. On output,
3684 * the current values of selected QP attributes are returned.
3685 * @attr_mask: A bit-mask used to specify which attributes of the QP
3686 * are being modified.
3687 * @udata: pointer to user's input output buffer information
3688 *
3689 * It returns 0 on success and an appropriate error code on error.
3690 */
3691int ib_modify_qp_with_udata(struct ib_qp *qp,
3692 struct ib_qp_attr *attr,
3693 int attr_mask,
3694 struct ib_udata *udata);
3695
3696/**
3697 * ib_modify_qp - Modifies the attributes for the specified QP and then
3698 * transitions the QP to the given state.
3699 * @qp: The QP to modify.
3700 * @qp_attr: On input, specifies the QP attributes to modify. On output,
3701 * the current values of selected QP attributes are returned.
3702 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3703 * are being modified.
3704 */
3705int ib_modify_qp(struct ib_qp *qp,
3706 struct ib_qp_attr *qp_attr,
3707 int qp_attr_mask);
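
/*
 * A common minimal use of ib_modify_qp() is moving a QP to the error state so
 * that outstanding work requests are flushed (a sketch; "qp" is assumed to be
 * a valid QP owned by the caller):
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
 */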
3708
3709/**
3710 * ib_query_qp - Returns the attribute list and current values for the
3711 * specified QP.
3712 * @qp: The QP to query.
3713 * @qp_attr: The attributes of the specified QP.
3714 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3715 * @qp_init_attr: Additional attributes of the selected QP.
3716 *
3717 * The qp_attr_mask may be used to limit the query to gathering only the
3718 * selected attributes.
3719 */
3720int ib_query_qp(struct ib_qp *qp,
3721 struct ib_qp_attr *qp_attr,
3722 int qp_attr_mask,
3723 struct ib_qp_init_attr *qp_init_attr);
3724
3725/**
3726 * ib_destroy_qp_user - Destroys the specified QP.
3727 * @qp: The QP to destroy.
3728 * @udata: Valid udata or NULL for kernel objects
3729 */
3730int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3731
3732/**
3733 * ib_destroy_qp - Destroys the specified kernel QP.
3734 * @qp: The QP to destroy.
3735 *
3736 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3737 */
3738static inline int ib_destroy_qp(struct ib_qp *qp)
3739{
3740 return ib_destroy_qp_user(qp, NULL);
3741}
3742
3743/**
3744 * ib_open_qp - Obtain a reference to an existing sharable QP.
3745 * @xrcd: XRC domain
3746 * @qp_open_attr: Attributes identifying the QP to open.
3747 *
3748 * Returns a reference to a sharable QP.
3749 */
3750struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3751 struct ib_qp_open_attr *qp_open_attr);
3752
3753/**
3754 * ib_close_qp - Release an external reference to a QP.
3755 * @qp: The QP handle to release
3756 *
3757 * The opened QP handle is released by the caller. The underlying
3758 * shared QP is not destroyed until all internal references are released.
3759 */
3760int ib_close_qp(struct ib_qp *qp);
3761
3762/**
3763 * ib_post_send - Posts a list of work requests to the send queue of
3764 * the specified QP.
3765 * @qp: The QP to post the work request on.
3766 * @send_wr: A list of work requests to post on the send queue.
3767 * @bad_send_wr: On an immediate failure, this parameter will reference
3768 * the work request that failed to be posted on the QP.
3769 *
3770 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3771 * error is returned, the QP state shall not be affected,
3772 * ib_post_send() will return an immediate error after queueing any
3773 * earlier work requests in the list.
3774 */
3775static inline int ib_post_send(struct ib_qp *qp,
3776 const struct ib_send_wr *send_wr,
3777 const struct ib_send_wr **bad_send_wr)
3778{
3779 const struct ib_send_wr *dummy;
3780
3781 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3782}
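
/*
 * A minimal send sketch, assuming a connected QP, a PD, and a buffer that was
 * already mapped with ib_dma_map_single() ("qp", "pd", "dma_addr" and "len"
 * are hypothetical caller-owned names):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */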
3783
3784/**
3785 * ib_post_recv - Posts a list of work requests to the receive queue of
3786 * the specified QP.
3787 * @qp: The QP to post the work request on.
3788 * @recv_wr: A list of work requests to post on the receive queue.
3789 * @bad_recv_wr: On an immediate failure, this parameter will reference
3790 * the work request that failed to be posted on the QP.
3791 */
3792static inline int ib_post_recv(struct ib_qp *qp,
3793 const struct ib_recv_wr *recv_wr,
3794 const struct ib_recv_wr **bad_recv_wr)
3795{
3796 const struct ib_recv_wr *dummy;
3797
3798 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3799}
3800
3801struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3802 int comp_vector, enum ib_poll_context poll_ctx,
3803 const char *caller);
3804static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3805 int nr_cqe, int comp_vector,
3806 enum ib_poll_context poll_ctx)
3807{
3808 return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3809 KBUILD_MODNAME);
3810}
3811
3812struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3813 int nr_cqe, enum ib_poll_context poll_ctx,
3814 const char *caller);
3815
3816/**
3817 * ib_alloc_cq_any - Allocate a kernel CQ
3818 * @dev: The IB device
3819 * @private: Private data attached to the CQE
3820 * @nr_cqe: Number of CQEs in the CQ
3821 * @poll_ctx: Context used for polling the CQ
3822 */
3823static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3824 void *private, int nr_cqe,
3825 enum ib_poll_context poll_ctx)
3826{
3827 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3828 KBUILD_MODNAME);
3829}
3830
3831void ib_free_cq(struct ib_cq *cq);
3832int ib_process_cq_direct(struct ib_cq *cq, int budget);
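
/*
 * A typical kernel consumer pairs the CQ allocation helpers with ib_free_cq()
 * (a sketch; "dev" and the CQE count are hypothetical):
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq_any(dev, NULL, 128, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 *	(attach the CQ to QPs, handle completions via the chosen poll context)
 *
 *	ib_free_cq(cq);
 */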
3833
3834/**
3835 * ib_create_cq - Creates a CQ on the specified device.
3836 * @device: The device on which to create the CQ.
3837 * @comp_handler: A user-specified callback that is invoked when a
3838 * completion event occurs on the CQ.
3839 * @event_handler: A user-specified callback that is invoked when an
3840 * asynchronous event not associated with a completion occurs on the CQ.
3841 * @cq_context: Context associated with the CQ returned to the user via
3842 * the associated completion and event handlers.
3843 * @cq_attr: The attributes the CQ should be created upon.
3844 *
3845 * Users can examine the cq structure to determine the actual CQ size.
3846 */
3847struct ib_cq *__ib_create_cq(struct ib_device *device,
3848 ib_comp_handler comp_handler,
3849 void (*event_handler)(struct ib_event *, void *),
3850 void *cq_context,
3851 const struct ib_cq_init_attr *cq_attr,
3852 const char *caller);
3853#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3854 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3855
3856/**
3857 * ib_resize_cq - Modifies the capacity of the CQ.
3858 * @cq: The CQ to resize.
3859 * @cqe: The minimum size of the CQ.
3860 *
3861 * Users can examine the cq structure to determine the actual CQ size.
3862 */
3863int ib_resize_cq(struct ib_cq *cq, int cqe);
3864
3865/**
3866 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3867 * @cq: The CQ to modify.
3868 * @cq_count: number of CQEs that will trigger an event
3869 * @cq_period: max period of time in usec before triggering an event
3870 *
3871 */
3872int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3873
3874/**
3875 * ib_destroy_cq_user - Destroys the specified CQ.
3876 * @cq: The CQ to destroy.
3877 * @udata: Valid user data or NULL for kernel objects
3878 */
3879int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3880
3881/**
3882 * ib_destroy_cq - Destroys the specified kernel CQ.
3883 * @cq: The CQ to destroy.
3884 *
3885 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3886 */
3887static inline void ib_destroy_cq(struct ib_cq *cq)
3888{
3889 int ret = ib_destroy_cq_user(cq, NULL);
3890
3891 WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3892}
3893
3894/**
3895 * ib_poll_cq - poll a CQ for completion(s)
3896 * @cq: the CQ being polled
3897 * @num_entries: maximum number of completions to return
3898 * @wc: array of at least @num_entries &struct ib_wc where completions
3899 * will be returned
3900 *
3901 * Poll a CQ for (possibly multiple) completions. If the return value
3902 * is < 0, an error occurred. If the return value is >= 0, it is the
3903 * number of completions returned. If the return value is
3904 * non-negative and < num_entries, then the CQ was emptied.
3905 */
3906static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3907 struct ib_wc *wc)
3908{
3909 return cq->device->ops.poll_cq(cq, num_entries, wc);
3910}
3911
3912/**
3913 * ib_req_notify_cq - Request completion notification on a CQ.
3914 * @cq: The CQ to generate an event for.
3915 * @flags:
3916 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3917 * to request an event on the next solicited event or next work
3918 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3919 * may also be |ed in to request a hint about missed events, as
3920 * described below.
3921 *
3922 * Return Value:
3923 * < 0 means an error occurred while requesting notification
3924 * == 0 means notification was requested successfully, and if
3925 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3926 * were missed and it is safe to wait for another event. In
3927 * this case it is guaranteed that any work completions added
3928 * to the CQ since the last CQ poll will trigger a completion
3929 * notification event.
3930 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3931 * in. It means that the consumer must poll the CQ again to
3932 * make sure it is empty to avoid missing an event because of a
3933 * race between requesting notification and an entry being
3934 * added to the CQ. This return value means it is possible
3935 * (but not guaranteed) that a work completion has been added
3936 * to the CQ since the last poll without triggering a
3937 * completion notification event.
3938 */
3939static inline int ib_req_notify_cq(struct ib_cq *cq,
3940 enum ib_cq_notify_flags flags)
3941{
3942 return cq->device->ops.req_notify_cq(cq, flags);
3943}
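
/*
 * The return-value convention above leads to the usual re-arm loop for
 * consumers that poll the CQ themselves (a sketch; "wc" is a caller-provided
 * array of struct ib_wc and "process_completions" is a hypothetical helper):
 *
 *	int n;
 *
 *	do {
 *		while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *			process_completions(wc, n);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */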
3944
3945struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
3946 int comp_vector_hint,
3947 enum ib_poll_context poll_ctx);
3948
3949void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
3950
3951/*
3952 * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to
3953 * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
3954 * address into the dma address.
3955 */
3956static inline bool ib_uses_virt_dma(struct ib_device *dev)
3957{
3958 return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
3959}
3960
3961/**
3962 * ib_dma_mapping_error - check a DMA addr for error
3963 * @dev: The device for which the dma_addr was created
3964 * @dma_addr: The DMA address to check
3965 */
3966static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3967{
3968 if (ib_uses_virt_dma(dev))
3969 return 0;
3970 return dma_mapping_error(dev->dma_device, dma_addr);
3971}
3972
3973/**
3974 * ib_dma_map_single - Map a kernel virtual address to DMA address
3975 * @dev: The device for which the dma_addr is to be created
3976 * @cpu_addr: The kernel virtual address
3977 * @size: The size of the region in bytes
3978 * @direction: The direction of the DMA
3979 */
3980static inline u64 ib_dma_map_single(struct ib_device *dev,
3981 void *cpu_addr, size_t size,
3982 enum dma_data_direction direction)
3983{
3984 if (ib_uses_virt_dma(dev))
3985 return (uintptr_t)cpu_addr;
3986 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3987}
3988
3989/**
3990 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3991 * @dev: The device for which the DMA address was created
3992 * @addr: The DMA address
3993 * @size: The size of the region in bytes
3994 * @direction: The direction of the DMA
3995 */
3996static inline void ib_dma_unmap_single(struct ib_device *dev,
3997 u64 addr, size_t size,
3998 enum dma_data_direction direction)
3999{
4000 if (!ib_uses_virt_dma(dev))
4001 dma_unmap_single(dev->dma_device, addr, size, direction);
4002}
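
/*
 * These wrappers mirror the generic DMA API; a minimal sketch of mapping a
 * kernel buffer for a send and unmapping it afterwards ("buf" and "len" are
 * hypothetical caller-owned names):
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *
 *	(post the buffer, wait for the completion)
 *
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */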
4003
4004/**
4005 * ib_dma_map_page - Map a physical page to DMA address
4006 * @dev: The device for which the dma_addr is to be created
4007 * @page: The page to be mapped
4008 * @offset: The offset within the page
4009 * @size: The size of the region in bytes
4010 * @direction: The direction of the DMA
4011 */
4012static inline u64 ib_dma_map_page(struct ib_device *dev,
4013 struct page *page,
4014 unsigned long offset,
4015 size_t size,
4016 enum dma_data_direction direction)
4017{
4018 if (ib_uses_virt_dma(dev))
4019 return (uintptr_t)(page_address(page) + offset);
4020 return dma_map_page(dev->dma_device, page, offset, size, direction);
4021}
4022
4023/**
4024 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4025 * @dev: The device for which the DMA address was created
4026 * @addr: The DMA address
4027 * @size: The size of the region in bytes
4028 * @direction: The direction of the DMA
4029 */
4030static inline void ib_dma_unmap_page(struct ib_device *dev,
4031 u64 addr, size_t size,
4032 enum dma_data_direction direction)
4033{
4034 if (!ib_uses_virt_dma(dev))
4035 dma_unmap_page(dev->dma_device, addr, size, direction);
4036}
4037
4038int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
4039static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4040 struct scatterlist *sg, int nents,
4041 enum dma_data_direction direction,
4042 unsigned long dma_attrs)
4043{
4044 if (ib_uses_virt_dma(dev))
4045 return ib_dma_virt_map_sg(dev, sg, nents);
4046 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4047 dma_attrs);
4048}
4049
4050static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4051 struct scatterlist *sg, int nents,
4052 enum dma_data_direction direction,
4053 unsigned long dma_attrs)
4054{
4055 if (!ib_uses_virt_dma(dev))
4056 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
4057 dma_attrs);
4058}
4059
4060/**
4061 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4062 * @dev: The device for which the DMA addresses are to be created
4063 * @sg: The array of scatter/gather entries
4064 * @nents: The number of scatter/gather entries
4065 * @direction: The direction of the DMA
4066 */
4067static inline int ib_dma_map_sg(struct ib_device *dev,
4068 struct scatterlist *sg, int nents,
4069 enum dma_data_direction direction)
4070{
4071 return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
4072}
4073
4074/**
4075 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4076 * @dev: The device for which the DMA addresses were created
4077 * @sg: The array of scatter/gather entries
4078 * @nents: The number of scatter/gather entries
4079 * @direction: The direction of the DMA
4080 */
4081static inline void ib_dma_unmap_sg(struct ib_device *dev,
4082 struct scatterlist *sg, int nents,
4083 enum dma_data_direction direction)
4084{
4085 ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4086}
4087
4088/**
4089 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4090 * @dev: The device to query
4091 *
4092 * The returned value represents a size in bytes.
4093 */
4094static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4095{
4096 if (ib_uses_virt_dma(dev))
4097 return UINT_MAX;
4098 return dma_get_max_seg_size(dev->dma_device);
4099}
4100
4101/**
4102 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4103 * @dev: The device for which the DMA address was created
4104 * @addr: The DMA address
4105 * @size: The size of the region in bytes
4106 * @dir: The direction of the DMA
4107 */
4108static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4109 u64 addr,
4110 size_t size,
4111 enum dma_data_direction dir)
4112{
4113 if (!ib_uses_virt_dma(dev))
4114 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4115}
4116
4117/**
4118 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4119 * @dev: The device for which the DMA address was created
4120 * @addr: The DMA address
4121 * @size: The size of the region in bytes
4122 * @dir: The direction of the DMA
4123 */
4124static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4125 u64 addr,
4126 size_t size,
4127 enum dma_data_direction dir)
4128{
4129 if (!ib_uses_virt_dma(dev))
4130 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4131}
4132
4133/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4134 * space. This function should be called when 'current' is the owning MM.
4135 */
4136struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4137 u64 virt_addr, int mr_access_flags);
4138
4139/* ib_advise_mr - give advice about an address range in a memory region */
4140int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4141 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4142/**
4143 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4144 * HCA translation table.
4145 * @mr: The memory region to deregister.
4146 * @udata: Valid user data or NULL for kernel object
4147 *
4148 * This function can fail if the memory region has memory windows bound to it.
4149 */
4150int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4151
4152/**
4153 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4154 * HCA translation table.
4155 * @mr: The memory region to deregister.
4156 *
4157 * This function can fail if the memory region has memory windows bound to it.
4158 *
4159 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4160 */
4161static inline int ib_dereg_mr(struct ib_mr *mr)
4162{
4163 return ib_dereg_mr_user(mr, NULL);
4164}
4165
4166struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4167 u32 max_num_sg);
4168
4169struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4170 u32 max_num_data_sg,
4171 u32 max_num_meta_sg);
4172
4173/**
4174 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4175 * R_Key and L_Key.
4176 * @mr: struct ib_mr pointer to be updated.
4177 * @newkey: new key to be used.
4178 */
4179static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4180{
4181 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4182 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4183}
4184
4185/**
4186 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4187 * for calculating a new rkey for type 2 memory windows.
4188 * @rkey: the rkey to increment.
4189 */
4190static inline u32 ib_inc_rkey(u32 rkey)
4191{
4192 const u32 mask = 0x000000ff;
4193 return ((rkey + 1) & mask) | (rkey & ~mask);
4194}
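
/*
 * ULPs commonly combine the two helpers above to refresh a fast-registration
 * MR key before reusing the MR, so that stale remote accesses using the old
 * rkey fault (a sketch; "mr" is assumed valid):
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */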
4195
4196/**
4197 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4198 * @qp: QP to attach to the multicast group. The QP must be type
4199 * IB_QPT_UD.
4200 * @gid: Multicast group GID.
4201 * @lid: Multicast group LID in host byte order.
4202 *
4203 * In order to send and receive multicast packets, subnet
4204 * administration must have created the multicast group and configured
4205 * the fabric appropriately. The port associated with the specified
4206 * QP must also be a member of the multicast group.
4207 */
4208int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4209
4210/**
4211 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4212 * @qp: QP to detach from the multicast group.
4213 * @gid: Multicast group GID.
4214 * @lid: Multicast group LID in host byte order.
4215 */
4216int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4217
4218struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4219 struct inode *inode, struct ib_udata *udata);
4220int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4221
4222static inline int ib_check_mr_access(struct ib_device *ib_dev,
4223 unsigned int flags)
4224{
4225 /*
4226 * Local write permission is required if remote write or
4227 * remote atomic permission is also requested.
4228 */
4229 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4230 !(flags & IB_ACCESS_LOCAL_WRITE))
4231 return -EINVAL;
4232
4233 if (flags & ~IB_ACCESS_SUPPORTED)
4234 return -EINVAL;
4235
4236 if (flags & IB_ACCESS_ON_DEMAND &&
4237 !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
4238 return -EINVAL;
4239 return 0;
4240}
4241
4242static inline bool ib_access_writable(int access_flags)
4243{
4244 /*
4245 * We have writable memory backing the MR if any of the following
4246 * access flags are set. "Local write" and "remote write" obviously
4247 * require write access. "Remote atomic" can do things like fetch and
4248 * add, which will modify memory, and "MW bind" can change permissions
4249 * by binding a window.
4250 */
4251 return access_flags &
4252 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4253 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4254}
4255
4256/**
4257 * ib_check_mr_status - lightweight check of MR status.
4258 * This routine may provide status checks on a selected
4259 * ib_mr. The first use is for signature status checks.
4260 *
4261 * @mr: A memory region.
4262 * @check_mask: Bitmask of which checks to perform from
4263 * ib_mr_status_check enumeration.
4264 * @mr_status: The container of relevant status checks.
4265 * Failed checks will be indicated in the status bitmask
4266 * and the relevant info shall be in the error item.
4267 */
4268int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4269 struct ib_mr_status *mr_status);
4270
4271/**
4272 * ib_device_try_get - Hold a registration lock
4273 * @device: The device to lock
4274 *
4275 * A device under an active registration lock cannot become unregistered. It
4276 * is only possible to obtain a registration lock on a device that is fully
4277 * registered, otherwise this function returns false.
4278 *
4279 * The registration lock is only necessary for actions which require the
4280 * device to still be registered. Uses that only require the device pointer to
4281 * be valid should use get_device(&ibdev->dev) to hold the memory.
4282 *
4283 */
4284static inline bool ib_device_try_get(struct ib_device *dev)
4285{
4286 return refcount_inc_not_zero(&dev->refcount);
4287}
4288
4289void ib_device_put(struct ib_device *device);
4290struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4291 enum rdma_driver_id driver_id);
4292struct ib_device *ib_device_get_by_name(const char *name,
4293 enum rdma_driver_id driver_id);
4294struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4295 u16 pkey, const union ib_gid *gid,
4296 const struct sockaddr *addr);
4297int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4298 unsigned int port);
4299struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
4300
4301struct ib_wq *ib_create_wq(struct ib_pd *pd,
4302 struct ib_wq_init_attr *init_attr);
4303int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4304
4305int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4306 unsigned int *sg_offset, unsigned int page_size);
4307int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4308 int data_sg_nents, unsigned int *data_sg_offset,
4309 struct scatterlist *meta_sg, int meta_sg_nents,
4310 unsigned int *meta_sg_offset, unsigned int page_size);
4311
4312static inline int
4313ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4314 unsigned int *sg_offset, unsigned int page_size)
4315{
4316 int n;
4317
4318 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4319 mr->iova = 0;
4320
4321 return n;
4322}
4323
4324int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4325 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
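
/*
 * ib_map_mr_sg() is the mapping step of the kernel fast-registration flow. A
 * condensed sketch of that flow, assuming "sg"/"nents" describe a scatterlist
 * already mapped with ib_dma_map_sg(), "mr" was allocated with ib_alloc_mr(),
 * and error handling is largely omitted:
 *
 *	struct ib_reg_wr reg_wr = { };
 *	int n, ret;
 *
 *	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr     = mr;
 *	reg_wr.key    = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	ret = ib_post_send(qp, &reg_wr.wr, NULL);
 */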
4326
4327void ib_drain_rq(struct ib_qp *qp);
4328void ib_drain_sq(struct ib_qp *qp);
4329void ib_drain_qp(struct ib_qp *qp);
4330
4331int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4332 u8 *width);
4333
4334static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4335{
4336 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4337 return attr->roce.dmac;
4338 return NULL;
4339}
4340
4341static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4342{
4343 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4344 attr->ib.dlid = (u16)dlid;
4345 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4346 attr->opa.dlid = dlid;
4347}
4348
4349static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4350{
4351 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4352 return attr->ib.dlid;
4353 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4354 return attr->opa.dlid;
4355 return 0;
4356}
4357
4358static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4359{
4360 attr->sl = sl;
4361}
4362
4363static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4364{
4365 return attr->sl;
4366}
4367
4368static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4369 u8 src_path_bits)
4370{
4371 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4372 attr->ib.src_path_bits = src_path_bits;
4373 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4374 attr->opa.src_path_bits = src_path_bits;
4375}
4376
4377static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4378{
4379 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4380 return attr->ib.src_path_bits;
4381 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4382 return attr->opa.src_path_bits;
4383 return 0;
4384}
4385
4386static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4387 bool make_grd)
4388{
4389 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4390 attr->opa.make_grd = make_grd;
4391}
4392
4393static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4394{
4395 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4396 return attr->opa.make_grd;
4397 return false;
4398}
4399
4400static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
4401{
4402 attr->port_num = port_num;
4403}
4404
4405static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4406{
4407 return attr->port_num;
4408}
4409
4410static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4411 u8 static_rate)
4412{
4413 attr->static_rate = static_rate;
4414}
4415
4416static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4417{
4418 return attr->static_rate;
4419}
4420
4421static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4422 enum ib_ah_flags flag)
4423{
4424 attr->ah_flags = flag;
4425}
4426
4427static inline enum ib_ah_flags
4428 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4429{
4430 return attr->ah_flags;
4431}
4432
4433static inline const struct ib_global_route
4434 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4435{
4436 return &attr->grh;
4437}
4438
4439/* To retrieve and modify the GRH */
4440static inline struct ib_global_route
4441 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4442{
4443 return &attr->grh;
4444}
4445
4446static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4447{
4448 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4449
4450 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4451}
4452
4453static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4454 __be64 prefix)
4455{
4456 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4457
4458 grh->dgid.global.subnet_prefix = prefix;
4459}
4460
4461static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4462 __be64 if_id)
4463{
4464 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4465
4466 grh->dgid.global.interface_id = if_id;
4467}
4468
4469static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4470 union ib_gid *dgid, u32 flow_label,
4471 u8 sgid_index, u8 hop_limit,
4472 u8 traffic_class)
4473{
4474 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4475
4476 attr->ah_flags = IB_AH_GRH;
4477 if (dgid)
4478 grh->dgid = *dgid;
4479 grh->flow_label = flow_label;
4480 grh->sgid_index = sgid_index;
4481 grh->hop_limit = hop_limit;
4482 grh->traffic_class = traffic_class;
4483 grh->sgid_attr = NULL;
4484}
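
/*
 * A sketch of building an address vector for a RoCE destination with the
 * setters above and turning it into an AH ("pd", "dgid" and "sgid_index" are
 * hypothetical caller-provided values):
 *
 *	struct rdma_ah_attr ah_attr = { };
 *	struct ib_ah *ah;
 *
 *	ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
 *	rdma_ah_set_port_num(&ah_attr, 1);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *	rdma_ah_set_grh(&ah_attr, &dgid, 0, sgid_index, 64, 0);
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 */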
4485
4486void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4487void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4488 u32 flow_label, u8 hop_limit, u8 traffic_class,
4489 const struct ib_gid_attr *sgid_attr);
4490void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4491 const struct rdma_ah_attr *src);
4492void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4493 const struct rdma_ah_attr *new);
4494void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4495
4496/**
4497 * rdma_ah_find_type - Return address handle type.
4498 *
4499 * @dev: Device to be checked
4500 * @port_num: Port number
4501 */
4502static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4503 u32 port_num)
4504{
4505 if (rdma_protocol_roce(dev, port_num))
4506 return RDMA_AH_ATTR_TYPE_ROCE;
4507 if (rdma_protocol_ib(dev, port_num)) {
4508 if (rdma_cap_opa_ah(dev, port_num))
4509 return RDMA_AH_ATTR_TYPE_OPA;
4510 return RDMA_AH_ATTR_TYPE_IB;
4511 }
4512
4513 return RDMA_AH_ATTR_TYPE_UNDEFINED;
4514}
4515
4516/**
4517 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4518 * In the current implementation the only way to get
4519 * the 32bit lid is from other sources for OPA.
4520 * For IB, lids will always be 16bits so cast the
4521 * value accordingly.
4522 *
4523 * @lid: A 32bit LID
4524 */
4525static inline u16 ib_lid_cpu16(u32 lid)
4526{
4527 WARN_ON_ONCE(lid & 0xFFFF0000);
4528 return (u16)lid;
4529}
4530
4531/**
4532 * ib_lid_be16 - Return lid in 16bit BE encoding.
4533 *
4534 * @lid: A 32bit LID
4535 */
4536static inline __be16 ib_lid_be16(u32 lid)
4537{
4538 WARN_ON_ONCE(lid & 0xFFFF0000);
4539 return cpu_to_be16((u16)lid);
4540}
4541
4542/**
4543 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4544 * vector
4545 * @device: the rdma device
4546 * @comp_vector: index of completion vector
4547 *
4548 * Returns NULL if the completion vector index is out of range or the device
4549 * driver doesn't implement get_vector_affinity, otherwise the cpu map of
4550 * the completion vector.
4551 */
4552static inline const struct cpumask *
4553ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4554{
4555 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4556 !device->ops.get_vector_affinity)
4557 return NULL;
4558
4559 return device->ops.get_vector_affinity(device, comp_vector);
4560
4561}
4562
4563/**
4564 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4565 * and add their gids, as needed, to the relevant RoCE devices.
4566 *
4567 * @ibdev: the rdma device
4568 */
4569void rdma_roce_rescan_device(struct ib_device *ibdev);
4570
4571struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4572
4573int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4574
4575struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4576 enum rdma_netdev_t type, const char *name,
4577 unsigned char name_assign_type,
4578 void (*setup)(struct net_device *));
4579
4580int rdma_init_netdev(struct ib_device *device, u32 port_num,
4581 enum rdma_netdev_t type, const char *name,
4582 unsigned char name_assign_type,
4583 void (*setup)(struct net_device *),
4584 struct net_device *netdev);
4585
4586/**
4587 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4588 *
4589 * @device: the device pointer from which to retrieve the ib_device pointer
4590 *
4591 * rdma_device_to_ibdev() retrieves the ib_device pointer from the device.
4592 *
4593 */
4594static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4595{
4596 struct ib_core_device *coredev =
4597 container_of(device, struct ib_core_device, dev);
4598
4599 return coredev->owner;
4600}
4601
4602/**
4603 * ibdev_to_node - return the NUMA node for a given ib_device
4604 * @ibdev: device to get the NUMA node for.
4605 */
4606static inline int ibdev_to_node(struct ib_device *ibdev)
4607{
4608 struct device *parent = ibdev->dev.parent;
4609
4610 if (!parent)
4611 return NUMA_NO_NODE;
4612 return dev_to_node(parent);
4613}
4614
4615/**
4616 * rdma_device_to_drv_device - Helper macro to reach back to driver's
4617 * ib_device holder structure from device pointer.
4618 *
4619 * NOTE: New drivers should not make use of this API; this API is only for
4620 * existing drivers that have exposed sysfs entries using
4621 * ops->device_group.
4622 */
4623#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4624 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4625
4626bool rdma_dev_access_netns(const struct ib_device *device,
4627 const struct net *net);
4628
4629#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4630#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4631#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4632
4633/**
4634 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4635 * on the flow_label
4636 *
4637 * This function converts the 20 bit flow_label input to a valid 14 bit
4638 * RoCE v2 UDP src port value. All RoCE v2 drivers should use this same
4639 * convention.
4640 */
4641static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4642{
4643 u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4644
4645 fl_low ^= fl_high >> 14;
4646 return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4647}
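
/*
 * For example, fl = 0x12345 gives fl_low = 0x2345 and fl_high >> 14 = 0x4, so
 * the resulting UDP source port is (0x2345 ^ 0x4) | 0xC000 = 0xE341.
 */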
4648
4649/**
4650 * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
4651 * local and remote qpn values
4652 *
4653 * This function folds the multiplication result of the two qpn fields,
4654 * 24 bits each, and converts it to a 20 bit result.
4655 *
4656 * This function creates a symmetric flow_label value based on the local
4657 * and remote qpn values. This allows both the requester and responder
4658 * to calculate the same flow_label for a given connection.
4659 *
4660 * This helper function should be used by drivers in case the upper layer
4661 * provides a zero flow_label value. This is to improve entropy of RDMA
4662 * traffic in the network.
4663 */
4664static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4665{
4666 u64 v = (u64)lqpn * rqpn;
4667
4668 v ^= v >> 20;
4669 v ^= v >> 40;
4670
4671 return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4672}
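
/*
 * A sketch of the intended driver-side pairing of the two helpers above when
 * the upper layer left flow_label at zero ("grh", "lqpn" and "rqpn" are
 * hypothetical local variables holding the GRH and the local/remote QPNs):
 *
 *	u32 fl = grh->flow_label ? : rdma_calc_flow_label(lqpn, rqpn);
 *	u16 sport = rdma_flow_label_to_udp_sport(fl);
 *
 * Because lqpn * rqpn == rqpn * lqpn, both ends derive the same flow label
 * and therefore the same UDP source port for a given connection.
 */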
4673
4674const struct ib_port_immutable*
4675ib_port_immutable_read(struct ib_device *dev, unsigned int port);
4676#endif /* IB_VERBS_H */