/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __QEDR_H__
#define __QEDR_H__

#include <linux/pci.h>
#include <linux/xarray.h>
#include <rdma/ib_addr.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_rdma_if.h>
#include <linux/qed/qede_rdma.h>
#include <linux/qed/roce_common.h>
#include <linux/completion.h>
#include "qedr_hsi_rdma.h"

#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(_dev) dev_name(&(_dev)->ibdev.dev)
#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)

#define DP_DEBUG(dev, module, fmt, ...)					\
	pr_debug("(%s) " module ": " fmt,				\
		 DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)
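
/*
 * Illustrative usage: DP_DEBUG() prefixes a pr_debug() message with the
 * device name and one of the QEDR_MSG_* module tags defined below. The
 * message string here is made up for the example; only the macro and the
 * tags come from this header.
 *
 *	DP_DEBUG(dev, QEDR_MSG_INIT, "probing qedr device\n");
 */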

#define QEDR_MSG_INIT "INIT"
#define QEDR_MSG_MISC "MISC"
#define QEDR_MSG_CQ   "  CQ"
#define QEDR_MSG_MR   "  MR"
#define QEDR_MSG_RQ   "  RQ"
#define QEDR_MSG_SQ   "  SQ"
#define QEDR_MSG_QP   "  QP"
#define QEDR_MSG_SRQ  " SRQ"
#define QEDR_MSG_GSI  " GSI"
#define QEDR_MSG_IWARP  " IW"

#define QEDR_CQ_MAGIC_NUMBER	(0x11223344)

#define FW_PAGE_SIZE		(RDMA_RING_PAGE_SIZE)
#define FW_PAGE_SHIFT		(12)

struct qedr_dev;

struct qedr_cnq {
	struct qedr_dev		*dev;
	struct qed_chain	pbl;
	struct qed_sb_info	*sb;
	char			name[32];
	u64			n_comp;
	__le16			*hw_cons_ptr;
	u8			index;
};

#define QEDR_MAX_SGID 128

struct qedr_device_attr {
	u32	vendor_id;
	u32	vendor_part_id;
	u32	hw_ver;
	u64	fw_ver;
	u64	node_guid;
	u64	sys_image_guid;
	u8	max_cnq;
	u8	max_sge;
	u16	max_inline;
	u32	max_sqe;
	u32	max_rqe;
	u8	max_qp_resp_rd_atomic_resc;
	u8	max_qp_req_rd_atomic_resc;
	u64	max_dev_resp_rd_atomic_resc;
	u32	max_cq;
	u32	max_qp;
	u32	max_mr;
	u64	max_mr_size;
	u32	max_cqe;
	u32	max_mw;
	u32	max_mr_mw_fmr_pbl;
	u64	max_mr_mw_fmr_size;
	u32	max_pd;
	u32	max_ah;
	u8	max_pkey;
	u32	max_srq;
	u32	max_srq_wr;
	u8	max_srq_sge;
	u8	max_stats_queues;
	u32	dev_caps;

	u64	page_size_caps;
	u8	dev_ack_delay;
	u32	reserved_lkey;
	u32	bad_pkey_counter;
	struct qed_rdma_events events;
};

#define QEDR_ENET_STATE_BIT	(0)

struct qedr_dev {
	struct ib_device	ibdev;
	struct qed_dev		*cdev;
	struct pci_dev		*pdev;
	struct net_device	*ndev;

	enum ib_atomic_cap	atomic_cap;

	void *rdma_ctx;
	struct qedr_device_attr attr;

	const struct qed_rdma_ops *ops;
	struct qed_int_info	int_info;

	struct qed_sb_info	*sb_array;
	struct qedr_cnq		*cnq_array;
	int			num_cnq;
	int			sb_start;

	void __iomem		*db_addr;
	u64			db_phys_addr;
	u32			db_size;
	u16			dpi;

	union ib_gid *sgid_tbl;

	/* Lock for sgid table */
	spinlock_t sgid_lock;

	u64			guid;

	u32			dp_module;
	u8			dp_level;
	u8			num_hwfns;
#define QEDR_IS_CMT(dev)        ((dev)->num_hwfns > 1)
	u8			affin_hwfn_idx;
	u8			gsi_ll2_handle;

	uint			wq_multiplier;
	u8			gsi_ll2_mac_address[ETH_ALEN];
	int			gsi_qp_created;
	struct qedr_cq		*gsi_sqcq;
	struct qedr_cq		*gsi_rqcq;
	struct qedr_qp		*gsi_qp;
	enum qed_rdma_type	rdma_type;
	struct xarray		qps;
	struct xarray		srqs;
	struct workqueue_struct *iwarp_wq;
	u16			iwarp_max_mtu;

	unsigned long enet_state;

	u8 user_dpm_enabled;
};

#define QEDR_MAX_SQ_PBL			(0x8000)
#define QEDR_MAX_SQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_SQE_ELEMENT_SIZE		(sizeof(struct rdma_sq_sge))
#define QEDR_MAX_SQE_ELEMENTS_PER_SQE	(ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE			((QEDR_MAX_SQ_PBL_ENTRIES) *\
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_SQE_ELEMENT_SIZE) /\
					 (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
/* RQ */
#define QEDR_MAX_RQ_PBL			(0x2000)
#define QEDR_MAX_RQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_RQE_ELEMENT_SIZE		(sizeof(struct rdma_rq_sge))
#define QEDR_MAX_RQE_ELEMENTS_PER_RQE	(RDMA_MAX_SGE_PER_RQ_WQE)
#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_RQE_ELEMENT_SIZE)
#define QEDR_MAX_RQE			((QEDR_MAX_RQ_PBL_ENTRIES) *\
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_RQE_ELEMENT_SIZE) /\
					 (QEDR_MAX_RQE_ELEMENTS_PER_RQE))
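
/*
 * Note on the macros above (restating the arithmetic only): QEDR_MAX_SQE
 * and QEDR_MAX_RQE take the total element capacity described by the PBL
 * (PBL entries * ring page size / element size) and divide it by the
 * maximum number of elements a single WQE may occupy, which yields a
 * conservative per-queue WQE limit.
 */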

#define QEDR_CQE_SIZE	(sizeof(union rdma_cqe))
#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
				  sizeof(u64)) - 1)
#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
			     (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))

#define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)

#define QEDR_MAX_PORT			(1)
#define QEDR_PORT			(1)

#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

#define QEDR_ROCE_PKEY_MAX 1
#define QEDR_ROCE_PKEY_TABLE_LEN 1
#define QEDR_ROCE_PKEY_DEFAULT 0xffff

struct qedr_pbl {
	struct list_head list_entry;
	void *va;
	dma_addr_t pa;
};

struct qedr_ucontext {
	struct ib_ucontext ibucontext;
	struct qedr_dev *dev;
	struct qedr_pd *pd;
	void __iomem *dpi_addr;
	struct rdma_user_mmap_entry *db_mmap_entry;
	u64 dpi_phys_addr;
	u32 dpi_size;
	u16 dpi;
	bool db_rec;
	u8 edpm_mode;
};

union db_prod32 {
	struct rdma_pwm_val16_data data;
	u32 raw;
};

union db_prod64 {
	struct rdma_pwm_val32_data data;
	u64 raw;
};

enum qedr_cq_type {
	QEDR_CQ_TYPE_GSI,
	QEDR_CQ_TYPE_KERNEL,
	QEDR_CQ_TYPE_USER,
};

struct qedr_pbl_info {
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	bool two_layered;
};

struct qedr_userq {
	struct ib_umem *umem;
	struct qedr_pbl_info pbl_info;
	struct qedr_pbl *pbl_tbl;
	u64 buf_addr;
	size_t buf_len;

	/* doorbell recovery */
	void __iomem *db_addr;
	struct qedr_user_db_rec *db_rec_data;
	struct rdma_user_mmap_entry *db_mmap_entry;
	void __iomem *db_rec_db2_addr;
	union db_prod32 db_rec_db2_data;
};

struct qedr_cq {
	struct ib_cq ibcq;

	enum qedr_cq_type cq_type;
	u32 sig;

	u16 icid;

	/* Lock to protect multiple CQs */
	spinlock_t cq_lock;
	u8 arm_flags;
	struct qed_chain pbl;

	void __iomem *db_addr;
	union db_prod64 db;

	u8 pbl_toggle;
	union rdma_cqe *latest_cqe;
	union rdma_cqe *toggle_cqe;

	u32 cq_cons;

	struct qedr_userq q;
	u8 destroyed;
	u16 cnq_notif;
};

struct qedr_pd {
	struct ib_pd ibpd;
	u32 pd_id;
	struct qedr_ucontext *uctx;
};

struct qedr_xrcd {
	struct ib_xrcd ibxrcd;
	u16 xrcd_id;
};

struct qedr_qp_hwq_info {
	/* WQE Elements */
	struct qed_chain pbl;
	u64 p_phys_addr_tbl;
	u32 max_sges;

	/* WQE */
	u16 prod;
	u16 cons;
	u16 wqe_cons;
	u16 gsi_cons;
	u16 max_wr;

	/* DB */
	void __iomem *db;
	union db_prod32 db_data;

	void __iomem *iwarp_db2;
	union db_prod32 iwarp_db2_data;
};

#define QEDR_INC_SW_IDX(p_info, index)					\
	do {								\
		p_info->index = (p_info->index + 1) &			\
				qed_chain_get_capacity(p_info->pbl)	\
	} while (0)

struct qedr_srq_hwq_info {
	u32 max_sges;
	u32 max_wr;
	struct qed_chain pbl;
	u64 p_phys_addr_tbl;
	u32 wqe_prod;
	u32 sge_prod;
	u32 wr_prod_cnt;
	atomic_t wr_cons_cnt;
	u32 num_elems;

	struct rdma_srq_producers *virt_prod_pair_addr;
	dma_addr_t phy_prod_pair_addr;
};

struct qedr_srq {
	struct ib_srq ibsrq;
	struct qedr_dev *dev;

	struct qedr_userq	usrq;
	struct qedr_srq_hwq_info hw_srq;
	struct ib_umem *prod_umem;
	u16 srq_id;
	u32 srq_limit;
	bool is_xrc;
	/* lock to protect srq recv post */
	spinlock_t lock;
};

enum qedr_qp_err_bitmap {
	QEDR_QP_ERR_SQ_FULL = 1,
	QEDR_QP_ERR_RQ_FULL = 2,
	QEDR_QP_ERR_BAD_SR = 4,
	QEDR_QP_ERR_BAD_RR = 8,
	QEDR_QP_ERR_SQ_PBL_FULL = 16,
	QEDR_QP_ERR_RQ_PBL_FULL = 32,
};

enum qedr_qp_create_type {
	QEDR_QP_CREATE_NONE,
	QEDR_QP_CREATE_USER,
	QEDR_QP_CREATE_KERNEL,
};

enum qedr_iwarp_cm_flags {
	QEDR_IWARP_CM_WAIT_FOR_CONNECT    = BIT(0),
	QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1),
};

struct qedr_qp {
	struct ib_qp ibqp;	/* must be first */
	struct qedr_dev *dev;
	struct qedr_qp_hwq_info sq;
	struct qedr_qp_hwq_info rq;

	u32 max_inline_data;

	/* Lock for QPs */
	spinlock_t q_lock;
	struct qedr_cq *sq_cq;
	struct qedr_cq *rq_cq;
	struct qedr_srq *srq;
	enum qed_roce_qp_state state;
	u32 id;
	struct qedr_pd *pd;
	enum ib_qp_type qp_type;
	enum qedr_qp_create_type create_type;
	struct qed_rdma_qp *qed_qp;
	u32 qp_id;
	u16 icid;
	u16 mtu;
	int sgid_idx;
	u32 rq_psn;
	u32 sq_psn;
	u32 qkey;
	u32 dest_qp_num;
	u8 timeout;

	/* Relevant to QPs created from kernel space only (ULPs) */
	u8 prev_wqe_size;
	u16 wqe_cons;
	u32 err_bitmap;
	bool signaled;

	/* SQ shadow */
	struct {
		u64 wr_id;
		enum ib_wc_opcode opcode;
		u32 bytes_len;
		u8 wqe_size;
		bool signaled;
		dma_addr_t icrc_mapping;
		u32 *icrc;
		struct qedr_mr *mr;
	} *wqe_wr_id;

	/* RQ shadow */
	struct {
		u64 wr_id;
		struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
		u8 wqe_size;

		u8 smac[ETH_ALEN];
		u16 vlan;
		int rc;
	} *rqe_wr_id;

	/* Relevant to QPs created from user space only (applications) */
	struct qedr_userq usq;
	struct qedr_userq urq;

	/* synchronization objects used with iwarp ep */
	struct kref refcnt;
	struct completion iwarp_cm_comp;
	struct completion qp_rel_comp;
	unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
};

struct qedr_ah {
	struct ib_ah ibah;
	struct rdma_ah_attr attr;
};

enum qedr_mr_type {
	QEDR_MR_USER,
	QEDR_MR_KERNEL,
	QEDR_MR_DMA,
	QEDR_MR_FRMR,
};

struct mr_info {
	struct qedr_pbl *pbl_table;
	struct qedr_pbl_info pbl_info;
	struct list_head free_pbl_list;
	struct list_head inuse_pbl_list;
	u32 completed;
	u32 completed_handled;
};

struct qedr_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;

	struct qed_rdma_register_tid_in_params hw_mr;
	enum qedr_mr_type type;

	struct qedr_dev *dev;
	struct mr_info info;

	u64 *pages;
	u32 npages;
};

struct qedr_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	struct qedr_dev *dev;
	union {
		u64 io_address;
		void *address;
	};
	size_t length;
	u16 dpi;
	u8 mmap_flag;
};

#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
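
/*
 * Illustrative usage: SET_FIELD2() ORs @flag into @value at the bit
 * position named by the field's *_SHIFT constant. With a hypothetical
 * field FOO (i.e. an assumed FOO_SHIFT definition that is not provided
 * by this header), a call would look like:
 *
 *	SET_FIELD2(db_data.raw, FOO, 1);
 */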

#define QEDR_RESP_IMM	(RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
			 RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QEDR_RESP_RDMA	(RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
			 RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
#define QEDR_RESP_INV	(RDMA_CQE_RESPONDER_INV_FLG_MASK << \
			 RDMA_CQE_RESPONDER_INV_FLG_SHIFT)

static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
{
	info->cons = (info->cons + 1) % info->max_wr;
	info->wqe_cons++;
}

static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
{
	info->prod = (info->prod + 1) % info->max_wr;
}
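
/*
 * The two helpers above advance the software producer/consumer indices
 * of a qedr_qp_hwq_info ring modulo max_wr. A typical (illustrative)
 * pattern would be qedr_inc_sw_prod(&qp->sq) after writing a WQE on the
 * post path and qedr_inc_sw_cons(&qp->sq) when the matching completion
 * is processed.
 */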

static inline int qedr_get_dmac(struct qedr_dev *dev,
				struct rdma_ah_attr *ah_attr, u8 *mac_addr)
{
	union ib_gid zero_sgid = { { 0 } };
	struct in6_addr in6;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	u8 *dmac;

	if (!memcmp(&grh->dgid, &zero_sgid, sizeof(union ib_gid))) {
		DP_ERR(dev, "Local port GID not supported\n");
		eth_zero_addr(mac_addr);
		return -EINVAL;
	}

	memcpy(&in6, grh->dgid.raw, sizeof(in6));
	dmac = rdma_ah_retrieve_dmac(ah_attr);
	if (!dmac)
		return -EINVAL;
	ether_addr_copy(mac_addr, dmac);

	return 0;
}
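
/*
 * Illustrative call site for qedr_get_dmac() (the variable names are
 * made up for the example):
 *
 *	u8 dmac[ETH_ALEN];
 *
 *	if (qedr_get_dmac(dev, &ah_attr, dmac))
 *		return -EINVAL;
 *
 * On success the buffer holds the destination MAC cached in the AH
 * attributes; a zero destination GID is rejected with -EINVAL.
 */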

struct qedr_iw_listener {
	struct qedr_dev *dev;
	struct iw_cm_id *cm_id;
	int		backlog;
	void		*qed_handle;
};

struct qedr_iw_ep {
	struct qedr_dev	*dev;
	struct iw_cm_id	*cm_id;
	struct qedr_qp	*qp;
	void		*qed_context;
	struct kref	refcnt;
};

static inline
struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct qedr_ucontext, ibucontext);
}

static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qedr_dev, ibdev);
}

static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qedr_pd, ibpd);
}

static inline struct qedr_xrcd *get_qedr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct qedr_xrcd, ibxrcd);
}

static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qedr_cq, ibcq);
}

static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qedr_qp, ibqp);
}

static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qedr_ah, ibah);
}

static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qedr_mr, ibmr);
}

static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qedr_srq, ibsrq);
}

static inline bool qedr_qp_has_srq(struct qedr_qp *qp)
{
	return qp->srq;
}

static inline bool qedr_qp_has_sq(struct qedr_qp *qp)
{
	if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_TGT)
		return false;

	return true;
}

static inline bool qedr_qp_has_rq(struct qedr_qp *qp)
{
	if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_INI ||
	    qp->qp_type == IB_QPT_XRC_TGT || qedr_qp_has_srq(qp))
		return false;

	return true;
}
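
/*
 * Restating the three helpers above: qedr_qp_has_sq() is false for GSI
 * and XRC target QPs, and qedr_qp_has_rq() is false for GSI QPs, XRC
 * QPs, and QPs attached to an SRQ; presumably callers use these checks
 * to skip SQ/RQ setup and teardown for such QPs.
 */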

static inline struct qedr_user_mmap_entry *
get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct qedr_user_mmap_entry,
			    rdma_entry);
}
#endif