Linux Audio

Check our new training course

Loading...
  1/*
  2 * Broadcom NetXtreme-E RoCE driver.
  3 *
  4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
  5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
  6 *
  7 * This software is available to you under a choice of one of two
  8 * licenses.  You may choose to be licensed under the terms of the GNU
  9 * General Public License (GPL) Version 2, available from the file
 10 * COPYING in the main directory of this source tree, or the
 11 * BSD license below:
 12 *
 13 * Redistribution and use in source and binary forms, with or without
 14 * modification, are permitted provided that the following conditions
 15 * are met:
 16 *
 17 * 1. Redistributions of source code must retain the above copyright
 18 *    notice, this list of conditions and the following disclaimer.
 19 * 2. Redistributions in binary form must reproduce the above copyright
 20 *    notice, this list of conditions and the following disclaimer in
 21 *    the documentation and/or other materials provided with the
 22 *    distribution.
 23 *
 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 35 *
 36 * Description: IB Verbs interpreter (header)
 37 */
 38
 39#ifndef __BNXT_RE_IB_VERBS_H__
 40#define __BNXT_RE_IB_VERBS_H__
 41
/* Per-GID bookkeeping: maps a GID to its table index and tracks how many
 * users currently reference it (see bnxt_re_add_gid()/bnxt_re_del_gid()).
 */
struct bnxt_re_gid_ctx {
	u32			idx;	/* index of this GID in the GID table */
	u32			refcnt;	/* number of active references to the entry */
};
 46
#define BNXT_RE_FENCE_BYTES	64
/* Resources backing the fence operation: a small DMA-able buffer plus the
 * MR/MW registered over it and a pre-built bind WQE with its rkey.
 */
struct bnxt_re_fence_data {
	u32 size;			/* size of the fence buffer in use */
	u8 va[BNXT_RE_FENCE_BYTES];	/* fence buffer (CPU virtual address) */
	dma_addr_t dma_addr;		/* DMA address of @va */
	struct bnxt_re_mr *mr;		/* MR registered over the fence buffer */
	struct ib_mw *mw;		/* MW bound to the fence MR */
	struct bnxt_qplib_swqe bind_wqe; /* prebuilt bind WQE for the fence */
	u32 bind_rkey;			/* rkey produced by the bind */
};
 57
/* Protection domain: wraps the core ib_pd with the qplib PD, per-PD fence
 * resources, and the user-mmap entries for the doorbell pages.
 */
struct bnxt_re_pd {
	struct ib_pd            ib_pd;		/* core IB PD (must embed) */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_pd	qplib_pd;	/* low-level (qplib) PD */
	struct bnxt_re_fence_data fence;	/* fence resources for this PD */
	struct rdma_user_mmap_entry *pd_db_mmap;   /* uncached doorbell mmap entry */
	struct rdma_user_mmap_entry *pd_wcdb_mmap; /* write-combining doorbell mmap entry */
};
 66
/* Address handle: core ib_ah plus the qplib-level AH it maps to. */
struct bnxt_re_ah {
	struct ib_ah		ib_ah;		/* core IB AH (must embed) */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_ah	qplib_ah;	/* low-level (qplib) AH */
};
 72
/* Shared receive queue: core ib_srq plus the qplib SRQ, the user memory
 * backing it, and a lock serializing SRQ access.
 */
struct bnxt_re_srq {
	struct ib_srq		ib_srq;		/* core IB SRQ (must embed) */
	struct bnxt_re_dev	*rdev;		/* owning device */
	u32			srq_limit;	/* armed SRQ limit (low-watermark) */
	struct bnxt_qplib_srq	qplib_srq;	/* low-level (qplib) SRQ */
	struct ib_umem		*umem;		/* user memory for a user-mode SRQ */
	spinlock_t		lock;		/* protect srq */
};
 81
/* Queue pair: core ib_qp plus the qplib QP, per-queue locks, the user
 * memory backing SQ/RQ, and extra state used only for the special QP1.
 */
struct bnxt_re_qp {
	struct ib_qp		ib_qp;		/* core IB QP (must embed) */
	struct list_head	list;		/* linkage on the device QP list */
	struct bnxt_re_dev	*rdev;		/* owning device */
	spinlock_t		sq_lock;	/* protect sq */
	spinlock_t		rq_lock;	/* protect rq */
	struct bnxt_qplib_qp	qplib_qp;	/* low-level (qplib) QP */
	struct ib_umem		*sumem;		/* user memory backing the SQ */
	struct ib_umem		*rumem;		/* user memory backing the RQ */
	/* QP1 */
	u32			send_psn;	/* next PSN for QP1 sends */
	struct ib_ud_header	qp1_hdr;	/* scratch UD header for QP1 traffic */
	struct bnxt_re_cq	*scq;		/* send completion queue */
	struct bnxt_re_cq	*rcq;		/* receive completion queue */
};
 97
/* Completion queue: core ib_cq plus the qplib CQ, a scratch CQE array used
 * while polling, and state carried across a CQ resize.
 */
struct bnxt_re_cq {
	struct ib_cq		ib_cq;		/* core IB CQ (must embed) */
	struct bnxt_re_dev	*rdev;		/* owning device */
	spinlock_t              cq_lock;	/* protect cq */
	u16			cq_count;	/* moderation: completion count */
	u16			cq_period;	/* moderation: coalescing period */
	struct bnxt_qplib_cq	qplib_cq;	/* low-level (qplib) CQ */
	struct bnxt_qplib_cqe	*cql;		/* scratch CQE list used by poll_cq */
#define MAX_CQL_PER_POLL	1024
	u32			max_cql;	/* capacity of @cql (<= MAX_CQL_PER_POLL) */
	struct ib_umem		*umem;		/* user memory for a user-mode CQ */
	struct ib_umem		*resize_umem;	/* new user memory during a resize */
	int			resize_cqe;	/* requested CQE count during a resize */
	void			*uctx_cq_page;	/* per-CQ page shared with userspace */
	struct hlist_node	hash_entry;	/* linkage in a device CQ hash table */
};
114
/* Memory region: core ib_mr plus the qplib MR/MW and, for fast-register
 * MRs, the page list and its qplib FRPL.
 */
struct bnxt_re_mr {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_mr		ib_mr;		/* core IB MR (must embed) */
	struct ib_umem		*ib_umem;	/* user memory for a user-registered MR */
	struct bnxt_qplib_mrw	qplib_mr;	/* low-level (qplib) MR/MW */
	u32			npages;		/* number of entries in @pages */
	u64			*pages;		/* page address list for map_mr_sg */
	struct bnxt_qplib_frpl	qplib_frpl;	/* fast-register page list (qplib) */
};
124
/* Fast-register page list: qplib FRPL plus its CPU-side page array. */
struct bnxt_re_frpl {
	struct bnxt_re_dev		*rdev;		/* owning device */
	struct bnxt_qplib_frpl		qplib_frpl;	/* low-level (qplib) FRPL */
	u64				*page_list;	/* page address array */
};
130
/* Memory window: core ib_mw plus the qplib MR/MW it maps to. */
struct bnxt_re_mw {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_mw		ib_mw;		/* core IB MW (must embed) */
	struct bnxt_qplib_mrw	qplib_mw;	/* low-level (qplib) MW */
};
136
/* Per-process user context: doorbell page indexes (DPIs), the page shared
 * with userspace, and the capability mask negotiated at alloc_ucontext time.
 */
struct bnxt_re_ucontext {
	struct ib_ucontext      ib_uctx;	/* core IB ucontext (must embed) */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_dpi	dpi;		/* doorbell page for this context */
	struct bnxt_qplib_dpi   wcdpi;		/* write-combining doorbell page */
	void			*shpg;		/* page shared with userspace */
	spinlock_t		sh_lock;	/* protect shpg */
	struct rdma_user_mmap_entry *shpage_mmap; /* mmap entry for @shpg */
	u64 cmask;	/* capability mask, e.g. BNXT_RE_UCNTX_CMASK_POW2_DISABLED
			 * consulted by bnxt_re_init_depth() */
};
147
/* Kind of region a user mmap request refers to; stored in
 * bnxt_re_user_mmap_entry::mmap_flag and dispatched on in bnxt_re_mmap().
 */
enum bnxt_re_mmap_flag {
	BNXT_RE_MMAP_SH_PAGE,		/* shared page (ucontext shpg) */
	BNXT_RE_MMAP_UC_DB,		/* uncached doorbell page */
	BNXT_RE_MMAP_WC_DB,		/* write-combining doorbell page */
	BNXT_RE_MMAP_DBR_PAGE,		/* doorbell recovery page */
	BNXT_RE_MMAP_DBR_BAR,		/* doorbell recovery BAR region */
	BNXT_RE_MMAP_TOGGLE_PAGE,	/* per-CQ toggle page (uctx_cq_page) */
};
156
/* Driver-private wrapper around rdma_user_mmap_entry: remembers which
 * context owns the mapping, what it maps (enum bnxt_re_mmap_flag) and
 * the memory offset to map.
 */
struct bnxt_re_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;	/* core mmap entry (must embed) */
	struct bnxt_re_ucontext *uctx;		/* owning user context */
	u64 mem_offset;				/* offset of the region to map */
	u8 mmap_flag;				/* one of enum bnxt_re_mmap_flag */
};
163
164static inline u16 bnxt_re_get_swqe_size(int nsge)
165{
166	return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
167}
168
169static inline u16 bnxt_re_get_rwqe_size(int nsge)
170{
171	return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
172}
173
174static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
175{
176	return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ?
177		ent : roundup_pow_of_two(ent) : ent;
178}
179
180int bnxt_re_query_device(struct ib_device *ibdev,
181			 struct ib_device_attr *ib_attr,
182			 struct ib_udata *udata);
183int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
184		       struct ib_port_attr *port_attr);
185int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
186			       struct ib_port_immutable *immutable);
187void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
188int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
189		       u16 index, u16 *pkey);
190int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
191int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
192int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
193		      int index, union ib_gid *gid);
194enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
195					    u32 port_num);
196int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
197int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
198int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
199		      struct ib_udata *udata);
200int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
201int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
202int bnxt_re_create_srq(struct ib_srq *srq,
203		       struct ib_srq_init_attr *srq_init_attr,
204		       struct ib_udata *udata);
205int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
206		       enum ib_srq_attr_mask srq_attr_mask,
207		       struct ib_udata *udata);
208int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
209int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
210int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
211			  const struct ib_recv_wr **bad_recv_wr);
212int bnxt_re_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
213		      struct ib_udata *udata);
214int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
215		      int qp_attr_mask, struct ib_udata *udata);
216int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
217		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
218int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
219int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
220		      const struct ib_send_wr **bad_send_wr);
221int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
222		      const struct ib_recv_wr **bad_recv_wr);
223int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
224		      struct ib_udata *udata);
225int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
226int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
227int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
228int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
229struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
230
231int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
232		      unsigned int *sg_offset);
233struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
234			       u32 max_num_sg);
235int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
236struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
237			       struct ib_udata *udata);
238int bnxt_re_dealloc_mw(struct ib_mw *mw);
239struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
240				  u64 virt_addr, int mr_access_flags,
241				  struct ib_udata *udata);
242struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
243					 u64 length, u64 virt_addr,
244					 int fd, int mr_access_flags,
245					 struct ib_udata *udata);
246int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
247void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
248int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
249void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
250
251
252unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
253void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
254#endif /* __BNXT_RE_IB_VERBS_H__ */