/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS	= 1000,
	SRP_ABORT_TIMEOUT_MS	= 5000,

	SRP_PORT_REDIRECT	= 1,
	SRP_DLID_REDIRECT	= 2,
	SRP_STALE_CONN		= 3,

	SRP_DEF_SG_TABLESIZE	= 12,

	SRP_DEFAULT_QUEUE_SIZE	= 1 << 6,
	SRP_RSP_SQ_SIZE		= 1,
	SRP_TSK_MGMT_SQ_SIZE	= 1,
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	SRP_MAX_PAGES_PER_MR	= 512,

	SRP_MAX_ADD_CDB_LEN	= 16,

	SRP_MAX_IMM_SGE		= 2,
	SRP_MAX_SGE		= SRP_MAX_IMM_SGE + 1,
	/*
	 * Choose the immediate data offset such that a 32 byte CDB still fits.
	 */
	SRP_IMM_DATA_OFFSET	= sizeof(struct srp_cmd) +
				  SRP_MAX_ADD_CDB_LEN +
				  sizeof(struct srp_imm_buf),
};
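
/*
 * Illustrative sketch (not compiled): how SRP_IMM_DATA_OFFSET follows from
 * the request layout, assuming struct srp_cmd and struct srp_imm_buf from
 * <scsi/srp.h>, where the fixed part of SRP_CMD ends with a 16 byte CDB and
 * additional CDB bytes are carried in its variable-length tail:
 *
 *   struct srp_cmd        - fixed SRP_CMD header, ends with a 16 byte CDB
 *   SRP_MAX_ADD_CDB_LEN   - room for 16 additional CDB bytes (32 byte CDB)
 *   struct srp_imm_buf    - immediate data buffer descriptor
 *   immediate data        - starts at SRP_IMM_DATA_OFFSET
 */
#if 0
/* Hypothetical helper: length of a command IU that carries 'len' bytes of
 * immediate data laid out as above. */
static inline u32 srp_example_imm_iu_len(u32 len)
{
	return SRP_IMM_DATA_OFFSET + len;
}
#endif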

enum {
	SRP_TAG_NO_REQ		= ~0U,
	SRP_TAG_TSK_MGMT	= BIT(31),
};

enum srp_target_state {
	SRP_TARGET_SCANNING,
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};

enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};

/*
 * RDMA adapter in the initiator system.
 *
 * @dev_list: List of RDMA ports associated with this RDMA adapter (srp_host).
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FR registration request.
 */
struct srp_device {
	struct list_head	dev_list;
	struct ib_device       *dev;
	struct ib_pd	       *pd;
	u32			global_rkey;
	u64			mr_page_mask;
	int			mr_page_size;
	int			mr_max_size;
	int			max_pages_per_mr;
	bool			has_fr;
	bool			use_fast_reg;
};
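
/*
 * Illustrative sketch (not compiled): how mr_page_size and mr_page_mask are
 * meant to be used together when splitting a DMA region into HCA
 * registration pages.  srp_example_nr_mr_pages() is a hypothetical helper
 * and assumes mr_page_mask == ~((u64)mr_page_size - 1).
 */
#if 0
static inline unsigned int srp_example_nr_mr_pages(struct srp_device *dev,
						   dma_addr_t addr, u32 len)
{
	u64 base = addr & dev->mr_page_mask;	/* page-aligned start */
	u64 end = ALIGN((u64)addr + len, dev->mr_page_size);

	return div_u64(end - base, dev->mr_page_size);
}
#endif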

/*
 * One port of an RDMA adapter in the initiator system.
 *
 * @target_list: List of connected target ports (struct srp_target_port).
 * @target_lock: Protects @target_list.
 */
struct srp_host {
	struct srp_device      *srp_dev;
	u32			port;
	struct device		dev;
	struct list_head	target_list;
	spinlock_t		target_lock;
	struct list_head	list;
	struct mutex		add_target_mutex;
};
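
/*
 * Illustrative sketch (not compiled): @target_lock must be held while
 * walking or modifying @target_list.  srp_example_add_target() is a
 * hypothetical helper that only shows the locking pattern.
 */
#if 0
static inline void srp_example_add_target(struct srp_host *host,
					  struct srp_target_port *target)
{
	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);
}
#endif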

struct srp_request {
	struct scsi_cmnd       *scmnd;
	struct srp_iu	       *cmd;
	struct srp_fr_desc     **fr_list;
	struct srp_direct_buf  *indirect_desc;
	dma_addr_t		indirect_dma_addr;
	short			nmdesc;
	struct ib_cqe		reg_cqe;
};

/**
 * struct srp_rdma_ch - RDMA channel to an SRP target port
 * @comp_vector: Completion vector used by this RDMA channel.
 * @max_it_iu_len: Maximum initiator-to-target information unit length.
 * @max_ti_iu_len: Maximum target-to-initiator information unit length.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head	free_tx;
	spinlock_t		lock;
	s32			req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port *target ____cacheline_aligned_in_smp;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_qp	       *qp;
	struct srp_fr_pool     *fr_pool;
	uint32_t		max_it_iu_len;
	uint32_t		max_ti_iu_len;
	u8			max_imm_sge;
	bool			use_imm_data;

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	struct completion	done;
	int			status;

	union {
		struct ib_cm {
			struct sa_path_rec	path;
			struct ib_sa_query	*path_query;
			int			path_query_id;
			struct ib_cm_id		*cm_id;
		} ib_cm;
		struct rdma_cm {
			struct rdma_cm_id	*cm_id;
		} rdma_cm;
	};

	struct srp_iu	      **tx_ring;
	struct srp_iu	      **rx_ring;
	int			comp_vector;

	u64			tsk_mgmt_tag;
	struct completion	tsk_mgmt_done;
	u8			tsk_mgmt_status;
	bool			connected;
};
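
/*
 * Illustrative sketch (not compiled): free_tx, req_lim and lock are used
 * together in the hot path; a TX IU is only taken when the request limit
 * allows it.  srp_example_get_tx_iu() is a hypothetical helper that shows
 * the pattern only; the driver's own helper in ib_srp.c additionally applies
 * per-IU-type credit rules.
 */
#if 0
static inline struct srp_iu *srp_example_get_tx_iu(struct srp_rdma_ch *ch)
{
	struct srp_iu *iu = NULL;

	spin_lock_irq(&ch->lock);
	if (ch->req_lim > 0 && !list_empty(&ch->free_tx)) {
		iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
		list_del(&iu->list);
		--ch->req_lim;
	}
	spin_unlock_irq(&ch->lock);

	return iu;
}
#endif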

/**
 * struct srp_target_port - RDMA port in the SRP target system
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *   this target port.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t		lock;

	/* read only in the hot path */
	u32			global_rkey;
	struct srp_rdma_ch	*ch;
	struct net		*net;
	u32			ch_count;
	u32			lkey;
	enum srp_target_state	state;
	uint32_t		max_it_iu_size;
	unsigned int		cmd_sg_cnt;
	unsigned int		indirect_size;
	bool			allow_ext_sg;

	/* other member variables */
	union ib_gid		sgid;
	__be64			id_ext;
	__be64			ioc_guid;
	__be64			initiator_ext;
	u16			io_class;
	struct srp_host	       *srp_host;
	struct Scsi_Host       *scsi_host;
	struct srp_rport       *rport;
	char			target_name[32];
	unsigned int		scsi_id;
	unsigned int		sg_tablesize;
	unsigned int		target_can_queue;
	int			mr_pool_size;
	int			mr_per_cmd;
	int			queue_size;
	int			comp_vector;
	int			tl_retry_count;

	bool			using_rdma_cm;

	union {
		struct {
			__be64			service_id;
			union ib_gid		orig_dgid;
			__be16			pkey;
		} ib_cm;
		struct {
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} src;
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} dst;
			bool src_specified;
		} rdma_cm;
	};

	u32			rq_tmo_jiffies;

	int			zero_req_lim;

	struct work_struct	tl_err_work;
	struct work_struct	remove_work;

	struct list_head	list;
	bool			qp_in_error;
};
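
/*
 * Illustrative sketch (not compiled): which half of the anonymous union is
 * valid depends on using_rdma_cm.  srp_example_dst_is_ipv6() is a
 * hypothetical helper and only makes sense for RDMA/CM targets.
 */
#if 0
static inline bool srp_example_dst_is_ipv6(struct srp_target_port *target)
{
	return target->using_rdma_cm &&
	       target->rdma_cm.dst.sa.sa_family == AF_INET6;
}
#endif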

struct srp_iu {
	struct list_head	list;
	u64			dma;
	void		       *buf;
	size_t			size;
	enum dma_data_direction	direction;
	u32			num_sge;
	struct ib_sge		sge[SRP_MAX_SGE];
	struct ib_cqe		cqe;
};
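
/*
 * Illustrative sketch (not compiled): an IU's DMA address and size map
 * directly onto its first gather entry.  srp_example_fill_sge() is a
 * hypothetical helper; the lkey normally comes from the target port.
 */
#if 0
static inline void srp_example_fill_sge(struct srp_iu *iu, u32 len, u32 lkey)
{
	iu->sge[0].addr   = iu->dma;
	iu->sge[0].length = len;
	iu->sge[0].lkey   = lkey;
	iu->num_sge = 1;
}
#endif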

/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 */
struct srp_fr_desc {
	struct list_head		entry;
	struct ib_mr			*mr;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:      Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:      Protects free_list.
 * @free_list: List of free descriptors.
 * @desc:      Fast registration descriptor pool.
 */
struct srp_fr_pool {
	int			size;
	int			max_page_list_len;
	spinlock_t		lock;
	struct list_head	free_list;
	struct srp_fr_desc	desc[] __counted_by(size);
};
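
/*
 * Illustrative sketch (not compiled): a descriptor is allocated by unlinking
 * it from @free_list and released by putting it back, both under @lock.  The
 * srp_example_* helpers are hypothetical; the driver's own pool helpers live
 * in ib_srp.c.
 */
#if 0
static inline struct srp_fr_desc *srp_example_fr_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	d = list_first_entry_or_null(&pool->free_list, struct srp_fr_desc,
				     entry);
	if (d)
		list_del(&d->entry);
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

static inline void srp_example_fr_put(struct srp_fr_pool *pool,
				      struct srp_fr_desc *d)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_add(&d->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
#endif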

/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc:	    Pointer to the element of the SRP buffer descriptor array
 *		    that is being filled in.
 * @pages:	    Array with DMA addresses of pages being considered for
 *		    memory registration.
 * @base_dma_addr:  DMA address of the first page that has not yet been mapped.
 * @dma_len:	    Number of bytes that will be registered with the next FR
 *                  memory registration call.
 * @total_len:	    Total number of bytes in the sg-list being mapped.
 * @npages:	    Number of page addresses in the pages[] array.
 * @nmdesc:	    Number of FR memory descriptors used for mapping.
 * @ndesc:	    Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void		   **next;
			void		   **end;
		} gen;
	};
	struct srp_direct_buf  *desc;
	union {
		u64			*pages;
		struct scatterlist	*sg;
	};
	dma_addr_t		base_dma_addr;
	u32			dma_len;
	u32			total_len;
	unsigned int		npages;
	unsigned int		nmdesc;
	unsigned int		ndesc;
};
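
/*
 * Illustrative sketch (not compiled): while a scatterlist is walked, page
 * addresses accumulate in @pages/@npages and @dma_len grows until an FR
 * registration is issued for the pending range.  srp_example_add_page() is a
 * hypothetical helper showing only that bookkeeping.
 */
#if 0
static inline void srp_example_add_page(struct srp_map_state *state,
					struct srp_device *dev,
					dma_addr_t addr, u32 len)
{
	if (!state->npages)
		state->base_dma_addr = addr;	/* start of the pending range */
	state->pages[state->npages++] = addr & dev->mr_page_mask;
	state->dma_len += len;
}
#endif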

#endif /* IB_SRP_H */