1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef IB_SRP_H
34#define IB_SRP_H
35
36#include <linux/types.h>
37#include <linux/list.h>
38#include <linux/mutex.h>
39#include <linux/scatterlist.h>
40
41#include <scsi/scsi_host.h>
42#include <scsi/scsi_cmnd.h>
43
44#include <rdma/ib_verbs.h>
45#include <rdma/ib_sa.h>
46#include <rdma/ib_cm.h>
47#include <rdma/rdma_cm.h>
48
/*
 * Constants used throughout the SRP initiator driver.
 */
enum {
	/* Timeout for an SA path record query, in milliseconds. */
	SRP_PATH_REC_TIMEOUT_MS = 1000,
	/* Timeout for an abort task management request, in milliseconds. */
	SRP_ABORT_TIMEOUT_MS = 5000,

	/*
	 * Redirect / stale-connection outcomes. NOTE(review): presumably
	 * the result codes of CM REJ handling -- confirm against the .c file.
	 */
	SRP_PORT_REDIRECT = 1,
	SRP_DLID_REDIRECT = 2,
	SRP_STALE_CONN = 3,

	/* Default scatter/gather table size advertised to the SCSI core. */
	SRP_DEF_SG_TABLESIZE = 12,

	/* Default queue depth (64) and how the send queue is partitioned: */
	SRP_DEFAULT_QUEUE_SIZE = 1 << 6,
	/* ... slots reserved for sending SRP_RSP responses, */
	SRP_RSP_SQ_SIZE = 1,
	/* ... slots reserved for task management requests, */
	SRP_TSK_MGMT_SQ_SIZE = 1,
	/* ... and the remainder available for regular SCSI commands. */
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	/* Upper bound on the number of pages per memory registration. */
	SRP_MAX_PAGES_PER_MR = 512,

	/* Maximum number of additional CDB bytes in an SRP_CMD IU. */
	SRP_MAX_ADD_CDB_LEN = 16,

	/* S/G entries usable for immediate data, and the per-IU maximum. */
	SRP_MAX_IMM_SGE = 2,
	SRP_MAX_SGE = SRP_MAX_IMM_SGE + 1,
	/*
	 * Choose the immediate data offset such that a 32 byte CDB still fits.
	 */
	SRP_IMM_DATA_OFFSET = sizeof(struct srp_cmd) +
			      SRP_MAX_ADD_CDB_LEN +
			      sizeof(struct srp_imm_buf),
};
78
enum {
	/* Tag value that never matches the tag of an outstanding request. */
	SRP_TAG_NO_REQ = ~0U,
	/* Bit set in the tag of task management IUs to distinguish them
	 * from SCSI command tags.
	 */
	SRP_TAG_TSK_MGMT = BIT(31),
};
83
/* Life-cycle states of a struct srp_target_port. */
enum srp_target_state {
	SRP_TARGET_SCANNING,	/* initial state, before the port goes live */
	SRP_TARGET_LIVE,	/* port is connected and in use */
	SRP_TARGET_REMOVED,	/* port has been (or is being) removed */
};
89
/* Kinds of information units (IUs) sent to the target. */
enum srp_iu_type {
	SRP_IU_CMD,		/* SCSI command IU */
	SRP_IU_TSK_MGMT,	/* task management IU */
	SRP_IU_RSP,		/* response IU */
};
95
/*
 * RDMA adapter in the initiator system.
 *
 * @dev_list: List of RDMA ports associated with this RDMA adapter (srp_host).
 * @dev: The underlying RDMA device.
 * @pd: Protection domain used for memory registrations on this device.
 * @global_rkey: NOTE(review): presumably the rkey used when relying on the
 *	global DMA memory region instead of per-request FR -- confirm.
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FR registration request.
 * @max_pages_per_mr: Maximum number of pages in a single memory region.
 * @has_fr: Whether the HCA supports fast registration (FR).
 * @use_fast_reg: Whether fast registration is used for memory mapping.
 */
struct srp_device {
	struct list_head	dev_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	u32			global_rkey;
	u64			mr_page_mask;
	int			mr_page_size;
	int			mr_max_size;
	int			max_pages_per_mr;
	bool			has_fr;
	bool			use_fast_reg;
};
116
/*
 * One port of an RDMA adapter in the initiator system.
 *
 * @srp_dev: The RDMA adapter this port belongs to.
 * @port: Port number on @srp_dev.
 * @dev: NOTE(review): presumably the device node exposed to user space for
 *	this port -- confirm against the .c file.
 * @target_list: List of connected target ports (struct srp_target_port).
 * @target_lock: Protects @target_list.
 * @list: NOTE(review): presumably this host's entry in a per-srp_dev list.
 * @add_target_mutex: NOTE(review): presumably serializes add_target
 *	operations on this port -- confirm.
 */
struct srp_host {
	struct srp_device      *srp_dev;
	u32			port;
	struct device		dev;
	struct list_head	target_list;
	spinlock_t		target_lock;
	struct list_head	list;
	struct mutex		add_target_mutex;
};
132
/*
 * Per-command state of one outstanding SCSI request.
 *
 * @scmnd: The SCSI command being processed.
 * @cmd: IU that carries the SRP_CMD request for @scmnd.
 * @fr_list: Fast registration descriptors in use by this request.
 * @indirect_desc: Indirect data buffer descriptor table.
 * @indirect_dma_addr: DMA address of @indirect_desc.
 * @nmdesc: Number of memory descriptors used to map this request.
 * @reg_cqe: Completion queue entry for memory registration work requests.
 */
struct srp_request {
	struct scsi_cmnd	*scmnd;
	struct srp_iu		*cmd;
	struct srp_fr_desc	**fr_list;
	struct srp_direct_buf	*indirect_desc;
	dma_addr_t		indirect_dma_addr;
	short			nmdesc;
	struct ib_cqe		reg_cqe;
};
142
/**
 * struct srp_rdma_ch - one RDMA channel (QP + CQs) to an SRP target
 * @free_tx: List of transmit IUs (struct srp_iu) available for sending.
 * @lock: NOTE(review): presumably protects @free_tx and @req_lim --
 *	confirm against the .c file.
 * @req_lim: NOTE(review): presumably the SRP request limit, i.e. the
 *	flow-control credit count granted by the target -- confirm.
 * @comp_vector: Completion vector used by this RDMA channel.
 * @max_it_iu_len: Maximum initiator-to-target information unit length.
 * @max_ti_iu_len: Maximum target-to-initiator information unit length.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head	free_tx;
	spinlock_t		lock;
	s32			req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port *target ____cacheline_aligned_in_smp;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_qp		*qp;
	struct srp_fr_pool	*fr_pool;
	uint32_t		max_it_iu_len;
	uint32_t		max_ti_iu_len;
	u8			max_imm_sge;
	bool			use_imm_data;

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	/* Completion/status pair for waiting on asynchronous operations. */
	struct completion	done;
	int			status;

	/* Connection management state: exactly one union member is valid,
	 * depending on whether the IB CM or the RDMA CM is used.
	 */
	union {
		struct ib_cm {
			struct sa_path_rec	path;
			struct ib_sa_query	*path_query;
			int			path_query_id;
			struct ib_cm_id		*cm_id;
		} ib_cm;
		struct rdma_cm {
			struct rdma_cm_id	*cm_id;
		} rdma_cm;
	};

	struct srp_iu	       **tx_ring;	/* transmit IUs */
	struct srp_iu	       **rx_ring;	/* receive IUs */
	int			comp_vector;

	/* Task management request state; completion of a task management
	 * request is signalled through @tsk_mgmt_done.
	 */
	u64			tsk_mgmt_tag;
	struct completion	tsk_mgmt_done;
	u8			tsk_mgmt_status;
	bool			connected;
};
194
/**
 * struct srp_target_port - RDMA port in the SRP target system
 * @lock: NOTE(review): protects hot-path state; exact coverage is not
 *	visible in this header -- confirm against the .c file.
 * @ch: RDMA channel(s) to this target; presumably an array of @ch_count
 *	entries -- confirm.
 * @state: Life-cycle state, see enum srp_target_state.
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *	this target port.
 * @ib_cm: IB CM connection parameters; presumably valid only when
 *	@using_rdma_cm is false.
 * @rdma_cm: RDMA CM source/destination addresses; presumably valid only
 *	when @using_rdma_cm is true.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t		lock;

	/* read only in the hot path */
	u32			global_rkey;
	struct srp_rdma_ch	*ch;
	struct net		*net;
	u32			ch_count;
	u32			lkey;
	enum srp_target_state	state;
	uint32_t		max_it_iu_size;
	unsigned int		cmd_sg_cnt;
	unsigned int		indirect_size;
	bool			allow_ext_sg;

	/* other member variables */
	union ib_gid		sgid;
	__be64			id_ext;
	__be64			ioc_guid;
	__be64			initiator_ext;
	u16			io_class;
	struct srp_host		*srp_host;
	struct Scsi_Host	*scsi_host;
	struct srp_rport	*rport;
	char			target_name[32];
	unsigned int		scsi_id;
	unsigned int		sg_tablesize;
	unsigned int		target_can_queue;
	int			mr_pool_size;
	int			mr_per_cmd;
	int			queue_size;
	int			comp_vector;
	int			tl_retry_count;

	bool			using_rdma_cm;

	union {
		struct {
			__be64			service_id;
			union ib_gid		orig_dgid;
			__be16			pkey;
		} ib_cm;
		struct {
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} src;
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} dst;
			bool			src_specified;
		} rdma_cm;
	};

	u32			rq_tmo_jiffies;

	int			zero_req_lim;

	struct work_struct	tl_err_work;
	struct work_struct	remove_work;

	struct list_head	list;
	bool			qp_in_error;
};
270
/*
 * Information unit (IU) buffer used to send or receive one SRP message.
 *
 * @list: List membership (e.g. srp_rdma_ch.free_tx while not in use).
 * @dma: DMA address of @buf.
 * @buf: The message buffer itself.
 * @size: Size of @buf in bytes.
 * @direction: DMA data direction used when mapping @buf.
 * @num_sge: Number of valid entries in @sge[].
 * @sge: Scatter/gather entries posted with the work request.
 * @cqe: Completion queue entry; identifies the completion handler.
 */
struct srp_iu {
	struct list_head	list;
	u64			dma;
	void			*buf;
	size_t			size;
	enum dma_data_direction direction;
	u32			num_sge;
	struct ib_sge		sge[SRP_MAX_SGE];
	struct ib_cqe		cqe;
};
281
/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr: Memory region.
 */
struct srp_fr_desc {
	struct list_head	entry;
	struct ib_mr		*mr;
};
292
/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size: Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock: Protects free_list.
 * @free_list: List of free descriptors.
 * @desc: Fast registration descriptor pool (flexible array, @size entries).
 */
struct srp_fr_pool {
	int			size;
	int			max_page_list_len;
	spinlock_t		lock;
	struct list_head	free_list;
	struct srp_fr_desc	desc[] __counted_by(size);
};
311
/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @fr: Cursor (next/end) into the request's fast registration descriptor
 *	list; used when fast registration is enabled.
 * @gen: Generic (untyped) variant of the @fr cursor.
 * @desc: Pointer to the element of the SRP buffer descriptor array
 *	that is being filled in.
 * @pages: Array with DMA addresses of pages being considered for
 *	memory registration.
 * @sg: Scatterlist cursor; alternative view of the same union as @pages.
 * @base_dma_addr: DMA address of the first page that has not yet been mapped.
 * @dma_len: Number of bytes that will be registered with the next FR
 *	memory registration call.
 * @total_len: Total number of bytes in the sg-list being mapped.
 * @npages: Number of page addresses in the pages[] array.
 * @nmdesc: Number of FR memory descriptors used for mapping.
 * @ndesc: Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void		   **next;
			void		   **end;
		} gen;
	};
	struct srp_direct_buf  *desc;
	union {
		u64			*pages;
		struct scatterlist	*sg;
	};
	dma_addr_t		base_dma_addr;
	u32			dma_len;
	u32			total_len;
	unsigned int		npages;
	unsigned int		nmdesc;
	unsigned int		ndesc;
};
349
350#endif /* IB_SRP_H */
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef IB_SRP_H
34#define IB_SRP_H
35
36#include <linux/types.h>
37#include <linux/list.h>
38#include <linux/mutex.h>
39#include <linux/scatterlist.h>
40
41#include <scsi/scsi_host.h>
42#include <scsi/scsi_cmnd.h>
43
44#include <rdma/ib_verbs.h>
45#include <rdma/ib_sa.h>
46#include <rdma/ib_cm.h>
47#include <rdma/rdma_cm.h>
48
/*
 * Constants used throughout the SRP initiator driver.
 */
enum {
	/* Timeout for an SA path record query, in milliseconds. */
	SRP_PATH_REC_TIMEOUT_MS = 1000,
	/* Timeout for an abort task management request, in milliseconds. */
	SRP_ABORT_TIMEOUT_MS = 5000,

	/*
	 * Redirect / stale-connection outcomes. NOTE(review): presumably
	 * the result codes of CM REJ handling -- confirm against the .c file.
	 */
	SRP_PORT_REDIRECT = 1,
	SRP_DLID_REDIRECT = 2,
	SRP_STALE_CONN = 3,

	/* Default scatter/gather table size advertised to the SCSI core. */
	SRP_DEF_SG_TABLESIZE = 12,

	/* Default queue depth (64) and how the send queue is partitioned: */
	SRP_DEFAULT_QUEUE_SIZE = 1 << 6,
	/* ... slots reserved for sending SRP_RSP responses, */
	SRP_RSP_SQ_SIZE = 1,
	/* ... slots reserved for task management requests, */
	SRP_TSK_MGMT_SQ_SIZE = 1,
	/* ... and the remainder available for regular SCSI commands. */
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	/* Tag value that never matches the tag of an outstanding request. */
	SRP_TAG_NO_REQ = ~0U,
	/* Bit set in the tag of task management IUs. */
	SRP_TAG_TSK_MGMT = 1U << 31,

	/* Upper bound on the number of pages per memory registration. */
	SRP_MAX_PAGES_PER_MR = 512,

	/* Maximum number of additional CDB bytes in an SRP_CMD IU. */
	SRP_MAX_ADD_CDB_LEN = 16,

	/* S/G entries usable for immediate data, and the per-IU maximum. */
	SRP_MAX_IMM_SGE = 2,
	SRP_MAX_SGE = SRP_MAX_IMM_SGE + 1,
	/*
	 * Choose the immediate data offset such that a 32 byte CDB still fits.
	 */
	SRP_IMM_DATA_OFFSET = sizeof(struct srp_cmd) +
			      SRP_MAX_ADD_CDB_LEN +
			      sizeof(struct srp_imm_buf),
};
81
/* Life-cycle states of a struct srp_target_port. */
enum srp_target_state {
	SRP_TARGET_SCANNING,	/* initial state, before the port goes live */
	SRP_TARGET_LIVE,	/* port is connected and in use */
	SRP_TARGET_REMOVED,	/* port has been (or is being) removed */
};
87
/* Kinds of information units (IUs) sent to the target. */
enum srp_iu_type {
	SRP_IU_CMD,		/* SCSI command IU */
	SRP_IU_TSK_MGMT,	/* task management IU */
	SRP_IU_RSP,		/* response IU */
};
93
/*
 * RDMA adapter in the initiator system.
 *
 * @dev_list: List of RDMA ports (srp_host) associated with this adapter.
 * @dev: The underlying RDMA device.
 * @pd: Protection domain used for memory registrations on this device.
 * @global_rkey: NOTE(review): presumably the rkey used when relying on the
 *	global DMA memory region instead of per-request FR -- confirm.
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FR registration request.
 * @max_pages_per_mr: Maximum number of pages in a single memory region.
 * @has_fr: Whether the HCA supports fast registration (FR).
 * @use_fast_reg: Whether fast registration is used for memory mapping.
 */
struct srp_device {
	struct list_head	dev_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	u32			global_rkey;
	u64			mr_page_mask;
	int			mr_page_size;
	int			mr_max_size;
	int			max_pages_per_mr;
	bool			has_fr;
	bool			use_fast_reg;
};
111
/*
 * One port of an RDMA adapter in the initiator system.
 *
 * @srp_dev: The RDMA adapter this port belongs to.
 * @port: Port number on @srp_dev.
 * @target_list: List of connected target ports (struct srp_target_port).
 * @target_lock: Protects @target_list.
 * @released: NOTE(review): presumably completed when this host's device is
 *	released, so teardown can wait for it -- confirm against the .c file.
 * @add_target_mutex: NOTE(review): presumably serializes add_target
 *	operations on this port -- confirm.
 */
struct srp_host {
	struct srp_device      *srp_dev;
	u8			port;
	struct device		dev;
	struct list_head	target_list;
	spinlock_t		target_lock;
	struct completion	released;
	struct list_head	list;
	struct mutex		add_target_mutex;
};
122
/*
 * Per-command state of one outstanding SCSI request.
 *
 * @scmnd: The SCSI command being processed.
 * @cmd: IU that carries the SRP_CMD request for @scmnd.
 * @fr_list: Fast registration descriptors in use by this request.
 * @indirect_desc: Indirect data buffer descriptor table.
 * @indirect_dma_addr: DMA address of @indirect_desc.
 * @nmdesc: Number of memory descriptors used to map this request.
 * @reg_cqe: Completion queue entry for memory registration work requests.
 */
struct srp_request {
	struct scsi_cmnd	*scmnd;
	struct srp_iu		*cmd;
	struct srp_fr_desc	**fr_list;
	struct srp_direct_buf	*indirect_desc;
	dma_addr_t		indirect_dma_addr;
	short			nmdesc;
	struct ib_cqe		reg_cqe;
};
132
/**
 * struct srp_rdma_ch - one RDMA channel (QP + CQs) to an SRP target
 * @free_tx: List of transmit IUs (struct srp_iu) available for sending.
 * @lock: NOTE(review): presumably protects @free_tx and @req_lim --
 *	confirm against the .c file.
 * @req_lim: NOTE(review): presumably the SRP request limit, i.e. the
 *	flow-control credit count granted by the target -- confirm.
 * @req_ring: NOTE(review): presumably per-tag request state (struct
 *	srp_request), sized by srp_target_port.req_ring_size -- confirm.
 * @comp_vector: Completion vector used by this RDMA channel.
 * @max_it_iu_len: Maximum initiator-to-target information unit length.
 * @max_ti_iu_len: Maximum target-to-initiator information unit length.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head	free_tx;
	spinlock_t		lock;
	s32			req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port *target ____cacheline_aligned_in_smp;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_qp		*qp;
	struct srp_fr_pool	*fr_pool;
	uint32_t		max_it_iu_len;
	uint32_t		max_ti_iu_len;
	u8			max_imm_sge;
	bool			use_imm_data;

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	/* Completion/status pair for waiting on asynchronous operations. */
	struct completion	done;
	int			status;

	/* Connection management state: exactly one union member is valid,
	 * depending on whether the IB CM or the RDMA CM is used.
	 */
	union {
		struct ib_cm {
			struct sa_path_rec	path;
			struct ib_sa_query	*path_query;
			int			path_query_id;
			struct ib_cm_id		*cm_id;
		} ib_cm;
		struct rdma_cm {
			struct rdma_cm_id	*cm_id;
		} rdma_cm;
	};

	struct srp_iu	       **tx_ring;	/* transmit IUs */
	struct srp_iu	       **rx_ring;	/* receive IUs */
	struct srp_request	*req_ring;
	int			comp_vector;

	/* Task management request state; completion of a task management
	 * request is signalled through @tsk_mgmt_done.
	 */
	u64			tsk_mgmt_tag;
	struct completion	tsk_mgmt_done;
	u8			tsk_mgmt_status;
	bool			connected;
};
185
/**
 * struct srp_target_port - RDMA port in the SRP target system
 * @lock: NOTE(review): protects hot-path state; exact coverage is not
 *	visible in this header -- confirm against the .c file.
 * @ch: RDMA channel(s) to this target; presumably an array of @ch_count
 *	entries -- confirm.
 * @state: Life-cycle state, see enum srp_target_state.
 * @req_ring_size: Number of elements in each channel's req_ring.
 *	NOTE(review): inferred from the field names -- confirm.
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *	this target port.
 * @ib_cm: IB CM connection parameters; presumably valid only when
 *	@using_rdma_cm is false.
 * @rdma_cm: RDMA CM source/destination addresses; presumably valid only
 *	when @using_rdma_cm is true.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t		lock;

	/* read only in the hot path */
	u32			global_rkey;
	struct srp_rdma_ch	*ch;
	struct net		*net;
	u32			ch_count;
	u32			lkey;
	enum srp_target_state	state;
	uint32_t		max_it_iu_size;
	unsigned int		cmd_sg_cnt;
	unsigned int		indirect_size;
	bool			allow_ext_sg;

	/* other member variables */
	union ib_gid		sgid;
	__be64			id_ext;
	__be64			ioc_guid;
	__be64			initiator_ext;
	u16			io_class;
	struct srp_host		*srp_host;
	struct Scsi_Host	*scsi_host;
	struct srp_rport	*rport;
	char			target_name[32];
	unsigned int		scsi_id;
	unsigned int		sg_tablesize;
	unsigned int		target_can_queue;
	int			mr_pool_size;
	int			mr_per_cmd;
	int			queue_size;
	int			req_ring_size;
	int			comp_vector;
	int			tl_retry_count;

	bool			using_rdma_cm;

	union {
		struct {
			__be64			service_id;
			union ib_gid		orig_dgid;
			__be16			pkey;
		} ib_cm;
		struct {
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} src;
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} dst;
			bool			src_specified;
		} rdma_cm;
	};

	u32			rq_tmo_jiffies;

	int			zero_req_lim;

	struct work_struct	tl_err_work;
	struct work_struct	remove_work;

	struct list_head	list;
	bool			qp_in_error;
};
262
/*
 * Information unit (IU) buffer used to send or receive one SRP message.
 *
 * @list: List membership (e.g. srp_rdma_ch.free_tx while not in use).
 * @dma: DMA address of @buf.
 * @buf: The message buffer itself.
 * @size: Size of @buf in bytes.
 * @direction: DMA data direction used when mapping @buf.
 * @num_sge: Number of valid entries in @sge[].
 * @sge: Scatter/gather entries posted with the work request.
 * @cqe: Completion queue entry; identifies the completion handler.
 */
struct srp_iu {
	struct list_head	list;
	u64			dma;
	void			*buf;
	size_t			size;
	enum dma_data_direction direction;
	u32			num_sge;
	struct ib_sge		sge[SRP_MAX_SGE];
	struct ib_cqe		cqe;
};
273
/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr: Memory region.
 */
struct srp_fr_desc {
	struct list_head	entry;
	struct ib_mr		*mr;
};
284
/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size: Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock: Protects free_list.
 * @free_list: List of free descriptors.
 * @desc: Fast registration descriptor pool (flexible array, @size entries).
 */
struct srp_fr_pool {
	int			size;
	int			max_page_list_len;
	spinlock_t		lock;
	struct list_head	free_list;
	struct srp_fr_desc	desc[];
};
303
/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @fr: Cursor (next/end) into the request's fast registration descriptor
 *	list; used when fast registration is enabled.
 * @gen: Generic (untyped) variant of the @fr cursor.
 * @desc: Pointer to the element of the SRP buffer descriptor array
 *	that is being filled in.
 * @pages: Array with DMA addresses of pages being considered for
 *	memory registration.
 * @sg: Scatterlist cursor; alternative view of the same union as @pages.
 * @base_dma_addr: DMA address of the first page that has not yet been mapped.
 * @dma_len: Number of bytes that will be registered with the next FR
 *	memory registration call.
 * @total_len: Total number of bytes in the sg-list being mapped.
 * @npages: Number of page addresses in the pages[] array.
 * @nmdesc: Number of FR memory descriptors used for mapping.
 * @ndesc: Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void		   **next;
			void		   **end;
		} gen;
	};
	struct srp_direct_buf  *desc;
	union {
		u64			*pages;
		struct scatterlist	*sg;
	};
	dma_addr_t		base_dma_addr;
	u32			dma_len;
	u32			total_len;
	unsigned int		npages;
	unsigned int		nmdesc;
	unsigned int		ndesc;
};
341
342#endif /* IB_SRP_H */