v6.8
  1/*
  2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
  3 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
  4 *
  5 * This software is available to you under a choice of one of two
  6 * licenses.  You may choose to be licensed under the terms of the GNU
  7 * General Public License (GPL) Version 2, available from the file
  8 * COPYING in the main directory of this source tree, or the
  9 * OpenIB.org BSD license below:
 10 *
 11 *     Redistribution and use in source and binary forms, with or
 12 *     without modification, are permitted provided that the following
 13 *     conditions are met:
 14 *
 15 *	- Redistributions of source code must retain the above
 16 *	  copyright notice, this list of conditions and the following
 17 *	  disclaimer.
 18 *
 19 *	- Redistributions in binary form must reproduce the above
 20 *	  copyright notice, this list of conditions and the following
 21 *	  disclaimer in the documentation and/or other materials
 22 *	  provided with the distribution.
 23 *
 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 31 * SOFTWARE.
 32 */
 33#include <linux/kernel.h>
 34#include <linux/slab.h>
 35#include <linux/mm.h>
 36#include <linux/highmem.h>
 37#include <linux/scatterlist.h>
 38
 39#include "iscsi_iser.h"
 40
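/*
 * Completion callback for the memory registration and local-invalidate
 * work requests built in this file. They all reference ib_conn->reg_cqe
 * and are posted with send_flags == 0 (unsignaled), so a completion
 * normally arrives here only on error or flush; it is simply reported
 * under the "memreg" tag.
 */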
 41void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
 42{
 43	iser_err_comp(wc, "memreg");
 44}
 45
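/*
 * Descriptor pool helpers: pop and push a fast-registration descriptor
 * on the per-connection free list under fr_pool->lock. The get path uses
 * list_first_entry() without an empty check, i.e. it relies on the pool
 * being sized so that a free descriptor is always available.
 */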
 46static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
 47{
 48	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
 49	struct iser_fr_desc *desc;
 50	unsigned long flags;
 51
 52	spin_lock_irqsave(&fr_pool->lock, flags);
 53	desc = list_first_entry(&fr_pool->list,
 54				struct iser_fr_desc, list);
 55	list_del(&desc->list);
 56	spin_unlock_irqrestore(&fr_pool->lock, flags);
 57
 58	return desc;
 59}
 60
 61static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
 62				 struct iser_fr_desc *desc)
 63{
 64	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
 65	unsigned long flags;
 66
 67	spin_lock_irqsave(&fr_pool->lock, flags);
 68	list_add(&desc->list, &fr_pool->list);
 69	spin_unlock_irqrestore(&fr_pool->lock, flags);
 70}
 71
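/*
 * DMA-map the task's data scatterlist for the given direction, and the
 * protection scatterlist as well when the command carries T10-PI
 * metadata (scsi_prot_sg_count() != 0). A failed protection mapping
 * unwinds the data mapping before returning -EINVAL.
 */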
 72int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 73			   enum iser_data_dir iser_dir,
 74			   enum dma_data_direction dma_dir)
 75{
 76	struct iser_data_buf *data = &iser_task->data[iser_dir];
 77	struct ib_device *dev;
 78
 79	iser_task->dir[iser_dir] = 1;
 80	dev = iser_task->iser_conn->ib_conn.device->ib_device;
 81
 82	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
 83	if (unlikely(data->dma_nents == 0)) {
 84		iser_err("dma_map_sg failed!!!\n");
 85		return -EINVAL;
 86	}
 87
 88	if (scsi_prot_sg_count(iser_task->sc)) {
 89		struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
 90
 91		pdata->dma_nents = ib_dma_map_sg(dev, pdata->sg, pdata->size, dma_dir);
 92		if (unlikely(pdata->dma_nents == 0)) {
 93			iser_err("protection dma_map_sg failed!!!\n");
 94			goto out_unmap;
 95		}
 96	}
 97
 98	return 0;
 99
100out_unmap:
101	ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
102	return -EINVAL;
103}
104
105
106void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
107			      enum iser_data_dir iser_dir,
108			      enum dma_data_direction dma_dir)
109{
110	struct iser_data_buf *data = &iser_task->data[iser_dir];
111	struct ib_device *dev;
112
113	dev = iser_task->iser_conn->ib_conn.device->ib_device;
114	ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
115
116	if (scsi_prot_sg_count(iser_task->sc)) {
117		struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
118
119		ib_dma_unmap_sg(dev, pdata->sg, pdata->size, dma_dir);
120	}
121}
122
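/*
 * Fast path for a single, contiguous DMA entry: no memory registration
 * is performed, the SGE simply uses the PD's local_dma_lkey, and the
 * rkey is the PD's unsafe global rkey when one is exposed, or 0.
 */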
123static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
124			struct iser_mem_reg *reg)
125{
126	struct scatterlist *sg = mem->sg;
127
128	reg->sge.lkey = device->pd->local_dma_lkey;
129	/*
130	 * FIXME: rework the registration code path to differentiate
131	 * rkey/lkey use cases
132	 */
133
134	if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
135		reg->rkey = device->pd->unsafe_global_rkey;
136	else
137		reg->rkey = 0;
138	reg->sge.addr = sg_dma_address(&sg[0]);
139	reg->sge.length = sg_dma_len(&sg[0]);
140
141	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
142		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
143		 reg->sge.addr, reg->sge.length);
144
145	return 0;
146}
147
148void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
149			    enum iser_data_dir cmd_dir)
150{
151	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
152	struct iser_fr_desc *desc;
153	struct ib_mr_status mr_status;
154
155	desc = reg->desc;
156	if (!desc)
157		return;
158
159	/*
160	 * The signature MR cannot be invalidated and reused without checking.
161 * libiscsi calls the check_protection transport handler only if a
162 * SCSI Response is received, and the signature MR is not checked if
163 * the task is completed for some other reason, such as a timeout or
164 * error handling. That's why we must check the signature MR here
165 * before putting it back in the free pool.
166	 */
167	if (unlikely(desc->sig_protected)) {
168		desc->sig_protected = false;
169		ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
170				   &mr_status);
171	}
172	iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->desc);
173	reg->desc = NULL;
174}
175
176static void iser_set_dif_domain(struct scsi_cmnd *sc,
177				struct ib_sig_domain *domain)
178{
179	domain->sig_type = IB_SIG_TYPE_T10_DIF;
180	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
181	domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
182	/*
183	 * At the moment we hard code those, but in the future
184	 * we will take them from sc.
185	 */
186	domain->sig.dif.apptag_check_mask = 0xffff;
187	domain->sig.dif.app_escape = true;
188	domain->sig.dif.ref_escape = true;
189	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
190		domain->sig.dif.ref_remap = true;
191}
192
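/*
 * Translate the SCSI protection operation into signature attributes:
 * WRITE_INSERT/READ_STRIP protect only the wire domain,
 * READ_INSERT/WRITE_STRIP only the memory domain, and the PASS
 * operations protect both. The wire guard is always CRC, while the
 * memory guard follows SCSI_PROT_IP_CHECKSUM (IP checksum vs. CRC).
 */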
193static int iser_set_sig_attrs(struct scsi_cmnd *sc,
194			      struct ib_sig_attrs *sig_attrs)
195{
196	switch (scsi_get_prot_op(sc)) {
197	case SCSI_PROT_WRITE_INSERT:
198	case SCSI_PROT_READ_STRIP:
199		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
200		iser_set_dif_domain(sc, &sig_attrs->wire);
201		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
202		break;
203	case SCSI_PROT_READ_INSERT:
204	case SCSI_PROT_WRITE_STRIP:
205		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
206		iser_set_dif_domain(sc, &sig_attrs->mem);
207		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
208						IB_T10DIF_CSUM : IB_T10DIF_CRC;
209		break;
210	case SCSI_PROT_READ_PASS:
211	case SCSI_PROT_WRITE_PASS:
212		iser_set_dif_domain(sc, &sig_attrs->wire);
213		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
214		iser_set_dif_domain(sc, &sig_attrs->mem);
215		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
216						IB_T10DIF_CSUM : IB_T10DIF_CRC;
217		break;
218	default:
219		iser_err("Unsupported PI operation %d\n",
220			 scsi_get_prot_op(sc));
221		return -EINVAL;
222	}
223
224	return 0;
225}
226
227static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
228{
229	*mask = 0;
230	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
231		*mask |= IB_SIG_CHECK_REFTAG;
232	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
233		*mask |= IB_SIG_CHECK_GUARD;
234}
235
236static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
237				 struct ib_cqe *cqe, struct ib_send_wr *next_wr)
238{
239	inv_wr->opcode = IB_WR_LOCAL_INV;
240	inv_wr->wr_cqe = cqe;
241	inv_wr->ex.invalidate_rkey = mr->rkey;
242	inv_wr->send_flags = 0;
243	inv_wr->num_sge = 0;
244	inv_wr->next = next_wr;
245}
246
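/*
 * Register a signature (PI) MR over the data and protection
 * scatterlists: chain a LOCAL_INV for the old rkey if needed, bump the
 * key, map both SG lists with ib_map_mr_sg_pi(), and build an
 * IB_WR_REG_MR_INTEGRITY WR that is linked in front of the task's send
 * WR. The resulting lkey/rkey/addr/length describe the whole protected
 * region.
 */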
247static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
248			   struct iser_data_buf *mem,
249			   struct iser_data_buf *sig_mem,
250			   struct iser_reg_resources *rsc,
251			   struct iser_mem_reg *sig_reg)
252{
253	struct iser_tx_desc *tx_desc = &iser_task->desc;
254	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
255	struct ib_mr *mr = rsc->sig_mr;
256	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
257	struct ib_reg_wr *wr = &tx_desc->reg_wr;
258	int ret;
259
260	memset(sig_attrs, 0, sizeof(*sig_attrs));
261	ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
262	if (ret)
263		goto err;
264
265	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
266
267	if (rsc->sig_mr->need_inval)
268		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
269
270	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
271
272	ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
273			      sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
274	if (unlikely(ret)) {
275		iser_err("failed to map PI sg (%d)\n",
276			 mem->dma_nents + sig_mem->dma_nents);
277		goto err;
278	}
279
280	memset(wr, 0, sizeof(*wr));
281	wr->wr.next = &tx_desc->send_wr;
282	wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
283	wr->wr.wr_cqe = cqe;
284	wr->wr.num_sge = 0;
285	wr->wr.send_flags = 0;
286	wr->mr = mr;
287	wr->key = mr->rkey;
288	wr->access = IB_ACCESS_LOCAL_WRITE |
289		     IB_ACCESS_REMOTE_READ |
290		     IB_ACCESS_REMOTE_WRITE;
291	rsc->sig_mr->need_inval = true;
292
293	sig_reg->sge.lkey = mr->lkey;
294	sig_reg->rkey = mr->rkey;
295	sig_reg->sge.addr = mr->iova;
296	sig_reg->sge.length = mr->length;
297
298	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
299		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
300		 sig_reg->sge.length);
301err:
302	return ret;
303}
304
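/*
 * Non-PI counterpart of iser_reg_sig_mr() above: invalidate the old
 * rkey if needed, bump the key, map the data SG list with
 * ib_map_mr_sg() and chain an IB_WR_REG_MR in front of the send WR.
 */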
305static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
306			    struct iser_data_buf *mem,
307			    struct iser_reg_resources *rsc,
308			    struct iser_mem_reg *reg)
309{
310	struct iser_tx_desc *tx_desc = &iser_task->desc;
311	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
312	struct ib_mr *mr = rsc->mr;
313	struct ib_reg_wr *wr = &tx_desc->reg_wr;
314	int n;
315
316	if (rsc->mr->need_inval)
317		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
318
319	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
320
321	n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
322	if (unlikely(n != mem->dma_nents)) {
323		iser_err("failed to map sg (%d/%d)\n",
324			 n, mem->dma_nents);
325		return n < 0 ? n : -EINVAL;
326	}
327
328	wr->wr.next = &tx_desc->send_wr;
329	wr->wr.opcode = IB_WR_REG_MR;
330	wr->wr.wr_cqe = cqe;
331	wr->wr.send_flags = 0;
332	wr->wr.num_sge = 0;
333	wr->mr = mr;
334	wr->key = mr->rkey;
335	wr->access = IB_ACCESS_LOCAL_WRITE  |
336		     IB_ACCESS_REMOTE_WRITE |
337		     IB_ACCESS_REMOTE_READ;
338
339	rsc->mr->need_inval = true;
340
341	reg->sge.lkey = mr->lkey;
342	reg->rkey = mr->rkey;
343	reg->sge.addr = mr->iova;
344	reg->sge.length = mr->length;
345
346	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
347		 reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);
348
349	return 0;
350}
351
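/*
 * Entry point for registering a task's RDMA buffer. A command with a
 * single DMA entry, no protection information, and either all-immediate
 * data or iser_always_reg unset is served by iser_reg_dma(); everything
 * else takes a descriptor from the pool and uses fast registration,
 * with the signature variant when the command carries PI.
 */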
352int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
353			 enum iser_data_dir dir,
354			 bool all_imm)
355{
356	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
357	struct iser_device *device = ib_conn->device;
358	struct iser_data_buf *mem = &task->data[dir];
359	struct iser_mem_reg *reg = &task->rdma_reg[dir];
360	struct iser_fr_desc *desc;
361	bool use_dma_key;
362	int err;
363
364	use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
365		      scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
366	if (use_dma_key)
367		return iser_reg_dma(device, mem, reg);
368
369	desc = iser_reg_desc_get_fr(ib_conn);
370	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
371		err = iser_fast_reg_mr(task, mem, &desc->rsc, reg);
372		if (unlikely(err))
373			goto err_reg;
374	} else {
375		err = iser_reg_sig_mr(task, mem, &task->prot[dir],
376				      &desc->rsc, reg);
377		if (unlikely(err))
378			goto err_reg;
379
380		desc->sig_protected = true;
381	}
382
383	reg->desc = desc;
384
385	return 0;
386
387err_reg:
388	iser_reg_desc_put_fr(ib_conn, desc);
389
390	return err;
391}
v3.15
  1/*
  2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
  3 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
  4 *
  5 * This software is available to you under a choice of one of two
  6 * licenses.  You may choose to be licensed under the terms of the GNU
  7 * General Public License (GPL) Version 2, available from the file
  8 * COPYING in the main directory of this source tree, or the
  9 * OpenIB.org BSD license below:
 10 *
 11 *     Redistribution and use in source and binary forms, with or
 12 *     without modification, are permitted provided that the following
 13 *     conditions are met:
 14 *
 15 *	- Redistributions of source code must retain the above
 16 *	  copyright notice, this list of conditions and the following
 17 *	  disclaimer.
 18 *
 19 *	- Redistributions in binary form must reproduce the above
 20 *	  copyright notice, this list of conditions and the following
 21 *	  disclaimer in the documentation and/or other materials
 22 *	  provided with the distribution.
 23 *
 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 31 * SOFTWARE.
 32 */
 33#include <linux/module.h>
 34#include <linux/kernel.h>
 35#include <linux/slab.h>
 36#include <linux/mm.h>
 37#include <linux/highmem.h>
 38#include <linux/scatterlist.h>
 39
 40#include "iscsi_iser.h"
 41
 42#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
 43
 44/**
 45 * iser_start_rdma_unaligned_sg
 46 */
 47static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 48					struct iser_data_buf *data,
 49					struct iser_data_buf *data_copy,
 50					enum iser_data_dir cmd_dir)
 51{
 52	struct ib_device *dev = iser_task->ib_conn->device->ib_device;
 53	struct scatterlist *sgl = (struct scatterlist *)data->buf;
 54	struct scatterlist *sg;
 55	char *mem = NULL;
 56	unsigned long  cmd_data_len = 0;
 57	int dma_nents, i;
 58
 59	for_each_sg(sgl, sg, data->size, i)
 60		cmd_data_len += ib_sg_dma_len(dev, sg);
 61
 62	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
 63		mem = (void *)__get_free_pages(GFP_ATOMIC,
 64		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
 65	else
 66		mem = kmalloc(cmd_data_len, GFP_ATOMIC);
 67
 68	if (mem == NULL) {
 69		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
 70			 data->size, (int)cmd_data_len);
 71		return -ENOMEM;
 72	}
 73
 74	if (cmd_dir == ISER_DIR_OUT) {
 75		/* copy the unaligned sg into the buffer used for RDMA */
 76		int i;
 77		char *p, *from;
 78
 79		sgl = (struct scatterlist *)data->buf;
 80		p = mem;
 81		for_each_sg(sgl, sg, data->size, i) {
 82			from = kmap_atomic(sg_page(sg));
 83			memcpy(p,
 84			       from + sg->offset,
 85			       sg->length);
 86			kunmap_atomic(from);
 87			p += sg->length;
 88		}
 89	}
 90
 91	sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
 92	data_copy->buf = &data_copy->sg_single;
 93	data_copy->size = 1;
 94	data_copy->copy_buf = mem;
 95
 96	dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
 97				  (cmd_dir == ISER_DIR_OUT) ?
 98				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 99	BUG_ON(dma_nents == 0);
100
101	data_copy->dma_nents = dma_nents;
102	data_copy->data_len = cmd_data_len;
103
104	return 0;
105}
106
107/**
108 * iser_finalize_rdma_unaligned_sg
109 */
110
111void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
112				     struct iser_data_buf *data,
113				     struct iser_data_buf *data_copy,
114				     enum iser_data_dir cmd_dir)
115{
116	struct ib_device *dev;
117	unsigned long  cmd_data_len;
118
119	dev = iser_task->ib_conn->device->ib_device;
120
121	ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
122			(cmd_dir == ISER_DIR_OUT) ?
123			DMA_TO_DEVICE : DMA_FROM_DEVICE);
124
125	if (cmd_dir == ISER_DIR_IN) {
126		char *mem;
127		struct scatterlist *sgl, *sg;
128		unsigned char *p, *to;
129		unsigned int sg_size;
130		int i;
131
132		/* copy the data read via RDMA back to the unaligned sg */
133		mem = data_copy->copy_buf;
134
135		sgl = (struct scatterlist *)data->buf;
136		sg_size = data->size;
137
138		p = mem;
139		for_each_sg(sgl, sg, sg_size, i) {
140			to = kmap_atomic(sg_page(sg));
141			memcpy(to + sg->offset,
142			       p,
143			       sg->length);
144			kunmap_atomic(to);
145			p += sg->length;
146		}
147	}
148
149	cmd_data_len = data->data_len;
150
151	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
152		free_pages((unsigned long)data_copy->copy_buf,
153			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
154	else
155		kfree(data_copy->copy_buf);
156
157	data_copy->copy_buf = NULL;
158}
159
160#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
161
162/**
163 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
164 * and returns the length of the resulting physical address array (may be less than
165 * the original due to possible compaction).
166 *
167 * we build a "page vec" under the assumption that the SG meets the RDMA
168 * alignment requirements. Other than the first and last SG elements, all
169 * the "internal" elements can be compacted into a list whose elements are
170 * dma addresses of physical pages. The code also supports the weird case
171 * where a few fragments of the same page are present in the SG as
172 * consecutive elements. It also handles a one-entry SG.
173 */
174
175static int iser_sg_to_page_vec(struct iser_data_buf *data,
176			       struct ib_device *ibdev, u64 *pages,
177			       int *offset, int *data_size)
178{
179	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
180	u64 start_addr, end_addr, page, chunk_start = 0;
181	unsigned long total_sz = 0;
182	unsigned int dma_len;
183	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
184
185	/* compute the offset of first element */
186	*offset = (u64) sgl[0].offset & ~MASK_4K;
187
188	new_chunk = 1;
189	cur_page  = 0;
190	for_each_sg(sgl, sg, data->dma_nents, i) {
191		start_addr = ib_sg_dma_address(ibdev, sg);
192		if (new_chunk)
193			chunk_start = start_addr;
194		dma_len = ib_sg_dma_len(ibdev, sg);
195		end_addr = start_addr + dma_len;
196		total_sz += dma_len;
197
198		/* collect page fragments until aligned or end of SG list */
199		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
200			new_chunk = 0;
201			continue;
202		}
203		new_chunk = 1;
204
205		/* address of the first page in the contiguous chunk;
206		   masking relevant for the very first SG entry,
207		   which might be unaligned */
208		page = chunk_start & MASK_4K;
209		do {
210			pages[cur_page++] = page;
211			page += SIZE_4K;
212		} while (page < end_addr);
213	}
214
215	*data_size = total_sz;
216	iser_dbg("page_vec->data_size:%d cur_page %d\n",
217		 *data_size, cur_page);
218	return cur_page;
219}
220
221
222/**
223 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
224 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
225 * and returns the number of correctly aligned entries. Supports the case
226 * where consecutive SG elements are actually fragments of the same physical page.
227 */
228static int iser_data_buf_aligned_len(struct iser_data_buf *data,
229				      struct ib_device *ibdev)
230{
231	struct scatterlist *sgl, *sg, *next_sg = NULL;
232	u64 start_addr, end_addr;
233	int i, ret_len, start_check = 0;
234
235	if (data->dma_nents == 1)
236		return 1;
237
238	sgl = (struct scatterlist *)data->buf;
239	start_addr  = ib_sg_dma_address(ibdev, sgl);
240
241	for_each_sg(sgl, sg, data->dma_nents, i) {
242		if (start_check && !IS_4K_ALIGNED(start_addr))
243			break;
244
245		next_sg = sg_next(sg);
246		if (!next_sg)
247			break;
248
249		end_addr    = start_addr + ib_sg_dma_len(ibdev, sg);
250		start_addr  = ib_sg_dma_address(ibdev, next_sg);
251
252		if (end_addr == start_addr) {
253			start_check = 0;
254			continue;
255		} else
256			start_check = 1;
257
258		if (!IS_4K_ALIGNED(end_addr))
259			break;
260	}
261	ret_len = (next_sg) ? i : i+1;
262	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
263		 ret_len, data->dma_nents, data);
264	return ret_len;
265}
266
267static void iser_data_buf_dump(struct iser_data_buf *data,
268			       struct ib_device *ibdev)
269{
270	struct scatterlist *sgl = (struct scatterlist *)data->buf;
271	struct scatterlist *sg;
272	int i;
273
274	for_each_sg(sgl, sg, data->dma_nents, i)
275		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
276			 "off:0x%x sz:0x%x dma_len:0x%x\n",
277			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
278			 sg_page(sg), sg->offset,
279			 sg->length, ib_sg_dma_len(ibdev, sg));
280}
281
282static void iser_dump_page_vec(struct iser_page_vec *page_vec)
283{
284	int i;
285
286	iser_err("page vec length %d data size %d\n",
287		 page_vec->length, page_vec->data_size);
288	for (i = 0; i < page_vec->length; i++)
289		iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
290}
291
292static void iser_page_vec_build(struct iser_data_buf *data,
293				struct iser_page_vec *page_vec,
294				struct ib_device *ibdev)
295{
296	int page_vec_len = 0;
297
298	page_vec->length = 0;
299	page_vec->offset = 0;
300
301	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
302	page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
303					   &page_vec->offset,
304					   &page_vec->data_size);
305	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);
306
307	page_vec->length = page_vec_len;
308
309	if (page_vec_len * SIZE_4K < page_vec->data_size) {
310		iser_err("page_vec too short to hold this SG\n");
311		iser_data_buf_dump(data, ibdev);
312		iser_dump_page_vec(page_vec);
313		BUG();
314	}
315}
316
317int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
318			    struct iser_data_buf *data,
319			    enum iser_data_dir iser_dir,
320			    enum dma_data_direction dma_dir)
321{
322	struct ib_device *dev;
323
324	iser_task->dir[iser_dir] = 1;
325	dev = iser_task->ib_conn->device->ib_device;
326
327	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
328	if (data->dma_nents == 0) {
329		iser_err("dma_map_sg failed!!!\n");
330		return -EINVAL;
331	}
332	return 0;
333}
334
335void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
336			      struct iser_data_buf *data)
337{
338	struct ib_device *dev;
339
340	dev = iser_task->ib_conn->device->ib_device;
341	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
342}
343
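/*
 * Bounce-buffer fallback for scatterlists that violate the 4K alignment
 * requirement: unmap the original SG, allocate a contiguous copy buffer
 * (kmalloc or free pages depending on ISER_KMALLOC_THRESHOLD), copy the
 * payload in for writes, and DMA-map the copy instead.
 */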
344static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
345			      struct ib_device *ibdev,
346			      struct iser_data_buf *mem,
347			      struct iser_data_buf *mem_copy,
348			      enum iser_data_dir cmd_dir,
349			      int aligned_len)
350{
351	struct iscsi_conn    *iscsi_conn = iser_task->ib_conn->iscsi_conn;
352
353	iscsi_conn->fmr_unalign_cnt++;
354	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
355		  aligned_len, mem->size);
356
357	if (iser_debug_level > 0)
358		iser_data_buf_dump(mem, ibdev);
359
360	/* unmap the command data before accessing it */
361	iser_dma_unmap_task_data(iser_task, mem);
362
363	/* allocate the copy buffer; if we are writing, copy the */
364	/* unaligned scatterlist into it, then dma map the copy   */
365	if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
366		return -ENOMEM;
367
368	return 0;
369}
370
371/**
372 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
373 * using FMR (if possible), obtaining the rkey and va
374 *
375 * returns 0 on success, errno code on failure
376 */
377int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
378			  enum iser_data_dir cmd_dir)
379{
380	struct iser_conn     *ib_conn = iser_task->ib_conn;
381	struct iser_device   *device = ib_conn->device;
382	struct ib_device     *ibdev = device->ib_device;
383	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
384	struct iser_regd_buf *regd_buf;
385	int aligned_len;
386	int err;
387	int i;
388	struct scatterlist *sg;
389
390	regd_buf = &iser_task->rdma_regd[cmd_dir];
391
392	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
393	if (aligned_len != mem->dma_nents) {
394		err = fall_to_bounce_buf(iser_task, ibdev, mem,
395					 &iser_task->data_copy[cmd_dir],
396					 cmd_dir, aligned_len);
397		if (err) {
398			iser_err("failed to allocate bounce buffer\n");
399			return err;
400		}
401		mem = &iser_task->data_copy[cmd_dir];
402	}
403
404	/* if there is a single dma entry, FMR is not needed */
405	if (mem->dma_nents == 1) {
406		sg = (struct scatterlist *)mem->buf;
407
408		regd_buf->reg.lkey = device->mr->lkey;
409		regd_buf->reg.rkey = device->mr->rkey;
410		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
411		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
412		regd_buf->reg.is_mr = 0;
413
414		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X  "
415			 "va: 0x%08lX sz: %ld]\n",
416			 (unsigned int)regd_buf->reg.lkey,
417			 (unsigned int)regd_buf->reg.rkey,
418			 (unsigned long)regd_buf->reg.va,
419			 (unsigned long)regd_buf->reg.len);
420	} else { /* use FMR for multiple dma entries */
421		iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
422		err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
423					&regd_buf->reg);
424		if (err && err != -EAGAIN) {
425			iser_data_buf_dump(mem, ibdev);
426			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
427				 mem->dma_nents,
428				 ntoh24(iser_task->desc.iscsi_header.dlength));
429			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
430				 ib_conn->fmr.page_vec->data_size,
431				 ib_conn->fmr.page_vec->length,
432				 ib_conn->fmr.page_vec->offset);
433			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
434				iser_err("page_vec[%d] = 0x%llx\n", i,
435					 (unsigned long long) ib_conn->fmr.page_vec->pages[i]);
436		}
437		if (err)
438			return err;
439	}
440	return 0;
441}
442
443static inline enum ib_t10_dif_type
444scsi2ib_prot_type(unsigned char prot_type)
445{
446	switch (prot_type) {
447	case SCSI_PROT_DIF_TYPE0:
448		return IB_T10DIF_NONE;
449	case SCSI_PROT_DIF_TYPE1:
450		return IB_T10DIF_TYPE1;
451	case SCSI_PROT_DIF_TYPE2:
452		return IB_T10DIF_TYPE2;
453	case SCSI_PROT_DIF_TYPE3:
454		return IB_T10DIF_TYPE3;
455	default:
456		return IB_T10DIF_NONE;
457	}
458}
459
460
461static int
462iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
463{
464	unsigned char scsi_ptype = scsi_get_prot_type(sc);
465
466	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
467	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
468	sig_attrs->mem.sig.dif.pi_interval = sc->device->sector_size;
469	sig_attrs->wire.sig.dif.pi_interval = sc->device->sector_size;
470
471	switch (scsi_get_prot_op(sc)) {
472	case SCSI_PROT_WRITE_INSERT:
473	case SCSI_PROT_READ_STRIP:
474		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
475		sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
476		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
477		sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
478						  0xffffffff;
479		break;
480	case SCSI_PROT_READ_INSERT:
481	case SCSI_PROT_WRITE_STRIP:
482		sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
483		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
484		sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
485						 0xffffffff;
486		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
487		break;
488	case SCSI_PROT_READ_PASS:
489	case SCSI_PROT_WRITE_PASS:
490		sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
491		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
492		sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
493						 0xffffffff;
494		sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
495		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
496		sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
497						  0xffffffff;
498		break;
499	default:
500		iser_err("Unsupported PI operation %d\n",
501			 scsi_get_prot_op(sc));
502		return -EINVAL;
503	}
504	return 0;
505}
506
507
508static int
509iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
510{
511	switch (scsi_get_prot_type(sc)) {
512	case SCSI_PROT_DIF_TYPE0:
513		*mask = 0x0;
514		break;
515	case SCSI_PROT_DIF_TYPE1:
516	case SCSI_PROT_DIF_TYPE2:
517		*mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
518		break;
519	case SCSI_PROT_DIF_TYPE3:
520		*mask = ISER_CHECK_GUARD;
521		break;
522	default:
523		iser_err("Unsupported protection type %d\n",
524			 scsi_get_prot_type(sc));
525		return -EINVAL;
526	}
527
528	return 0;
529}
530
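/*
 * Older signature registration flow: unlike the v6.8 code above, which
 * chains an IB_WR_REG_MR_INTEGRITY in front of the task's send WR, this
 * version posts an IB_WR_REG_SIG_MR work request directly with
 * ib_post_send(), optionally preceded by a LOCAL_INV and a key bump.
 */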
531static int
532iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
533		struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
534		struct ib_sge *prot_sge, struct ib_sge *sig_sge)
535{
536	struct iser_conn *ib_conn = iser_task->ib_conn;
537	struct iser_pi_context *pi_ctx = desc->pi_ctx;
538	struct ib_send_wr sig_wr, inv_wr;
539	struct ib_send_wr *bad_wr, *wr = NULL;
540	struct ib_sig_attrs sig_attrs;
541	int ret;
542	u32 key;
543
544	memset(&sig_attrs, 0, sizeof(sig_attrs));
545	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
546	if (ret)
547		goto err;
548
549	ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
550	if (ret)
551		goto err;
552
553	if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
554		memset(&inv_wr, 0, sizeof(inv_wr));
555		inv_wr.opcode = IB_WR_LOCAL_INV;
556		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
557		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
558		wr = &inv_wr;
559		/* Bump the key */
560		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
561		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
562	}
563
564	memset(&sig_wr, 0, sizeof(sig_wr));
565	sig_wr.opcode = IB_WR_REG_SIG_MR;
566	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
567	sig_wr.sg_list = data_sge;
568	sig_wr.num_sge = 1;
569	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
570	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
571	if (scsi_prot_sg_count(iser_task->sc))
572		sig_wr.wr.sig_handover.prot = prot_sge;
573	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
574					      IB_ACCESS_REMOTE_READ |
575					      IB_ACCESS_REMOTE_WRITE;
576
577	if (!wr)
578		wr = &sig_wr;
579	else
580		wr->next = &sig_wr;
581
582	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
583	if (ret) {
584		iser_err("reg_sig_mr failed, ret:%d\n", ret);
585		goto err;
586	}
587	desc->reg_indicators &= ~ISER_SIG_KEY_VALID;
588
589	sig_sge->lkey = pi_ctx->sig_mr->lkey;
590	sig_sge->addr = 0;
591	sig_sge->length = data_sge->length + prot_sge->length;
592	if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
593	    scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
594		sig_sge->length += (data_sge->length /
595				   iser_task->sc->device->sector_size) * 8;
596	}
597
598	iser_dbg("sig_sge: addr: 0x%llx  length: %u lkey: 0x%x\n",
599		 sig_sge->addr, sig_sge->length,
600		 sig_sge->lkey);
601err:
602	return ret;
603}
604
605static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
606			    struct iser_regd_buf *regd_buf,
607			    struct iser_data_buf *mem,
608			    enum iser_reg_indicator ind,
609			    struct ib_sge *sge)
610{
611	struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
612	struct iser_conn *ib_conn = iser_task->ib_conn;
613	struct iser_device *device = ib_conn->device;
614	struct ib_device *ibdev = device->ib_device;
615	struct ib_mr *mr;
616	struct ib_fast_reg_page_list *frpl;
617	struct ib_send_wr fastreg_wr, inv_wr;
618	struct ib_send_wr *bad_wr, *wr = NULL;
619	u8 key;
620	int ret, offset, size, plen;
621
622	/* if there is a single dma entry, the dma mr suffices */
623	if (mem->dma_nents == 1) {
624		struct scatterlist *sg = (struct scatterlist *)mem->buf;
625
626		sge->lkey = device->mr->lkey;
627		sge->addr   = ib_sg_dma_address(ibdev, &sg[0]);
628		sge->length  = ib_sg_dma_len(ibdev, &sg[0]);
629
630		iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
631			 sge->lkey, sge->addr, sge->length);
632		return 0;
633	}
634
635	if (ind == ISER_DATA_KEY_VALID) {
636		mr = desc->data_mr;
637		frpl = desc->data_frpl;
638	} else {
639		mr = desc->pi_ctx->prot_mr;
640		frpl = desc->pi_ctx->prot_frpl;
641	}
642
643	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
644				   &offset, &size);
645	if (plen * SIZE_4K < size) {
646		iser_err("fast reg page_list too short to hold this SG\n");
647		return -EINVAL;
648	}
649
650	if (!(desc->reg_indicators & ind)) {
651		memset(&inv_wr, 0, sizeof(inv_wr));
652		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
653		inv_wr.opcode = IB_WR_LOCAL_INV;
654		inv_wr.ex.invalidate_rkey = mr->rkey;
655		wr = &inv_wr;
656		/* Bump the key */
657		key = (u8)(mr->rkey & 0x000000FF);
658		ib_update_fast_reg_key(mr, ++key);
659	}
660
661	/* Prepare FASTREG WR */
662	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
663	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
664	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
665	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
666	fastreg_wr.wr.fast_reg.page_list = frpl;
667	fastreg_wr.wr.fast_reg.page_list_len = plen;
668	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
669	fastreg_wr.wr.fast_reg.length = size;
670	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
671	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
672					       IB_ACCESS_REMOTE_WRITE |
673					       IB_ACCESS_REMOTE_READ);
674
675	if (!wr)
676		wr = &fastreg_wr;
677	else
678		wr->next = &fastreg_wr;
679
680	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
681	if (ret) {
682		iser_err("fast registration failed, ret:%d\n", ret);
683		return ret;
684	}
685	desc->reg_indicators &= ~ind;
686
687	sge->lkey = mr->lkey;
688	sge->addr = frpl->page_list[0] + offset;
689	sge->length = size;
690
691	return ret;
692}
693
694/**
695 * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
696 * using a Fast Registration WR (if possible), obtaining the rkey and va
697 *
698 * returns 0 on success, errno code on failure
699 */
700int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
701			      enum iser_data_dir cmd_dir)
702{
703	struct iser_conn *ib_conn = iser_task->ib_conn;
704	struct iser_device *device = ib_conn->device;
705	struct ib_device *ibdev = device->ib_device;
706	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
707	struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
708	struct fast_reg_descriptor *desc = NULL;
709	struct ib_sge data_sge;
710	int err, aligned_len;
711	unsigned long flags;
712
713	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
714	if (aligned_len != mem->dma_nents) {
715		err = fall_to_bounce_buf(iser_task, ibdev, mem,
716					 &iser_task->data_copy[cmd_dir],
717					 cmd_dir, aligned_len);
718		if (err) {
719			iser_err("failed to allocate bounce buffer\n");
720			return err;
721		}
722		mem = &iser_task->data_copy[cmd_dir];
723	}
724
725	if (mem->dma_nents != 1 ||
726	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
727		spin_lock_irqsave(&ib_conn->lock, flags);
728		desc = list_first_entry(&ib_conn->fastreg.pool,
729					struct fast_reg_descriptor, list);
730		list_del(&desc->list);
731		spin_unlock_irqrestore(&ib_conn->lock, flags);
732		regd_buf->reg.mem_h = desc;
733	}
734
735	err = iser_fast_reg_mr(iser_task, regd_buf, mem,
736			       ISER_DATA_KEY_VALID, &data_sge);
737	if (err)
738		goto err_reg;
739
740	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
741		struct ib_sge prot_sge, sig_sge;
742
743		memset(&prot_sge, 0, sizeof(prot_sge));
744		if (scsi_prot_sg_count(iser_task->sc)) {
745			mem = &iser_task->prot[cmd_dir];
746			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
747			if (aligned_len != mem->dma_nents) {
748				err = fall_to_bounce_buf(iser_task, ibdev, mem,
749							 &iser_task->prot_copy[cmd_dir],
750							 cmd_dir, aligned_len);
751				if (err) {
752					iser_err("failed to allocate bounce buffer\n");
753					return err;
754				}
755				mem = &iser_task->prot_copy[cmd_dir];
756			}
757
758			err = iser_fast_reg_mr(iser_task, regd_buf, mem,
759					       ISER_PROT_KEY_VALID, &prot_sge);
760			if (err)
761				goto err_reg;
762		}
763
764		err = iser_reg_sig_mr(iser_task, desc, &data_sge,
765				      &prot_sge, &sig_sge);
766		if (err) {
767			iser_err("Failed to register signature mr\n");
768			return err;
769		}
770		desc->reg_indicators |= ISER_FASTREG_PROTECTED;
771
772		regd_buf->reg.lkey = sig_sge.lkey;
773		regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
774		regd_buf->reg.va = sig_sge.addr;
775		regd_buf->reg.len = sig_sge.length;
776		regd_buf->reg.is_mr = 1;
777	} else {
778		if (desc) {
779			regd_buf->reg.rkey = desc->data_mr->rkey;
780			regd_buf->reg.is_mr = 1;
781		} else {
782			regd_buf->reg.rkey = device->mr->rkey;
783			regd_buf->reg.is_mr = 0;
784		}
785
786		regd_buf->reg.lkey = data_sge.lkey;
787		regd_buf->reg.va = data_sge.addr;
788		regd_buf->reg.len = data_sge.length;
789	}
790
791	return 0;
792err_reg:
793	if (desc) {
794		spin_lock_irqsave(&ib_conn->lock, flags);
795		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
796		spin_unlock_irqrestore(&ib_conn->lock, flags);
797	}
798
799	return err;
800}