1// SPDX-License-Identifier: GPL-2.0-or-later
2/*******************************************************************************
3 * This file contains iSCSI extensions for RDMA (iSER) Verbs
4 *
5 * (c) Copyright 2013 Datera, Inc.
6 *
7 * Nicholas A. Bellinger <nab@linux-iscsi.org>
8 *
9 ****************************************************************************/
10
11#include <linux/string.h>
12#include <linux/module.h>
13#include <linux/scatterlist.h>
14#include <linux/socket.h>
15#include <linux/in.h>
16#include <linux/in6.h>
17#include <rdma/ib_verbs.h>
18#include <rdma/ib_cm.h>
19#include <rdma/rdma_cm.h>
20#include <target/target_core_base.h>
21#include <target/target_core_fabric.h>
22#include <target/iscsi/iscsi_transport.h>
23#include <linux/semaphore.h>
24
25#include "ib_isert.h"
26
27static int isert_debug_level;
28module_param_named(debug_level, isert_debug_level, int, 0644);
29MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
30
31static int isert_sg_tablesize_set(const char *val,
32 const struct kernel_param *kp);
33static const struct kernel_param_ops sg_tablesize_ops = {
34 .set = isert_sg_tablesize_set,
35 .get = param_get_int,
36};
37
38static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
39module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
40MODULE_PARM_DESC(sg_tablesize,
41 "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");
42
43static DEFINE_MUTEX(device_list_mutex);
44static LIST_HEAD(device_list);
45static struct workqueue_struct *isert_login_wq;
46static struct workqueue_struct *isert_comp_wq;
47static struct workqueue_struct *isert_release_wq;
48
49static int
50isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd);
51static int
52isert_login_post_recv(struct isert_conn *isert_conn);
53static int
54isert_rdma_accept(struct isert_conn *isert_conn);
55struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
56
57static void isert_release_work(struct work_struct *work);
58static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
59static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
60static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
61static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
62
63static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp)
64{
65 int n = 0, ret;
66
67 ret = kstrtoint(val, 10, &n);
68 if (ret != 0 || n < ISCSI_ISER_MIN_SG_TABLESIZE ||
69 n > ISCSI_ISER_MAX_SG_TABLESIZE)
70 return -EINVAL;
71
72 return param_set_int(val, kp);
73}
74
75static inline bool
76isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
77{
78 return (conn->pi_support &&
79 cmd->prot_op != TARGET_PROT_NORMAL);
80}
81
82static void
83isert_qp_event_callback(struct ib_event *e, void *context)
84{
85 struct isert_conn *isert_conn = context;
86
87 isert_err("%s (%d): conn %p\n",
88 ib_event_msg(e->event), e->event, isert_conn);
89
90 switch (e->event) {
91 case IB_EVENT_COMM_EST:
92 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
93 break;
94 case IB_EVENT_QP_LAST_WQE_REACHED:
95 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
96 break;
97 default:
98 break;
99 }
100}
101
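/*
 * Allocate a completion queue from the device's CQ pool and create the
 * RC queue pair for this connection.  On failure the CQ is returned to
 * the pool and an ERR_PTR is propagated to the caller.
 */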
102static struct ib_qp *
103isert_create_qp(struct isert_conn *isert_conn,
104 struct rdma_cm_id *cma_id)
105{
106 u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;
107 struct isert_device *device = isert_conn->device;
108 struct ib_device *ib_dev = device->ib_device;
109 struct ib_qp_init_attr attr;
110 int ret, factor;
111
112 isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);
113 if (IS_ERR(isert_conn->cq)) {
114 isert_err("Unable to allocate cq\n");
115 ret = PTR_ERR(isert_conn->cq);
116 return ERR_PTR(ret);
117 }
118 isert_conn->cq_size = cq_size;
119
120 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
121 attr.event_handler = isert_qp_event_callback;
122 attr.qp_context = isert_conn;
123 attr.send_cq = isert_conn->cq;
124 attr.recv_cq = isert_conn->cq;
125 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
126 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
127 factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num,
128 isert_sg_tablesize);
129 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor;
130 attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
131 attr.cap.max_recv_sge = 1;
132 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
133 attr.qp_type = IB_QPT_RC;
134 if (device->pi_capable)
135 attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
136
137 ret = rdma_create_qp(cma_id, device->pd, &attr);
138 if (ret) {
139 isert_err("rdma_create_qp failed: %d\n", ret);
140 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
141
142 return ERR_PTR(ret);
143 }
144
145 return cma_id->qp;
146}
147
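/*
 * Allocate and DMA-map the array of receive descriptors used for
 * incoming iSCSI PDUs.  If any mapping fails, all previously mapped
 * descriptors are unmapped and the array is freed.
 */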
148static int
149isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
150{
151 struct isert_device *device = isert_conn->device;
152 struct ib_device *ib_dev = device->ib_device;
153 struct iser_rx_desc *rx_desc;
154 struct ib_sge *rx_sg;
155 u64 dma_addr;
156 int i, j;
157
158 isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS,
159 sizeof(struct iser_rx_desc),
160 GFP_KERNEL);
161 if (!isert_conn->rx_descs)
162 return -ENOMEM;
163
164 rx_desc = isert_conn->rx_descs;
165
166 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
167 dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
168 ISER_RX_SIZE, DMA_FROM_DEVICE);
169 if (ib_dma_mapping_error(ib_dev, dma_addr))
170 goto dma_map_fail;
171
172 rx_desc->dma_addr = dma_addr;
173
174 rx_sg = &rx_desc->rx_sg;
175 rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
176 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
177 rx_sg->lkey = device->pd->local_dma_lkey;
178 rx_desc->rx_cqe.done = isert_recv_done;
179 }
180
181 return 0;
182
183dma_map_fail:
184 rx_desc = isert_conn->rx_descs;
185 for (j = 0; j < i; j++, rx_desc++) {
186 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
187 ISER_RX_SIZE, DMA_FROM_DEVICE);
188 }
189 kfree(isert_conn->rx_descs);
190 isert_conn->rx_descs = NULL;
191 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
192 return -ENOMEM;
193}
194
195static void
196isert_free_rx_descriptors(struct isert_conn *isert_conn)
197{
198 struct ib_device *ib_dev = isert_conn->device->ib_device;
199 struct iser_rx_desc *rx_desc;
200 int i;
201
202 if (!isert_conn->rx_descs)
203 return;
204
205 rx_desc = isert_conn->rx_descs;
206 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
207 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
208 ISER_RX_SIZE, DMA_FROM_DEVICE);
209 }
210
211 kfree(isert_conn->rx_descs);
212 isert_conn->rx_descs = NULL;
213}
214
215static int
216isert_create_device_ib_res(struct isert_device *device)
217{
218 struct ib_device *ib_dev = device->ib_device;
219 int ret;
220
221 isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge: %d\n",
222 ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
223 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
224
225 device->pd = ib_alloc_pd(ib_dev, 0);
226 if (IS_ERR(device->pd)) {
227 ret = PTR_ERR(device->pd);
228 isert_err("failed to allocate pd, device %p, ret=%d\n",
229 device, ret);
230 return ret;
231 }
232
233 /* Check signature cap */
234 if (ib_dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)
235 device->pi_capable = true;
236 else
237 device->pi_capable = false;
238
239 return 0;
240}
241
242static void
243isert_free_device_ib_res(struct isert_device *device)
244{
245 isert_info("device %p\n", device);
246
247 ib_dealloc_pd(device->pd);
248}
249
250static void
251isert_device_put(struct isert_device *device)
252{
253 mutex_lock(&device_list_mutex);
254 device->refcount--;
255 isert_info("device %p refcount %d\n", device, device->refcount);
256 if (!device->refcount) {
257 isert_free_device_ib_res(device);
258 list_del(&device->dev_node);
259 kfree(device);
260 }
261 mutex_unlock(&device_list_mutex);
262}
263
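/*
 * Find an existing isert_device for the CM id's ib_device (matched by
 * node GUID) and take a reference, or allocate a new one, set up its
 * PD, and add it to the global device list.
 */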
264static struct isert_device *
265isert_device_get(struct rdma_cm_id *cma_id)
266{
267 struct isert_device *device;
268 int ret;
269
270 mutex_lock(&device_list_mutex);
271 list_for_each_entry(device, &device_list, dev_node) {
272 if (device->ib_device->node_guid == cma_id->device->node_guid) {
273 device->refcount++;
274 isert_info("Found iser device %p refcount %d\n",
275 device, device->refcount);
276 mutex_unlock(&device_list_mutex);
277 return device;
278 }
279 }
280
281 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
282 if (!device) {
283 mutex_unlock(&device_list_mutex);
284 return ERR_PTR(-ENOMEM);
285 }
286
287 INIT_LIST_HEAD(&device->dev_node);
288
289 device->ib_device = cma_id->device;
290 ret = isert_create_device_ib_res(device);
291 if (ret) {
292 kfree(device);
293 mutex_unlock(&device_list_mutex);
294 return ERR_PTR(ret);
295 }
296
297 device->refcount++;
298 list_add_tail(&device->dev_node, &device_list);
299 isert_info("Created a new iser device %p refcount %d\n",
300 device, device->refcount);
301 mutex_unlock(&device_list_mutex);
302
303 return device;
304}
305
306static void
307isert_init_conn(struct isert_conn *isert_conn)
308{
309 isert_conn->state = ISER_CONN_INIT;
310 INIT_LIST_HEAD(&isert_conn->node);
311 init_completion(&isert_conn->login_comp);
312 init_completion(&isert_conn->login_req_comp);
313 init_waitqueue_head(&isert_conn->rem_wait);
314 kref_init(&isert_conn->kref);
315 mutex_init(&isert_conn->mutex);
316 INIT_WORK(&isert_conn->release_work, isert_release_work);
317}
318
319static void
320isert_free_login_buf(struct isert_conn *isert_conn)
321{
322 struct ib_device *ib_dev = isert_conn->device->ib_device;
323
324 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
325 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
326 kfree(isert_conn->login_rsp_buf);
327
328 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
329 ISER_RX_SIZE, DMA_FROM_DEVICE);
330 kfree(isert_conn->login_desc);
331}
332
333static int
334isert_alloc_login_buf(struct isert_conn *isert_conn,
335 struct ib_device *ib_dev)
336{
337 int ret;
338
339 isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
340 GFP_KERNEL);
341 if (!isert_conn->login_desc)
342 return -ENOMEM;
343
344 isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
345 isert_conn->login_desc->buf,
346 ISER_RX_SIZE, DMA_FROM_DEVICE);
347 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
348 if (ret) {
349 isert_err("login_desc dma mapping error: %d\n", ret);
350 isert_conn->login_desc->dma_addr = 0;
351 goto out_free_login_desc;
352 }
353
354 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
355 if (!isert_conn->login_rsp_buf) {
356 ret = -ENOMEM;
357 goto out_unmap_login_desc;
358 }
359
360 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
361 isert_conn->login_rsp_buf,
362 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
363 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
364 if (ret) {
365 isert_err("login_rsp_dma mapping error: %d\n", ret);
366 isert_conn->login_rsp_dma = 0;
367 goto out_free_login_rsp_buf;
368 }
369
370 return 0;
371
372out_free_login_rsp_buf:
373 kfree(isert_conn->login_rsp_buf);
374out_unmap_login_desc:
375 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
376 ISER_RX_SIZE, DMA_FROM_DEVICE);
377out_free_login_desc:
378 kfree(isert_conn->login_desc);
379 return ret;
380}
381
382static void
383isert_set_nego_params(struct isert_conn *isert_conn,
384 struct rdma_conn_param *param)
385{
386 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;
387
388 /* Set max inflight RDMA READ requests */
389 isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
390 attr->max_qp_init_rd_atom);
391 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
392
393 if (param->private_data) {
394 u8 flags = *(u8 *)param->private_data;
395
396 /*
397 * use remote invalidation if both the initiator
398 * and the HCA support it
399 */
400 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
401 (attr->device_cap_flags &
402 IB_DEVICE_MEM_MGT_EXTENSIONS);
403 if (isert_conn->snd_w_inv)
404 isert_info("Using remote invalidation\n");
405 }
406}
407
408static void
409isert_destroy_qp(struct isert_conn *isert_conn)
410{
411 ib_destroy_qp(isert_conn->qp);
412 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
413}
414
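/*
 * Handle RDMA_CM_EVENT_CONNECT_REQUEST: allocate the isert_conn, set up
 * the login buffers and QP, post the first login receive and accept the
 * connection.  Any failure rejects the CM request.
 */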
415static int
416isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
417{
418 struct isert_np *isert_np = cma_id->context;
419 struct iscsi_np *np = isert_np->np;
420 struct isert_conn *isert_conn;
421 struct isert_device *device;
422 int ret = 0;
423
424 spin_lock_bh(&np->np_thread_lock);
425 if (!np->enabled) {
426 spin_unlock_bh(&np->np_thread_lock);
427 isert_dbg("iscsi_np is not enabled, rejecting connect request\n");
428 return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
429 }
430 spin_unlock_bh(&np->np_thread_lock);
431
432 isert_dbg("cma_id: %p, portal: %p\n",
433 cma_id, cma_id->context);
434
435 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
436 if (!isert_conn)
437 return -ENOMEM;
438
439 isert_init_conn(isert_conn);
440 isert_conn->cm_id = cma_id;
441
442 device = isert_device_get(cma_id);
443 if (IS_ERR(device)) {
444 ret = PTR_ERR(device);
445 goto out;
446 }
447 isert_conn->device = device;
448
449 ret = isert_alloc_login_buf(isert_conn, cma_id->device);
450 if (ret)
451 goto out_conn_dev;
452
453 isert_set_nego_params(isert_conn, &event->param.conn);
454
455 isert_conn->qp = isert_create_qp(isert_conn, cma_id);
456 if (IS_ERR(isert_conn->qp)) {
457 ret = PTR_ERR(isert_conn->qp);
458 goto out_rsp_dma_map;
459 }
460
461 ret = isert_login_post_recv(isert_conn);
462 if (ret)
463 goto out_destroy_qp;
464
465 ret = isert_rdma_accept(isert_conn);
466 if (ret)
467 goto out_destroy_qp;
468
469 mutex_lock(&isert_np->mutex);
470 list_add_tail(&isert_conn->node, &isert_np->accepted);
471 mutex_unlock(&isert_np->mutex);
472
473 return 0;
474
475out_destroy_qp:
476 isert_destroy_qp(isert_conn);
477out_rsp_dma_map:
478 isert_free_login_buf(isert_conn);
479out_conn_dev:
480 isert_device_put(device);
481out:
482 kfree(isert_conn);
483 rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
484 return ret;
485}
486
487static void
488isert_connect_release(struct isert_conn *isert_conn)
489{
490 struct isert_device *device = isert_conn->device;
491
492 isert_dbg("conn %p\n", isert_conn);
493
494 BUG_ON(!device);
495
496 isert_free_rx_descriptors(isert_conn);
497 if (isert_conn->cm_id &&
498 !isert_conn->dev_removed)
499 rdma_destroy_id(isert_conn->cm_id);
500
501 if (isert_conn->qp)
502 isert_destroy_qp(isert_conn);
503
504 if (isert_conn->login_desc)
505 isert_free_login_buf(isert_conn);
506
507 isert_device_put(device);
508
509 if (isert_conn->dev_removed)
510 wake_up_interruptible(&isert_conn->rem_wait);
511 else
512 kfree(isert_conn);
513}
514
515static void
516isert_connected_handler(struct rdma_cm_id *cma_id)
517{
518 struct isert_conn *isert_conn = cma_id->qp->qp_context;
519 struct isert_np *isert_np = cma_id->context;
520
521 isert_info("conn %p\n", isert_conn);
522
523 mutex_lock(&isert_conn->mutex);
524 isert_conn->state = ISER_CONN_UP;
525 kref_get(&isert_conn->kref);
526 mutex_unlock(&isert_conn->mutex);
527
528 mutex_lock(&isert_np->mutex);
529 list_move_tail(&isert_conn->node, &isert_np->pending);
530 mutex_unlock(&isert_np->mutex);
531
532 isert_info("np %p: Allow accept_np to continue\n", isert_np);
533 up(&isert_np->sem);
534}
535
536static void
537isert_release_kref(struct kref *kref)
538{
539 struct isert_conn *isert_conn = container_of(kref,
540 struct isert_conn, kref);
541
542 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
543 current->pid);
544
545 isert_connect_release(isert_conn);
546}
547
548static void
549isert_put_conn(struct isert_conn *isert_conn)
550{
551 kref_put(&isert_conn->kref, isert_release_kref);
552}
553
554static void
555isert_handle_unbound_conn(struct isert_conn *isert_conn)
556{
557 struct isert_np *isert_np = isert_conn->cm_id->context;
558
559 mutex_lock(&isert_np->mutex);
560 if (!list_empty(&isert_conn->node)) {
561 /*
562 * This means iscsi doesn't know about this connection,
563 * so schedule the cleanup ourselves
564 */
565 list_del_init(&isert_conn->node);
566 isert_put_conn(isert_conn);
567 queue_work(isert_release_wq, &isert_conn->release_work);
568 }
569 mutex_unlock(&isert_np->mutex);
570}
571
572/**
573 * isert_conn_terminate() - Initiate connection termination
574 * @isert_conn: isert connection struct
575 *
576 * Notes:
577 * In case the connection state is BOUND, move the state to
578 * TERMINATING and start the teardown sequence (rdma_disconnect).
579 * In case the connection state is UP, complete the flush as well.
580 *
581 * This routine must be called with the connection mutex held, so it is
582 * safe to call multiple times.
583 */
584static void
585isert_conn_terminate(struct isert_conn *isert_conn)
586{
587 int err;
588
589 if (isert_conn->state >= ISER_CONN_TERMINATING)
590 return;
591
592 isert_info("Terminating conn %p state %d\n",
593 isert_conn, isert_conn->state);
594 isert_conn->state = ISER_CONN_TERMINATING;
595 err = rdma_disconnect(isert_conn->cm_id);
596 if (err)
597 isert_warn("Failed rdma_disconnect isert_conn %p\n",
598 isert_conn);
599}
600
601static int
602isert_np_cma_handler(struct isert_np *isert_np,
603 enum rdma_cm_event_type event)
604{
605 isert_dbg("%s (%d): isert np %p\n",
606 rdma_event_msg(event), event, isert_np);
607
608 switch (event) {
609 case RDMA_CM_EVENT_DEVICE_REMOVAL:
610 isert_np->cm_id = NULL;
611 break;
612 case RDMA_CM_EVENT_ADDR_CHANGE:
613 isert_np->cm_id = isert_setup_id(isert_np);
614 if (IS_ERR(isert_np->cm_id)) {
615 isert_err("isert np %p setup id failed: %ld\n",
616 isert_np, PTR_ERR(isert_np->cm_id));
617 isert_np->cm_id = NULL;
618 }
619 break;
620 default:
621 isert_err("isert np %p Unexpected event %d\n",
622 isert_np, event);
623 }
624
625 return -1;
626}
627
628static int
629isert_disconnected_handler(struct rdma_cm_id *cma_id,
630 enum rdma_cm_event_type event)
631{
632 struct isert_conn *isert_conn = cma_id->qp->qp_context;
633
634 mutex_lock(&isert_conn->mutex);
635 switch (isert_conn->state) {
636 case ISER_CONN_TERMINATING:
637 break;
638 case ISER_CONN_UP:
639 isert_conn_terminate(isert_conn);
640 ib_drain_qp(isert_conn->qp);
641 isert_handle_unbound_conn(isert_conn);
642 break;
643 case ISER_CONN_BOUND:
644 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
645 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
646 break;
647 default:
648 isert_warn("conn %p terminating in state %d\n",
649 isert_conn, isert_conn->state);
650 }
651 mutex_unlock(&isert_conn->mutex);
652
653 return 0;
654}
655
656static int
657isert_connect_error(struct rdma_cm_id *cma_id)
658{
659 struct isert_conn *isert_conn = cma_id->qp->qp_context;
660 struct isert_np *isert_np = cma_id->context;
661
662 ib_drain_qp(isert_conn->qp);
663
664 mutex_lock(&isert_np->mutex);
665 list_del_init(&isert_conn->node);
666 mutex_unlock(&isert_np->mutex);
667 isert_conn->cm_id = NULL;
668 isert_put_conn(isert_conn);
669
670 return -1;
671}
672
673static int
674isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
675{
676 struct isert_np *isert_np = cma_id->context;
677 struct isert_conn *isert_conn;
678 int ret = 0;
679
680 isert_info("%s (%d): status %d id %p np %p\n",
681 rdma_event_msg(event->event), event->event,
682 event->status, cma_id, cma_id->context);
683
684 if (isert_np->cm_id == cma_id)
685 return isert_np_cma_handler(cma_id->context, event->event);
686
687 switch (event->event) {
688 case RDMA_CM_EVENT_CONNECT_REQUEST:
689 ret = isert_connect_request(cma_id, event);
690 if (ret)
691 isert_err("failed handle connect request %d\n", ret);
692 break;
693 case RDMA_CM_EVENT_ESTABLISHED:
694 isert_connected_handler(cma_id);
695 break;
696 case RDMA_CM_EVENT_ADDR_CHANGE:
697 case RDMA_CM_EVENT_DISCONNECTED:
698 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
699 ret = isert_disconnected_handler(cma_id, event->event);
700 break;
701 case RDMA_CM_EVENT_DEVICE_REMOVAL:
702 isert_conn = cma_id->qp->qp_context;
703 isert_conn->dev_removed = true;
704 isert_disconnected_handler(cma_id, event->event);
705 wait_event_interruptible(isert_conn->rem_wait,
706 isert_conn->state == ISER_CONN_DOWN);
707 kfree(isert_conn);
708 /*
709 * return non-zero from the callback to destroy
710 * the rdma cm id
711 */
712 return 1;
713 case RDMA_CM_EVENT_REJECTED:
714 isert_info("Connection rejected: %s\n",
715 rdma_reject_msg(cma_id, event->status));
716 fallthrough;
717 case RDMA_CM_EVENT_UNREACHABLE:
718 case RDMA_CM_EVENT_CONNECT_ERROR:
719 ret = isert_connect_error(cma_id);
720 break;
721 default:
722 isert_err("Unhandled RDMA CMA event: %d\n", event->event);
723 break;
724 }
725
726 return ret;
727}
728
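/*
 * Post a chain of 'count' receive work requests, one per rx descriptor,
 * linked through rx_wr->next and terminated with a NULL next pointer.
 */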
729static int
730isert_post_recvm(struct isert_conn *isert_conn, u32 count)
731{
732 struct ib_recv_wr *rx_wr;
733 int i, ret;
734 struct iser_rx_desc *rx_desc;
735
736 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
737 rx_desc = &isert_conn->rx_descs[i];
738
739 rx_wr->wr_cqe = &rx_desc->rx_cqe;
740 rx_wr->sg_list = &rx_desc->rx_sg;
741 rx_wr->num_sge = 1;
742 rx_wr->next = rx_wr + 1;
743 rx_desc->in_use = false;
744 }
745 rx_wr--;
746 rx_wr->next = NULL; /* mark end of work requests list */
747
748 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
749 if (ret)
750 isert_err("ib_post_recv() failed with ret: %d\n", ret);
751
752 return ret;
753}
754
755static int
756isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
757{
758 struct ib_recv_wr rx_wr;
759 int ret;
760
761 if (!rx_desc->in_use) {
762 /*
763 * if the descriptor is not in use, we have already reposted it
764 * for recv, so just silently return
765 */
766 return 0;
767 }
768
769 rx_desc->in_use = false;
770 rx_wr.wr_cqe = &rx_desc->rx_cqe;
771 rx_wr.sg_list = &rx_desc->rx_sg;
772 rx_wr.num_sge = 1;
773 rx_wr.next = NULL;
774
775 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
776 if (ret)
777 isert_err("ib_post_recv() failed with ret: %d\n", ret);
778
779 return ret;
780}
781
782static int
783isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
784{
785 struct ib_device *ib_dev = isert_conn->cm_id->device;
786 struct ib_send_wr send_wr;
787 int ret;
788
789 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
790 ISER_HEADERS_LEN, DMA_TO_DEVICE);
791
792 tx_desc->tx_cqe.done = isert_login_send_done;
793
794 send_wr.next = NULL;
795 send_wr.wr_cqe = &tx_desc->tx_cqe;
796 send_wr.sg_list = tx_desc->tx_sg;
797 send_wr.num_sge = tx_desc->num_sge;
798 send_wr.opcode = IB_WR_SEND;
799 send_wr.send_flags = IB_SEND_SIGNALED;
800
801 ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
802 if (ret)
803 isert_err("ib_post_send() failed, ret: %d\n", ret);
804
805 return ret;
806}
807
808static void
809__isert_create_send_desc(struct isert_device *device,
810 struct iser_tx_desc *tx_desc)
811{
812
813 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
814 tx_desc->iser_header.flags = ISCSI_CTRL;
815
816 tx_desc->num_sge = 1;
817
818 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
819 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
820 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
821 }
822}
823
824static void
825isert_create_send_desc(struct isert_conn *isert_conn,
826 struct isert_cmd *isert_cmd,
827 struct iser_tx_desc *tx_desc)
828{
829 struct isert_device *device = isert_conn->device;
830 struct ib_device *ib_dev = device->ib_device;
831
832 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
833 ISER_HEADERS_LEN, DMA_TO_DEVICE);
834
835 __isert_create_send_desc(device, tx_desc);
836}
837
838static int
839isert_init_tx_hdrs(struct isert_conn *isert_conn,
840 struct iser_tx_desc *tx_desc)
841{
842 struct isert_device *device = isert_conn->device;
843 struct ib_device *ib_dev = device->ib_device;
844 u64 dma_addr;
845
846 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
847 ISER_HEADERS_LEN, DMA_TO_DEVICE);
848 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
849 isert_err("ib_dma_mapping_error() failed\n");
850 return -ENOMEM;
851 }
852
853 tx_desc->dma_addr = dma_addr;
854 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
855 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
856 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
857
858 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
859 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
860 tx_desc->tx_sg[0].lkey);
861
862 return 0;
863}
864
865static void
866isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
867 struct ib_send_wr *send_wr)
868{
869 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
870
871 tx_desc->tx_cqe.done = isert_send_done;
872 send_wr->wr_cqe = &tx_desc->tx_cqe;
873
874 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
875 send_wr->opcode = IB_WR_SEND_WITH_INV;
876 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
877 } else {
878 send_wr->opcode = IB_WR_SEND;
879 }
880
881 send_wr->sg_list = &tx_desc->tx_sg[0];
882 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
883 send_wr->send_flags = IB_SEND_SIGNALED;
884}
885
886static int
887isert_login_post_recv(struct isert_conn *isert_conn)
888{
889 struct ib_recv_wr rx_wr;
890 struct ib_sge sge;
891 int ret;
892
893 memset(&sge, 0, sizeof(struct ib_sge));
894 sge.addr = isert_conn->login_desc->dma_addr +
895 isert_get_hdr_offset(isert_conn->login_desc);
896 sge.length = ISER_RX_PAYLOAD_SIZE;
897 sge.lkey = isert_conn->device->pd->local_dma_lkey;
898
899 isert_dbg("Setup sge: addr: %llx length: %d lkey: 0x%08x\n",
900 sge.addr, sge.length, sge.lkey);
901
902 isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;
903
904 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
905 rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
906 rx_wr.sg_list = &sge;
907 rx_wr.num_sge = 1;
908
909 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
910 if (ret)
911 isert_err("ib_post_recv() failed: %d\n", ret);
912
913 return ret;
914}
915
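/*
 * Send a login response PDU.  Once the login completes, the full set of
 * rx descriptors is allocated and posted and the connection moves to
 * ISER_CONN_FULL_FEATURE; if the login exchange is still in progress,
 * another single login receive is posted for the next request.
 */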
916static int
917isert_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
918 u32 length)
919{
920 struct isert_conn *isert_conn = conn->context;
921 struct isert_device *device = isert_conn->device;
922 struct ib_device *ib_dev = device->ib_device;
923 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
924 int ret;
925
926 __isert_create_send_desc(device, tx_desc);
927
928 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
929 sizeof(struct iscsi_hdr));
930
931 isert_init_tx_hdrs(isert_conn, tx_desc);
932
933 if (length > 0) {
934 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
935
936 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
937 length, DMA_TO_DEVICE);
938
939 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
940
941 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
942 length, DMA_TO_DEVICE);
943
944 tx_dsg->addr = isert_conn->login_rsp_dma;
945 tx_dsg->length = length;
946 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
947 tx_desc->num_sge = 2;
948 }
949 if (!login->login_failed) {
950 if (login->login_complete) {
951 ret = isert_alloc_rx_descriptors(isert_conn);
952 if (ret)
953 return ret;
954
955 ret = isert_post_recvm(isert_conn,
956 ISERT_QP_MAX_RECV_DTOS);
957 if (ret)
958 return ret;
959
960 /* Now we are in FULL_FEATURE phase */
961 mutex_lock(&isert_conn->mutex);
962 isert_conn->state = ISER_CONN_FULL_FEATURE;
963 mutex_unlock(&isert_conn->mutex);
964 goto post_send;
965 }
966
967 ret = isert_login_post_recv(isert_conn);
968 if (ret)
969 return ret;
970 }
971post_send:
972 ret = isert_login_post_send(isert_conn, tx_desc);
973 if (ret)
974 return ret;
975
976 return 0;
977}
978
979static void
980isert_rx_login_req(struct isert_conn *isert_conn)
981{
982 struct iser_rx_desc *rx_desc = isert_conn->login_desc;
983 int rx_buflen = isert_conn->login_req_len;
984 struct iscsit_conn *conn = isert_conn->conn;
985 struct iscsi_login *login = conn->conn_login;
986 int size;
987
988 isert_info("conn %p\n", isert_conn);
989
990 WARN_ON_ONCE(!login);
991
992 if (login->first_request) {
993 struct iscsi_login_req *login_req =
994 (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
995 /*
996 * Setup the initial iscsi_login values from the leading
997 * login request PDU.
998 */
999 login->leading_connection = (!login_req->tsih) ? 1 : 0;
1000 login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
1001 login_req->flags);
1002 login->version_min = login_req->min_version;
1003 login->version_max = login_req->max_version;
1004 memcpy(login->isid, login_req->isid, 6);
1005 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
1006 login->init_task_tag = login_req->itt;
1007 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1008 login->cid = be16_to_cpu(login_req->cid);
1009 login->tsih = be16_to_cpu(login_req->tsih);
1010 }
1011
1012 memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);
1013
1014 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1015 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1016 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1017 MAX_KEY_VALUE_PAIRS);
1018 memcpy(login->req_buf, isert_get_data(rx_desc), size);
1019
1020 if (login->first_request) {
1021 complete(&isert_conn->login_comp);
1022 return;
1023 }
1024 queue_delayed_work(isert_login_wq, &conn->login_work, 0);
1025}
1026
1027static struct iscsit_cmd
1028*isert_allocate_cmd(struct iscsit_conn *conn, struct iser_rx_desc *rx_desc)
1029{
1030 struct isert_conn *isert_conn = conn->context;
1031 struct isert_cmd *isert_cmd;
1032 struct iscsit_cmd *cmd;
1033
1034 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1035 if (!cmd) {
1036 isert_err("Unable to allocate iscsit_cmd + isert_cmd\n");
1037 return NULL;
1038 }
1039 isert_cmd = iscsit_priv_cmd(cmd);
1040 isert_cmd->conn = isert_conn;
1041 isert_cmd->iscsit_cmd = cmd;
1042 isert_cmd->rx_desc = rx_desc;
1043
1044 return cmd;
1045}
1046
1047static int
1048isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1049 struct isert_cmd *isert_cmd, struct iscsit_cmd *cmd,
1050 struct iser_rx_desc *rx_desc, unsigned char *buf)
1051{
1052 struct iscsit_conn *conn = isert_conn->conn;
1053 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1054 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1055 bool dump_payload = false;
1056 unsigned int data_len;
1057
1058 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1059 if (rc < 0)
1060 return rc;
1061
1062 imm_data = cmd->immediate_data;
1063 imm_data_len = cmd->first_burst_len;
1064 unsol_data = cmd->unsolicited_data;
1065 data_len = cmd->se_cmd.data_length;
1066
1067 if (imm_data && imm_data_len == data_len)
1068 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1069 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1070 if (rc < 0) {
1071 return 0;
1072 } else if (rc > 0) {
1073 dump_payload = true;
1074 goto sequence_cmd;
1075 }
1076
1077 if (!imm_data)
1078 return 0;
1079
1080 if (imm_data_len != data_len) {
1081 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1082 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
1083 isert_get_data(rx_desc), imm_data_len);
1084 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
1085 sg_nents, imm_data_len);
1086 } else {
1087 sg_init_table(&isert_cmd->sg, 1);
1088 cmd->se_cmd.t_data_sg = &isert_cmd->sg;
1089 cmd->se_cmd.t_data_nents = 1;
1090 sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
1091 imm_data_len);
1092 isert_dbg("Transfer Immediate imm_data_len: %d\n",
1093 imm_data_len);
1094 }
1095
1096 cmd->write_data_done += imm_data_len;
1097
1098 if (cmd->write_data_done == cmd->se_cmd.data_length) {
1099 spin_lock_bh(&cmd->istate_lock);
1100 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1101 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1102 spin_unlock_bh(&cmd->istate_lock);
1103 }
1104
1105sequence_cmd:
1106 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
1107
1108 if (!rc && !dump_payload && unsol_data)
1109 iscsit_set_unsolicited_dataout(cmd);
1110 else if (dump_payload && imm_data)
1111 target_put_sess_cmd(&cmd->se_cmd);
1112
1113 return 0;
1114}
1115
1116static int
1117isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1118 struct iser_rx_desc *rx_desc, unsigned char *buf)
1119{
1120 struct scatterlist *sg_start;
1121 struct iscsit_conn *conn = isert_conn->conn;
1122 struct iscsit_cmd *cmd = NULL;
1123 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1124 u32 unsol_data_len = ntoh24(hdr->dlength);
1125 int rc, sg_nents, sg_off, page_off;
1126
1127 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1128 if (rc < 0)
1129 return rc;
1130 else if (!cmd)
1131 return 0;
1132 /*
1133 * FIXME: Unexpected unsolicited_data out
1134 */
1135 if (!cmd->unsolicited_data) {
1136 isert_err("Received unexpected solicited data payload\n");
1137 dump_stack();
1138 return -1;
1139 }
1140
1141 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1142 "write_data_done: %u, data_length: %u\n",
1143 unsol_data_len, cmd->write_data_done,
1144 cmd->se_cmd.data_length);
1145
1146 sg_off = cmd->write_data_done / PAGE_SIZE;
1147 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1148 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1149 page_off = cmd->write_data_done % PAGE_SIZE;
1150 /*
1151 * FIXME: Non page-aligned unsolicited_data out
1152 */
1153 if (page_off) {
1154 isert_err("unexpected non-page aligned data payload\n");
1155 dump_stack();
1156 return -1;
1157 }
1158 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1159 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1160 sg_nents, isert_get_data(rx_desc), unsol_data_len);
1161
1162 sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
1163 unsol_data_len);
1164
1165 rc = iscsit_check_dataout_payload(cmd, hdr, false);
1166 if (rc < 0)
1167 return rc;
1168
1169 /*
1170 * multiple data-outs on the same command can arrive -
1171 * so post the buffer beforehand
1172 */
1173 return isert_post_recv(isert_conn, rx_desc);
1174}
1175
1176static int
1177isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1178 struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
1179 unsigned char *buf)
1180{
1181 struct iscsit_conn *conn = isert_conn->conn;
1182 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1183 int rc;
1184
1185 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1186 if (rc < 0)
1187 return rc;
1188 /*
1189 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1190 */
1191
1192 return iscsit_process_nop_out(conn, cmd, hdr);
1193}
1194
1195static int
1196isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1197 struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
1198 struct iscsi_text *hdr)
1199{
1200 struct iscsit_conn *conn = isert_conn->conn;
1201 u32 payload_length = ntoh24(hdr->dlength);
1202 int rc;
1203 unsigned char *text_in = NULL;
1204
1205 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1206 if (rc < 0)
1207 return rc;
1208
1209 if (payload_length) {
1210 text_in = kzalloc(payload_length, GFP_KERNEL);
1211 if (!text_in)
1212 return -ENOMEM;
1213 }
1214 cmd->text_in_ptr = text_in;
1215
1216 memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);
1217
1218 return iscsit_process_text_cmd(conn, cmd, hdr);
1219}
1220
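/*
 * Dispatch a received iSCSI PDU to the matching handler based on its
 * opcode.  For new SCSI commands the iSER read/write STags and VAs are
 * stashed in the isert_cmd for the later RDMA transfer.
 */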
1221static int
1222isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1223 uint32_t read_stag, uint64_t read_va,
1224 uint32_t write_stag, uint64_t write_va)
1225{
1226 struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
1227 struct iscsit_conn *conn = isert_conn->conn;
1228 struct iscsit_cmd *cmd;
1229 struct isert_cmd *isert_cmd;
1230 int ret = -EINVAL;
1231 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1232
1233 if (conn->sess->sess_ops->SessionType &&
1234 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1235 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1236 " ignoring\n", opcode);
1237 return 0;
1238 }
1239
1240 switch (opcode) {
1241 case ISCSI_OP_SCSI_CMD:
1242 cmd = isert_allocate_cmd(conn, rx_desc);
1243 if (!cmd)
1244 break;
1245
1246 isert_cmd = iscsit_priv_cmd(cmd);
1247 isert_cmd->read_stag = read_stag;
1248 isert_cmd->read_va = read_va;
1249 isert_cmd->write_stag = write_stag;
1250 isert_cmd->write_va = write_va;
1251 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;
1252
1253 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1254 rx_desc, (unsigned char *)hdr);
1255 break;
1256 case ISCSI_OP_NOOP_OUT:
1257 cmd = isert_allocate_cmd(conn, rx_desc);
1258 if (!cmd)
1259 break;
1260
1261 isert_cmd = iscsit_priv_cmd(cmd);
1262 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1263 rx_desc, (unsigned char *)hdr);
1264 break;
1265 case ISCSI_OP_SCSI_DATA_OUT:
1266 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1267 (unsigned char *)hdr);
1268 break;
1269 case ISCSI_OP_SCSI_TMFUNC:
1270 cmd = isert_allocate_cmd(conn, rx_desc);
1271 if (!cmd)
1272 break;
1273
1274 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1275 (unsigned char *)hdr);
1276 break;
1277 case ISCSI_OP_LOGOUT:
1278 cmd = isert_allocate_cmd(conn, rx_desc);
1279 if (!cmd)
1280 break;
1281
1282 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1283 break;
1284 case ISCSI_OP_TEXT:
1285 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
1286 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1287 else
1288 cmd = isert_allocate_cmd(conn, rx_desc);
1289
1290 if (!cmd)
1291 break;
1292
1293 isert_cmd = iscsit_priv_cmd(cmd);
1294 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1295 rx_desc, (struct iscsi_text *)hdr);
1296 break;
1297 default:
1298 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1299 dump_stack();
1300 break;
1301 }
1302
1303 return ret;
1304}
1305
1306static void
1307isert_print_wc(struct ib_wc *wc, const char *type)
1308{
1309 if (wc->status != IB_WC_WR_FLUSH_ERR)
1310 isert_err("%s failure: %s (%d) vend_err %x\n", type,
1311 ib_wc_status_msg(wc->status), wc->status,
1312 wc->vendor_err);
1313 else
1314 isert_dbg("%s failure: %s (%d)\n", type,
1315 ib_wc_status_msg(wc->status), wc->status);
1316}
1317
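/*
 * Receive completion handler: sync the descriptor for CPU access, pull
 * the read/write STags and VAs out of the iSER header and hand the
 * iSCSI PDU to isert_rx_opcode().
 */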
1318static void
1319isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1320{
1321 struct isert_conn *isert_conn = wc->qp->qp_context;
1322 struct ib_device *ib_dev = isert_conn->cm_id->device;
1323 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
1324 struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
1325 struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
1326 uint64_t read_va = 0, write_va = 0;
1327 uint32_t read_stag = 0, write_stag = 0;
1328
1329 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1330 isert_print_wc(wc, "recv");
1331 if (wc->status != IB_WC_WR_FLUSH_ERR)
1332 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1333 return;
1334 }
1335
1336 rx_desc->in_use = true;
1337
1338 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
1339 ISER_RX_SIZE, DMA_FROM_DEVICE);
1340
1341 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1342 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
1343 (int)(wc->byte_len - ISER_HEADERS_LEN));
1344
1345 switch (iser_ctrl->flags & 0xF0) {
1346 case ISCSI_CTRL:
1347 if (iser_ctrl->flags & ISER_RSV) {
1348 read_stag = be32_to_cpu(iser_ctrl->read_stag);
1349 read_va = be64_to_cpu(iser_ctrl->read_va);
1350 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1351 read_stag, (unsigned long long)read_va);
1352 }
1353 if (iser_ctrl->flags & ISER_WSV) {
1354 write_stag = be32_to_cpu(iser_ctrl->write_stag);
1355 write_va = be64_to_cpu(iser_ctrl->write_va);
1356 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1357 write_stag, (unsigned long long)write_va);
1358 }
1359
1360 isert_dbg("ISER ISCSI_CTRL PDU\n");
1361 break;
1362 case ISER_HELLO:
1363 isert_err("iSER Hello message\n");
1364 break;
1365 default:
1366 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
1367 break;
1368 }
1369
1370 isert_rx_opcode(isert_conn, rx_desc,
1371 read_stag, read_va, write_stag, write_va);
1372
1373 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
1374 ISER_RX_SIZE, DMA_FROM_DEVICE);
1375}
1376
1377static void
1378isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1379{
1380 struct isert_conn *isert_conn = wc->qp->qp_context;
1381 struct ib_device *ib_dev = isert_conn->device->ib_device;
1382
1383 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1384 isert_print_wc(wc, "login recv");
1385 return;
1386 }
1387
1388 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
1389 ISER_RX_SIZE, DMA_FROM_DEVICE);
1390
1391 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
1392
1393 if (isert_conn->conn) {
1394 struct iscsi_login *login = isert_conn->conn->conn_login;
1395
1396 if (login && !login->first_request)
1397 isert_rx_login_req(isert_conn);
1398 }
1399
1400 mutex_lock(&isert_conn->mutex);
1401 complete(&isert_conn->login_req_comp);
1402 mutex_unlock(&isert_conn->mutex);
1403
1404 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
1405 ISER_RX_SIZE, DMA_FROM_DEVICE);
1406}
1407
1408static void
1409isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
1410{
1411 struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
1412 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
1413
1414 if (!cmd->rw.nr_ops)
1415 return;
1416
1417 if (isert_prot_cmd(conn, se_cmd)) {
1418 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
1419 conn->cm_id->port_num, se_cmd->t_data_sg,
1420 se_cmd->t_data_nents, se_cmd->t_prot_sg,
1421 se_cmd->t_prot_nents, dir);
1422 } else {
1423 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
1424 se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
1425 }
1426
1427 cmd->rw.nr_ops = 0;
1428}
1429
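/*
 * Release an isert_cmd based on its original iSCSI opcode: unlink it
 * from the connection's command list and hand it back to target-core or
 * the iSCSI layer; for SCSI commands any RDMA R/W context is destroyed
 * first.
 */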
1430static void
1431isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1432{
1433 struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
1434 struct isert_conn *isert_conn = isert_cmd->conn;
1435 struct iscsit_conn *conn = isert_conn->conn;
1436 struct iscsi_text_rsp *hdr;
1437
1438 isert_dbg("Cmd %p\n", isert_cmd);
1439
1440 switch (cmd->iscsi_opcode) {
1441 case ISCSI_OP_SCSI_CMD:
1442 spin_lock_bh(&conn->cmd_lock);
1443 if (!list_empty(&cmd->i_conn_node))
1444 list_del_init(&cmd->i_conn_node);
1445 spin_unlock_bh(&conn->cmd_lock);
1446
1447 if (cmd->data_direction == DMA_TO_DEVICE) {
1448 iscsit_stop_dataout_timer(cmd);
1449 /*
1450 * Check for special case during comp_err where
1451 * WRITE_PENDING has been handed off from core,
1452 * but requires an extra target_put_sess_cmd()
1453 * before transport_generic_free_cmd() below.
1454 */
1455 if (comp_err &&
1456 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1457 struct se_cmd *se_cmd = &cmd->se_cmd;
1458
1459 target_put_sess_cmd(se_cmd);
1460 }
1461 }
1462
1463 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1464 transport_generic_free_cmd(&cmd->se_cmd, 0);
1465 break;
1466 case ISCSI_OP_SCSI_TMFUNC:
1467 spin_lock_bh(&conn->cmd_lock);
1468 if (!list_empty(&cmd->i_conn_node))
1469 list_del_init(&cmd->i_conn_node);
1470 spin_unlock_bh(&conn->cmd_lock);
1471
1472 transport_generic_free_cmd(&cmd->se_cmd, 0);
1473 break;
1474 case ISCSI_OP_REJECT:
1475 case ISCSI_OP_NOOP_OUT:
1476 case ISCSI_OP_TEXT:
1477 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1478 /* If the continue bit is on, keep the command alive */
1479 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1480 break;
1481
1482 spin_lock_bh(&conn->cmd_lock);
1483 if (!list_empty(&cmd->i_conn_node))
1484 list_del_init(&cmd->i_conn_node);
1485 spin_unlock_bh(&conn->cmd_lock);
1486
1487 /*
1488 * Handle special case for REJECT when iscsi_add_reject*() has
1489 * overwritten the original iscsi_opcode assignment, and the
1490 * associated cmd->se_cmd needs to be released.
1491 */
1492 if (cmd->se_cmd.se_tfo != NULL) {
1493 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
1494 cmd->iscsi_opcode);
1495 transport_generic_free_cmd(&cmd->se_cmd, 0);
1496 break;
1497 }
1498 fallthrough;
1499 default:
1500 iscsit_release_cmd(cmd);
1501 break;
1502 }
1503}
1504
1505static void
1506isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1507{
1508 if (tx_desc->dma_addr != 0) {
1509 isert_dbg("unmap single for tx_desc->dma_addr\n");
1510 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1511 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1512 tx_desc->dma_addr = 0;
1513 }
1514}
1515
1516static void
1517isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1518 struct ib_device *ib_dev, bool comp_err)
1519{
1520 if (isert_cmd->pdu_buf_dma != 0) {
1521 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1522 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1523 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1524 isert_cmd->pdu_buf_dma = 0;
1525 }
1526
1527 isert_unmap_tx_desc(tx_desc, ib_dev);
1528 isert_put_cmd(isert_cmd, comp_err);
1529}
1530
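/*
 * Query the signature MR status after a protected transfer.  If a
 * T10-PI error is reported, translate it into the matching TCM sense
 * code and record the failing sector.  Returns 1 if a PI error was
 * found, 0 if the transfer passed, or a negative errno if the MR status
 * query itself failed.
 */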
1531static int
1532isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1533{
1534 struct ib_mr_status mr_status;
1535 int ret;
1536
1537 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1538 if (ret) {
1539 isert_err("ib_check_mr_status failed, ret %d\n", ret);
1540 goto fail_mr_status;
1541 }
1542
1543 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1544 u64 sec_offset_err;
1545 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1546
1547 switch (mr_status.sig_err.err_type) {
1548 case IB_SIG_BAD_GUARD:
1549 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1550 break;
1551 case IB_SIG_BAD_REFTAG:
1552 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1553 break;
1554 case IB_SIG_BAD_APPTAG:
1555 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1556 break;
1557 }
1558 sec_offset_err = mr_status.sig_err.sig_err_offset;
1559 do_div(sec_offset_err, block_size);
1560 se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba;
1561
1562 isert_err("PI error found type %d at sector 0x%llx "
1563 "expected 0x%x vs actual 0x%x\n",
1564 mr_status.sig_err.err_type,
1565 (unsigned long long)se_cmd->sense_info,
1566 mr_status.sig_err.expected,
1567 mr_status.sig_err.actual);
1568 ret = 1;
1569 }
1570
1571fail_mr_status:
1572 return ret;
1573}
1574
1575static void
1576isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
1577{
1578 struct isert_conn *isert_conn = wc->qp->qp_context;
1579 struct isert_device *device = isert_conn->device;
1580 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1581 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1582 struct se_cmd *cmd = &isert_cmd->iscsit_cmd->se_cmd;
1583 int ret = 0;
1584
1585 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1586 isert_print_wc(wc, "rdma write");
1587 if (wc->status != IB_WC_WR_FLUSH_ERR)
1588 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1589 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1590 return;
1591 }
1592
1593 isert_dbg("Cmd %p\n", isert_cmd);
1594
1595 ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr);
1596 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1597
1598 if (ret) {
1599 /*
1600 * transport_generic_request_failure() expects two extra
1601 * references to handle queue-full, so re-add one here as
1602 * target-core will have already dropped it after the first
1603 * isert_put_datain() callback.
1604 */
1605 kref_get(&cmd->cmd_kref);
1606 transport_generic_request_failure(cmd, cmd->pi_err);
1607 } else {
1608 /*
1609 * XXX: isert_put_response() failure is not retried.
1610 */
1611 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsit_cmd);
1612 if (ret)
1613 pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
1614 }
1615}
1616
1617static void
1618isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1619{
1620 struct isert_conn *isert_conn = wc->qp->qp_context;
1621 struct isert_device *device = isert_conn->device;
1622 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1623 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1624 struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
1625 struct se_cmd *se_cmd = &cmd->se_cmd;
1626 int ret = 0;
1627
1628 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1629 isert_print_wc(wc, "rdma read");
1630 if (wc->status != IB_WC_WR_FLUSH_ERR)
1631 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1632 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1633 return;
1634 }
1635
1636 isert_dbg("Cmd %p\n", isert_cmd);
1637
1638 iscsit_stop_dataout_timer(cmd);
1639
1640 if (isert_prot_cmd(isert_conn, se_cmd))
1641 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr);
1642 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1643 cmd->write_data_done = 0;
1644
1645 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1646 spin_lock_bh(&cmd->istate_lock);
1647 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1648 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1649 spin_unlock_bh(&cmd->istate_lock);
1650
1651 /*
1652 * transport_generic_request_failure() will drop the extra
1653 * se_cmd->cmd_kref reference after T10-PI error, and handle
1654 * any non-zero ->queue_status() callback error retries.
1655 */
1656 if (ret)
1657 transport_generic_request_failure(se_cmd, se_cmd->pi_err);
1658 else
1659 target_execute_cmd(se_cmd);
1660}
1661
1662static void
1663isert_do_control_comp(struct work_struct *work)
1664{
1665 struct isert_cmd *isert_cmd = container_of(work,
1666 struct isert_cmd, comp_work);
1667 struct isert_conn *isert_conn = isert_cmd->conn;
1668 struct ib_device *ib_dev = isert_conn->cm_id->device;
1669 struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
1670
1671 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
1672
1673 switch (cmd->i_state) {
1674 case ISTATE_SEND_TASKMGTRSP:
1675 iscsit_tmr_post_handler(cmd, cmd->conn);
1676 fallthrough;
1677 case ISTATE_SEND_REJECT:
1678 case ISTATE_SEND_TEXTRSP:
1679 cmd->i_state = ISTATE_SENT_STATUS;
1680 isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
1681 ib_dev, false);
1682 break;
1683 case ISTATE_SEND_LOGOUTRSP:
1684 iscsit_logout_post_handler(cmd, cmd->conn);
1685 break;
1686 default:
1687 isert_err("Unknown i_state %d\n", cmd->i_state);
1688 dump_stack();
1689 break;
1690 }
1691}
1692
1693static void
1694isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
1695{
1696 struct isert_conn *isert_conn = wc->qp->qp_context;
1697 struct ib_device *ib_dev = isert_conn->cm_id->device;
1698 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1699
1700 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1701 isert_print_wc(wc, "login send");
1702 if (wc->status != IB_WC_WR_FLUSH_ERR)
1703 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1704 }
1705
1706 isert_unmap_tx_desc(tx_desc, ib_dev);
1707}
1708
1709static void
1710isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
1711{
1712 struct isert_conn *isert_conn = wc->qp->qp_context;
1713 struct ib_device *ib_dev = isert_conn->cm_id->device;
1714 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1715 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
1716
1717 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1718 isert_print_wc(wc, "send");
1719 if (wc->status != IB_WC_WR_FLUSH_ERR)
1720 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1721 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
1722 return;
1723 }
1724
1725 isert_dbg("Cmd %p\n", isert_cmd);
1726
1727 switch (isert_cmd->iscsit_cmd->i_state) {
1728 case ISTATE_SEND_TASKMGTRSP:
1729 case ISTATE_SEND_LOGOUTRSP:
1730 case ISTATE_SEND_REJECT:
1731 case ISTATE_SEND_TEXTRSP:
1732 isert_unmap_tx_desc(tx_desc, ib_dev);
1733
1734 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1735 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1736 return;
1737 default:
1738 isert_cmd->iscsit_cmd->i_state = ISTATE_SENT_STATUS;
1739 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1740 break;
1741 }
1742}
1743
1744static int
1745isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1746{
1747 int ret;
1748
1749 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
1750 if (ret)
1751 return ret;
1752
1753 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
1754 if (ret) {
1755 isert_err("ib_post_send failed with %d\n", ret);
1756 return ret;
1757 }
1758 return ret;
1759}
1760
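/*
 * Build the SCSI Response PDU for a completed command, attaching any
 * SENSE data as a second SGE, then repost the receive descriptor that
 * carried the request and post the send.
 */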
1761static int
1762isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
1763{
1764 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1765 struct isert_conn *isert_conn = conn->context;
1766 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1767 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1768 &isert_cmd->tx_desc.iscsi_header;
1769
1770 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1771 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1772 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1773 /*
1774 * Attach SENSE DATA payload to iSCSI Response PDU
1775 */
1776 if (cmd->se_cmd.sense_buffer &&
1777 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1778 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1779 struct isert_device *device = isert_conn->device;
1780 struct ib_device *ib_dev = device->ib_device;
1781 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1782 u32 padding, pdu_len;
1783
1784 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1785 cmd->sense_buffer);
1786 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1787
1788 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1789 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1790 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1791
1792 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1793 (void *)cmd->sense_buffer, pdu_len,
1794 DMA_TO_DEVICE);
1795 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
1796 return -ENOMEM;
1797
1798 isert_cmd->pdu_buf_len = pdu_len;
1799 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1800 tx_dsg->length = pdu_len;
1801 tx_dsg->lkey = device->pd->local_dma_lkey;
1802 isert_cmd->tx_desc.num_sge = 2;
1803 }
1804
1805 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1806
1807 isert_dbg("Posting SCSI Response\n");
1808
1809 return isert_post_response(isert_conn, isert_cmd);
1810}
1811
1812static void
1813isert_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
1814{
1815 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1816 struct isert_conn *isert_conn = conn->context;
1817
1818 spin_lock_bh(&conn->cmd_lock);
1819 if (!list_empty(&cmd->i_conn_node))
1820 list_del_init(&cmd->i_conn_node);
1821 spin_unlock_bh(&conn->cmd_lock);
1822
1823 if (cmd->data_direction == DMA_TO_DEVICE)
1824 iscsit_stop_dataout_timer(cmd);
1825 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1826}
1827
1828static enum target_prot_op
1829isert_get_sup_prot_ops(struct iscsit_conn *conn)
1830{
1831 struct isert_conn *isert_conn = conn->context;
1832 struct isert_device *device = isert_conn->device;
1833
1834 if (conn->tpg->tpg_attrib.t10_pi) {
1835 if (device->pi_capable) {
1836 isert_info("conn %p PI offload enabled\n", isert_conn);
1837 isert_conn->pi_support = true;
1838 return TARGET_PROT_ALL;
1839 }
1840 }
1841
1842 isert_info("conn %p PI offload disabled\n", isert_conn);
1843 isert_conn->pi_support = false;
1844
1845 return TARGET_PROT_NORMAL;
1846}
1847
1848static int
1849isert_put_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
1850 bool nopout_response)
1851{
1852 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1853 struct isert_conn *isert_conn = conn->context;
1854 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1855
1856 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1857 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1858 &isert_cmd->tx_desc.iscsi_header,
1859 nopout_response);
1860 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1861 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1862
1863 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
1864
1865 return isert_post_response(isert_conn, isert_cmd);
1866}
1867
1868static int
1869isert_put_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
1870{
1871 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1872 struct isert_conn *isert_conn = conn->context;
1873 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1874
1875 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1876 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1877 &isert_cmd->tx_desc.iscsi_header);
1878 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1879 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1880
1881 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
1882
1883 return isert_post_response(isert_conn, isert_cmd);
1884}
1885
1886static int
1887isert_put_tm_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
1888{
1889 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1890 struct isert_conn *isert_conn = conn->context;
1891 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1892
1893 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1894 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1895 &isert_cmd->tx_desc.iscsi_header);
1896 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1897 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1898
1899 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
1900
1901 return isert_post_response(isert_conn, isert_cmd);
1902}
1903
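/*
 * Build a Reject PDU; the header of the rejected PDU saved in
 * cmd->buf_ptr is DMA-mapped and carried as the data segment.
 */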
1904static int
1905isert_put_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
1906{
1907 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1908 struct isert_conn *isert_conn = conn->context;
1909 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1910 struct isert_device *device = isert_conn->device;
1911 struct ib_device *ib_dev = device->ib_device;
1912 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1913 struct iscsi_reject *hdr =
1914 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
1915
1916 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1917 iscsit_build_reject(cmd, conn, hdr);
1918 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1919
1920 hton24(hdr->dlength, ISCSI_HDR_LEN);
1921 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1922 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
1923 DMA_TO_DEVICE);
1924 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
1925 return -ENOMEM;
1926 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
1927 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1928 tx_dsg->length = ISCSI_HDR_LEN;
1929 tx_dsg->lkey = device->pd->local_dma_lkey;
1930 isert_cmd->tx_desc.num_sge = 2;
1931
1932 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1933
1934 isert_dbg("conn %p Posting Reject\n", isert_conn);
1935
1936 return isert_post_response(isert_conn, isert_cmd);
1937}
1938
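/*
 * Build a Text Response PDU; any key=value payload produced by
 * iscsit_build_text_rsp() is DMA-mapped from cmd->buf_ptr as a second SGE.
 */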
1939static int
1940isert_put_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
1941{
1942 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1943 struct isert_conn *isert_conn = conn->context;
1944 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1945 struct iscsi_text_rsp *hdr =
1946 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1947 u32 txt_rsp_len;
1948 int rc;
1949
1950 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1951 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
1952 if (rc < 0)
1953 return rc;
1954
1955 txt_rsp_len = rc;
1956 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1957
1958 if (txt_rsp_len) {
1959 struct isert_device *device = isert_conn->device;
1960 struct ib_device *ib_dev = device->ib_device;
1961 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1962 void *txt_rsp_buf = cmd->buf_ptr;
1963
1964 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1965 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
1966 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
1967 return -ENOMEM;
1968
1969 isert_cmd->pdu_buf_len = txt_rsp_len;
1970 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1971 tx_dsg->length = txt_rsp_len;
1972 tx_dsg->lkey = device->pd->local_dma_lkey;
1973 isert_cmd->tx_desc.num_sge = 2;
1974 }
1975 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1976
1977 isert_dbg("conn %p Text Response\n", isert_conn);
1978
1979 return isert_post_response(isert_conn, isert_cmd);
1980}
1981
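/*
 * Fill in one T10-DIF signature domain (wire or memory side) from the
 * se_cmd attributes: CRC block guard, a protection interval equal to the
 * block size, and the LBA-seeded reference tag (remapped for Type1/Type2).
 */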
1982static inline void
1983isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
1984{
1985 domain->sig_type = IB_SIG_TYPE_T10_DIF;
1986 domain->sig.dif.bg_type = IB_T10DIF_CRC;
1987 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
1988 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
1989 /*
1990 * At the moment we hard code those, but if in the future
1991 * the target core would like to use it, we will take it
1992 * from se_cmd.
1993 */
1994 domain->sig.dif.apptag_check_mask = 0xffff;
1995 domain->sig.dif.app_escape = true;
1996 domain->sig.dif.ref_escape = true;
1997 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
1998 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
1999 domain->sig.dif.ref_remap = true;
2000}
2001
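/*
 * Translate the target core protection operation into ib_sig_attrs:
 * INSERT/STRIP operations protect a single side (wire or memory), PASS
 * protects both, and check_mask mirrors the requested guard/app/ref checks.
 */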
2002static int
2003isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2004{
2005 memset(sig_attrs, 0, sizeof(*sig_attrs));
2006
2007 switch (se_cmd->prot_op) {
2008 case TARGET_PROT_DIN_INSERT:
2009 case TARGET_PROT_DOUT_STRIP:
2010 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
2011 isert_set_dif_domain(se_cmd, &sig_attrs->wire);
2012 break;
2013 case TARGET_PROT_DOUT_INSERT:
2014 case TARGET_PROT_DIN_STRIP:
2015 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
2016 isert_set_dif_domain(se_cmd, &sig_attrs->mem);
2017 break;
2018 case TARGET_PROT_DIN_PASS:
2019 case TARGET_PROT_DOUT_PASS:
2020 isert_set_dif_domain(se_cmd, &sig_attrs->wire);
2021 isert_set_dif_domain(se_cmd, &sig_attrs->mem);
2022 break;
2023 default:
2024 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2025 return -EINVAL;
2026 }
2027
2028 if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
2029 sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
2030 if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
2031 sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
2032 if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
2033 sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
2034
2035 return 0;
2036}
2037
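/*
 * Initialize the rdma_rw context once per command (using the signature
 * variant when PI is enabled) and post the RDMA READ/WRITE chain against
 * the remote address and rkey advertised by the initiator.
 */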
2038static int
2039isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
2040 struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
2041{
2042 struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
2043 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
2044 u8 port_num = conn->cm_id->port_num;
2045 u64 addr;
2046 u32 rkey, offset;
2047 int ret;
2048
2049 if (cmd->ctx_init_done)
2050 goto rdma_ctx_post;
2051
2052 if (dir == DMA_FROM_DEVICE) {
2053 addr = cmd->write_va;
2054 rkey = cmd->write_stag;
2055 offset = cmd->iscsit_cmd->write_data_done;
2056 } else {
2057 addr = cmd->read_va;
2058 rkey = cmd->read_stag;
2059 offset = 0;
2060 }
2061
2062 if (isert_prot_cmd(conn, se_cmd)) {
2063 struct ib_sig_attrs sig_attrs;
2064
2065 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2066 if (ret)
2067 return ret;
2068
2069 WARN_ON_ONCE(offset);
2070 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
2071 se_cmd->t_data_sg, se_cmd->t_data_nents,
2072 se_cmd->t_prot_sg, se_cmd->t_prot_nents,
2073 &sig_attrs, addr, rkey, dir);
2074 } else {
2075 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
2076 se_cmd->t_data_sg, se_cmd->t_data_nents,
2077 offset, addr, rkey, dir);
2078 }
2079
2080 if (ret < 0) {
2081 isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
2082 return ret;
2083 }
2084
2085 cmd->ctx_init_done = true;
2086
2087rdma_ctx_post:
2088 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
2089 if (ret < 0)
2090 isert_err("Cmd: %p failed to post RDMA res\n", cmd);
2091 return ret;
2092}
2093
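/*
 * Queue Data-In: post an RDMA WRITE of the read payload. Without PI the
 * SCSI response PDU send WR is chained behind the write so both complete
 * together; with PI the write completion is handled by
 * isert_rdma_write_done() instead.
 */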
2094static int
2095isert_put_datain(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
2096{
2097 struct se_cmd *se_cmd = &cmd->se_cmd;
2098 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2099 struct isert_conn *isert_conn = conn->context;
2100 struct ib_cqe *cqe = NULL;
2101 struct ib_send_wr *chain_wr = NULL;
2102 int rc;
2103
2104 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
2105 isert_cmd, se_cmd->data_length);
2106
2107 if (isert_prot_cmd(isert_conn, se_cmd)) {
2108 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
2109 cqe = &isert_cmd->tx_desc.tx_cqe;
2110 } else {
		/*
		 * Build isert_cmd->tx_desc for the iSCSI response PDU and
		 * chain its send WR behind the RDMA WRITE.
		 */
2114 isert_create_send_desc(isert_conn, isert_cmd,
2115 &isert_cmd->tx_desc);
2116 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2117 &isert_cmd->tx_desc.iscsi_header);
2118 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2119 isert_init_send_wr(isert_conn, isert_cmd,
2120 &isert_cmd->tx_desc.send_wr);
2121
2122 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2123 if (rc)
2124 return rc;
2125
2126 chain_wr = &isert_cmd->tx_desc.send_wr;
2127 }
2128
2129 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
2130 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
2131 isert_cmd, rc);
2132 return rc;
2133}
2134
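/*
 * Pull solicited Data-Out from the initiator by posting an RDMA READ;
 * completion is handled by isert_rdma_read_done().
 */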
2135static int
2136isert_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd, bool recovery)
2137{
2138 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2139 int ret;
2140
2141 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2142 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
2143
2144 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
2145 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
2146 &isert_cmd->tx_desc.tx_cqe, NULL);
2147
	isert_dbg("Cmd: %p posted RDMA_READ for iSER Data WRITE rc: %d\n",
2149 isert_cmd, ret);
2150 return ret;
2151}
2152
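/*
 * iscsit immediate queue callback: ISTATE_REMOVE drops the command,
 * ISTATE_SEND_NOPIN_WANT_RESPONSE sends a NOPIN that solicits a response.
 */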
2153static int
2154isert_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
2155{
2156 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2157 int ret = 0;
2158
2159 switch (state) {
2160 case ISTATE_REMOVE:
2161 spin_lock_bh(&conn->cmd_lock);
2162 list_del_init(&cmd->i_conn_node);
2163 spin_unlock_bh(&conn->cmd_lock);
2164 isert_put_cmd(isert_cmd, true);
2165 break;
2166 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2167 ret = isert_put_nopin(cmd, conn, false);
2168 break;
2169 default:
2170 isert_err("Unknown immediate state: 0x%02x\n", state);
2171 ret = -EINVAL;
2172 break;
2173 }
2174
2175 return ret;
2176}
2177
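/*
 * iscsit response queue callback: dispatch the command state to the
 * matching iSER TX path (logout, NOPIN, TMR, reject, text or status).
 */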
2178static int
2179isert_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
2180{
2181 struct isert_conn *isert_conn = conn->context;
2182 int ret;
2183
2184 switch (state) {
2185 case ISTATE_SEND_LOGOUTRSP:
2186 ret = isert_put_logout_rsp(cmd, conn);
2187 if (!ret)
2188 isert_conn->logout_posted = true;
2189 break;
2190 case ISTATE_SEND_NOPIN:
2191 ret = isert_put_nopin(cmd, conn, true);
2192 break;
2193 case ISTATE_SEND_TASKMGTRSP:
2194 ret = isert_put_tm_rsp(cmd, conn);
2195 break;
2196 case ISTATE_SEND_REJECT:
2197 ret = isert_put_reject(cmd, conn);
2198 break;
2199 case ISTATE_SEND_TEXTRSP:
2200 ret = isert_put_text_rsp(cmd, conn);
2201 break;
2202 case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX
		 * thread context during pre se_cmd execution failure.
		 */
2207 ret = isert_put_response(conn, cmd);
2208 break;
2209 default:
2210 isert_err("Unknown response state: 0x%02x\n", state);
2211 ret = -EINVAL;
2212 break;
2213 }
2214
2215 return ret;
2216}
2217
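/*
 * Create an rdma_cm listener id for the network portal: bind it to the
 * portal sockaddr with af_only set (so IPv4 and IPv6 listeners can share
 * the same port) and start listening.
 */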
2218struct rdma_cm_id *
2219isert_setup_id(struct isert_np *isert_np)
2220{
2221 struct iscsi_np *np = isert_np->np;
2222 struct rdma_cm_id *id;
2223 struct sockaddr *sa;
2224 int ret;
2225
2226 sa = (struct sockaddr *)&np->np_sockaddr;
2227 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
2228
2229 id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
2230 RDMA_PS_TCP, IB_QPT_RC);
2231 if (IS_ERR(id)) {
2232 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
2233 ret = PTR_ERR(id);
2234 goto out;
2235 }
2236 isert_dbg("id %p context %p\n", id, id->context);
2237
2238 /*
2239 * Allow both IPv4 and IPv6 sockets to bind a single port
2240 * at the same time.
2241 */
2242 ret = rdma_set_afonly(id, 1);
2243 if (ret) {
2244 isert_err("rdma_set_afonly() failed: %d\n", ret);
2245 goto out_id;
2246 }
2247
2248 ret = rdma_bind_addr(id, sa);
2249 if (ret) {
2250 isert_err("rdma_bind_addr() failed: %d\n", ret);
2251 goto out_id;
2252 }
2253
2254 ret = rdma_listen(id, 0);
2255 if (ret) {
2256 isert_err("rdma_listen() failed: %d\n", ret);
2257 goto out_id;
2258 }
2259
2260 return id;
2261out_id:
2262 rdma_destroy_id(id);
2263out:
2264 return ERR_PTR(ret);
2265}
2266
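/*
 * Allocate the per-portal isert_np, copy in the portal sockaddr and set
 * up the rdma_cm listener via isert_setup_id().
 */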
2267static int
2268isert_setup_np(struct iscsi_np *np,
2269 struct sockaddr_storage *ksockaddr)
2270{
2271 struct isert_np *isert_np;
2272 struct rdma_cm_id *isert_lid;
2273 int ret;
2274
2275 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2276 if (!isert_np)
2277 return -ENOMEM;
2278
2279 sema_init(&isert_np->sem, 0);
2280 mutex_init(&isert_np->mutex);
2281 INIT_LIST_HEAD(&isert_np->accepted);
2282 INIT_LIST_HEAD(&isert_np->pending);
2283 isert_np->np = np;
2284
	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from the
	 * iscsi_target_configfs.c code.
	 */
2289 memcpy(&np->np_sockaddr, ksockaddr,
2290 sizeof(struct sockaddr_storage));
2291
2292 isert_lid = isert_setup_id(isert_np);
2293 if (IS_ERR(isert_lid)) {
2294 ret = PTR_ERR(isert_lid);
2295 goto out;
2296 }
2297
2298 isert_np->cm_id = isert_lid;
2299 np->np_context = isert_np;
2300
2301 return 0;
2302
2303out:
2304 kfree(isert_np);
2305
2306 return ret;
2307}
2308
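/*
 * Accept the RDMA connection, advertising in the iSER CM header that
 * zero-based VAs are not used and, unless remote invalidation was
 * negotiated, that Send-with-Invalidate is not used either.
 */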
2309static int
2310isert_rdma_accept(struct isert_conn *isert_conn)
2311{
2312 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2313 struct rdma_conn_param cp;
2314 int ret;
2315 struct iser_cm_hdr rsp_hdr;
2316
2317 memset(&cp, 0, sizeof(struct rdma_conn_param));
2318 cp.initiator_depth = isert_conn->initiator_depth;
2319 cp.retry_count = 7;
2320 cp.rnr_retry_count = 7;
2321
2322 memset(&rsp_hdr, 0, sizeof(rsp_hdr));
2323 rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
2324 if (!isert_conn->snd_w_inv)
2325 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
2326 cp.private_data = (void *)&rsp_hdr;
2327 cp.private_data_len = sizeof(rsp_hdr);
2328
2329 ret = rdma_accept(cm_id, &cp);
2330 if (ret) {
2331 isert_err("rdma_accept() failed with: %d\n", ret);
2332 return ret;
2333 }
2334
2335 return 0;
2336}
2337
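/*
 * Login RX path: block until a login request PDU has been received.
 * Only the first request is processed synchronously here; for later ones
 * isert_rx_login_req() schedules conn->login_work directly, turning this
 * callback into a NOP.
 */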
2338static int
2339isert_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
2340{
2341 struct isert_conn *isert_conn = conn->context;
2342 int ret;
2343
2344 isert_info("before login_req comp conn: %p\n", isert_conn);
2345 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
2346 if (ret) {
2347 isert_err("isert_conn %p interrupted before got login req\n",
2348 isert_conn);
2349 return ret;
2350 }
2351 reinit_completion(&isert_conn->login_req_comp);
2352
2353 /*
2354 * For login requests after the first PDU, isert_rx_login_req() will
2355 * kick queue_delayed_work(isert_login_wq, &conn->login_work) as
2356 * the packet is received, which turns this callback from
2357 * iscsi_target_do_login_rx() into a NOP.
2358 */
2359 if (!login->first_request)
2360 return 0;
2361
2362 isert_rx_login_req(isert_conn);
2363
2364 isert_info("before login_comp conn: %p\n", conn);
2365 ret = wait_for_completion_interruptible(&isert_conn->login_comp);
2366 if (ret)
2367 return ret;
2368
2369 isert_info("processing login->req: %p\n", login->req);
2370
2371 return 0;
2372}
2373
2374static void
2375isert_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
2376 struct isert_conn *isert_conn)
2377{
2378 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2379 struct rdma_route *cm_route = &cm_id->route;
2380
2381 conn->login_family = np->np_sockaddr.ss_family;
2382
2383 conn->login_sockaddr = cm_route->addr.dst_addr;
2384 conn->local_sockaddr = cm_route->addr.src_addr;
2385}
2386
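/*
 * Accept a new connection on behalf of the iscsit login thread: sleep on
 * the portal semaphore until a connected isert_conn shows up on the
 * pending list, then bind it to the iscsit_conn as ISER_CONN_BOUND.
 */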
2387static int
2388isert_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
2389{
2390 struct isert_np *isert_np = np->np_context;
2391 struct isert_conn *isert_conn;
2392 int ret;
2393
2394accept_wait:
2395 ret = down_interruptible(&isert_np->sem);
2396 if (ret)
2397 return -ENODEV;
2398
2399 spin_lock_bh(&np->np_thread_lock);
2400 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
2401 spin_unlock_bh(&np->np_thread_lock);
2402 isert_dbg("np_thread_state %d\n",
2403 np->np_thread_state);
2404 /*
2405 * No point in stalling here when np_thread
2406 * is in state RESET/SHUTDOWN/EXIT - bail
2407 */
2408 return -ENODEV;
2409 }
2410 spin_unlock_bh(&np->np_thread_lock);
2411
2412 mutex_lock(&isert_np->mutex);
2413 if (list_empty(&isert_np->pending)) {
2414 mutex_unlock(&isert_np->mutex);
2415 goto accept_wait;
2416 }
2417 isert_conn = list_first_entry(&isert_np->pending,
2418 struct isert_conn, node);
2419 list_del_init(&isert_conn->node);
2420 mutex_unlock(&isert_np->mutex);
2421
2422 conn->context = isert_conn;
2423 isert_conn->conn = conn;
2424 isert_conn->state = ISER_CONN_BOUND;
2425
2426 isert_set_conn_info(np, conn, isert_conn);
2427
2428 isert_dbg("Processing isert_conn: %p\n", isert_conn);
2429
2430 return 0;
2431}
2432
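/*
 * Tear down the network portal: destroy the rdma_cm listener and release
 * any connections still sitting on the accepted/pending lists.
 */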
2433static void
2434isert_free_np(struct iscsi_np *np)
2435{
2436 struct isert_np *isert_np = np->np_context;
2437 struct isert_conn *isert_conn, *n;
2438 LIST_HEAD(drop_conn_list);
2439
2440 if (isert_np->cm_id)
2441 rdma_destroy_id(isert_np->cm_id);
2442
	/*
	 * FIXME: At this point we don't have a good way to ensure that
	 * there are no hanging connections that completed RDMA establishment
	 * but never started the iSCSI login process. Work around this by
	 * cleaning up whatever piled up in the accepted and pending lists.
	 */
2450 mutex_lock(&isert_np->mutex);
2451 if (!list_empty(&isert_np->pending)) {
2452 isert_info("Still have isert pending connections\n");
2453 list_for_each_entry_safe(isert_conn, n,
2454 &isert_np->pending,
2455 node) {
2456 isert_info("cleaning isert_conn %p state (%d)\n",
2457 isert_conn, isert_conn->state);
2458 list_move_tail(&isert_conn->node, &drop_conn_list);
2459 }
2460 }
2461
2462 if (!list_empty(&isert_np->accepted)) {
2463 isert_info("Still have isert accepted connections\n");
2464 list_for_each_entry_safe(isert_conn, n,
2465 &isert_np->accepted,
2466 node) {
2467 isert_info("cleaning isert_conn %p state (%d)\n",
2468 isert_conn, isert_conn->state);
2469 list_move_tail(&isert_conn->node, &drop_conn_list);
2470 }
2471 }
2472 mutex_unlock(&isert_np->mutex);
2473
2474 list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
2475 list_del_init(&isert_conn->node);
2476 isert_connect_release(isert_conn);
2477 }
2478
2479 np->np_context = NULL;
2480 kfree(isert_np);
2481}
2482
2483static void isert_release_work(struct work_struct *work)
2484{
2485 struct isert_conn *isert_conn = container_of(work,
2486 struct isert_conn,
2487 release_work);
2488
2489 isert_info("Starting release conn %p\n", isert_conn);
2490
2491 mutex_lock(&isert_conn->mutex);
2492 isert_conn->state = ISER_CONN_DOWN;
2493 mutex_unlock(&isert_conn->mutex);
2494
2495 isert_info("Destroying conn %p\n", isert_conn);
2496 isert_put_conn(isert_conn);
2497}
2498
2499static void
2500isert_wait4logout(struct isert_conn *isert_conn)
2501{
2502 struct iscsit_conn *conn = isert_conn->conn;
2503
2504 isert_info("conn %p\n", isert_conn);
2505
2506 if (isert_conn->logout_posted) {
2507 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
2508 wait_for_completion_timeout(&conn->conn_logout_comp,
2509 SECONDS_FOR_LOGOUT_COMP * HZ);
2510 }
2511}
2512
2513static void
2514isert_wait4cmds(struct iscsit_conn *conn)
2515{
2516 isert_info("iscsit_conn %p\n", conn);
2517
2518 if (conn->sess) {
2519 target_stop_cmd_counter(conn->cmd_cnt);
2520 target_wait_for_cmds(conn->cmd_cnt);
2521 }
2522}
2523
/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited Data-Out
 * @conn: iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * Data-Out messages. We must put the extra reference on those before
 * blocking on the command counter wait in isert_wait4cmds().
 */
2533static void
2534isert_put_unsol_pending_cmds(struct iscsit_conn *conn)
2535{
2536 struct iscsit_cmd *cmd, *tmp;
2537 static LIST_HEAD(drop_cmd_list);
2538
2539 spin_lock_bh(&conn->cmd_lock);
2540 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
2541 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
2542 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
2543 (cmd->write_data_done < cmd->se_cmd.data_length))
2544 list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
2545 }
2546 spin_unlock_bh(&conn->cmd_lock);
2547
2548 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
2549 list_del_init(&cmd->i_conn_node);
2550 if (cmd->i_state != ISTATE_REMOVE) {
2551 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2552
2553 isert_info("conn %p dropping cmd %p\n", conn, cmd);
2554 isert_put_cmd(isert_cmd, true);
2555 }
2556 }
2557}
2558
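/*
 * rdma_shutdown path: terminate the CM connection, drain the QP, drop
 * commands still waiting for unsolicited Data-Out, wait for outstanding
 * commands and for a posted logout response, then hand the final teardown
 * to isert_release_wq.
 */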
2559static void isert_wait_conn(struct iscsit_conn *conn)
2560{
2561 struct isert_conn *isert_conn = conn->context;
2562
2563 isert_info("Starting conn %p\n", isert_conn);
2564
2565 mutex_lock(&isert_conn->mutex);
2566 isert_conn_terminate(isert_conn);
2567 mutex_unlock(&isert_conn->mutex);
2568
2569 ib_drain_qp(isert_conn->qp);
2570 isert_put_unsol_pending_cmds(conn);
2571 isert_wait4cmds(conn);
2572 isert_wait4logout(isert_conn);
2573
2574 queue_work(isert_release_wq, &isert_conn->release_work);
2575}
2576
2577static void isert_free_conn(struct iscsit_conn *conn)
2578{
2579 struct isert_conn *isert_conn = conn->context;
2580
2581 ib_drain_qp(isert_conn->qp);
2582 isert_put_conn(isert_conn);
2583}
2584
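/*
 * RX is driven entirely by CQ completions, so the iscsit RX thread has
 * nothing to poll; park it on a local completion that only a signal
 * during connection teardown will end.
 */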
2585static void isert_get_rx_pdu(struct iscsit_conn *conn)
2586{
2587 struct completion comp;
2588
2589 init_completion(&comp);
2590
2591 wait_for_completion_interruptible(&comp);
2592}
2593
2594static struct iscsit_transport iser_target_transport = {
2595 .name = "IB/iSER",
2596 .transport_type = ISCSI_INFINIBAND,
2597 .rdma_shutdown = true,
2598 .priv_size = sizeof(struct isert_cmd),
2599 .owner = THIS_MODULE,
2600 .iscsit_setup_np = isert_setup_np,
2601 .iscsit_accept_np = isert_accept_np,
2602 .iscsit_free_np = isert_free_np,
2603 .iscsit_wait_conn = isert_wait_conn,
2604 .iscsit_free_conn = isert_free_conn,
2605 .iscsit_get_login_rx = isert_get_login_rx,
2606 .iscsit_put_login_tx = isert_put_login_tx,
2607 .iscsit_immediate_queue = isert_immediate_queue,
2608 .iscsit_response_queue = isert_response_queue,
2609 .iscsit_get_dataout = isert_get_dataout,
2610 .iscsit_queue_data_in = isert_put_datain,
2611 .iscsit_queue_status = isert_put_response,
2612 .iscsit_aborted_task = isert_aborted_task,
2613 .iscsit_get_rx_pdu = isert_get_rx_pdu,
2614 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
2615};
2616
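/*
 * Module init: allocate the login, completion and release workqueues,
 * then register the iSER transport with the iSCSI target core.
 */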
2617static int __init isert_init(void)
2618{
2619 isert_login_wq = alloc_workqueue("isert_login_wq", 0, 0);
2620 if (!isert_login_wq) {
2621 isert_err("Unable to allocate isert_login_wq\n");
2622 return -ENOMEM;
2623 }
2624
2625 isert_comp_wq = alloc_workqueue("isert_comp_wq",
2626 WQ_UNBOUND | WQ_HIGHPRI, 0);
2627 if (!isert_comp_wq) {
2628 isert_err("Unable to allocate isert_comp_wq\n");
2629 goto destroy_login_wq;
2630 }
2631
2632 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
2633 WQ_UNBOUND_MAX_ACTIVE);
2634 if (!isert_release_wq) {
2635 isert_err("Unable to allocate isert_release_wq\n");
2636 goto destroy_comp_wq;
2637 }
2638
2639 iscsit_register_transport(&iser_target_transport);
2640 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
2641
2642 return 0;
2643
2644destroy_comp_wq:
2645 destroy_workqueue(isert_comp_wq);
2646destroy_login_wq:
2647 destroy_workqueue(isert_login_wq);
2648
2649 return -ENOMEM;
2650}
2651
2652static void __exit isert_exit(void)
2653{
2654 flush_workqueue(isert_login_wq);
2655 destroy_workqueue(isert_release_wq);
2656 destroy_workqueue(isert_comp_wq);
2657 iscsit_unregister_transport(&iser_target_transport);
2658 isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
2659 destroy_workqueue(isert_login_wq);
2660}
2661
2662MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2663MODULE_AUTHOR("nab@Linux-iSCSI.org");
2664MODULE_LICENSE("GPL");
2665
2666module_init(isert_init);
2667module_exit(isert_exit);
1/*******************************************************************************
2 * This file contains iSCSI extentions for RDMA (iSER) Verbs
3 *
4 * (c) Copyright 2013 Datera, Inc.
5 *
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
18
19#include <linux/string.h>
20#include <linux/module.h>
21#include <linux/scatterlist.h>
22#include <linux/socket.h>
23#include <linux/in.h>
24#include <linux/in6.h>
25#include <rdma/ib_verbs.h>
26#include <rdma/rdma_cm.h>
27#include <target/target_core_base.h>
28#include <target/target_core_fabric.h>
29#include <target/iscsi/iscsi_transport.h>
30#include <linux/semaphore.h>
31
32#include "ib_isert.h"
33
34#define ISERT_MAX_CONN 8
35#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
36#define ISER_MAX_TX_CQ_LEN \
37 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
38#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
39 ISERT_MAX_CONN)
40
41static int isert_debug_level;
42module_param_named(debug_level, isert_debug_level, int, 0644);
43MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
44
45static DEFINE_MUTEX(device_list_mutex);
46static LIST_HEAD(device_list);
47static struct workqueue_struct *isert_comp_wq;
48static struct workqueue_struct *isert_release_wq;
49
50static int
51isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
52static int
53isert_login_post_recv(struct isert_conn *isert_conn);
54static int
55isert_rdma_accept(struct isert_conn *isert_conn);
56struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
57
58static void isert_release_work(struct work_struct *work);
59static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
60static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
61static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
62static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
63
64static inline bool
65isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
66{
67 return (conn->pi_support &&
68 cmd->prot_op != TARGET_PROT_NORMAL);
69}
70
71
72static void
73isert_qp_event_callback(struct ib_event *e, void *context)
74{
75 struct isert_conn *isert_conn = context;
76
77 isert_err("%s (%d): conn %p\n",
78 ib_event_msg(e->event), e->event, isert_conn);
79
80 switch (e->event) {
81 case IB_EVENT_COMM_EST:
82 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
83 break;
84 case IB_EVENT_QP_LAST_WQE_REACHED:
85 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
86 break;
87 default:
88 break;
89 }
90}
91
92static struct isert_comp *
93isert_comp_get(struct isert_conn *isert_conn)
94{
95 struct isert_device *device = isert_conn->device;
96 struct isert_comp *comp;
97 int i, min = 0;
98
99 mutex_lock(&device_list_mutex);
100 for (i = 0; i < device->comps_used; i++)
101 if (device->comps[i].active_qps <
102 device->comps[min].active_qps)
103 min = i;
104 comp = &device->comps[min];
105 comp->active_qps++;
106 mutex_unlock(&device_list_mutex);
107
108 isert_info("conn %p, using comp %p min_index: %d\n",
109 isert_conn, comp, min);
110
111 return comp;
112}
113
114static void
115isert_comp_put(struct isert_comp *comp)
116{
117 mutex_lock(&device_list_mutex);
118 comp->active_qps--;
119 mutex_unlock(&device_list_mutex);
120}
121
122static struct ib_qp *
123isert_create_qp(struct isert_conn *isert_conn,
124 struct isert_comp *comp,
125 struct rdma_cm_id *cma_id)
126{
127 struct isert_device *device = isert_conn->device;
128 struct ib_qp_init_attr attr;
129 int ret;
130
131 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
132 attr.event_handler = isert_qp_event_callback;
133 attr.qp_context = isert_conn;
134 attr.send_cq = comp->cq;
135 attr.recv_cq = comp->cq;
136 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
137 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
138 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
139 attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
140 attr.cap.max_recv_sge = 1;
141 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
142 attr.qp_type = IB_QPT_RC;
143 if (device->pi_capable)
144 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
145
146 ret = rdma_create_qp(cma_id, device->pd, &attr);
147 if (ret) {
148 isert_err("rdma_create_qp failed for cma_id %d\n", ret);
149 return ERR_PTR(ret);
150 }
151
152 return cma_id->qp;
153}
154
155static int
156isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
157{
158 struct isert_comp *comp;
159 int ret;
160
161 comp = isert_comp_get(isert_conn);
162 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
163 if (IS_ERR(isert_conn->qp)) {
164 ret = PTR_ERR(isert_conn->qp);
165 goto err;
166 }
167
168 return 0;
169err:
170 isert_comp_put(comp);
171 return ret;
172}
173
174static int
175isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
176{
177 struct isert_device *device = isert_conn->device;
178 struct ib_device *ib_dev = device->ib_device;
179 struct iser_rx_desc *rx_desc;
180 struct ib_sge *rx_sg;
181 u64 dma_addr;
182 int i, j;
183
184 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
185 sizeof(struct iser_rx_desc), GFP_KERNEL);
186 if (!isert_conn->rx_descs)
187 return -ENOMEM;
188
189 rx_desc = isert_conn->rx_descs;
190
191 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
192 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
193 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
194 if (ib_dma_mapping_error(ib_dev, dma_addr))
195 goto dma_map_fail;
196
197 rx_desc->dma_addr = dma_addr;
198
199 rx_sg = &rx_desc->rx_sg;
200 rx_sg->addr = rx_desc->dma_addr;
201 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
202 rx_sg->lkey = device->pd->local_dma_lkey;
203 rx_desc->rx_cqe.done = isert_recv_done;
204 }
205
206 return 0;
207
208dma_map_fail:
209 rx_desc = isert_conn->rx_descs;
210 for (j = 0; j < i; j++, rx_desc++) {
211 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
212 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
213 }
214 kfree(isert_conn->rx_descs);
215 isert_conn->rx_descs = NULL;
216 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
217 return -ENOMEM;
218}
219
220static void
221isert_free_rx_descriptors(struct isert_conn *isert_conn)
222{
223 struct ib_device *ib_dev = isert_conn->device->ib_device;
224 struct iser_rx_desc *rx_desc;
225 int i;
226
227 if (!isert_conn->rx_descs)
228 return;
229
230 rx_desc = isert_conn->rx_descs;
231 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
232 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
233 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
234 }
235
236 kfree(isert_conn->rx_descs);
237 isert_conn->rx_descs = NULL;
238}
239
240static void
241isert_free_comps(struct isert_device *device)
242{
243 int i;
244
245 for (i = 0; i < device->comps_used; i++) {
246 struct isert_comp *comp = &device->comps[i];
247
248 if (comp->cq)
249 ib_free_cq(comp->cq);
250 }
251 kfree(device->comps);
252}
253
254static int
255isert_alloc_comps(struct isert_device *device)
256{
257 int i, max_cqe, ret = 0;
258
259 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
260 device->ib_device->num_comp_vectors));
261
262 isert_info("Using %d CQs, %s supports %d vectors support "
263 "pi_capable %d\n",
264 device->comps_used, device->ib_device->name,
265 device->ib_device->num_comp_vectors,
266 device->pi_capable);
267
268 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
269 GFP_KERNEL);
270 if (!device->comps)
271 return -ENOMEM;
272
273 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
274
275 for (i = 0; i < device->comps_used; i++) {
276 struct isert_comp *comp = &device->comps[i];
277
278 comp->device = device;
279 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
280 IB_POLL_WORKQUEUE);
281 if (IS_ERR(comp->cq)) {
282 isert_err("Unable to allocate cq\n");
283 ret = PTR_ERR(comp->cq);
284 comp->cq = NULL;
285 goto out_cq;
286 }
287 }
288
289 return 0;
290out_cq:
291 isert_free_comps(device);
292 return ret;
293}
294
295static int
296isert_create_device_ib_res(struct isert_device *device)
297{
298 struct ib_device *ib_dev = device->ib_device;
299 int ret;
300
301 isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
302 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
303
304 ret = isert_alloc_comps(device);
305 if (ret)
306 goto out;
307
308 device->pd = ib_alloc_pd(ib_dev, 0);
309 if (IS_ERR(device->pd)) {
310 ret = PTR_ERR(device->pd);
311 isert_err("failed to allocate pd, device %p, ret=%d\n",
312 device, ret);
313 goto out_cq;
314 }
315
316 /* Check signature cap */
317 device->pi_capable = ib_dev->attrs.device_cap_flags &
318 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
319
320 return 0;
321
322out_cq:
323 isert_free_comps(device);
324out:
325 if (ret > 0)
326 ret = -EINVAL;
327 return ret;
328}
329
330static void
331isert_free_device_ib_res(struct isert_device *device)
332{
333 isert_info("device %p\n", device);
334
335 ib_dealloc_pd(device->pd);
336 isert_free_comps(device);
337}
338
339static void
340isert_device_put(struct isert_device *device)
341{
342 mutex_lock(&device_list_mutex);
343 device->refcount--;
344 isert_info("device %p refcount %d\n", device, device->refcount);
345 if (!device->refcount) {
346 isert_free_device_ib_res(device);
347 list_del(&device->dev_node);
348 kfree(device);
349 }
350 mutex_unlock(&device_list_mutex);
351}
352
353static struct isert_device *
354isert_device_get(struct rdma_cm_id *cma_id)
355{
356 struct isert_device *device;
357 int ret;
358
359 mutex_lock(&device_list_mutex);
360 list_for_each_entry(device, &device_list, dev_node) {
361 if (device->ib_device->node_guid == cma_id->device->node_guid) {
362 device->refcount++;
363 isert_info("Found iser device %p refcount %d\n",
364 device, device->refcount);
365 mutex_unlock(&device_list_mutex);
366 return device;
367 }
368 }
369
370 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
371 if (!device) {
372 mutex_unlock(&device_list_mutex);
373 return ERR_PTR(-ENOMEM);
374 }
375
376 INIT_LIST_HEAD(&device->dev_node);
377
378 device->ib_device = cma_id->device;
379 ret = isert_create_device_ib_res(device);
380 if (ret) {
381 kfree(device);
382 mutex_unlock(&device_list_mutex);
383 return ERR_PTR(ret);
384 }
385
386 device->refcount++;
387 list_add_tail(&device->dev_node, &device_list);
388 isert_info("Created a new iser device %p refcount %d\n",
389 device, device->refcount);
390 mutex_unlock(&device_list_mutex);
391
392 return device;
393}
394
395static void
396isert_init_conn(struct isert_conn *isert_conn)
397{
398 isert_conn->state = ISER_CONN_INIT;
399 INIT_LIST_HEAD(&isert_conn->node);
400 init_completion(&isert_conn->login_comp);
401 init_completion(&isert_conn->login_req_comp);
402 init_waitqueue_head(&isert_conn->rem_wait);
403 kref_init(&isert_conn->kref);
404 mutex_init(&isert_conn->mutex);
405 INIT_WORK(&isert_conn->release_work, isert_release_work);
406}
407
408static void
409isert_free_login_buf(struct isert_conn *isert_conn)
410{
411 struct ib_device *ib_dev = isert_conn->device->ib_device;
412
413 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
414 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
415 kfree(isert_conn->login_rsp_buf);
416
417 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
418 ISER_RX_PAYLOAD_SIZE,
419 DMA_FROM_DEVICE);
420 kfree(isert_conn->login_req_buf);
421}
422
423static int
424isert_alloc_login_buf(struct isert_conn *isert_conn,
425 struct ib_device *ib_dev)
426{
427 int ret;
428
429 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
430 GFP_KERNEL);
431 if (!isert_conn->login_req_buf)
432 return -ENOMEM;
433
434 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
435 isert_conn->login_req_buf,
436 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
437 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
438 if (ret) {
439 isert_err("login_req_dma mapping error: %d\n", ret);
440 isert_conn->login_req_dma = 0;
441 goto out_free_login_req_buf;
442 }
443
444 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
445 if (!isert_conn->login_rsp_buf) {
446 ret = -ENOMEM;
447 goto out_unmap_login_req_buf;
448 }
449
450 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
451 isert_conn->login_rsp_buf,
452 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
453 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
454 if (ret) {
455 isert_err("login_rsp_dma mapping error: %d\n", ret);
456 isert_conn->login_rsp_dma = 0;
457 goto out_free_login_rsp_buf;
458 }
459
460 return 0;
461
462out_free_login_rsp_buf:
463 kfree(isert_conn->login_rsp_buf);
464out_unmap_login_req_buf:
465 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
466 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
467out_free_login_req_buf:
468 kfree(isert_conn->login_req_buf);
469 return ret;
470}
471
472static void
473isert_set_nego_params(struct isert_conn *isert_conn,
474 struct rdma_conn_param *param)
475{
476 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;
477
478 /* Set max inflight RDMA READ requests */
479 isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
480 attr->max_qp_init_rd_atom);
481 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
482
483 if (param->private_data) {
484 u8 flags = *(u8 *)param->private_data;
485
486 /*
487 * use remote invalidation if the both initiator
488 * and the HCA support it
489 */
490 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
491 (attr->device_cap_flags &
492 IB_DEVICE_MEM_MGT_EXTENSIONS);
493 if (isert_conn->snd_w_inv)
494 isert_info("Using remote invalidation\n");
495 }
496}
497
498static int
499isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
500{
501 struct isert_np *isert_np = cma_id->context;
502 struct iscsi_np *np = isert_np->np;
503 struct isert_conn *isert_conn;
504 struct isert_device *device;
505 int ret = 0;
506
507 spin_lock_bh(&np->np_thread_lock);
508 if (!np->enabled) {
509 spin_unlock_bh(&np->np_thread_lock);
510 isert_dbg("iscsi_np is not enabled, reject connect request\n");
511 return rdma_reject(cma_id, NULL, 0);
512 }
513 spin_unlock_bh(&np->np_thread_lock);
514
515 isert_dbg("cma_id: %p, portal: %p\n",
516 cma_id, cma_id->context);
517
518 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
519 if (!isert_conn)
520 return -ENOMEM;
521
522 isert_init_conn(isert_conn);
523 isert_conn->cm_id = cma_id;
524
525 ret = isert_alloc_login_buf(isert_conn, cma_id->device);
526 if (ret)
527 goto out;
528
529 device = isert_device_get(cma_id);
530 if (IS_ERR(device)) {
531 ret = PTR_ERR(device);
532 goto out_rsp_dma_map;
533 }
534 isert_conn->device = device;
535
536 isert_set_nego_params(isert_conn, &event->param.conn);
537
538 ret = isert_conn_setup_qp(isert_conn, cma_id);
539 if (ret)
540 goto out_conn_dev;
541
542 ret = isert_login_post_recv(isert_conn);
543 if (ret)
544 goto out_conn_dev;
545
546 ret = isert_rdma_accept(isert_conn);
547 if (ret)
548 goto out_conn_dev;
549
550 mutex_lock(&isert_np->mutex);
551 list_add_tail(&isert_conn->node, &isert_np->accepted);
552 mutex_unlock(&isert_np->mutex);
553
554 return 0;
555
556out_conn_dev:
557 isert_device_put(device);
558out_rsp_dma_map:
559 isert_free_login_buf(isert_conn);
560out:
561 kfree(isert_conn);
562 rdma_reject(cma_id, NULL, 0);
563 return ret;
564}
565
566static void
567isert_connect_release(struct isert_conn *isert_conn)
568{
569 struct isert_device *device = isert_conn->device;
570
571 isert_dbg("conn %p\n", isert_conn);
572
573 BUG_ON(!device);
574
575 isert_free_rx_descriptors(isert_conn);
576 if (isert_conn->cm_id &&
577 !isert_conn->dev_removed)
578 rdma_destroy_id(isert_conn->cm_id);
579
580 if (isert_conn->qp) {
581 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;
582
583 isert_comp_put(comp);
584 ib_destroy_qp(isert_conn->qp);
585 }
586
587 if (isert_conn->login_req_buf)
588 isert_free_login_buf(isert_conn);
589
590 isert_device_put(device);
591
592 if (isert_conn->dev_removed)
593 wake_up_interruptible(&isert_conn->rem_wait);
594 else
595 kfree(isert_conn);
596}
597
598static void
599isert_connected_handler(struct rdma_cm_id *cma_id)
600{
601 struct isert_conn *isert_conn = cma_id->qp->qp_context;
602 struct isert_np *isert_np = cma_id->context;
603
604 isert_info("conn %p\n", isert_conn);
605
606 mutex_lock(&isert_conn->mutex);
607 isert_conn->state = ISER_CONN_UP;
608 kref_get(&isert_conn->kref);
609 mutex_unlock(&isert_conn->mutex);
610
611 mutex_lock(&isert_np->mutex);
612 list_move_tail(&isert_conn->node, &isert_np->pending);
613 mutex_unlock(&isert_np->mutex);
614
615 isert_info("np %p: Allow accept_np to continue\n", isert_np);
616 up(&isert_np->sem);
617}
618
619static void
620isert_release_kref(struct kref *kref)
621{
622 struct isert_conn *isert_conn = container_of(kref,
623 struct isert_conn, kref);
624
625 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
626 current->pid);
627
628 isert_connect_release(isert_conn);
629}
630
631static void
632isert_put_conn(struct isert_conn *isert_conn)
633{
634 kref_put(&isert_conn->kref, isert_release_kref);
635}
636
637static void
638isert_handle_unbound_conn(struct isert_conn *isert_conn)
639{
640 struct isert_np *isert_np = isert_conn->cm_id->context;
641
642 mutex_lock(&isert_np->mutex);
643 if (!list_empty(&isert_conn->node)) {
644 /*
645 * This means iscsi doesn't know this connection
646 * so schedule a cleanup ourselves
647 */
648 list_del_init(&isert_conn->node);
649 isert_put_conn(isert_conn);
650 queue_work(isert_release_wq, &isert_conn->release_work);
651 }
652 mutex_unlock(&isert_np->mutex);
653}
654
655/**
656 * isert_conn_terminate() - Initiate connection termination
657 * @isert_conn: isert connection struct
658 *
659 * Notes:
660 * In case the connection state is BOUND, move state
661 * to TEMINATING and start teardown sequence (rdma_disconnect).
662 * In case the connection state is UP, complete flush as well.
663 *
664 * This routine must be called with mutex held. Thus it is
665 * safe to call multiple times.
666 */
667static void
668isert_conn_terminate(struct isert_conn *isert_conn)
669{
670 int err;
671
672 if (isert_conn->state >= ISER_CONN_TERMINATING)
673 return;
674
675 isert_info("Terminating conn %p state %d\n",
676 isert_conn, isert_conn->state);
677 isert_conn->state = ISER_CONN_TERMINATING;
678 err = rdma_disconnect(isert_conn->cm_id);
679 if (err)
680 isert_warn("Failed rdma_disconnect isert_conn %p\n",
681 isert_conn);
682}
683
684static int
685isert_np_cma_handler(struct isert_np *isert_np,
686 enum rdma_cm_event_type event)
687{
688 isert_dbg("%s (%d): isert np %p\n",
689 rdma_event_msg(event), event, isert_np);
690
691 switch (event) {
692 case RDMA_CM_EVENT_DEVICE_REMOVAL:
693 isert_np->cm_id = NULL;
694 break;
695 case RDMA_CM_EVENT_ADDR_CHANGE:
696 isert_np->cm_id = isert_setup_id(isert_np);
697 if (IS_ERR(isert_np->cm_id)) {
698 isert_err("isert np %p setup id failed: %ld\n",
699 isert_np, PTR_ERR(isert_np->cm_id));
700 isert_np->cm_id = NULL;
701 }
702 break;
703 default:
704 isert_err("isert np %p Unexpected event %d\n",
705 isert_np, event);
706 }
707
708 return -1;
709}
710
711static int
712isert_disconnected_handler(struct rdma_cm_id *cma_id,
713 enum rdma_cm_event_type event)
714{
715 struct isert_conn *isert_conn = cma_id->qp->qp_context;
716
717 mutex_lock(&isert_conn->mutex);
718 switch (isert_conn->state) {
719 case ISER_CONN_TERMINATING:
720 break;
721 case ISER_CONN_UP:
722 isert_conn_terminate(isert_conn);
723 ib_drain_qp(isert_conn->qp);
724 isert_handle_unbound_conn(isert_conn);
725 break;
726 case ISER_CONN_BOUND:
727 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
728 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
729 break;
730 default:
731 isert_warn("conn %p terminating in state %d\n",
732 isert_conn, isert_conn->state);
733 }
734 mutex_unlock(&isert_conn->mutex);
735
736 return 0;
737}
738
739static int
740isert_connect_error(struct rdma_cm_id *cma_id)
741{
742 struct isert_conn *isert_conn = cma_id->qp->qp_context;
743
744 ib_drain_qp(isert_conn->qp);
745 list_del_init(&isert_conn->node);
746 isert_conn->cm_id = NULL;
747 isert_put_conn(isert_conn);
748
749 return -1;
750}
751
752static int
753isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
754{
755 struct isert_np *isert_np = cma_id->context;
756 struct isert_conn *isert_conn;
757 int ret = 0;
758
759 isert_info("%s (%d): status %d id %p np %p\n",
760 rdma_event_msg(event->event), event->event,
761 event->status, cma_id, cma_id->context);
762
763 if (isert_np->cm_id == cma_id)
764 return isert_np_cma_handler(cma_id->context, event->event);
765
766 switch (event->event) {
767 case RDMA_CM_EVENT_CONNECT_REQUEST:
768 ret = isert_connect_request(cma_id, event);
769 if (ret)
770 isert_err("failed handle connect request %d\n", ret);
771 break;
772 case RDMA_CM_EVENT_ESTABLISHED:
773 isert_connected_handler(cma_id);
774 break;
775 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
776 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
777 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
778 ret = isert_disconnected_handler(cma_id, event->event);
779 break;
780 case RDMA_CM_EVENT_DEVICE_REMOVAL:
781 isert_conn = cma_id->qp->qp_context;
782 isert_conn->dev_removed = true;
783 isert_disconnected_handler(cma_id, event->event);
784 wait_event_interruptible(isert_conn->rem_wait,
785 isert_conn->state == ISER_CONN_DOWN);
786 kfree(isert_conn);
787 /*
788 * return non-zero from the callback to destroy
789 * the rdma cm id
790 */
791 return 1;
792 case RDMA_CM_EVENT_REJECTED:
793 isert_info("Connection rejected: %s\n",
794 rdma_reject_msg(cma_id, event->status));
795 /* fall through */
796 case RDMA_CM_EVENT_UNREACHABLE:
797 case RDMA_CM_EVENT_CONNECT_ERROR:
798 ret = isert_connect_error(cma_id);
799 break;
800 default:
801 isert_err("Unhandled RDMA CMA event: %d\n", event->event);
802 break;
803 }
804
805 return ret;
806}
807
808static int
809isert_post_recvm(struct isert_conn *isert_conn, u32 count)
810{
811 struct ib_recv_wr *rx_wr, *rx_wr_failed;
812 int i, ret;
813 struct iser_rx_desc *rx_desc;
814
815 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
816 rx_desc = &isert_conn->rx_descs[i];
817
818 rx_wr->wr_cqe = &rx_desc->rx_cqe;
819 rx_wr->sg_list = &rx_desc->rx_sg;
820 rx_wr->num_sge = 1;
821 rx_wr->next = rx_wr + 1;
822 rx_desc->in_use = false;
823 }
824 rx_wr--;
825 rx_wr->next = NULL; /* mark end of work requests list */
826
827 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
828 &rx_wr_failed);
829 if (ret)
830 isert_err("ib_post_recv() failed with ret: %d\n", ret);
831
832 return ret;
833}
834
835static int
836isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
837{
838 struct ib_recv_wr *rx_wr_failed, rx_wr;
839 int ret;
840
841 if (!rx_desc->in_use) {
842 /*
843 * if the descriptor is not in-use we already reposted it
844 * for recv, so just silently return
845 */
846 return 0;
847 }
848
849 rx_desc->in_use = false;
850 rx_wr.wr_cqe = &rx_desc->rx_cqe;
851 rx_wr.sg_list = &rx_desc->rx_sg;
852 rx_wr.num_sge = 1;
853 rx_wr.next = NULL;
854
855 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
856 if (ret)
857 isert_err("ib_post_recv() failed with ret: %d\n", ret);
858
859 return ret;
860}
861
862static int
863isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
864{
865 struct ib_device *ib_dev = isert_conn->cm_id->device;
866 struct ib_send_wr send_wr, *send_wr_failed;
867 int ret;
868
869 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
870 ISER_HEADERS_LEN, DMA_TO_DEVICE);
871
872 tx_desc->tx_cqe.done = isert_login_send_done;
873
874 send_wr.next = NULL;
875 send_wr.wr_cqe = &tx_desc->tx_cqe;
876 send_wr.sg_list = tx_desc->tx_sg;
877 send_wr.num_sge = tx_desc->num_sge;
878 send_wr.opcode = IB_WR_SEND;
879 send_wr.send_flags = IB_SEND_SIGNALED;
880
881 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
882 if (ret)
883 isert_err("ib_post_send() failed, ret: %d\n", ret);
884
885 return ret;
886}
887
888static void
889isert_create_send_desc(struct isert_conn *isert_conn,
890 struct isert_cmd *isert_cmd,
891 struct iser_tx_desc *tx_desc)
892{
893 struct isert_device *device = isert_conn->device;
894 struct ib_device *ib_dev = device->ib_device;
895
896 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
897 ISER_HEADERS_LEN, DMA_TO_DEVICE);
898
899 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
900 tx_desc->iser_header.flags = ISCSI_CTRL;
901
902 tx_desc->num_sge = 1;
903
904 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
905 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
906 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
907 }
908}
909
910static int
911isert_init_tx_hdrs(struct isert_conn *isert_conn,
912 struct iser_tx_desc *tx_desc)
913{
914 struct isert_device *device = isert_conn->device;
915 struct ib_device *ib_dev = device->ib_device;
916 u64 dma_addr;
917
918 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
919 ISER_HEADERS_LEN, DMA_TO_DEVICE);
920 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
921 isert_err("ib_dma_mapping_error() failed\n");
922 return -ENOMEM;
923 }
924
925 tx_desc->dma_addr = dma_addr;
926 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
927 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
928 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
929
930 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
931 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
932 tx_desc->tx_sg[0].lkey);
933
934 return 0;
935}
936
937static void
938isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
939 struct ib_send_wr *send_wr)
940{
941 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
942
943 tx_desc->tx_cqe.done = isert_send_done;
944 send_wr->wr_cqe = &tx_desc->tx_cqe;
945
946 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
947 send_wr->opcode = IB_WR_SEND_WITH_INV;
948 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
949 } else {
950 send_wr->opcode = IB_WR_SEND;
951 }
952
953 send_wr->sg_list = &tx_desc->tx_sg[0];
954 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
955 send_wr->send_flags = IB_SEND_SIGNALED;
956}
957
958static int
959isert_login_post_recv(struct isert_conn *isert_conn)
960{
961 struct ib_recv_wr rx_wr, *rx_wr_fail;
962 struct ib_sge sge;
963 int ret;
964
965 memset(&sge, 0, sizeof(struct ib_sge));
966 sge.addr = isert_conn->login_req_dma;
967 sge.length = ISER_RX_PAYLOAD_SIZE;
968 sge.lkey = isert_conn->device->pd->local_dma_lkey;
969
970 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
971 sge.addr, sge.length, sge.lkey);
972
973 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
974
975 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
976 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
977 rx_wr.sg_list = &sge;
978 rx_wr.num_sge = 1;
979
980 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
981 if (ret)
982 isert_err("ib_post_recv() failed: %d\n", ret);
983
984 return ret;
985}
986
987static int
988isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
989 u32 length)
990{
991 struct isert_conn *isert_conn = conn->context;
992 struct isert_device *device = isert_conn->device;
993 struct ib_device *ib_dev = device->ib_device;
994 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
995 int ret;
996
997 isert_create_send_desc(isert_conn, NULL, tx_desc);
998
999 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
1000 sizeof(struct iscsi_hdr));
1001
1002 isert_init_tx_hdrs(isert_conn, tx_desc);
1003
1004 if (length > 0) {
1005 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
1006
1007 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
1008 length, DMA_TO_DEVICE);
1009
1010 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
1011
1012 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
1013 length, DMA_TO_DEVICE);
1014
1015 tx_dsg->addr = isert_conn->login_rsp_dma;
1016 tx_dsg->length = length;
1017 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
1018 tx_desc->num_sge = 2;
1019 }
1020 if (!login->login_failed) {
1021 if (login->login_complete) {
1022 ret = isert_alloc_rx_descriptors(isert_conn);
1023 if (ret)
1024 return ret;
1025
1026 ret = isert_post_recvm(isert_conn,
1027 ISERT_QP_MAX_RECV_DTOS);
1028 if (ret)
1029 return ret;
1030
1031 /* Now we are in FULL_FEATURE phase */
1032 mutex_lock(&isert_conn->mutex);
1033 isert_conn->state = ISER_CONN_FULL_FEATURE;
1034 mutex_unlock(&isert_conn->mutex);
1035 goto post_send;
1036 }
1037
1038 ret = isert_login_post_recv(isert_conn);
1039 if (ret)
1040 return ret;
1041 }
1042post_send:
1043 ret = isert_login_post_send(isert_conn, tx_desc);
1044 if (ret)
1045 return ret;
1046
1047 return 0;
1048}
1049
1050static void
1051isert_rx_login_req(struct isert_conn *isert_conn)
1052{
1053 struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
1054 int rx_buflen = isert_conn->login_req_len;
1055 struct iscsi_conn *conn = isert_conn->conn;
1056 struct iscsi_login *login = conn->conn_login;
1057 int size;
1058
1059 isert_info("conn %p\n", isert_conn);
1060
1061 WARN_ON_ONCE(!login);
1062
1063 if (login->first_request) {
1064 struct iscsi_login_req *login_req =
1065 (struct iscsi_login_req *)&rx_desc->iscsi_header;
1066 /*
1067 * Setup the initial iscsi_login values from the leading
1068 * login request PDU.
1069 */
1070 login->leading_connection = (!login_req->tsih) ? 1 : 0;
1071 login->current_stage =
1072 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
1073 >> 2;
1074 login->version_min = login_req->min_version;
1075 login->version_max = login_req->max_version;
1076 memcpy(login->isid, login_req->isid, 6);
1077 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
1078 login->init_task_tag = login_req->itt;
1079 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1080 login->cid = be16_to_cpu(login_req->cid);
1081 login->tsih = be16_to_cpu(login_req->tsih);
1082 }
1083
1084 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1085
1086 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1087 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1088 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1089 MAX_KEY_VALUE_PAIRS);
1090 memcpy(login->req_buf, &rx_desc->data[0], size);
1091
1092 if (login->first_request) {
1093 complete(&isert_conn->login_comp);
1094 return;
1095 }
1096 schedule_delayed_work(&conn->login_work, 0);
1097}
1098
1099static struct iscsi_cmd
1100*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
1101{
1102 struct isert_conn *isert_conn = conn->context;
1103 struct isert_cmd *isert_cmd;
1104 struct iscsi_cmd *cmd;
1105
1106 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1107 if (!cmd) {
1108 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1109 return NULL;
1110 }
1111 isert_cmd = iscsit_priv_cmd(cmd);
1112 isert_cmd->conn = isert_conn;
1113 isert_cmd->iscsi_cmd = cmd;
1114 isert_cmd->rx_desc = rx_desc;
1115
1116 return cmd;
1117}
1118
1119static int
1120isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1121 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
1122 struct iser_rx_desc *rx_desc, unsigned char *buf)
1123{
1124 struct iscsi_conn *conn = isert_conn->conn;
1125 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1126 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1127 bool dump_payload = false;
1128 unsigned int data_len;
1129
1130 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1131 if (rc < 0)
1132 return rc;
1133
1134 imm_data = cmd->immediate_data;
1135 imm_data_len = cmd->first_burst_len;
1136 unsol_data = cmd->unsolicited_data;
1137 data_len = cmd->se_cmd.data_length;
1138
1139 if (imm_data && imm_data_len == data_len)
1140 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1141 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1142 if (rc < 0) {
1143 return 0;
1144 } else if (rc > 0) {
1145 dump_payload = true;
1146 goto sequence_cmd;
1147 }
1148
1149 if (!imm_data)
1150 return 0;
1151
1152 if (imm_data_len != data_len) {
1153 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1154 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
1155 &rx_desc->data[0], imm_data_len);
1156 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
1157 sg_nents, imm_data_len);
1158 } else {
1159 sg_init_table(&isert_cmd->sg, 1);
1160 cmd->se_cmd.t_data_sg = &isert_cmd->sg;
1161 cmd->se_cmd.t_data_nents = 1;
1162 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
1163 isert_dbg("Transfer Immediate imm_data_len: %d\n",
1164 imm_data_len);
1165 }
1166
1167 cmd->write_data_done += imm_data_len;
1168
1169 if (cmd->write_data_done == cmd->se_cmd.data_length) {
1170 spin_lock_bh(&cmd->istate_lock);
1171 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1172 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1173 spin_unlock_bh(&cmd->istate_lock);
1174 }
1175
1176sequence_cmd:
1177 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
1178
1179 if (!rc && dump_payload == false && unsol_data)
1180 iscsit_set_unsoliticed_dataout(cmd);
1181 else if (dump_payload && imm_data)
1182 target_put_sess_cmd(&cmd->se_cmd);
1183
1184 return 0;
1185}
1186
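/*
 * Handle an unsolicited Data-Out PDU received on the iSCSI channel: copy
 * the payload into the command's scatterlist at the current write offset
 * and re-post the receive buffer, since more Data-Outs for the same
 * command may follow.
 */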
1187static int
1188isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1189 struct iser_rx_desc *rx_desc, unsigned char *buf)
1190{
1191 struct scatterlist *sg_start;
1192 struct iscsi_conn *conn = isert_conn->conn;
1193 struct iscsi_cmd *cmd = NULL;
1194 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1195 u32 unsol_data_len = ntoh24(hdr->dlength);
1196 int rc, sg_nents, sg_off, page_off;
1197
1198 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1199 if (rc < 0)
1200 return rc;
1201 else if (!cmd)
1202 return 0;
1203 /*
1204 * FIXME: Unexpected unsolicited_data out
1205 */
1206 if (!cmd->unsolicited_data) {
1207 isert_err("Received unexpected solicited data payload\n");
1208 dump_stack();
1209 return -1;
1210 }
1211
1212 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1213 "write_data_done: %u, data_length: %u\n",
1214 unsol_data_len, cmd->write_data_done,
1215 cmd->se_cmd.data_length);
1216
1217 sg_off = cmd->write_data_done / PAGE_SIZE;
1218 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1219 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1220 page_off = cmd->write_data_done % PAGE_SIZE;
1221 /*
1222 * FIXME: Non page-aligned unsolicited_data out
1223 */
1224 if (page_off) {
1225 isert_err("unexpected non-page aligned data payload\n");
1226 dump_stack();
1227 return -1;
1228 }
1229 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1230 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1231 sg_nents, &rx_desc->data[0], unsol_data_len);
1232
1233 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1234 unsol_data_len);
1235
1236 rc = iscsit_check_dataout_payload(cmd, hdr, false);
1237 if (rc < 0)
1238 return rc;
1239
1240 /*
1241	 * Multiple Data-Outs for the same command can arrive, so re-post
1242	 * the receive buffer beforehand.
1243 */
1244 rc = isert_post_recv(isert_conn, rx_desc);
1245 if (rc) {
1246 isert_err("ib_post_recv failed with %d\n", rc);
1247 return rc;
1248 }
1249 return 0;
1250}
1251
1252static int
1253isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1254 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1255 unsigned char *buf)
1256{
1257 struct iscsi_conn *conn = isert_conn->conn;
1258 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1259 int rc;
1260
1261 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1262 if (rc < 0)
1263 return rc;
1264 /*
1265 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1266 */
1267
1268 return iscsit_process_nop_out(conn, cmd, hdr);
1269}
1270
1271static int
1272isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1273 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1274 struct iscsi_text *hdr)
1275{
1276 struct iscsi_conn *conn = isert_conn->conn;
1277 u32 payload_length = ntoh24(hdr->dlength);
1278 int rc;
1279 unsigned char *text_in = NULL;
1280
1281 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1282 if (rc < 0)
1283 return rc;
1284
1285 if (payload_length) {
1286 text_in = kzalloc(payload_length, GFP_KERNEL);
1287 if (!text_in)
1288 return -ENOMEM;
1289 }
1290 cmd->text_in_ptr = text_in;
1291
1292 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1293
1294 return iscsit_process_text_cmd(conn, cmd, hdr);
1295}
1296
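/*
 * Dispatch a received iSCSI PDU by opcode, allocating a command context
 * where one is needed and recording the read/write stag and VA values
 * advertised in the iSER header.
 */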
1297static int
1298isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1299 uint32_t read_stag, uint64_t read_va,
1300 uint32_t write_stag, uint64_t write_va)
1301{
1302 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1303 struct iscsi_conn *conn = isert_conn->conn;
1304 struct iscsi_cmd *cmd;
1305 struct isert_cmd *isert_cmd;
1306 int ret = -EINVAL;
1307 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1308
1309 if (conn->sess->sess_ops->SessionType &&
1310 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1311 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1312 " ignoring\n", opcode);
1313 return 0;
1314 }
1315
1316 switch (opcode) {
1317 case ISCSI_OP_SCSI_CMD:
1318 cmd = isert_allocate_cmd(conn, rx_desc);
1319 if (!cmd)
1320 break;
1321
1322 isert_cmd = iscsit_priv_cmd(cmd);
1323 isert_cmd->read_stag = read_stag;
1324 isert_cmd->read_va = read_va;
1325 isert_cmd->write_stag = write_stag;
1326 isert_cmd->write_va = write_va;
1327 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;
1328
1329 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1330 rx_desc, (unsigned char *)hdr);
1331 break;
1332 case ISCSI_OP_NOOP_OUT:
1333 cmd = isert_allocate_cmd(conn, rx_desc);
1334 if (!cmd)
1335 break;
1336
1337 isert_cmd = iscsit_priv_cmd(cmd);
1338 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1339 rx_desc, (unsigned char *)hdr);
1340 break;
1341 case ISCSI_OP_SCSI_DATA_OUT:
1342 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1343 (unsigned char *)hdr);
1344 break;
1345 case ISCSI_OP_SCSI_TMFUNC:
1346 cmd = isert_allocate_cmd(conn, rx_desc);
1347 if (!cmd)
1348 break;
1349
1350 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1351 (unsigned char *)hdr);
1352 break;
1353 case ISCSI_OP_LOGOUT:
1354 cmd = isert_allocate_cmd(conn, rx_desc);
1355 if (!cmd)
1356 break;
1357
1358 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1359 break;
1360 case ISCSI_OP_TEXT:
1361 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
1362 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1363 else
1364 cmd = isert_allocate_cmd(conn, rx_desc);
1365
1366 if (!cmd)
1367 break;
1368
1369 isert_cmd = iscsit_priv_cmd(cmd);
1370 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1371 rx_desc, (struct iscsi_text *)hdr);
1372 break;
1373 default:
1374 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1375 dump_stack();
1376 break;
1377 }
1378
1379 return ret;
1380}
1381
1382static void
1383isert_print_wc(struct ib_wc *wc, const char *type)
1384{
1385 if (wc->status != IB_WC_WR_FLUSH_ERR)
1386 isert_err("%s failure: %s (%d) vend_err %x\n", type,
1387 ib_wc_status_msg(wc->status), wc->status,
1388 wc->vendor_err);
1389 else
1390 isert_dbg("%s failure: %s (%d)\n", type,
1391 ib_wc_status_msg(wc->status), wc->status);
1392}
1393
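/*
 * Receive completion handler: sync the RX descriptor for CPU access,
 * parse the iSER header for any advertised read/write stag and VA, and
 * hand the iSCSI PDU to isert_rx_opcode() for processing.
 */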
1394static void
1395isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1396{
1397 struct isert_conn *isert_conn = wc->qp->qp_context;
1398 struct ib_device *ib_dev = isert_conn->cm_id->device;
1399 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
1400 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1401 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
1402 uint64_t read_va = 0, write_va = 0;
1403 uint32_t read_stag = 0, write_stag = 0;
1404
1405 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1406 isert_print_wc(wc, "recv");
1407 if (wc->status != IB_WC_WR_FLUSH_ERR)
1408 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1409 return;
1410 }
1411
1412 rx_desc->in_use = true;
1413
1414 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
1415 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1416
1417 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1418 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
1419 (int)(wc->byte_len - ISER_HEADERS_LEN));
1420
1421 switch (iser_ctrl->flags & 0xF0) {
1422 case ISCSI_CTRL:
1423 if (iser_ctrl->flags & ISER_RSV) {
1424 read_stag = be32_to_cpu(iser_ctrl->read_stag);
1425 read_va = be64_to_cpu(iser_ctrl->read_va);
1426 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1427 read_stag, (unsigned long long)read_va);
1428 }
1429 if (iser_ctrl->flags & ISER_WSV) {
1430 write_stag = be32_to_cpu(iser_ctrl->write_stag);
1431 write_va = be64_to_cpu(iser_ctrl->write_va);
1432 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1433 write_stag, (unsigned long long)write_va);
1434 }
1435
1436 isert_dbg("ISER ISCSI_CTRL PDU\n");
1437 break;
1438 case ISER_HELLO:
1439 isert_err("iSER Hello message\n");
1440 break;
1441 default:
1442 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
1443 break;
1444 }
1445
1446 isert_rx_opcode(isert_conn, rx_desc,
1447 read_stag, read_va, write_stag, write_va);
1448
1449 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
1450 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1451}
1452
1453static void
1454isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1455{
1456 struct isert_conn *isert_conn = wc->qp->qp_context;
1457 struct ib_device *ib_dev = isert_conn->device->ib_device;
1458
1459 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1460 isert_print_wc(wc, "login recv");
1461 return;
1462 }
1463
1464 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
1465 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1466
1467 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
1468
1469 if (isert_conn->conn) {
1470 struct iscsi_login *login = isert_conn->conn->conn_login;
1471
1472 if (login && !login->first_request)
1473 isert_rx_login_req(isert_conn);
1474 }
1475
1476 mutex_lock(&isert_conn->mutex);
1477 complete(&isert_conn->login_req_comp);
1478 mutex_unlock(&isert_conn->mutex);
1479
1480 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
1481 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1482}
1483
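/*
 * Tear down a command's rdma_rw context, using the signature variant
 * when T10-PI offload was used for the transfer.
 */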
1484static void
1485isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
1486{
1487 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
1488 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
1489
1490 if (!cmd->rw.nr_ops)
1491 return;
1492
1493 if (isert_prot_cmd(conn, se_cmd)) {
1494 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
1495 conn->cm_id->port_num, se_cmd->t_data_sg,
1496 se_cmd->t_data_nents, se_cmd->t_prot_sg,
1497 se_cmd->t_prot_nents, dir);
1498 } else {
1499 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
1500 se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
1501 }
1502
1503 cmd->rw.nr_ops = 0;
1504}
1505
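/*
 * Release a command according to its iSCSI opcode: unlink it from the
 * connection's command list, destroy any RDMA R/W context, and hand it
 * back to target-core or iscsit_release_cmd() as appropriate.
 */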
1506static void
1507isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1508{
1509 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1510 struct isert_conn *isert_conn = isert_cmd->conn;
1511 struct iscsi_conn *conn = isert_conn->conn;
1512 struct iscsi_text_rsp *hdr;
1513
1514 isert_dbg("Cmd %p\n", isert_cmd);
1515
1516 switch (cmd->iscsi_opcode) {
1517 case ISCSI_OP_SCSI_CMD:
1518 spin_lock_bh(&conn->cmd_lock);
1519 if (!list_empty(&cmd->i_conn_node))
1520 list_del_init(&cmd->i_conn_node);
1521 spin_unlock_bh(&conn->cmd_lock);
1522
1523 if (cmd->data_direction == DMA_TO_DEVICE) {
1524 iscsit_stop_dataout_timer(cmd);
1525 /*
1526 * Check for special case during comp_err where
1527 * WRITE_PENDING has been handed off from core,
1528 * but requires an extra target_put_sess_cmd()
1529 * before transport_generic_free_cmd() below.
1530 */
1531 if (comp_err &&
1532 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1533 struct se_cmd *se_cmd = &cmd->se_cmd;
1534
1535 target_put_sess_cmd(se_cmd);
1536 }
1537 }
1538
1539 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1540 transport_generic_free_cmd(&cmd->se_cmd, 0);
1541 break;
1542 case ISCSI_OP_SCSI_TMFUNC:
1543 spin_lock_bh(&conn->cmd_lock);
1544 if (!list_empty(&cmd->i_conn_node))
1545 list_del_init(&cmd->i_conn_node);
1546 spin_unlock_bh(&conn->cmd_lock);
1547
1548 transport_generic_free_cmd(&cmd->se_cmd, 0);
1549 break;
1550 case ISCSI_OP_REJECT:
1551 case ISCSI_OP_NOOP_OUT:
1552 case ISCSI_OP_TEXT:
1553 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1554 /* If the continue bit is on, keep the command alive */
1555 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1556 break;
1557
1558 spin_lock_bh(&conn->cmd_lock);
1559 if (!list_empty(&cmd->i_conn_node))
1560 list_del_init(&cmd->i_conn_node);
1561 spin_unlock_bh(&conn->cmd_lock);
1562
1563 /*
1564 * Handle special case for REJECT when iscsi_add_reject*() has
1565 * overwritten the original iscsi_opcode assignment, and the
1566 * associated cmd->se_cmd needs to be released.
1567 */
1568 if (cmd->se_cmd.se_tfo != NULL) {
1569 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
1570 cmd->iscsi_opcode);
1571 transport_generic_free_cmd(&cmd->se_cmd, 0);
1572 break;
1573 }
1574 /* fall through */
1575 default:
1576 iscsit_release_cmd(cmd);
1577 break;
1578 }
1579}
1580
1581static void
1582isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1583{
1584 if (tx_desc->dma_addr != 0) {
1585 isert_dbg("unmap single for tx_desc->dma_addr\n");
1586 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1587 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1588 tx_desc->dma_addr = 0;
1589 }
1590}
1591
1592static void
1593isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1594 struct ib_device *ib_dev, bool comp_err)
1595{
1596 if (isert_cmd->pdu_buf_dma != 0) {
1597 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1598 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1599 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1600 isert_cmd->pdu_buf_dma = 0;
1601 }
1602
1603 isert_unmap_tx_desc(tx_desc, ib_dev);
1604 isert_put_cmd(isert_cmd, comp_err);
1605}
1606
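/*
 * Query the signature MR after a protected transfer.  Returns 1 and sets
 * se_cmd->pi_err/bad_sector if a T10-PI error was detected, 0 if the
 * transfer was clean, or a negative errno if the status query fails.
 */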
1607static int
1608isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1609{
1610 struct ib_mr_status mr_status;
1611 int ret;
1612
1613 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1614 if (ret) {
1615 isert_err("ib_check_mr_status failed, ret %d\n", ret);
1616 goto fail_mr_status;
1617 }
1618
1619 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1620 u64 sec_offset_err;
1621 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1622
1623 switch (mr_status.sig_err.err_type) {
1624 case IB_SIG_BAD_GUARD:
1625 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1626 break;
1627 case IB_SIG_BAD_REFTAG:
1628 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1629 break;
1630 case IB_SIG_BAD_APPTAG:
1631 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1632 break;
1633 }
1634 sec_offset_err = mr_status.sig_err.sig_err_offset;
1635 do_div(sec_offset_err, block_size);
1636 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1637
1638 isert_err("PI error found type %d at sector 0x%llx "
1639 "expected 0x%x vs actual 0x%x\n",
1640 mr_status.sig_err.err_type,
1641 (unsigned long long)se_cmd->bad_sector,
1642 mr_status.sig_err.expected,
1643 mr_status.sig_err.actual);
1644 ret = 1;
1645 }
1646
1647fail_mr_status:
1648 return ret;
1649}
1650
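/*
 * RDMA WRITE (Data-In) completion for T10-PI commands: check the
 * signature MR status, release the rdma_rw context, then either fail the
 * command or send the SCSI response.
 */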
1651static void
1652isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
1653{
1654 struct isert_conn *isert_conn = wc->qp->qp_context;
1655 struct isert_device *device = isert_conn->device;
1656 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1657 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1658 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
1659 int ret = 0;
1660
1661 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1662 isert_print_wc(wc, "rdma write");
1663 if (wc->status != IB_WC_WR_FLUSH_ERR)
1664 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1665 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1666 return;
1667 }
1668
1669 isert_dbg("Cmd %p\n", isert_cmd);
1670
1671 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
1672 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1673
1674 if (ret) {
1675 /*
1676		 * transport_generic_request_failure() expects to hold two
1677		 * extra references to handle queue-full, so re-add one here
1678		 * as target-core will have already dropped its reference
1679		 * after the first isert_put_datain() callback.
1680 */
1681 kref_get(&cmd->cmd_kref);
1682 transport_generic_request_failure(cmd, cmd->pi_err);
1683 } else {
1684 /*
1685 * XXX: isert_put_response() failure is not retried.
1686 */
1687 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
1688 if (ret)
1689 pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
1690 }
1691}
1692
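/*
 * RDMA READ (Data-Out) completion: stop the dataout timer, verify the
 * T10-PI status for protected commands, release the rdma_rw context and
 * hand the command to target-core for execution (or fail it on a PI
 * error).
 */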
1693static void
1694isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1695{
1696 struct isert_conn *isert_conn = wc->qp->qp_context;
1697 struct isert_device *device = isert_conn->device;
1698 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1699 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1700 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1701 struct se_cmd *se_cmd = &cmd->se_cmd;
1702 int ret = 0;
1703
1704 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1705 isert_print_wc(wc, "rdma read");
1706 if (wc->status != IB_WC_WR_FLUSH_ERR)
1707 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1708 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1709 return;
1710 }
1711
1712 isert_dbg("Cmd %p\n", isert_cmd);
1713
1714 iscsit_stop_dataout_timer(cmd);
1715
1716 if (isert_prot_cmd(isert_conn, se_cmd))
1717 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
1718 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1719 cmd->write_data_done = 0;
1720
1721 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1722 spin_lock_bh(&cmd->istate_lock);
1723 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1724 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1725 spin_unlock_bh(&cmd->istate_lock);
1726
1727 /*
1728 * transport_generic_request_failure() will drop the extra
1729	 * se_cmd->cmd_kref reference after a T10-PI error, and handle
1730	 * retries for any non-zero ->queue_status() callback errors.
1731 */
1732 if (ret)
1733 transport_generic_request_failure(se_cmd, se_cmd->pi_err);
1734 else
1735 target_execute_cmd(se_cmd);
1736}
1737
1738static void
1739isert_do_control_comp(struct work_struct *work)
1740{
1741 struct isert_cmd *isert_cmd = container_of(work,
1742 struct isert_cmd, comp_work);
1743 struct isert_conn *isert_conn = isert_cmd->conn;
1744 struct ib_device *ib_dev = isert_conn->cm_id->device;
1745 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1746
1747 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
1748
1749 switch (cmd->i_state) {
1750 case ISTATE_SEND_TASKMGTRSP:
1751 iscsit_tmr_post_handler(cmd, cmd->conn);
1752 /* fall through */
1753 case ISTATE_SEND_REJECT:
1754 case ISTATE_SEND_TEXTRSP:
1755 cmd->i_state = ISTATE_SENT_STATUS;
1756 isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
1757 ib_dev, false);
1758 break;
1759 case ISTATE_SEND_LOGOUTRSP:
1760 iscsit_logout_post_handler(cmd, cmd->conn);
1761 break;
1762 default:
1763 isert_err("Unknown i_state %d\n", cmd->i_state);
1764 dump_stack();
1765 break;
1766 }
1767}
1768
1769static void
1770isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
1771{
1772 struct isert_conn *isert_conn = wc->qp->qp_context;
1773 struct ib_device *ib_dev = isert_conn->cm_id->device;
1774 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1775
1776 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1777 isert_print_wc(wc, "login send");
1778 if (wc->status != IB_WC_WR_FLUSH_ERR)
1779 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1780 }
1781
1782 isert_unmap_tx_desc(tx_desc, ib_dev);
1783}
1784
1785static void
1786isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
1787{
1788 struct isert_conn *isert_conn = wc->qp->qp_context;
1789 struct ib_device *ib_dev = isert_conn->cm_id->device;
1790 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1791 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
1792
1793 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1794 isert_print_wc(wc, "send");
1795 if (wc->status != IB_WC_WR_FLUSH_ERR)
1796 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1797 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
1798 return;
1799 }
1800
1801 isert_dbg("Cmd %p\n", isert_cmd);
1802
1803 switch (isert_cmd->iscsi_cmd->i_state) {
1804 case ISTATE_SEND_TASKMGTRSP:
1805 case ISTATE_SEND_LOGOUTRSP:
1806 case ISTATE_SEND_REJECT:
1807 case ISTATE_SEND_TEXTRSP:
1808 isert_unmap_tx_desc(tx_desc, ib_dev);
1809
1810 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1811 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1812 return;
1813 default:
1814 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
1815 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1816 break;
1817 }
1818}
1819
1820static int
1821isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1822{
1823 struct ib_send_wr *wr_failed;
1824 int ret;
1825
1826 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
1827 if (ret) {
1828 isert_err("ib_post_recv failed with %d\n", ret);
1829 return ret;
1830 }
1831
1832 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
1833 &wr_failed);
1834 if (ret) {
1835 isert_err("ib_post_send failed with %d\n", ret);
1836 return ret;
1837 }
1838 return ret;
1839}
1840
1841static int
1842isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1843{
1844 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1845 struct isert_conn *isert_conn = conn->context;
1846 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1847 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1848 &isert_cmd->tx_desc.iscsi_header;
1849
1850 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1851 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1852 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1853 /*
1854 * Attach SENSE DATA payload to iSCSI Response PDU
1855 */
1856 if (cmd->se_cmd.sense_buffer &&
1857 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1858 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1859 struct isert_device *device = isert_conn->device;
1860 struct ib_device *ib_dev = device->ib_device;
1861 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1862 u32 padding, pdu_len;
1863
1864 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1865 cmd->sense_buffer);
1866 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1867
1868 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1869 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1870 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1871
1872 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1873 (void *)cmd->sense_buffer, pdu_len,
1874 DMA_TO_DEVICE);
1875 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
1876 return -ENOMEM;
1877
1878 isert_cmd->pdu_buf_len = pdu_len;
1879 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1880 tx_dsg->length = pdu_len;
1881 tx_dsg->lkey = device->pd->local_dma_lkey;
1882 isert_cmd->tx_desc.num_sge = 2;
1883 }
1884
1885 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1886
1887 isert_dbg("Posting SCSI Response\n");
1888
1889 return isert_post_response(isert_conn, isert_cmd);
1890}
1891
1892static void
1893isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1894{
1895 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1896 struct isert_conn *isert_conn = conn->context;
1897
1898 spin_lock_bh(&conn->cmd_lock);
1899 if (!list_empty(&cmd->i_conn_node))
1900 list_del_init(&cmd->i_conn_node);
1901 spin_unlock_bh(&conn->cmd_lock);
1902
1903 if (cmd->data_direction == DMA_TO_DEVICE)
1904 iscsit_stop_dataout_timer(cmd);
1905 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1906}
1907
1908static enum target_prot_op
1909isert_get_sup_prot_ops(struct iscsi_conn *conn)
1910{
1911 struct isert_conn *isert_conn = conn->context;
1912 struct isert_device *device = isert_conn->device;
1913
1914 if (conn->tpg->tpg_attrib.t10_pi) {
1915 if (device->pi_capable) {
1916 isert_info("conn %p PI offload enabled\n", isert_conn);
1917 isert_conn->pi_support = true;
1918 return TARGET_PROT_ALL;
1919 }
1920 }
1921
1922 isert_info("conn %p PI offload disabled\n", isert_conn);
1923 isert_conn->pi_support = false;
1924
1925 return TARGET_PROT_NORMAL;
1926}
1927
1928static int
1929isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1930 bool nopout_response)
1931{
1932 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1933 struct isert_conn *isert_conn = conn->context;
1934 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1935
1936 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1937 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1938 &isert_cmd->tx_desc.iscsi_header,
1939 nopout_response);
1940 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1941 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1942
1943 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
1944
1945 return isert_post_response(isert_conn, isert_cmd);
1946}
1947
1948static int
1949isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1950{
1951 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1952 struct isert_conn *isert_conn = conn->context;
1953 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1954
1955 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1956 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1957 &isert_cmd->tx_desc.iscsi_header);
1958 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1959 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1960
1961 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
1962
1963 return isert_post_response(isert_conn, isert_cmd);
1964}
1965
1966static int
1967isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1968{
1969 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1970 struct isert_conn *isert_conn = conn->context;
1971 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1972
1973 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1974 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1975 &isert_cmd->tx_desc.iscsi_header);
1976 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1977 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1978
1979 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
1980
1981 return isert_post_response(isert_conn, isert_cmd);
1982}
1983
1984static int
1985isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1986{
1987 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1988 struct isert_conn *isert_conn = conn->context;
1989 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1990 struct isert_device *device = isert_conn->device;
1991 struct ib_device *ib_dev = device->ib_device;
1992 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1993 struct iscsi_reject *hdr =
1994 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
1995
1996 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1997 iscsit_build_reject(cmd, conn, hdr);
1998 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1999
2000 hton24(hdr->dlength, ISCSI_HDR_LEN);
2001 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2002 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
2003 DMA_TO_DEVICE);
2004 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
2005 return -ENOMEM;
2006 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2007 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2008 tx_dsg->length = ISCSI_HDR_LEN;
2009 tx_dsg->lkey = device->pd->local_dma_lkey;
2010 isert_cmd->tx_desc.num_sge = 2;
2011
2012 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2013
2014 isert_dbg("conn %p Posting Reject\n", isert_conn);
2015
2016 return isert_post_response(isert_conn, isert_cmd);
2017}
2018
2019static int
2020isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2021{
2022 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2023 struct isert_conn *isert_conn = conn->context;
2024 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2025 struct iscsi_text_rsp *hdr =
2026 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2027 u32 txt_rsp_len;
2028 int rc;
2029
2030 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2031 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
2032 if (rc < 0)
2033 return rc;
2034
2035 txt_rsp_len = rc;
2036 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2037
2038 if (txt_rsp_len) {
2039 struct isert_device *device = isert_conn->device;
2040 struct ib_device *ib_dev = device->ib_device;
2041 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2042 void *txt_rsp_buf = cmd->buf_ptr;
2043
2044 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2045 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2046 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
2047 return -ENOMEM;
2048
2049 isert_cmd->pdu_buf_len = txt_rsp_len;
2050 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2051 tx_dsg->length = txt_rsp_len;
2052 tx_dsg->lkey = device->pd->local_dma_lkey;
2053 isert_cmd->tx_desc.num_sge = 2;
2054 }
2055 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2056
2057 isert_dbg("conn %p Text Response\n", isert_conn);
2058
2059 return isert_post_response(isert_conn, isert_cmd);
2060}
2061
2062static inline void
2063isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2064 struct ib_sig_domain *domain)
2065{
2066 domain->sig_type = IB_SIG_TYPE_T10_DIF;
2067 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2068 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2069 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
2070 /*
2071	 * These are hard-coded for now; if in the future the target
2072	 * core wants to control them, they should be taken from
2073	 * se_cmd instead.
2074 */
2075 domain->sig.dif.apptag_check_mask = 0xffff;
2076 domain->sig.dif.app_escape = true;
2077 domain->sig.dif.ref_escape = true;
2078 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2079 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2080 domain->sig.dif.ref_remap = true;
2081};
2082
2083static int
2084isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2085{
2086 memset(sig_attrs, 0, sizeof(*sig_attrs));
2087
2088 switch (se_cmd->prot_op) {
2089 case TARGET_PROT_DIN_INSERT:
2090 case TARGET_PROT_DOUT_STRIP:
2091 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
2092 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2093 break;
2094 case TARGET_PROT_DOUT_INSERT:
2095 case TARGET_PROT_DIN_STRIP:
2096 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
2097 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2098 break;
2099 case TARGET_PROT_DIN_PASS:
2100 case TARGET_PROT_DOUT_PASS:
2101 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2102 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2103 break;
2104 default:
2105 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2106 return -EINVAL;
2107 }
2108
2109 sig_attrs->check_mask =
2110 (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2111	       (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) |
2112 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2113 return 0;
2114}
2115
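/*
 * Initialize (on first use) and post a command's rdma_rw context: pick
 * the remote VA/rkey based on the data direction, set up signature
 * attributes when T10-PI offload is in effect, then post the RDMA work
 * requests, optionally chaining @chain_wr behind them.
 */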
2116static int
2117isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
2118 struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
2119{
2120 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
2121 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
2122 u8 port_num = conn->cm_id->port_num;
2123 u64 addr;
2124 u32 rkey, offset;
2125 int ret;
2126
2127 if (cmd->ctx_init_done)
2128 goto rdma_ctx_post;
2129
2130 if (dir == DMA_FROM_DEVICE) {
2131 addr = cmd->write_va;
2132 rkey = cmd->write_stag;
2133 offset = cmd->iscsi_cmd->write_data_done;
2134 } else {
2135 addr = cmd->read_va;
2136 rkey = cmd->read_stag;
2137 offset = 0;
2138 }
2139
2140 if (isert_prot_cmd(conn, se_cmd)) {
2141 struct ib_sig_attrs sig_attrs;
2142
2143 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2144 if (ret)
2145 return ret;
2146
2147 WARN_ON_ONCE(offset);
2148 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
2149 se_cmd->t_data_sg, se_cmd->t_data_nents,
2150 se_cmd->t_prot_sg, se_cmd->t_prot_nents,
2151 &sig_attrs, addr, rkey, dir);
2152 } else {
2153 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
2154 se_cmd->t_data_sg, se_cmd->t_data_nents,
2155 offset, addr, rkey, dir);
2156 }
2157
2158 if (ret < 0) {
2159 isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
2160 return ret;
2161 }
2162
2163 cmd->ctx_init_done = true;
2164
2165rdma_ctx_post:
2166 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
2167 if (ret < 0)
2168 isert_err("Cmd: %p failed to post RDMA res\n", cmd);
2169 return ret;
2170}
2171
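/*
 * Queue Data-In for a command by posting an RDMA WRITE of the read
 * payload.  Without T10-PI offload the SCSI response send is chained
 * behind the RDMA WRITE; with it, the response is sent from
 * isert_rdma_write_done() once the signature status has been checked.
 */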
2172static int
2173isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2174{
2175 struct se_cmd *se_cmd = &cmd->se_cmd;
2176 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2177 struct isert_conn *isert_conn = conn->context;
2178 struct ib_cqe *cqe = NULL;
2179 struct ib_send_wr *chain_wr = NULL;
2180 int rc;
2181
2182 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
2183 isert_cmd, se_cmd->data_length);
2184
2185 if (isert_prot_cmd(isert_conn, se_cmd)) {
2186 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
2187 cqe = &isert_cmd->tx_desc.tx_cqe;
2188 } else {
2189 /*
2190		 * Build the SCSI response PDU and chain it after the RDMA WRITE
2191 */
2192 isert_create_send_desc(isert_conn, isert_cmd,
2193 &isert_cmd->tx_desc);
2194 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2195 &isert_cmd->tx_desc.iscsi_header);
2196 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2197 isert_init_send_wr(isert_conn, isert_cmd,
2198 &isert_cmd->tx_desc.send_wr);
2199
2200 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2201 if (rc) {
2202 isert_err("ib_post_recv failed with %d\n", rc);
2203 return rc;
2204 }
2205
2206 chain_wr = &isert_cmd->tx_desc.send_wr;
2207 }
2208
2209 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
2210 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
2211 isert_cmd, rc);
2212 return rc;
2213}
2214
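/*
 * Fetch solicited Data-Out from the initiator by posting an RDMA READ
 * into the command's scatterlist; completion is handled by
 * isert_rdma_read_done().
 */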
2215static int
2216isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2217{
2218 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2219 int ret;
2220
2221 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2222 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
2223
2224 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
2225 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
2226 &isert_cmd->tx_desc.tx_cqe, NULL);
2227
2228 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
2229 isert_cmd, ret);
2230 return ret;
2231}
2232
2233static int
2234isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2235{
2236 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2237 int ret = 0;
2238
2239 switch (state) {
2240 case ISTATE_REMOVE:
2241 spin_lock_bh(&conn->cmd_lock);
2242 list_del_init(&cmd->i_conn_node);
2243 spin_unlock_bh(&conn->cmd_lock);
2244 isert_put_cmd(isert_cmd, true);
2245 break;
2246 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2247 ret = isert_put_nopin(cmd, conn, false);
2248 break;
2249 default:
2250 isert_err("Unknown immediate state: 0x%02x\n", state);
2251 ret = -EINVAL;
2252 break;
2253 }
2254
2255 return ret;
2256}
2257
2258static int
2259isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2260{
2261 struct isert_conn *isert_conn = conn->context;
2262 int ret;
2263
2264 switch (state) {
2265 case ISTATE_SEND_LOGOUTRSP:
2266 ret = isert_put_logout_rsp(cmd, conn);
2267 if (!ret)
2268 isert_conn->logout_posted = true;
2269 break;
2270 case ISTATE_SEND_NOPIN:
2271 ret = isert_put_nopin(cmd, conn, true);
2272 break;
2273 case ISTATE_SEND_TASKMGTRSP:
2274 ret = isert_put_tm_rsp(cmd, conn);
2275 break;
2276 case ISTATE_SEND_REJECT:
2277 ret = isert_put_reject(cmd, conn);
2278 break;
2279 case ISTATE_SEND_TEXTRSP:
2280 ret = isert_put_text_rsp(cmd, conn);
2281 break;
2282 case ISTATE_SEND_STATUS:
2283 /*
2284		 * Special case for sending a non-GOOD SCSI status from TX
2285		 * thread context for a failure prior to se_cmd execution.
2286 */
2287 ret = isert_put_response(conn, cmd);
2288 break;
2289 default:
2290 isert_err("Unknown response state: 0x%02x\n", state);
2291 ret = -EINVAL;
2292 break;
2293 }
2294
2295 return ret;
2296}
2297
2298struct rdma_cm_id *
2299isert_setup_id(struct isert_np *isert_np)
2300{
2301 struct iscsi_np *np = isert_np->np;
2302 struct rdma_cm_id *id;
2303 struct sockaddr *sa;
2304 int ret;
2305
2306 sa = (struct sockaddr *)&np->np_sockaddr;
2307 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
2308
2309 id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
2310 RDMA_PS_TCP, IB_QPT_RC);
2311 if (IS_ERR(id)) {
2312 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
2313 ret = PTR_ERR(id);
2314 goto out;
2315 }
2316 isert_dbg("id %p context %p\n", id, id->context);
2317
2318 ret = rdma_bind_addr(id, sa);
2319 if (ret) {
2320 isert_err("rdma_bind_addr() failed: %d\n", ret);
2321 goto out_id;
2322 }
2323
2324 ret = rdma_listen(id, 0);
2325 if (ret) {
2326 isert_err("rdma_listen() failed: %d\n", ret);
2327 goto out_id;
2328 }
2329
2330 return id;
2331out_id:
2332 rdma_destroy_id(id);
2333out:
2334 return ERR_PTR(ret);
2335}
2336
2337static int
2338isert_setup_np(struct iscsi_np *np,
2339 struct sockaddr_storage *ksockaddr)
2340{
2341 struct isert_np *isert_np;
2342 struct rdma_cm_id *isert_lid;
2343 int ret;
2344
2345 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2346 if (!isert_np)
2347 return -ENOMEM;
2348
2349 sema_init(&isert_np->sem, 0);
2350 mutex_init(&isert_np->mutex);
2351 INIT_LIST_HEAD(&isert_np->accepted);
2352 INIT_LIST_HEAD(&isert_np->pending);
2353 isert_np->np = np;
2354
2355 /*
2356	 * Set up np->np_sockaddr from the sockaddr passed in by the
2357	 * iscsi_target_configfs.c code.
2358 */
2359 memcpy(&np->np_sockaddr, ksockaddr,
2360 sizeof(struct sockaddr_storage));
2361
2362 isert_lid = isert_setup_id(isert_np);
2363 if (IS_ERR(isert_lid)) {
2364 ret = PTR_ERR(isert_lid);
2365 goto out;
2366 }
2367
2368 isert_np->cm_id = isert_lid;
2369 np->np_context = isert_np;
2370
2371 return 0;
2372
2373out:
2374 kfree(isert_np);
2375
2376 return ret;
2377}
2378
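/*
 * Accept the incoming RDMA connection, advertising in the iSER private
 * data that zero-based VAs are not used and, if isert_conn->snd_w_inv
 * is not set, that Send-with-Invalidate will not be used either.
 */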
2379static int
2380isert_rdma_accept(struct isert_conn *isert_conn)
2381{
2382 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2383 struct rdma_conn_param cp;
2384 int ret;
2385 struct iser_cm_hdr rsp_hdr;
2386
2387 memset(&cp, 0, sizeof(struct rdma_conn_param));
2388 cp.initiator_depth = isert_conn->initiator_depth;
2389 cp.retry_count = 7;
2390 cp.rnr_retry_count = 7;
2391
2392 memset(&rsp_hdr, 0, sizeof(rsp_hdr));
2393 rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
2394 if (!isert_conn->snd_w_inv)
2395 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
2396 cp.private_data = (void *)&rsp_hdr;
2397 cp.private_data_len = sizeof(rsp_hdr);
2398
2399 ret = rdma_accept(cm_id, &cp);
2400 if (ret) {
2401 isert_err("rdma_accept() failed with: %d\n", ret);
2402 return ret;
2403 }
2404
2405 return 0;
2406}
2407
2408static int
2409isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2410{
2411 struct isert_conn *isert_conn = conn->context;
2412 int ret;
2413
2414 isert_info("before login_req comp conn: %p\n", isert_conn);
2415 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
2416 if (ret) {
2417 isert_err("isert_conn %p interrupted before got login req\n",
2418 isert_conn);
2419 return ret;
2420 }
2421 reinit_completion(&isert_conn->login_req_comp);
2422
2423 /*
2424 * For login requests after the first PDU, isert_rx_login_req() will
2425 * kick schedule_delayed_work(&conn->login_work) as the packet is
2426 * received, which turns this callback from iscsi_target_do_login_rx()
2427 * into a NOP.
2428 */
2429 if (!login->first_request)
2430 return 0;
2431
2432 isert_rx_login_req(isert_conn);
2433
2434 isert_info("before login_comp conn: %p\n", conn);
2435 ret = wait_for_completion_interruptible(&isert_conn->login_comp);
2436 if (ret)
2437 return ret;
2438
2439 isert_info("processing login->req: %p\n", login->req);
2440
2441 return 0;
2442}
2443
2444static void
2445isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2446 struct isert_conn *isert_conn)
2447{
2448 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2449 struct rdma_route *cm_route = &cm_id->route;
2450
2451 conn->login_family = np->np_sockaddr.ss_family;
2452
2453 conn->login_sockaddr = cm_route->addr.dst_addr;
2454 conn->local_sockaddr = cm_route->addr.src_addr;
2455}
2456
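/*
 * iscsit accept callback: sleep until a new connection shows up on the
 * portal's pending list, then bind it to the newly created iscsi_conn
 * and move it to the bound state.
 */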
2457static int
2458isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2459{
2460 struct isert_np *isert_np = np->np_context;
2461 struct isert_conn *isert_conn;
2462 int ret;
2463
2464accept_wait:
2465 ret = down_interruptible(&isert_np->sem);
2466 if (ret)
2467 return -ENODEV;
2468
2469 spin_lock_bh(&np->np_thread_lock);
2470 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
2471 spin_unlock_bh(&np->np_thread_lock);
2472 isert_dbg("np_thread_state %d\n",
2473 np->np_thread_state);
2474		/*
2475		 * No point in stalling here when np_thread is in
2476		 * RESET/SHUTDOWN/EXIT state - bail.
2477		 */
2478 return -ENODEV;
2479 }
2480 spin_unlock_bh(&np->np_thread_lock);
2481
2482 mutex_lock(&isert_np->mutex);
2483 if (list_empty(&isert_np->pending)) {
2484 mutex_unlock(&isert_np->mutex);
2485 goto accept_wait;
2486 }
2487 isert_conn = list_first_entry(&isert_np->pending,
2488 struct isert_conn, node);
2489 list_del_init(&isert_conn->node);
2490 mutex_unlock(&isert_np->mutex);
2491
2492 conn->context = isert_conn;
2493 isert_conn->conn = conn;
2494 isert_conn->state = ISER_CONN_BOUND;
2495
2496 isert_set_conn_info(np, conn, isert_conn);
2497
2498 isert_dbg("Processing isert_conn: %p\n", isert_conn);
2499
2500 return 0;
2501}
2502
2503static void
2504isert_free_np(struct iscsi_np *np)
2505{
2506 struct isert_np *isert_np = np->np_context;
2507 struct isert_conn *isert_conn, *n;
2508
2509 if (isert_np->cm_id)
2510 rdma_destroy_id(isert_np->cm_id);
2511
2512 /*
2513	 * FIXME: At this point we have no good way to ensure that there
2514	 * are no hanging connections that completed RDMA establishment
2515	 * but never started the iSCSI login process.  Work around this
2516	 * by cleaning up whatever piled up on the accepted and pending
2517	 * lists.
2518 */
2519 mutex_lock(&isert_np->mutex);
2520 if (!list_empty(&isert_np->pending)) {
2521 isert_info("Still have isert pending connections\n");
2522 list_for_each_entry_safe(isert_conn, n,
2523 &isert_np->pending,
2524 node) {
2525 isert_info("cleaning isert_conn %p state (%d)\n",
2526 isert_conn, isert_conn->state);
2527 isert_connect_release(isert_conn);
2528 }
2529 }
2530
2531 if (!list_empty(&isert_np->accepted)) {
2532 isert_info("Still have isert accepted connections\n");
2533 list_for_each_entry_safe(isert_conn, n,
2534 &isert_np->accepted,
2535 node) {
2536 isert_info("cleaning isert_conn %p state (%d)\n",
2537 isert_conn, isert_conn->state);
2538 isert_connect_release(isert_conn);
2539 }
2540 }
2541 mutex_unlock(&isert_np->mutex);
2542
2543 np->np_context = NULL;
2544 kfree(isert_np);
2545}
2546
2547static void isert_release_work(struct work_struct *work)
2548{
2549 struct isert_conn *isert_conn = container_of(work,
2550 struct isert_conn,
2551 release_work);
2552
2553 isert_info("Starting release conn %p\n", isert_conn);
2554
2555 mutex_lock(&isert_conn->mutex);
2556 isert_conn->state = ISER_CONN_DOWN;
2557 mutex_unlock(&isert_conn->mutex);
2558
2559 isert_info("Destroying conn %p\n", isert_conn);
2560 isert_put_conn(isert_conn);
2561}
2562
2563static void
2564isert_wait4logout(struct isert_conn *isert_conn)
2565{
2566 struct iscsi_conn *conn = isert_conn->conn;
2567
2568 isert_info("conn %p\n", isert_conn);
2569
2570 if (isert_conn->logout_posted) {
2571 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
2572 wait_for_completion_timeout(&conn->conn_logout_comp,
2573 SECONDS_FOR_LOGOUT_COMP * HZ);
2574 }
2575}
2576
2577static void
2578isert_wait4cmds(struct iscsi_conn *conn)
2579{
2580 isert_info("iscsi_conn %p\n", conn);
2581
2582 if (conn->sess) {
2583 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
2584 target_wait_for_sess_cmds(conn->sess->se_sess);
2585 }
2586}
2587
2588/**
2589 * isert_put_unsol_pending_cmds() - Drop commands waiting for
2590 *     unsolicited dataout
2591 * @conn: iscsi connection
2592 *
2593 * We might still have commands waiting for unsolicited dataout
2594 * messages. We must put the extra reference on those before
2595 * blocking on target_wait_for_sess_cmds().
2596 */
2597static void
2598isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
2599{
2600 struct iscsi_cmd *cmd, *tmp;
2601 static LIST_HEAD(drop_cmd_list);
2602
2603 spin_lock_bh(&conn->cmd_lock);
2604 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
2605 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
2606 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
2607 (cmd->write_data_done < cmd->se_cmd.data_length))
2608 list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
2609 }
2610 spin_unlock_bh(&conn->cmd_lock);
2611
2612 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
2613 list_del_init(&cmd->i_conn_node);
2614 if (cmd->i_state != ISTATE_REMOVE) {
2615 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2616
2617 isert_info("conn %p dropping cmd %p\n", conn, cmd);
2618 isert_put_cmd(isert_cmd, true);
2619 }
2620 }
2621}
2622
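/*
 * iscsit connection shutdown: terminate the connection, drain the QP,
 * drop commands still waiting for unsolicited data, wait for outstanding
 * target-core commands and for the logout response to complete, then
 * queue the final release work.
 */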
2623static void isert_wait_conn(struct iscsi_conn *conn)
2624{
2625 struct isert_conn *isert_conn = conn->context;
2626
2627 isert_info("Starting conn %p\n", isert_conn);
2628
2629 mutex_lock(&isert_conn->mutex);
2630 isert_conn_terminate(isert_conn);
2631 mutex_unlock(&isert_conn->mutex);
2632
2633 ib_drain_qp(isert_conn->qp);
2634 isert_put_unsol_pending_cmds(conn);
2635 isert_wait4cmds(conn);
2636 isert_wait4logout(isert_conn);
2637
2638 queue_work(isert_release_wq, &isert_conn->release_work);
2639}
2640
2641static void isert_free_conn(struct iscsi_conn *conn)
2642{
2643 struct isert_conn *isert_conn = conn->context;
2644
2645 ib_drain_qp(isert_conn->qp);
2646 isert_put_conn(isert_conn);
2647}
2648
2649static void isert_get_rx_pdu(struct iscsi_conn *conn)
2650{
2651 struct completion comp;
2652
2653 init_completion(&comp);
2654
2655 wait_for_completion_interruptible(&comp);
2656}
2657
2658static struct iscsit_transport iser_target_transport = {
2659 .name = "IB/iSER",
2660 .transport_type = ISCSI_INFINIBAND,
2661 .rdma_shutdown = true,
2662 .priv_size = sizeof(struct isert_cmd),
2663 .owner = THIS_MODULE,
2664 .iscsit_setup_np = isert_setup_np,
2665 .iscsit_accept_np = isert_accept_np,
2666 .iscsit_free_np = isert_free_np,
2667 .iscsit_wait_conn = isert_wait_conn,
2668 .iscsit_free_conn = isert_free_conn,
2669 .iscsit_get_login_rx = isert_get_login_rx,
2670 .iscsit_put_login_tx = isert_put_login_tx,
2671 .iscsit_immediate_queue = isert_immediate_queue,
2672 .iscsit_response_queue = isert_response_queue,
2673 .iscsit_get_dataout = isert_get_dataout,
2674 .iscsit_queue_data_in = isert_put_datain,
2675 .iscsit_queue_status = isert_put_response,
2676 .iscsit_aborted_task = isert_aborted_task,
2677 .iscsit_get_rx_pdu = isert_get_rx_pdu,
2678 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
2679};
2680
2681static int __init isert_init(void)
2682{
2683 int ret;
2684
2685 isert_comp_wq = alloc_workqueue("isert_comp_wq",
2686 WQ_UNBOUND | WQ_HIGHPRI, 0);
2687 if (!isert_comp_wq) {
2688 isert_err("Unable to allocate isert_comp_wq\n");
2689 return -ENOMEM;
2690 }
2691
2692 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
2693 WQ_UNBOUND_MAX_ACTIVE);
2694 if (!isert_release_wq) {
2695 isert_err("Unable to allocate isert_release_wq\n");
2696 ret = -ENOMEM;
2697 goto destroy_comp_wq;
2698 }
2699
2700 iscsit_register_transport(&iser_target_transport);
2701 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
2702
2703 return 0;
2704
2705destroy_comp_wq:
2706 destroy_workqueue(isert_comp_wq);
2707
2708 return ret;
2709}
2710
2711static void __exit isert_exit(void)
2712{
2713 flush_scheduled_work();
2714 destroy_workqueue(isert_release_wq);
2715 destroy_workqueue(isert_comp_wq);
2716 iscsit_unregister_transport(&iser_target_transport);
2717 isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
2718}
2719
2720MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2721MODULE_AUTHOR("nab@Linux-iSCSI.org");
2722MODULE_LICENSE("GPL");
2723
2724module_init(isert_init);
2725module_exit(isert_exit);