1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * iSCSI lib functions
4 *
5 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
6 * Copyright (C) 2004 - 2006 Mike Christie
7 * Copyright (C) 2004 - 2005 Dmitry Yusupov
8 * Copyright (C) 2004 - 2005 Alex Aizman
9 * maintained by open-iscsi@googlegroups.com
10 */
11#include <linux/types.h>
12#include <linux/kfifo.h>
13#include <linux/delay.h>
14#include <linux/log2.h>
15#include <linux/slab.h>
16#include <linux/sched/signal.h>
17#include <linux/module.h>
18#include <asm/unaligned.h>
19#include <net/tcp.h>
20#include <scsi/scsi_cmnd.h>
21#include <scsi/scsi_device.h>
22#include <scsi/scsi_eh.h>
23#include <scsi/scsi_tcq.h>
24#include <scsi/scsi_host.h>
25#include <scsi/scsi.h>
26#include <scsi/iscsi_proto.h>
27#include <scsi/scsi_transport.h>
28#include <scsi/scsi_transport_iscsi.h>
29#include <scsi/libiscsi.h>
30#include <trace/events/iscsi.h>
31
32static int iscsi_dbg_lib_conn;
33module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
34 S_IRUGO | S_IWUSR);
35MODULE_PARM_DESC(debug_libiscsi_conn,
36 "Turn on debugging for connections in libiscsi module. "
37 "Set to 1 to turn on, and zero to turn off. Default is off.");
38
39static int iscsi_dbg_lib_session;
40module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
41 S_IRUGO | S_IWUSR);
42MODULE_PARM_DESC(debug_libiscsi_session,
43 "Turn on debugging for sessions in libiscsi module. "
44 "Set to 1 to turn on, and zero to turn off. Default is off.");
45
46static int iscsi_dbg_lib_eh;
47module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
48 S_IRUGO | S_IWUSR);
49MODULE_PARM_DESC(debug_libiscsi_eh,
50 "Turn on debugging for error handling in libiscsi module. "
51 "Set to 1 to turn on, and zero to turn off. Default is off.");
52
53#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \
54 do { \
55 if (iscsi_dbg_lib_conn) \
56 iscsi_conn_printk(KERN_INFO, _conn, \
57 "%s " dbg_fmt, \
58 __func__, ##arg); \
59 iscsi_dbg_trace(trace_iscsi_dbg_conn, \
60 &(_conn)->cls_conn->dev, \
61 "%s " dbg_fmt, __func__, ##arg);\
62 } while (0);
63
64#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
65 do { \
66 if (iscsi_dbg_lib_session) \
67 iscsi_session_printk(KERN_INFO, _session, \
68 "%s " dbg_fmt, \
69 __func__, ##arg); \
70 iscsi_dbg_trace(trace_iscsi_dbg_session, \
71 &(_session)->cls_session->dev, \
72 "%s " dbg_fmt, __func__, ##arg); \
73 } while (0);
74
75#define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \
76 do { \
77 if (iscsi_dbg_lib_eh) \
78 iscsi_session_printk(KERN_INFO, _session, \
79 "%s " dbg_fmt, \
80 __func__, ##arg); \
81 iscsi_dbg_trace(trace_iscsi_dbg_eh, \
82 &(_session)->cls_session->dev, \
83 "%s " dbg_fmt, __func__, ##arg); \
84 } while (0);
85
86inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
87{
88 struct Scsi_Host *shost = conn->session->host;
89 struct iscsi_host *ihost = shost_priv(shost);
90
91 if (ihost->workq)
92 queue_work(ihost->workq, &conn->xmitwork);
93}
94EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
95
96static void __iscsi_update_cmdsn(struct iscsi_session *session,
97 uint32_t exp_cmdsn, uint32_t max_cmdsn)
98{
99 /*
100 * standard specifies this check for when to update expected and
101 * max sequence numbers
102 */
103 if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
104 return;
105
106 if (exp_cmdsn != session->exp_cmdsn &&
107 !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
108 session->exp_cmdsn = exp_cmdsn;
109
110 if (max_cmdsn != session->max_cmdsn &&
111 !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
112 session->max_cmdsn = max_cmdsn;
113}
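/*
 * Note on the comparisons above: iscsi_sna_lt()/iscsi_sna_lte() implement
 * RFC 1982 style 32-bit serial number arithmetic (see the helpers declared
 * with this library in libiscsi.h), so the window checks keep working when
 * CmdSN wraps past 0xffffffff.  For example:
 *
 *	iscsi_sna_lt(0xfffffffe, 0x00000001) is true  (1 is "newer"),
 *	iscsi_sna_lt(0x00000001, 0xfffffffe) is false,
 *
 * which is why plain "<" / ">" are never used on CmdSN/StatSN values.
 */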
114
115void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
116{
117 __iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
118 be32_to_cpu(hdr->max_cmdsn));
119}
120EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
121
122/**
123 * iscsi_prep_data_out_pdu - initialize Data-Out
124 * @task: scsi command task
125 * @r2t: R2T info
126 * @hdr: iscsi data out pdu
127 *
128 * Notes:
129 * Initializes the Data-Out within this R2T sequence and finds the
130 * proper data_offset within this SCSI command.
131 *
132 * This function is called with connection lock taken.
133 **/
134void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
135 struct iscsi_data *hdr)
136{
137 struct iscsi_conn *conn = task->conn;
138 unsigned int left = r2t->data_length - r2t->sent;
139
140 task->hdr_len = sizeof(struct iscsi_data);
141
142 memset(hdr, 0, sizeof(struct iscsi_data));
143 hdr->ttt = r2t->ttt;
144 hdr->datasn = cpu_to_be32(r2t->datasn);
145 r2t->datasn++;
146 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
147 hdr->lun = task->lun;
148 hdr->itt = task->hdr_itt;
149 hdr->exp_statsn = r2t->exp_statsn;
150 hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
151 if (left > conn->max_xmit_dlength) {
152 hton24(hdr->dlength, conn->max_xmit_dlength);
153 r2t->data_count = conn->max_xmit_dlength;
154 hdr->flags = 0;
155 } else {
156 hton24(hdr->dlength, left);
157 r2t->data_count = left;
158 hdr->flags = ISCSI_FLAG_CMD_FINAL;
159 }
160 conn->dataout_pdus_cnt++;
161}
162EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
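/*
 * Usage sketch (illustrative only, modeled on how a software transport such
 * as iscsi_tcp drives its R2T transmit path; the loop below is an
 * assumption, not code from this file):
 *
 *	struct iscsi_data hdr;
 *
 *	while (r2t->sent < r2t->data_length) {
 *		iscsi_prep_data_out_pdu(task, r2t, &hdr);
 *		send hdr plus r2t->data_count payload bytes taken from the
 *		scatterlist at offset r2t->data_offset + r2t->sent, then:
 *		r2t->sent += r2t->data_count;
 *	}
 */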
163
164static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
165{
166 unsigned exp_len = task->hdr_len + len;
167
168 if (exp_len > task->hdr_max) {
169 WARN_ON(1);
170 return -EINVAL;
171 }
172
173 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
174 task->hdr_len = exp_len;
175 return 0;
176}
177
178/*
179 * make an extended cdb AHS
180 */
181static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
182{
183 struct scsi_cmnd *cmd = task->sc;
184 unsigned rlen, pad_len;
185 unsigned short ahslength;
186 struct iscsi_ecdb_ahdr *ecdb_ahdr;
187 int rc;
188
189 ecdb_ahdr = iscsi_next_hdr(task);
190 rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
191
192 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
193 ahslength = rlen + sizeof(ecdb_ahdr->reserved);
194
195 pad_len = iscsi_padding(rlen);
196
197 rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
198 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
199 if (rc)
200 return rc;
201
202 if (pad_len)
203 memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
204
205 ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
206 ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
207 ecdb_ahdr->reserved = 0;
208 memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
209
210 ISCSI_DBG_SESSION(task->conn->session,
211 "iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
212 "rlen %d pad_len %d ahs_length %d iscsi_headers_size "
213 "%u\n", cmd->cmd_len, rlen, pad_len, ahslength,
214 task->hdr_len);
215 return 0;
216}
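/*
 * Worked example (illustrative): for a 32-byte CDB the first ISCSI_CDB_SIZE
 * (16) bytes stay in the basic header and rlen = 16 bytes spill into this
 * AHS, so ahslength = rlen + sizeof(ecdb_ahdr->reserved), pad_len rounds
 * rlen up to the next 4-byte boundary (0 here), and iscsi_add_hdr() grows
 * task->hdr_len by the ahslength/ahstype fields plus ahslength plus pad_len.
 */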
217
218/**
219 * iscsi_check_tmf_restrictions - check if a task is affected by TMF
220 * @task: iscsi task
221 * @opcode: opcode to check for
222 *
223 * While a TMF is outstanding, each task has to be checked to see whether it is affected.
224 * All unrelated I/O can be passed through, but I/O to the
225 * affected LUN should be restricted.
226 * If 'fast_abort' is set we won't be sending any I/O to the
227 * affected LUN.
228 * Otherwise the target is waiting for all TTTs to be completed,
229 * so we have to send all outstanding Data-Out PDUs to the target.
230 */
231static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
232{
233 struct iscsi_session *session = task->conn->session;
234 struct iscsi_tm *tmf = &session->tmhdr;
235 u64 hdr_lun;
236
237 if (session->tmf_state == TMF_INITIAL)
238 return 0;
239
240 if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
241 return 0;
242
243 switch (ISCSI_TM_FUNC_VALUE(tmf)) {
244 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
245 /*
246 * Allow PDUs for unrelated LUNs
247 */
248 hdr_lun = scsilun_to_int(&tmf->lun);
249 if (hdr_lun != task->sc->device->lun)
250 return 0;
251 fallthrough;
252 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
253 /*
254 * Fail all SCSI cmd PDUs
255 */
256 if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
257 iscsi_session_printk(KERN_INFO, session,
258 "task [op %x itt 0x%x/0x%x] rejected.\n",
259 opcode, task->itt, task->hdr_itt);
260 return -EACCES;
261 }
262 /*
263 * And also all data-out PDUs in response to R2T
264 * if fast_abort is set.
265 */
266 if (session->fast_abort) {
267 iscsi_session_printk(KERN_INFO, session,
268 "task [op %x itt 0x%x/0x%x] fast abort.\n",
269 opcode, task->itt, task->hdr_itt);
270 return -EACCES;
271 }
272 break;
273 case ISCSI_TM_FUNC_ABORT_TASK:
274 /*
275 * the caller has already checked if the task
276 * they want to abort was in the pending queue so if
277 * we are here the cmd pdu has gone out already, and
278 * we will only hit this for data-outs
279 */
280 if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
281 task->hdr_itt == tmf->rtt) {
282 ISCSI_DBG_SESSION(session,
283 "Preventing task %x/%x from sending "
284 "data-out due to abort task in "
285 "progress\n", task->itt,
286 task->hdr_itt);
287 return -EACCES;
288 }
289 break;
290 }
291
292 return 0;
293}
294
295/**
296 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
297 * @task: iscsi task
298 *
299 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
300 * fields like dlength or final based on how much data it sends
301 */
302static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
303{
304 struct iscsi_conn *conn = task->conn;
305 struct iscsi_session *session = conn->session;
306 struct scsi_cmnd *sc = task->sc;
307 struct iscsi_scsi_req *hdr;
308 unsigned hdrlength, cmd_len, transfer_length;
309 itt_t itt;
310 int rc;
311
312 rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
313 if (rc)
314 return rc;
315
316 if (conn->session->tt->alloc_pdu) {
317 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
318 if (rc)
319 return rc;
320 }
321 hdr = (struct iscsi_scsi_req *)task->hdr;
322 itt = hdr->itt;
323 memset(hdr, 0, sizeof(*hdr));
324
325 if (session->tt->parse_pdu_itt)
326 hdr->itt = task->hdr_itt = itt;
327 else
328 hdr->itt = task->hdr_itt = build_itt(task->itt,
329 task->conn->session->age);
330 task->hdr_len = 0;
331 rc = iscsi_add_hdr(task, sizeof(*hdr));
332 if (rc)
333 return rc;
334 hdr->opcode = ISCSI_OP_SCSI_CMD;
335 hdr->flags = ISCSI_ATTR_SIMPLE;
336 int_to_scsilun(sc->device->lun, &hdr->lun);
337 task->lun = hdr->lun;
338 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
339 cmd_len = sc->cmd_len;
340 if (cmd_len < ISCSI_CDB_SIZE)
341 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
342 else if (cmd_len > ISCSI_CDB_SIZE) {
343 rc = iscsi_prep_ecdb_ahs(task);
344 if (rc)
345 return rc;
346 cmd_len = ISCSI_CDB_SIZE;
347 }
348 memcpy(hdr->cdb, sc->cmnd, cmd_len);
349
350 task->imm_count = 0;
351 if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
352 task->protected = true;
353
354 transfer_length = scsi_transfer_length(sc);
355 hdr->data_length = cpu_to_be32(transfer_length);
356 if (sc->sc_data_direction == DMA_TO_DEVICE) {
357 struct iscsi_r2t_info *r2t = &task->unsol_r2t;
358
359 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
360 /*
361 * Write counters:
362 *
363 * imm_count bytes to be sent right after
364 * SCSI PDU Header
365 *
366 * unsol_count bytes(as Data-Out) to be sent
367 * without R2T ack right after
368 * immediate data
369 *
370 * r2t data_length bytes to be sent via R2T ack's
371 *
372 * pad_count bytes to be sent as zero-padding
373 */
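		/*
		 * Worked example with hypothetical negotiated values (for
		 * illustration only): ImmediateData=Yes, InitialR2T=No,
		 * FirstBurstLength 64k, MaxXmitDataSegmentLength 8k and a
		 * 256k write give imm_count = min(64k, 8k) = 8k carried in
		 * this PDU, an unsolicited burst of data_length =
		 * min(64k, 256k) - 8k = 56k starting at data_offset 8k, and
		 * the remaining 192k sent only in response to target R2Ts.
		 */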
374 memset(r2t, 0, sizeof(*r2t));
375
376 if (session->imm_data_en) {
377 if (transfer_length >= session->first_burst)
378 task->imm_count = min(session->first_burst,
379 conn->max_xmit_dlength);
380 else
381 task->imm_count = min(transfer_length,
382 conn->max_xmit_dlength);
383 hton24(hdr->dlength, task->imm_count);
384 } else
385 zero_data(hdr->dlength);
386
387 if (!session->initial_r2t_en) {
388 r2t->data_length = min(session->first_burst,
389 transfer_length) -
390 task->imm_count;
391 r2t->data_offset = task->imm_count;
392 r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
393 r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
394 }
395
396 if (!task->unsol_r2t.data_length)
397 /* No unsolicited Data-Outs */
398 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
399 } else {
400 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
401 zero_data(hdr->dlength);
402
403 if (sc->sc_data_direction == DMA_FROM_DEVICE)
404 hdr->flags |= ISCSI_FLAG_CMD_READ;
405 }
406
407 /* calculate size of additional header segments (AHSs) */
408 hdrlength = task->hdr_len - sizeof(*hdr);
409
410 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
411 hdrlength /= ISCSI_PAD_LEN;
412
413 WARN_ON(hdrlength >= 256);
414 hdr->hlength = hdrlength & 0xFF;
415 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
416
417 if (session->tt->init_task && session->tt->init_task(task))
418 return -EIO;
419
420 task->state = ISCSI_TASK_RUNNING;
421 session->cmdsn++;
422
423 conn->scsicmd_pdus_cnt++;
424 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
425 "itt 0x%x len %d cmdsn %d win %d]\n",
426 sc->sc_data_direction == DMA_TO_DEVICE ?
427 "write" : "read", conn->id, sc, sc->cmnd[0],
428 task->itt, transfer_length,
429 session->cmdsn,
430 session->max_cmdsn - session->exp_cmdsn + 1);
431 return 0;
432}
433
434/**
435 * iscsi_free_task - free a task
436 * @task: iscsi cmd task
437 *
438 * Must be called with session back_lock.
439 * This function returns the scsi command to scsi-ml or cleans
440 * up mgmt tasks then returns the task to the pool.
441 */
442static void iscsi_free_task(struct iscsi_task *task)
443{
444 struct iscsi_conn *conn = task->conn;
445 struct iscsi_session *session = conn->session;
446 struct scsi_cmnd *sc = task->sc;
447 int oldstate = task->state;
448
449 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
450 task->itt, task->state, task->sc);
451
452 session->tt->cleanup_task(task);
453 task->state = ISCSI_TASK_FREE;
454 task->sc = NULL;
455 /*
456 * login task is preallocated so do not free
457 */
458 if (conn->login_task == task)
459 return;
460
461 kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
462
463 if (sc) {
464 /* SCSI eh reuses commands to verify us */
465 sc->SCp.ptr = NULL;
466 /*
467 * queue command may call this to free the task, so
468 * it will decide how to return sc to scsi-ml.
469 */
470 if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
471 sc->scsi_done(sc);
472 }
473}
474
475void __iscsi_get_task(struct iscsi_task *task)
476{
477 refcount_inc(&task->refcount);
478}
479EXPORT_SYMBOL_GPL(__iscsi_get_task);
480
481void __iscsi_put_task(struct iscsi_task *task)
482{
483 if (refcount_dec_and_test(&task->refcount))
484 iscsi_free_task(task);
485}
486EXPORT_SYMBOL_GPL(__iscsi_put_task);
487
488void iscsi_put_task(struct iscsi_task *task)
489{
490 struct iscsi_session *session = task->conn->session;
491
492 /* regular RX path uses back_lock */
493 spin_lock_bh(&session->back_lock);
494 __iscsi_put_task(task);
495 spin_unlock_bh(&session->back_lock);
496}
497EXPORT_SYMBOL_GPL(iscsi_put_task);
498
499/**
500 * iscsi_complete_task - finish a task
501 * @task: iscsi cmd task
502 * @state: state to complete task with
503 *
504 * Must be called with session back_lock.
505 */
506static void iscsi_complete_task(struct iscsi_task *task, int state)
507{
508 struct iscsi_conn *conn = task->conn;
509
510 ISCSI_DBG_SESSION(conn->session,
511 "complete task itt 0x%x state %d sc %p\n",
512 task->itt, task->state, task->sc);
513 if (task->state == ISCSI_TASK_COMPLETED ||
514 task->state == ISCSI_TASK_ABRT_TMF ||
515 task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
516 task->state == ISCSI_TASK_REQUEUE_SCSIQ)
517 return;
518 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
519 task->state = state;
520
521 if (READ_ONCE(conn->ping_task) == task)
522 WRITE_ONCE(conn->ping_task, NULL);
523
524 /* release get from queueing */
525 __iscsi_put_task(task);
526}
527
528/**
529 * iscsi_complete_scsi_task - finish scsi task normally
530 * @task: iscsi task for scsi cmd
531 * @exp_cmdsn: expected cmd sn in cpu format
532 * @max_cmdsn: max cmd sn in cpu format
533 *
534 * This is used when drivers do not need or cannot perform
535 * lower level pdu processing.
536 *
537 * Called with session back_lock
538 */
539void iscsi_complete_scsi_task(struct iscsi_task *task,
540 uint32_t exp_cmdsn, uint32_t max_cmdsn)
541{
542 struct iscsi_conn *conn = task->conn;
543
544 ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);
545
546 conn->last_recv = jiffies;
547 __iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
548 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
549}
550EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
551
552/*
553 * Must be called with back and frwd lock
554 */
555static bool cleanup_queued_task(struct iscsi_task *task)
556{
557 struct iscsi_conn *conn = task->conn;
558 bool early_complete = false;
559
560 /* Bad target might have completed task while it was still running */
561 if (task->state == ISCSI_TASK_COMPLETED)
562 early_complete = true;
563
564 if (!list_empty(&task->running)) {
565 list_del_init(&task->running);
566 /*
567 * If it's on a list but still running, this could be from
568 * a bad target sending a rsp early, cleanup from a TMF, or
569 * session recovery.
570 */
571 if (task->state == ISCSI_TASK_RUNNING ||
572 task->state == ISCSI_TASK_COMPLETED)
573 __iscsi_put_task(task);
574 }
575
576 if (conn->session->running_aborted_task == task) {
577 conn->session->running_aborted_task = NULL;
578 __iscsi_put_task(task);
579 }
580
581 if (conn->task == task) {
582 conn->task = NULL;
583 __iscsi_put_task(task);
584 }
585
586 return early_complete;
587}
588
589/*
590 * session frwd lock must be held and if not called for a task that is still
591 * pending or from the xmit thread, then xmit thread must be suspended
592 */
593static void fail_scsi_task(struct iscsi_task *task, int err)
594{
595 struct iscsi_conn *conn = task->conn;
596 struct scsi_cmnd *sc;
597 int state;
598
599 spin_lock_bh(&conn->session->back_lock);
600 if (cleanup_queued_task(task)) {
601 spin_unlock_bh(&conn->session->back_lock);
602 return;
603 }
604
605 if (task->state == ISCSI_TASK_PENDING) {
606 /*
607 * cmd never made it to the xmit thread, so we should not count
608 * the cmd in the sequencing
609 */
610 conn->session->queued_cmdsn--;
611 /* it was never sent so just complete like normal */
612 state = ISCSI_TASK_COMPLETED;
613 } else if (err == DID_TRANSPORT_DISRUPTED)
614 state = ISCSI_TASK_ABRT_SESS_RECOV;
615 else
616 state = ISCSI_TASK_ABRT_TMF;
617
618 sc = task->sc;
619 sc->result = err << 16;
620 scsi_set_resid(sc, scsi_bufflen(sc));
621 iscsi_complete_task(task, state);
622 spin_unlock_bh(&conn->session->back_lock);
623}
624
625static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
626 struct iscsi_task *task)
627{
628 struct iscsi_session *session = conn->session;
629 struct iscsi_hdr *hdr = task->hdr;
630 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
631 uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
632
633 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
634 return -ENOTCONN;
635
636 if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
637 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
638 /*
639 * pre-format CmdSN for outgoing PDU.
640 */
641 nop->cmdsn = cpu_to_be32(session->cmdsn);
642 if (hdr->itt != RESERVED_ITT) {
643 /*
644 * TODO: We always use immediate for normal session pdus.
645 * If we start to send tmfs or nops as non-immediate then
646 * we should start checking the cmdsn numbers for mgmt tasks.
647 *
648 * During discovery sessions iscsid sends TEXT as non immediate,
649 * but we always only send one PDU at a time.
650 */
651 if (conn->c_stage == ISCSI_CONN_STARTED &&
652 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
653 session->queued_cmdsn++;
654 session->cmdsn++;
655 }
656 }
657
658 if (session->tt->init_task && session->tt->init_task(task))
659 return -EIO;
660
661 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
662 session->state = ISCSI_STATE_LOGGING_OUT;
663
664 task->state = ISCSI_TASK_RUNNING;
665 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
666 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
667 hdr->itt, task->data_count);
668 return 0;
669}
670
671static struct iscsi_task *
672__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
673 char *data, uint32_t data_size)
674{
675 struct iscsi_session *session = conn->session;
676 struct iscsi_host *ihost = shost_priv(session->host);
677 uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
678 struct iscsi_task *task;
679 itt_t itt;
680
681 if (session->state == ISCSI_STATE_TERMINATE)
682 return NULL;
683
684 if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
685 /*
686 * Login and Text are sent serially, in
687 * request-followed-by-response sequence.
688 * Same task can be used. Same ITT must be used.
689 * Note that login_task is preallocated at conn_create().
690 */
691 if (conn->login_task->state != ISCSI_TASK_FREE) {
692 iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
693 "progress. Cannot start new task.\n");
694 return NULL;
695 }
696
697 if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
698 iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
699 return NULL;
700 }
701
702 task = conn->login_task;
703 } else {
704 if (session->state != ISCSI_STATE_LOGGED_IN)
705 return NULL;
706
707 if (data_size != 0) {
708 iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
709 return NULL;
710 }
711
712 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
713 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
714
715 if (!kfifo_out(&session->cmdpool.queue,
716 (void*)&task, sizeof(void*)))
717 return NULL;
718 }
719 /*
720 * The reference is released in the pdu completion path for tasks we
721 * expect a response to, and by the LLD once it has transmitted the
722 * task for pdus we do not expect a response to.
723 */
724 refcount_set(&task->refcount, 1);
725 task->conn = conn;
726 task->sc = NULL;
727 INIT_LIST_HEAD(&task->running);
728 task->state = ISCSI_TASK_PENDING;
729
730 if (data_size) {
731 memcpy(task->data, data, data_size);
732 task->data_count = data_size;
733 } else
734 task->data_count = 0;
735
736 if (conn->session->tt->alloc_pdu) {
737 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
738 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
739 "pdu for mgmt task.\n");
740 goto free_task;
741 }
742 }
743
744 itt = task->hdr->itt;
745 task->hdr_len = sizeof(struct iscsi_hdr);
746 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
747
748 if (hdr->itt != RESERVED_ITT) {
749 if (session->tt->parse_pdu_itt)
750 task->hdr->itt = itt;
751 else
752 task->hdr->itt = build_itt(task->itt,
753 task->conn->session->age);
754 }
755
756 if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
757 WRITE_ONCE(conn->ping_task, task);
758
759 if (!ihost->workq) {
760 if (iscsi_prep_mgmt_task(conn, task))
761 goto free_task;
762
763 if (session->tt->xmit_task(task))
764 goto free_task;
765 } else {
766 list_add_tail(&task->running, &conn->mgmtqueue);
767 iscsi_conn_queue_work(conn);
768 }
769
770 return task;
771
772free_task:
773 /* regular RX path uses back_lock */
774 spin_lock(&session->back_lock);
775 __iscsi_put_task(task);
776 spin_unlock(&session->back_lock);
777 return NULL;
778}
779
780int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
781 char *data, uint32_t data_size)
782{
783 struct iscsi_conn *conn = cls_conn->dd_data;
784 struct iscsi_session *session = conn->session;
785 int err = 0;
786
787 spin_lock_bh(&session->frwd_lock);
788 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
789 err = -EPERM;
790 spin_unlock_bh(&session->frwd_lock);
791 return err;
792}
793EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
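/*
 * Minimal usage sketch (illustrative; it mirrors what iscsi_send_nopout()
 * below builds internally): a caller with a class connection can push a
 * Nop-Out through this interface and let libiscsi assign the ITT:
 *
 *	struct iscsi_nopout hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
 *	hdr.flags = ISCSI_FLAG_CMD_FINAL;
 *	hdr.ttt = RESERVED_ITT;
 *	if (iscsi_conn_send_pdu(cls_conn, (struct iscsi_hdr *)&hdr, NULL, 0))
 *		handle the -EPERM failure;
 */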
794
795/**
796 * iscsi_scsi_cmd_rsp - SCSI Command Response processing
797 * @conn: iscsi connection
798 * @hdr: iscsi header
799 * @task: scsi command task
800 * @data: cmd data buffer
801 * @datalen: len of buffer
802 *
803 * iscsi_scsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
804 * then completes the command and task. Called under back_lock
805 **/
806static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
807 struct iscsi_task *task, char *data,
808 int datalen)
809{
810 struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
811 struct iscsi_session *session = conn->session;
812 struct scsi_cmnd *sc = task->sc;
813
814 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
815 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
816
817 sc->result = (DID_OK << 16) | rhdr->cmd_status;
818
819 if (task->protected) {
820 sector_t sector;
821 u8 ascq;
822
823 /**
824 * Transports that didn't implement check_protection
825 * callback but still published T10-PI support to scsi-mid
826 * deserve this BUG_ON.
827 **/
828 BUG_ON(!session->tt->check_protection);
829
830 ascq = session->tt->check_protection(task, &sector);
831 if (ascq) {
832 scsi_build_sense(sc, 1, ILLEGAL_REQUEST, 0x10, ascq);
833 scsi_set_sense_information(sc->sense_buffer,
834 SCSI_SENSE_BUFFERSIZE,
835 sector);
836 goto out;
837 }
838 }
839
840 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
841 sc->result = DID_ERROR << 16;
842 goto out;
843 }
844
845 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
846 uint16_t senselen;
847
848 if (datalen < 2) {
849invalid_datalen:
850 iscsi_conn_printk(KERN_ERR, conn,
851 "Got CHECK_CONDITION but invalid data "
852 "buffer size of %d\n", datalen);
853 sc->result = DID_BAD_TARGET << 16;
854 goto out;
855 }
856
857 senselen = get_unaligned_be16(data);
858 if (datalen < senselen)
859 goto invalid_datalen;
860
861 memcpy(sc->sense_buffer, data + 2,
862 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
863 ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n",
864 min_t(uint16_t, senselen,
865 SCSI_SENSE_BUFFERSIZE));
866 }
867
868 if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
869 ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
870 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
871 }
872
873 if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
874 ISCSI_FLAG_CMD_OVERFLOW)) {
875 int res_count = be32_to_cpu(rhdr->residual_count);
876
877 if (res_count > 0 &&
878 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
879 res_count <= scsi_bufflen(sc)))
880 /* write side for bidi or uni-io set_resid */
881 scsi_set_resid(sc, res_count);
882 else
883 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
884 }
885out:
886 ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
887 sc, sc->result, task->itt);
888 conn->scsirsp_pdus_cnt++;
889 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
890}
891
892/**
893 * iscsi_data_in_rsp - SCSI Data-In Response processing
894 * @conn: iscsi connection
895 * @hdr: iscsi pdu
896 * @task: scsi command task
897 *
898 * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received
899 * then completes the command and task. Called under back_lock
900 **/
901static void
902iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
903 struct iscsi_task *task)
904{
905 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
906 struct scsi_cmnd *sc = task->sc;
907
908 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
909 return;
910
911 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
912 sc->result = (DID_OK << 16) | rhdr->cmd_status;
913 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
914 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
915 ISCSI_FLAG_DATA_OVERFLOW)) {
916 int res_count = be32_to_cpu(rhdr->residual_count);
917
918 if (res_count > 0 &&
919 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
920 res_count <= sc->sdb.length))
921 scsi_set_resid(sc, res_count);
922 else
923 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
924 }
925
926 ISCSI_DBG_SESSION(conn->session, "data in with status done "
927 "[sc %p res %d itt 0x%x]\n",
928 sc, sc->result, task->itt);
929 conn->scsirsp_pdus_cnt++;
930 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
931}
932
933static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
934{
935 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
936 struct iscsi_session *session = conn->session;
937
938 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
939 conn->tmfrsp_pdus_cnt++;
940
941 if (session->tmf_state != TMF_QUEUED)
942 return;
943
944 if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
945 session->tmf_state = TMF_SUCCESS;
946 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
947 session->tmf_state = TMF_NOT_FOUND;
948 else
949 session->tmf_state = TMF_FAILED;
950 wake_up(&session->ehwait);
951}
952
953static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
954{
955 struct iscsi_nopout hdr;
956 struct iscsi_task *task;
957
958 if (!rhdr) {
959 if (READ_ONCE(conn->ping_task))
960 return -EINVAL;
961 WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
962 }
963
964 memset(&hdr, 0, sizeof(struct iscsi_nopout));
965 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
966 hdr.flags = ISCSI_FLAG_CMD_FINAL;
967
968 if (rhdr) {
969 hdr.lun = rhdr->lun;
970 hdr.ttt = rhdr->ttt;
971 hdr.itt = RESERVED_ITT;
972 } else
973 hdr.ttt = RESERVED_ITT;
974
975 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
976 if (!task) {
977 if (!rhdr)
978 WRITE_ONCE(conn->ping_task, NULL);
979 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
980 return -EIO;
981 } else if (!rhdr) {
982 /* only track our nops */
983 conn->last_ping = jiffies;
984 }
985
986 return 0;
987}
988
989/**
990 * iscsi_nop_out_rsp - SCSI NOP Response processing
991 * @task: scsi command task
992 * @nop: the nop structure
993 * @data: where to put the data
994 * @datalen: length of data
995 *
996 * iscsi_nop_out_rsp handles a nop response from us or
997 * from user space. Called under back_lock
998 **/
999static int iscsi_nop_out_rsp(struct iscsi_task *task,
1000 struct iscsi_nopin *nop, char *data, int datalen)
1001{
1002 struct iscsi_conn *conn = task->conn;
1003 int rc = 0;
1004
1005 if (READ_ONCE(conn->ping_task) != task) {
1006 /*
1007 * If this is not in response to one of our
1008 * nops then it must be from userspace.
1009 */
1010 if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
1011 data, datalen))
1012 rc = ISCSI_ERR_CONN_FAILED;
1013 } else
1014 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
1015 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1016 return rc;
1017}
1018
1019static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1020 char *data, int datalen)
1021{
1022 struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
1023 struct iscsi_hdr rejected_pdu;
1024 int opcode, rc = 0;
1025
1026 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
1027
1028 if (ntoh24(reject->dlength) > datalen ||
1029 ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
1030 iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
1031 "pdu. Invalid data length (pdu dlength "
1032 "%u, datalen %d\n", ntoh24(reject->dlength),
1033 datalen);
1034 return ISCSI_ERR_PROTO;
1035 }
1036 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
1037 opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;
1038
1039 switch (reject->reason) {
1040 case ISCSI_REASON_DATA_DIGEST_ERROR:
1041 iscsi_conn_printk(KERN_ERR, conn,
1042 "pdu (op 0x%x itt 0x%x) rejected "
1043 "due to DataDigest error.\n",
1044 opcode, rejected_pdu.itt);
1045 break;
1046 case ISCSI_REASON_IMM_CMD_REJECT:
1047 iscsi_conn_printk(KERN_ERR, conn,
1048 "pdu (op 0x%x itt 0x%x) rejected. Too many "
1049 "immediate commands.\n",
1050 opcode, rejected_pdu.itt);
1051 /*
1052 * We only send one TMF at a time so if the target could not
1053 * handle it, then it should get fixed (RFC mandates that
1054 * a target can handle one immediate TMF per conn).
1055 *
1056 * For nops-outs, we could have sent more than one if
1057 * the target is sending us lots of nop-ins
1058 */
1059 if (opcode != ISCSI_OP_NOOP_OUT)
1060 return 0;
1061
1062 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1063 /*
1064 * nop-out in response to target's nop-out rejected.
1065 * Just resend.
1066 */
1067 /* In RX path we are under back lock */
1068 spin_unlock(&conn->session->back_lock);
1069 spin_lock(&conn->session->frwd_lock);
1070 iscsi_send_nopout(conn,
1071 (struct iscsi_nopin*)&rejected_pdu);
1072 spin_unlock(&conn->session->frwd_lock);
1073 spin_lock(&conn->session->back_lock);
1074 } else {
1075 struct iscsi_task *task;
1076 /*
1077 * Our nop as ping got dropped. We know the target
1078 * and transport are ok so just clean up
1079 */
1080 task = iscsi_itt_to_task(conn, rejected_pdu.itt);
1081 if (!task) {
1082 iscsi_conn_printk(KERN_ERR, conn,
1083 "Invalid pdu reject. Could "
1084 "not lookup rejected task.\n");
1085 rc = ISCSI_ERR_BAD_ITT;
1086 } else
1087 rc = iscsi_nop_out_rsp(task,
1088 (struct iscsi_nopin*)&rejected_pdu,
1089 NULL, 0);
1090 }
1091 break;
1092 default:
1093 iscsi_conn_printk(KERN_ERR, conn,
1094 "pdu (op 0x%x itt 0x%x) rejected. Reason "
1095 "code 0x%x\n", rejected_pdu.opcode,
1096 rejected_pdu.itt, reject->reason);
1097 break;
1098 }
1099 return rc;
1100}
1101
1102/**
1103 * iscsi_itt_to_task - look up task by itt
1104 * @conn: iscsi connection
1105 * @itt: itt
1106 *
1107 * This should be used for mgmt tasks like login and nops, or if
1108 * the LLD's itt space does not include the session age.
1109 *
1110 * The session back_lock must be held.
1111 */
1112struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
1113{
1114 struct iscsi_session *session = conn->session;
1115 int i;
1116
1117 if (itt == RESERVED_ITT)
1118 return NULL;
1119
1120 if (session->tt->parse_pdu_itt)
1121 session->tt->parse_pdu_itt(conn, itt, &i, NULL);
1122 else
1123 i = get_itt(itt);
1124 if (i >= session->cmds_max)
1125 return NULL;
1126
1127 return session->cmds[i];
1128}
1129EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
1130
1131/**
1132 * __iscsi_complete_pdu - complete pdu
1133 * @conn: iscsi conn
1134 * @hdr: iscsi header
1135 * @data: data buffer
1136 * @datalen: len of data buffer
1137 *
1138 * Completes pdu processing by freeing any resources allocated at
1139 * queuecommand or send generic. The session back_lock must be held and
1140 * iscsi_verify_itt() must have been called.
1141 */
1142int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1143 char *data, int datalen)
1144{
1145 struct iscsi_session *session = conn->session;
1146 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
1147 struct iscsi_task *task;
1148 uint32_t itt;
1149
1150 conn->last_recv = jiffies;
1151 rc = iscsi_verify_itt(conn, hdr->itt);
1152 if (rc)
1153 return rc;
1154
1155 if (hdr->itt != RESERVED_ITT)
1156 itt = get_itt(hdr->itt);
1157 else
1158 itt = ~0U;
1159
1160 ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n",
1161 opcode, conn->id, itt, datalen);
1162
1163 if (itt == ~0U) {
1164 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1165
1166 switch(opcode) {
1167 case ISCSI_OP_NOOP_IN:
1168 if (datalen) {
1169 rc = ISCSI_ERR_PROTO;
1170 break;
1171 }
1172
1173 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
1174 break;
1175
1176 /* In RX path we are under back lock */
1177 spin_unlock(&session->back_lock);
1178 spin_lock(&session->frwd_lock);
1179 iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
1180 spin_unlock(&session->frwd_lock);
1181 spin_lock(&session->back_lock);
1182 break;
1183 case ISCSI_OP_REJECT:
1184 rc = iscsi_handle_reject(conn, hdr, data, datalen);
1185 break;
1186 case ISCSI_OP_ASYNC_EVENT:
1187 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1188 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
1189 rc = ISCSI_ERR_CONN_FAILED;
1190 break;
1191 default:
1192 rc = ISCSI_ERR_BAD_OPCODE;
1193 break;
1194 }
1195 goto out;
1196 }
1197
1198 switch(opcode) {
1199 case ISCSI_OP_SCSI_CMD_RSP:
1200 case ISCSI_OP_SCSI_DATA_IN:
1201 task = iscsi_itt_to_ctask(conn, hdr->itt);
1202 if (!task)
1203 return ISCSI_ERR_BAD_ITT;
1204 task->last_xfer = jiffies;
1205 break;
1206 case ISCSI_OP_R2T:
1207 /*
1208 * LLD handles R2Ts if they need to.
1209 */
1210 return 0;
1211 case ISCSI_OP_LOGOUT_RSP:
1212 case ISCSI_OP_LOGIN_RSP:
1213 case ISCSI_OP_TEXT_RSP:
1214 case ISCSI_OP_SCSI_TMFUNC_RSP:
1215 case ISCSI_OP_NOOP_IN:
1216 task = iscsi_itt_to_task(conn, hdr->itt);
1217 if (!task)
1218 return ISCSI_ERR_BAD_ITT;
1219 break;
1220 default:
1221 return ISCSI_ERR_BAD_OPCODE;
1222 }
1223
1224 switch(opcode) {
1225 case ISCSI_OP_SCSI_CMD_RSP:
1226 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
1227 break;
1228 case ISCSI_OP_SCSI_DATA_IN:
1229 iscsi_data_in_rsp(conn, hdr, task);
1230 break;
1231 case ISCSI_OP_LOGOUT_RSP:
1232 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1233 if (datalen) {
1234 rc = ISCSI_ERR_PROTO;
1235 break;
1236 }
1237 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1238 goto recv_pdu;
1239 case ISCSI_OP_LOGIN_RSP:
1240 case ISCSI_OP_TEXT_RSP:
1241 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1242 /*
1243 * login related PDU's exp_statsn is handled in
1244 * userspace
1245 */
1246 goto recv_pdu;
1247 case ISCSI_OP_SCSI_TMFUNC_RSP:
1248 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1249 if (datalen) {
1250 rc = ISCSI_ERR_PROTO;
1251 break;
1252 }
1253
1254 iscsi_tmf_rsp(conn, hdr);
1255 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1256 break;
1257 case ISCSI_OP_NOOP_IN:
1258 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1259 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
1260 rc = ISCSI_ERR_PROTO;
1261 break;
1262 }
1263 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1264
1265 rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
1266 data, datalen);
1267 break;
1268 default:
1269 rc = ISCSI_ERR_BAD_OPCODE;
1270 break;
1271 }
1272
1273out:
1274 return rc;
1275recv_pdu:
1276 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
1277 rc = ISCSI_ERR_CONN_FAILED;
1278 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1279 return rc;
1280}
1281EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
1282
1283int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1284 char *data, int datalen)
1285{
1286 int rc;
1287
1288 spin_lock(&conn->session->back_lock);
1289 rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
1290 spin_unlock(&conn->session->back_lock);
1291 return rc;
1292}
1293EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
1294
1295int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
1296{
1297 struct iscsi_session *session = conn->session;
1298 int age = 0, i = 0;
1299
1300 if (itt == RESERVED_ITT)
1301 return 0;
1302
1303 if (session->tt->parse_pdu_itt)
1304 session->tt->parse_pdu_itt(conn, itt, &i, &age);
1305 else {
1306 i = get_itt(itt);
1307 age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
1308 }
1309
1310 if (age != session->age) {
1311 iscsi_conn_printk(KERN_ERR, conn,
1312 "received itt %x expected session age (%x)\n",
1313 (__force u32)itt, session->age);
1314 return ISCSI_ERR_BAD_ITT;
1315 }
1316
1317 if (i >= session->cmds_max) {
1318 iscsi_conn_printk(KERN_ERR, conn,
1319 "received invalid itt index %u (max cmds "
1320 "%u.\n", i, session->cmds_max);
1321 return ISCSI_ERR_BAD_ITT;
1322 }
1323 return 0;
1324}
1325EXPORT_SYMBOL_GPL(iscsi_verify_itt);
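/*
 * Note (illustrative): when the transport does not provide parse_pdu_itt(),
 * libiscsi encodes the wire ITT itself with build_itt() - roughly the task's
 * pool index in the low bits with the session age shifted up by
 * ISCSI_AGE_SHIFT - so a task at index 5 in a session whose age is 2 goes
 * out as (5 | (2 << ISCSI_AGE_SHIFT)), and get_itt() plus the shift/mask
 * above recover the index and age when the response comes back.
 */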
1326
1327/**
1328 * iscsi_itt_to_ctask - look up ctask by itt
1329 * @conn: iscsi connection
1330 * @itt: itt
1331 *
1332 * This should be used for cmd tasks.
1333 *
1334 * The session back_lock must be held.
1335 */
1336struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
1337{
1338 struct iscsi_task *task;
1339
1340 if (iscsi_verify_itt(conn, itt))
1341 return NULL;
1342
1343 task = iscsi_itt_to_task(conn, itt);
1344 if (!task || !task->sc)
1345 return NULL;
1346
1347 if (task->sc->SCp.phase != conn->session->age) {
1348 iscsi_session_printk(KERN_ERR, conn->session,
1349 "task's session age %d, expected %d\n",
1350 task->sc->SCp.phase, conn->session->age);
1351 return NULL;
1352 }
1353
1354 return task;
1355}
1356EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
1357
1358void iscsi_session_failure(struct iscsi_session *session,
1359 enum iscsi_err err)
1360{
1361 struct iscsi_conn *conn;
1362
1363 spin_lock_bh(&session->frwd_lock);
1364 conn = session->leadconn;
1365 if (session->state == ISCSI_STATE_TERMINATE || !conn) {
1366 spin_unlock_bh(&session->frwd_lock);
1367 return;
1368 }
1369
1370 iscsi_get_conn(conn->cls_conn);
1371 spin_unlock_bh(&session->frwd_lock);
1372 /*
1373 * if the host is being removed bypass the connection
1374 * recovery initialization because we are going to kill
1375 * the session.
1376 */
1377 if (err == ISCSI_ERR_INVALID_HOST)
1378 iscsi_conn_error_event(conn->cls_conn, err);
1379 else
1380 iscsi_conn_failure(conn, err);
1381 iscsi_put_conn(conn->cls_conn);
1382}
1383EXPORT_SYMBOL_GPL(iscsi_session_failure);
1384
1385static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
1386{
1387 struct iscsi_session *session = conn->session;
1388
1389 if (session->state == ISCSI_STATE_FAILED)
1390 return false;
1391
1392 if (conn->stop_stage == 0)
1393 session->state = ISCSI_STATE_FAILED;
1394
1395 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1396 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1397 return true;
1398}
1399
1400void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
1401{
1402 struct iscsi_session *session = conn->session;
1403 bool needs_evt;
1404
1405 spin_lock_bh(&session->frwd_lock);
1406 needs_evt = iscsi_set_conn_failed(conn);
1407 spin_unlock_bh(&session->frwd_lock);
1408
1409 if (needs_evt)
1410 iscsi_conn_error_event(conn->cls_conn, err);
1411}
1412EXPORT_SYMBOL_GPL(iscsi_conn_failure);
1413
1414static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
1415{
1416 struct iscsi_session *session = conn->session;
1417
1418 /*
1419 * Check for iSCSI window and take care of CmdSN wrap-around
1420 */
1421 if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
1422 ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn "
1423 "%u MaxCmdSN %u CmdSN %u/%u\n",
1424 session->exp_cmdsn, session->max_cmdsn,
1425 session->cmdsn, session->queued_cmdsn);
1426 return -ENOSPC;
1427 }
1428 return 0;
1429}
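/*
 * Example (illustrative numbers): with exp_cmdsn 10 and max_cmdsn 17 the
 * target is advertising an 8-command window; once queued_cmdsn reaches 18
 * the check above fails, queuecommand returns SCSI_MLQUEUE_TARGET_BUSY, and
 * IO resumes when a response moves max_cmdsn forward.  iscsi_sna_lte()
 * keeps the comparison correct across 32-bit CmdSN wrap-around.
 */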
1430
1431static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
1432 bool was_requeue)
1433{
1434 int rc;
1435
1436 spin_lock_bh(&conn->session->back_lock);
1437
1438 if (!conn->task) {
1439 /* Take a ref so we can access it after xmit_task() */
1440 __iscsi_get_task(task);
1441 } else {
1442 /* Already have a ref from when we failed to send it last call */
1443 conn->task = NULL;
1444 }
1445
1446 /*
1447 * If this was a requeue for a R2T we have an extra ref on the task in
1448 * case a bad target sends a cmd rsp before we have handled the task.
1449 */
1450 if (was_requeue)
1451 __iscsi_put_task(task);
1452
1453 /*
1454 * Do this after dropping the extra ref because if this was a requeue
1455 * it's removed from that list and cleanup_queued_task would miss it.
1456 */
1457 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1458 /*
1459 * Save the task and ref in case we weren't cleaning up this
1460 * task and get woken up again.
1461 */
1462 conn->task = task;
1463 spin_unlock_bh(&conn->session->back_lock);
1464 return -ENODATA;
1465 }
1466 spin_unlock_bh(&conn->session->back_lock);
1467
1468 spin_unlock_bh(&conn->session->frwd_lock);
1469 rc = conn->session->tt->xmit_task(task);
1470 spin_lock_bh(&conn->session->frwd_lock);
1471 if (!rc) {
1472 /* done with this task */
1473 task->last_xfer = jiffies;
1474 }
1475 /* regular RX path uses back_lock */
1476 spin_lock(&conn->session->back_lock);
1477 if (rc && task->state == ISCSI_TASK_RUNNING) {
1478 /*
1479 * get an extra ref that is released next time we access it
1480 * as conn->task above.
1481 */
1482 __iscsi_get_task(task);
1483 conn->task = task;
1484 }
1485
1486 __iscsi_put_task(task);
1487 spin_unlock(&conn->session->back_lock);
1488 return rc;
1489}
1490
1491/**
1492 * iscsi_requeue_task - requeue task to run from session workqueue
1493 * @task: task to requeue
1494 *
1495 * Callers must have taken a ref to the task that is going to be requeued.
1496 */
1497void iscsi_requeue_task(struct iscsi_task *task)
1498{
1499 struct iscsi_conn *conn = task->conn;
1500
1501 /*
1502 * this may be on the requeue list already if the xmit_task callout
1503 * is handling the r2ts while we are adding new ones
1504 */
1505 spin_lock_bh(&conn->session->frwd_lock);
1506 if (list_empty(&task->running)) {
1507 list_add_tail(&task->running, &conn->requeue);
1508 } else {
1509 /*
1510 * Don't need the extra ref since it's already requeued and
1511 * has a ref.
1512 */
1513 iscsi_put_task(task);
1514 }
1515 iscsi_conn_queue_work(conn);
1516 spin_unlock_bh(&conn->session->frwd_lock);
1517}
1518EXPORT_SYMBOL_GPL(iscsi_requeue_task);
1519
1520/**
1521 * iscsi_data_xmit - xmit any command into the scheduled connection
1522 * @conn: iscsi connection
1523 *
1524 * Notes:
1525 * The function can return -EAGAIN in which case the caller must
1526 * re-schedule it later or recover. A '0' return code means
1527 * successful xmit.
1528 **/
1529static int iscsi_data_xmit(struct iscsi_conn *conn)
1530{
1531 struct iscsi_task *task;
1532 int rc = 0;
1533
1534 spin_lock_bh(&conn->session->frwd_lock);
1535 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1536 ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
1537 spin_unlock_bh(&conn->session->frwd_lock);
1538 return -ENODATA;
1539 }
1540
1541 if (conn->task) {
1542 rc = iscsi_xmit_task(conn, conn->task, false);
1543 if (rc)
1544 goto done;
1545 }
1546
1547 /*
1548 * process mgmt pdus like nops before commands since we should
1549 * only have one nop-out as a ping from us and targets should not
1550 * overflow us with nop-ins
1551 */
1552check_mgmt:
1553 while (!list_empty(&conn->mgmtqueue)) {
1554 task = list_entry(conn->mgmtqueue.next, struct iscsi_task,
1555 running);
1556 list_del_init(&task->running);
1557 if (iscsi_prep_mgmt_task(conn, task)) {
1558 /* regular RX path uses back_lock */
1559 spin_lock_bh(&conn->session->back_lock);
1560 __iscsi_put_task(task);
1561 spin_unlock_bh(&conn->session->back_lock);
1562 continue;
1563 }
1564 rc = iscsi_xmit_task(conn, task, false);
1565 if (rc)
1566 goto done;
1567 }
1568
1569 /* process pending command queue */
1570 while (!list_empty(&conn->cmdqueue)) {
1571 task = list_entry(conn->cmdqueue.next, struct iscsi_task,
1572 running);
1573 list_del_init(&task->running);
1574 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1575 fail_scsi_task(task, DID_IMM_RETRY);
1576 continue;
1577 }
1578 rc = iscsi_prep_scsi_cmd_pdu(task);
1579 if (rc) {
1580 if (rc == -ENOMEM || rc == -EACCES)
1581 fail_scsi_task(task, DID_IMM_RETRY);
1582 else
1583 fail_scsi_task(task, DID_ABORT);
1584 continue;
1585 }
1586 rc = iscsi_xmit_task(conn, task, false);
1587 if (rc)
1588 goto done;
1589 /*
1590 * we could continuously get new task requests so
1591 * we need to check the mgmt queue for nops that need to
1592 * be sent to aviod starvation
1593 */
1594 if (!list_empty(&conn->mgmtqueue))
1595 goto check_mgmt;
1596 }
1597
1598 while (!list_empty(&conn->requeue)) {
1599 /*
1600 * we always do fastlogout - conn stop code will clean up.
1601 */
1602 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
1603 break;
1604
1605 task = list_entry(conn->requeue.next, struct iscsi_task,
1606 running);
1607
1608 if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
1609 break;
1610
1611 list_del_init(&task->running);
1612 rc = iscsi_xmit_task(conn, task, true);
1613 if (rc)
1614 goto done;
1615 if (!list_empty(&conn->mgmtqueue))
1616 goto check_mgmt;
1617 }
1618 spin_unlock_bh(&conn->session->frwd_lock);
1619 return -ENODATA;
1620
1621done:
1622 spin_unlock_bh(&conn->session->frwd_lock);
1623 return rc;
1624}
1625
1626static void iscsi_xmitworker(struct work_struct *work)
1627{
1628 struct iscsi_conn *conn =
1629 container_of(work, struct iscsi_conn, xmitwork);
1630 int rc;
1631 /*
1632 * serialize Xmit worker on a per-connection basis.
1633 */
1634 do {
1635 rc = iscsi_data_xmit(conn);
1636 } while (rc >= 0 || rc == -EAGAIN);
1637}
1638
1639static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
1640 struct scsi_cmnd *sc)
1641{
1642 struct iscsi_task *task;
1643
1644 if (!kfifo_out(&conn->session->cmdpool.queue,
1645 (void *) &task, sizeof(void *)))
1646 return NULL;
1647
1648 sc->SCp.phase = conn->session->age;
1649 sc->SCp.ptr = (char *) task;
1650
1651 refcount_set(&task->refcount, 1);
1652 task->state = ISCSI_TASK_PENDING;
1653 task->conn = conn;
1654 task->sc = sc;
1655 task->have_checked_conn = false;
1656 task->last_timeout = jiffies;
1657 task->last_xfer = jiffies;
1658 task->protected = false;
1659 INIT_LIST_HEAD(&task->running);
1660 return task;
1661}
1662
1663enum {
1664 FAILURE_BAD_HOST = 1,
1665 FAILURE_SESSION_FAILED,
1666 FAILURE_SESSION_FREED,
1667 FAILURE_WINDOW_CLOSED,
1668 FAILURE_OOM,
1669 FAILURE_SESSION_TERMINATE,
1670 FAILURE_SESSION_IN_RECOVERY,
1671 FAILURE_SESSION_RECOVERY_TIMEOUT,
1672 FAILURE_SESSION_LOGGING_OUT,
1673 FAILURE_SESSION_NOT_READY,
1674};
1675
1676int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1677{
1678 struct iscsi_cls_session *cls_session;
1679 struct iscsi_host *ihost;
1680 int reason = 0;
1681 struct iscsi_session *session;
1682 struct iscsi_conn *conn;
1683 struct iscsi_task *task = NULL;
1684
1685 sc->result = 0;
1686 sc->SCp.ptr = NULL;
1687
1688 ihost = shost_priv(host);
1689
1690 cls_session = starget_to_session(scsi_target(sc->device));
1691 session = cls_session->dd_data;
1692 spin_lock_bh(&session->frwd_lock);
1693
1694 reason = iscsi_session_chkready(cls_session);
1695 if (reason) {
1696 sc->result = reason;
1697 goto fault;
1698 }
1699
1700 if (session->state != ISCSI_STATE_LOGGED_IN) {
1701 /*
1702 * to handle the race between when we set the recovery state
1703 * and block the session we requeue here (commands could
1704 * be entering our queuecommand while a block is starting
1705 * up because the block code is not locked)
1706 */
1707 switch (session->state) {
1708 case ISCSI_STATE_FAILED:
1709 /*
1710 * during shutdown, if the session state is bad, fail the cmd so
1711 * that completion can happen
1712 */
1713 if (unlikely(system_state != SYSTEM_RUNNING)) {
1714 reason = FAILURE_SESSION_FAILED;
1715 sc->result = DID_NO_CONNECT << 16;
1716 break;
1717 }
1718 fallthrough;
1719 case ISCSI_STATE_IN_RECOVERY:
1720 reason = FAILURE_SESSION_IN_RECOVERY;
1721 sc->result = DID_IMM_RETRY << 16;
1722 break;
1723 case ISCSI_STATE_LOGGING_OUT:
1724 reason = FAILURE_SESSION_LOGGING_OUT;
1725 sc->result = DID_IMM_RETRY << 16;
1726 break;
1727 case ISCSI_STATE_RECOVERY_FAILED:
1728 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1729 sc->result = DID_TRANSPORT_FAILFAST << 16;
1730 break;
1731 case ISCSI_STATE_TERMINATE:
1732 reason = FAILURE_SESSION_TERMINATE;
1733 sc->result = DID_NO_CONNECT << 16;
1734 break;
1735 default:
1736 reason = FAILURE_SESSION_FREED;
1737 sc->result = DID_NO_CONNECT << 16;
1738 }
1739 goto fault;
1740 }
1741
1742 conn = session->leadconn;
1743 if (!conn) {
1744 reason = FAILURE_SESSION_FREED;
1745 sc->result = DID_NO_CONNECT << 16;
1746 goto fault;
1747 }
1748
1749 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1750 reason = FAILURE_SESSION_IN_RECOVERY;
1751 sc->result = DID_REQUEUE << 16;
1752 goto fault;
1753 }
1754
1755 if (iscsi_check_cmdsn_window_closed(conn)) {
1756 reason = FAILURE_WINDOW_CLOSED;
1757 goto reject;
1758 }
1759
1760 task = iscsi_alloc_task(conn, sc);
1761 if (!task) {
1762 reason = FAILURE_OOM;
1763 goto reject;
1764 }
1765
1766 if (!ihost->workq) {
1767 reason = iscsi_prep_scsi_cmd_pdu(task);
1768 if (reason) {
1769 if (reason == -ENOMEM || reason == -EACCES) {
1770 reason = FAILURE_OOM;
1771 goto prepd_reject;
1772 } else {
1773 sc->result = DID_ABORT << 16;
1774 goto prepd_fault;
1775 }
1776 }
1777 if (session->tt->xmit_task(task)) {
1778 session->cmdsn--;
1779 reason = FAILURE_SESSION_NOT_READY;
1780 goto prepd_reject;
1781 }
1782 } else {
1783 list_add_tail(&task->running, &conn->cmdqueue);
1784 iscsi_conn_queue_work(conn);
1785 }
1786
1787 session->queued_cmdsn++;
1788 spin_unlock_bh(&session->frwd_lock);
1789 return 0;
1790
1791prepd_reject:
1792 spin_lock_bh(&session->back_lock);
1793 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1794 spin_unlock_bh(&session->back_lock);
1795reject:
1796 spin_unlock_bh(&session->frwd_lock);
1797 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
1798 sc->cmnd[0], reason);
1799 return SCSI_MLQUEUE_TARGET_BUSY;
1800
1801prepd_fault:
1802 spin_lock_bh(&session->back_lock);
1803 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1804 spin_unlock_bh(&session->back_lock);
1805fault:
1806 spin_unlock_bh(&session->frwd_lock);
1807 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
1808 sc->cmnd[0], reason);
1809 scsi_set_resid(sc, scsi_bufflen(sc));
1810 sc->scsi_done(sc);
1811 return 0;
1812}
1813EXPORT_SYMBOL_GPL(iscsi_queuecommand);
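/*
 * Hookup sketch (illustrative; loosely modeled on the software iSCSI
 * transports - the name and exact callback set below are assumptions, not a
 * required template):
 *
 *	static struct scsi_host_template example_iscsi_sht = {
 *		.module			  = THIS_MODULE,
 *		.name			  = "example iSCSI initiator",
 *		.queuecommand		  = iscsi_queuecommand,
 *		.target_alloc		  = iscsi_target_alloc,
 *		.eh_timed_out		  = iscsi_eh_cmd_timed_out,
 *		.eh_abort_handler	  = iscsi_eh_abort,
 *		.eh_device_reset_handler  = iscsi_eh_device_reset,
 *		.this_id		  = -1,
 *	};
 */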
1814
1815int iscsi_target_alloc(struct scsi_target *starget)
1816{
1817 struct iscsi_cls_session *cls_session = starget_to_session(starget);
1818 struct iscsi_session *session = cls_session->dd_data;
1819
1820 starget->can_queue = session->scsi_cmds_max;
1821 return 0;
1822}
1823EXPORT_SYMBOL_GPL(iscsi_target_alloc);
1824
1825static void iscsi_tmf_timedout(struct timer_list *t)
1826{
1827 struct iscsi_session *session = from_timer(session, t, tmf_timer);
1828
1829 spin_lock(&session->frwd_lock);
1830 if (session->tmf_state == TMF_QUEUED) {
1831 session->tmf_state = TMF_TIMEDOUT;
1832 ISCSI_DBG_EH(session, "tmf timedout\n");
1833 /* unblock eh_abort() */
1834 wake_up(&session->ehwait);
1835 }
1836 spin_unlock(&session->frwd_lock);
1837}
1838
1839static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1840 struct iscsi_tm *hdr, int age,
1841 int timeout)
1842 __must_hold(&session->frwd_lock)
1843{
1844 struct iscsi_session *session = conn->session;
1845 struct iscsi_task *task;
1846
1847 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1848 NULL, 0);
1849 if (!task) {
1850 spin_unlock_bh(&session->frwd_lock);
1851 iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
1852 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1853 spin_lock_bh(&session->frwd_lock);
1854 return -EPERM;
1855 }
1856 conn->tmfcmd_pdus_cnt++;
1857 session->tmf_timer.expires = timeout * HZ + jiffies;
1858 add_timer(&session->tmf_timer);
1859 ISCSI_DBG_EH(session, "tmf set timeout\n");
1860
1861 spin_unlock_bh(&session->frwd_lock);
1862 mutex_unlock(&session->eh_mutex);
1863
1864 /*
1865 * block eh thread until:
1866 *
1867 * 1) tmf response
1868 * 2) tmf timeout
1869 * 3) session is terminated or restarted or userspace has
1870 * given up on recovery
1871 */
1872 wait_event_interruptible(session->ehwait, age != session->age ||
1873 session->state != ISCSI_STATE_LOGGED_IN ||
1874 session->tmf_state != TMF_QUEUED);
1875 if (signal_pending(current))
1876 flush_signals(current);
1877 del_timer_sync(&session->tmf_timer);
1878
1879 mutex_lock(&session->eh_mutex);
1880 spin_lock_bh(&session->frwd_lock);
1881 /* if the session drops it will clean up the task */
1882 if (age != session->age ||
1883 session->state != ISCSI_STATE_LOGGED_IN)
1884 return -ENOTCONN;
1885 return 0;
1886}
1887
1888/*
1889 * Fail commands. session frwd lock held and xmit thread flushed.
1890 */
1891static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
1892{
1893 struct iscsi_session *session = conn->session;
1894 struct iscsi_task *task;
1895 int i;
1896
1897 spin_lock_bh(&session->back_lock);
1898 for (i = 0; i < session->cmds_max; i++) {
1899 task = session->cmds[i];
1900 if (!task->sc || task->state == ISCSI_TASK_FREE)
1901 continue;
1902
1903 if (lun != -1 && lun != task->sc->device->lun)
1904 continue;
1905
1906 __iscsi_get_task(task);
1907 spin_unlock_bh(&session->back_lock);
1908
1909 ISCSI_DBG_SESSION(session,
1910 "failing sc %p itt 0x%x state %d\n",
1911 task->sc, task->itt, task->state);
1912 fail_scsi_task(task, error);
1913
1914 spin_unlock_bh(&session->frwd_lock);
1915 iscsi_put_task(task);
1916 spin_lock_bh(&session->frwd_lock);
1917
1918 spin_lock_bh(&session->back_lock);
1919 }
1920
1921 spin_unlock_bh(&session->back_lock);
1922}
1923
1924/**
1925 * iscsi_suspend_queue - suspend iscsi_queuecommand
1926 * @conn: iscsi conn to stop queueing IO on
1927 *
1928 * This grabs the session frwd_lock to make sure no one is in
1929 * xmit_task/queuecommand, and then sets suspend to prevent
1930 * new commands from being queued. This only needs to be called
1931 * by offload drivers that need to sync a path like ep disconnect
1932 * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi
1933 * will call iscsi_start_tx and iscsi_unblock_session when in FFP.
1934 */
1935void iscsi_suspend_queue(struct iscsi_conn *conn)
1936{
1937 spin_lock_bh(&conn->session->frwd_lock);
1938 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1939 spin_unlock_bh(&conn->session->frwd_lock);
1940}
1941EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
1942
1943/**
1944 * iscsi_suspend_tx - suspend iscsi_data_xmit
1945 * @conn: iscsi conn to stop processing IO on.
1946 *
1947 * This function sets the suspend bit to prevent iscsi_data_xmit
1948 * from sending new IO, and if work is queued on the xmit thread
1949 * it will wait for it to be completed.
1950 */
1951void iscsi_suspend_tx(struct iscsi_conn *conn)
1952{
1953 struct Scsi_Host *shost = conn->session->host;
1954 struct iscsi_host *ihost = shost_priv(shost);
1955
1956 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1957 if (ihost->workq)
1958 flush_workqueue(ihost->workq);
1959}
1960EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1961
1962static void iscsi_start_tx(struct iscsi_conn *conn)
1963{
1964 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1965 iscsi_conn_queue_work(conn);
1966}
1967
1968/*
1969 * We want to make sure a ping is in flight and that it has timed out,
1970 * and that we are not just busy processing a pdu that is making
1971 * progress but got started before the ping and is taking a while
1972 * to complete, so the ping is simply stuck behind it in a queue.
1973 */
1974static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1975{
1976 if (READ_ONCE(conn->ping_task) &&
1977 time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1978 (conn->ping_timeout * HZ), jiffies))
1979 return 1;
1980 else
1981 return 0;
1982}
1983
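/**
 * iscsi_eh_cmd_timed_out - check if a timed out cmd should get more time
 * @sc: scsi cmd that scsi-ml has timed out
 *
 * Returns BLK_EH_RESET_TIMER if the cmd, the connection, or older cmds
 * are still making progress (a nop may be sent to check the transport),
 * otherwise BLK_EH_DONE so completion or the scsi eh can take over.
 */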
1984enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1985{
1986 enum blk_eh_timer_return rc = BLK_EH_DONE;
1987 struct iscsi_task *task = NULL, *running_task;
1988 struct iscsi_cls_session *cls_session;
1989 struct iscsi_session *session;
1990 struct iscsi_conn *conn;
1991 int i;
1992
1993 cls_session = starget_to_session(scsi_target(sc->device));
1994 session = cls_session->dd_data;
1995
1996 ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
1997
1998 spin_lock_bh(&session->frwd_lock);
1999 spin_lock(&session->back_lock);
2000 task = (struct iscsi_task *)sc->SCp.ptr;
2001 if (!task) {
2002 /*
2003 * Raced with completion. Blk layer has taken ownership
2004 * so let timeout code complete it now.
2005 */
2006 rc = BLK_EH_DONE;
2007 spin_unlock(&session->back_lock);
2008 goto done;
2009 }
2010 __iscsi_get_task(task);
2011 spin_unlock(&session->back_lock);
2012
2013 if (session->state != ISCSI_STATE_LOGGED_IN) {
2014 /*
2015 * During shutdown, if session is prematurely disconnected,
2016 * recovery won't happen and there will be hung cmds. Not
2017 * handling cmds would trigger EH, also bad in this case.
2018 * Instead, handle cmd, allow completion to happen and let
2019 * upper layer deal with the result.
2020 */
2021 if (unlikely(system_state != SYSTEM_RUNNING)) {
2022 sc->result = DID_NO_CONNECT << 16;
2023 ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
2024 rc = BLK_EH_DONE;
2025 goto done;
2026 }
2027 /*
2028 * We are probably in the middle of iscsi recovery so let
2029 * that complete and handle the error.
2030 */
2031 rc = BLK_EH_RESET_TIMER;
2032 goto done;
2033 }
2034
2035 conn = session->leadconn;
2036 if (!conn) {
2037 /* In the middle of shutting down */
2038 rc = BLK_EH_RESET_TIMER;
2039 goto done;
2040 }
2041
2042 /*
2043 * If we have sent (at least queued to the network layer) a pdu or
2044 * recvd one for the task since the last timeout, ask for
2045 * more time. If on the next timeout we have not made progress
2046 * we can check if it is the task or connection when we send the
2047 * nop as a ping.
2048 */
2049 if (time_after(task->last_xfer, task->last_timeout)) {
2050 ISCSI_DBG_EH(session, "Command making progress. Asking "
2051 "scsi-ml for more time to complete. "
2052 "Last data xfer at %lu. Last timeout was at "
2053 "%lu\n.", task->last_xfer, task->last_timeout);
2054 task->have_checked_conn = false;
2055 rc = BLK_EH_RESET_TIMER;
2056 goto done;
2057 }
2058
2059 if (!conn->recv_timeout && !conn->ping_timeout)
2060 goto done;
2061 /*
2062 * if the ping timed out then we are in the middle of cleaning up
2063 * and can let the iscsi eh handle it
2064 */
2065 if (iscsi_has_ping_timed_out(conn)) {
2066 rc = BLK_EH_RESET_TIMER;
2067 goto done;
2068 }
2069
2070 spin_lock(&session->back_lock);
2071 for (i = 0; i < conn->session->cmds_max; i++) {
2072 running_task = conn->session->cmds[i];
2073 if (!running_task->sc || running_task == task ||
2074 running_task->state != ISCSI_TASK_RUNNING)
2075 continue;
2076
2077 /*
2078 * Only check if cmds started before this one have made
2079 * progress, or this could never fail
2080 */
2081 if (time_after(running_task->sc->jiffies_at_alloc,
2082 task->sc->jiffies_at_alloc))
2083 continue;
2084
2085 if (time_after(running_task->last_xfer, task->last_timeout)) {
2086 /*
2087 * This task has not made progress, but a task
2088 * started before us has transferred data since
2089 * we started/last-checked. We could be queueing
2090 * too many tasks or the LU is bad.
2091 *
2092 * If the device is bad the cmds ahead of us on
2093 * other devs will complete, and this loop will
2094 * eventually fail starting the scsi eh.
2095 */
2096 ISCSI_DBG_EH(session, "Command has not made progress "
2097 "but commands ahead of it have. "
2098 "Asking scsi-ml for more time to "
2099 "complete. Our last xfer vs running task "
2100 "last xfer %lu/%lu. Last check %lu.\n",
2101 task->last_xfer, running_task->last_xfer,
2102 task->last_timeout);
2103 spin_unlock(&session->back_lock);
2104 rc = BLK_EH_RESET_TIMER;
2105 goto done;
2106 }
2107 }
2108 spin_unlock(&session->back_lock);
2109
2110 /* Assumes nop timeout is shorter than scsi cmd timeout */
2111 if (task->have_checked_conn)
2112 goto done;
2113
2114 /*
2115 * We are already checking the transport, or a nop from a previous cmd
2116 * timeout is still running
2117 */
2118 if (READ_ONCE(conn->ping_task)) {
2119 task->have_checked_conn = true;
2120 rc = BLK_EH_RESET_TIMER;
2121 goto done;
2122 }
2123
2124 /* Make sure there is a transport check done */
2125 iscsi_send_nopout(conn, NULL);
2126 task->have_checked_conn = true;
2127 rc = BLK_EH_RESET_TIMER;
2128
2129done:
2130 spin_unlock_bh(&session->frwd_lock);
2131
2132 if (task) {
2133 task->last_timeout = jiffies;
2134 iscsi_put_task(task);
2135 }
2136 ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
2137 "timer reset" : "shutdown or nh");
2138 return rc;
2139}
2140EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
2141
2142static void iscsi_check_transport_timeouts(struct timer_list *t)
2143{
2144 struct iscsi_conn *conn = from_timer(conn, t, transport_timer);
2145 struct iscsi_session *session = conn->session;
2146 unsigned long recv_timeout, next_timeout = 0, last_recv;
2147
2148 spin_lock(&session->frwd_lock);
2149 if (session->state != ISCSI_STATE_LOGGED_IN)
2150 goto done;
2151
2152 recv_timeout = conn->recv_timeout;
2153 if (!recv_timeout)
2154 goto done;
2155
2156 recv_timeout *= HZ;
2157 last_recv = conn->last_recv;
2158
2159 if (iscsi_has_ping_timed_out(conn)) {
2160 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
2161 "expired, recv timeout %d, last rx %lu, "
2162 "last ping %lu, now %lu\n",
2163 conn->ping_timeout, conn->recv_timeout,
2164 last_recv, conn->last_ping, jiffies);
2165 spin_unlock(&session->frwd_lock);
2166 iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT);
2167 return;
2168 }
2169
2170 if (time_before_eq(last_recv + recv_timeout, jiffies)) {
2171 /* send a ping to try to provoke some traffic */
2172 ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
2173 if (iscsi_send_nopout(conn, NULL))
2174 next_timeout = jiffies + (1 * HZ);
2175 else
2176 next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
2177 } else
2178 next_timeout = last_recv + recv_timeout;
2179
2180 ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
2181 mod_timer(&conn->transport_timer, next_timeout);
2182done:
2183 spin_unlock(&session->frwd_lock);
2184}
2185
2186/**
2187 * iscsi_conn_unbind - prevent queueing to conn.
2188 * @cls_conn: iscsi conn ep is bound to.
2189 * @is_active: is the conn in use for boot or is this for EH/termination
2190 *
2191 * This must be called by drivers implementing the ep_disconnect callout.
2192 * It disables queueing to the connection from libiscsi in preparation for
2193 * an ep_disconnect call.
2194 */
2195void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
2196{
2197 struct iscsi_session *session;
2198 struct iscsi_conn *conn;
2199
2200 if (!cls_conn)
2201 return;
2202
2203 conn = cls_conn->dd_data;
2204 session = conn->session;
2205 /*
2206 * Wait for iscsi_eh calls to exit. We don't wait for the tmf to
2207 * complete or timeout. The caller just needs to know that whatever is
2208 * still running is everything that has to be cleaned up, and that no
2209 * new cmds will be queued.
2210 */
2211 mutex_lock(&session->eh_mutex);
2212
2213 iscsi_suspend_queue(conn);
2214 iscsi_suspend_tx(conn);
2215
2216 spin_lock_bh(&session->frwd_lock);
2217 if (!is_active) {
2218 /*
2219 * if logout timed out before userspace could even send a PDU
2220 * the state might still be in ISCSI_STATE_LOGGED_IN and
2221 * allowing new cmds and TMFs.
2222 */
2223 if (session->state == ISCSI_STATE_LOGGED_IN)
2224 iscsi_set_conn_failed(conn);
2225 }
2226 spin_unlock_bh(&session->frwd_lock);
2227 mutex_unlock(&session->eh_mutex);
2228}
2229EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
2230
2231static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
2232 struct iscsi_tm *hdr)
2233{
2234 memset(hdr, 0, sizeof(*hdr));
2235 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2236 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
2237 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2238 hdr->lun = task->lun;
2239 hdr->rtt = task->hdr_itt;
2240 hdr->refcmdsn = task->cmdsn;
2241}
2242
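/**
 * iscsi_eh_abort - abort a timed out scsi cmd with an ABORT TASK TMF
 * @sc: scsi cmd to abort
 *
 * Returns SUCCESS if the cmd had already completed, was still pending, or
 * was aborted by the target, and FAILED if the TMF could not be sent,
 * timed out, or was rejected, in which case the scsi eh escalates.
 */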
2243int iscsi_eh_abort(struct scsi_cmnd *sc)
2244{
2245 struct iscsi_cls_session *cls_session;
2246 struct iscsi_session *session;
2247 struct iscsi_conn *conn;
2248 struct iscsi_task *task;
2249 struct iscsi_tm *hdr;
2250 int age;
2251
2252 cls_session = starget_to_session(scsi_target(sc->device));
2253 session = cls_session->dd_data;
2254
2255 ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
2256
2257 mutex_lock(&session->eh_mutex);
2258 spin_lock_bh(&session->frwd_lock);
2259 /*
2260 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
2261 * got the command.
2262 */
2263 if (!sc->SCp.ptr) {
2264 ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
2265 "it completed.\n");
2266 spin_unlock_bh(&session->frwd_lock);
2267 mutex_unlock(&session->eh_mutex);
2268 return SUCCESS;
2269 }
2270
2271 /*
2272 * If we are not logged in or we have started a new session
2273 * then let the host reset code handle this
2274 */
2275 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
2276 sc->SCp.phase != session->age) {
2277 spin_unlock_bh(&session->frwd_lock);
2278 mutex_unlock(&session->eh_mutex);
2279 ISCSI_DBG_EH(session, "failing abort due to dropped "
2280 "session.\n");
2281 return FAILED;
2282 }
2283
2284 spin_lock(&session->back_lock);
2285 task = (struct iscsi_task *)sc->SCp.ptr;
2286 if (!task || !task->sc) {
2287 /* task completed before time out */
2288 ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
2289
2290 spin_unlock(&session->back_lock);
2291 spin_unlock_bh(&session->frwd_lock);
2292 mutex_unlock(&session->eh_mutex);
2293 return SUCCESS;
2294 }
2295
2296 conn = session->leadconn;
2297 iscsi_get_conn(conn->cls_conn);
2298 conn->eh_abort_cnt++;
2299 age = session->age;
2300
2301 ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
2302 __iscsi_get_task(task);
2303 spin_unlock(&session->back_lock);
2304
2305 if (task->state == ISCSI_TASK_PENDING) {
2306 fail_scsi_task(task, DID_ABORT);
2307 goto success;
2308 }
2309
2310 /* only have one tmf outstanding at a time */
2311 if (session->tmf_state != TMF_INITIAL)
2312 goto failed;
2313 session->tmf_state = TMF_QUEUED;
2314
2315 hdr = &session->tmhdr;
2316 iscsi_prep_abort_task_pdu(task, hdr);
2317
2318 if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
2319 goto failed;
2320
2321 switch (session->tmf_state) {
2322 case TMF_SUCCESS:
2323 spin_unlock_bh(&session->frwd_lock);
2324 /*
2325 * stop the tx side in case the target had sent an abort rsp but
2326 * the initiator was still writing out data.
2327 */
2328 iscsi_suspend_tx(conn);
2329 /*
2330 * we do not stop the recv side because targets have been
2331 * good and have never sent us a successful tmf response
2332 * and then sent more data for the cmd.
2333 */
2334 spin_lock_bh(&session->frwd_lock);
2335 fail_scsi_task(task, DID_ABORT);
2336 session->tmf_state = TMF_INITIAL;
2337 memset(hdr, 0, sizeof(*hdr));
2338 spin_unlock_bh(&session->frwd_lock);
2339 iscsi_start_tx(conn);
2340 goto success_unlocked;
2341 case TMF_TIMEDOUT:
2342 session->running_aborted_task = task;
2343 spin_unlock_bh(&session->frwd_lock);
2344 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2345 goto failed_unlocked;
2346 case TMF_NOT_FOUND:
2347 if (iscsi_task_is_completed(task)) {
2348 session->tmf_state = TMF_INITIAL;
2349 memset(hdr, 0, sizeof(*hdr));
2350 /* task completed before tmf abort response */
2351 ISCSI_DBG_EH(session, "sc completed while abort in "
2352 "progress\n");
2353 goto success;
2354 }
2355 fallthrough;
2356 default:
2357 session->tmf_state = TMF_INITIAL;
2358 goto failed;
2359 }
2360
2361success:
2362 spin_unlock_bh(&session->frwd_lock);
2363success_unlocked:
2364 ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
2365 sc, task->itt);
2366 iscsi_put_task(task);
2367 iscsi_put_conn(conn->cls_conn);
2368 mutex_unlock(&session->eh_mutex);
2369 return SUCCESS;
2370
2371failed:
2372 spin_unlock_bh(&session->frwd_lock);
2373failed_unlocked:
2374 ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
2375 task ? task->itt : 0);
2376 /*
2377 * The driver might be accessing the task so hold the ref. The conn
2378 * stop cleanup will drop the ref after ep_disconnect so we know the
2379 * driver's no longer touching the task.
2380 */
2381 if (!session->running_aborted_task)
2382 iscsi_put_task(task);
2383
2384 iscsi_put_conn(conn->cls_conn);
2385 mutex_unlock(&session->eh_mutex);
2386 return FAILED;
2387}
2388EXPORT_SYMBOL_GPL(iscsi_eh_abort);
2389
2390static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2391{
2392 memset(hdr, 0, sizeof(*hdr));
2393 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2394 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2395 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2396 int_to_scsilun(sc->device->lun, &hdr->lun);
2397 hdr->rtt = RESERVED_ITT;
2398}
2399
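/**
 * iscsi_eh_device_reset - send a LOGICAL UNIT RESET TMF
 * @sc: scsi cmd on the LU to be reset
 *
 * On success all tasks outstanding on the LU are failed with DID_ERROR so
 * scsi-ml can retry them. Returns SUCCESS or FAILED.
 */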
2400int iscsi_eh_device_reset(struct scsi_cmnd *sc)
2401{
2402 struct iscsi_cls_session *cls_session;
2403 struct iscsi_session *session;
2404 struct iscsi_conn *conn;
2405 struct iscsi_tm *hdr;
2406 int rc = FAILED;
2407
2408 cls_session = starget_to_session(scsi_target(sc->device));
2409 session = cls_session->dd_data;
2410
2411 ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc,
2412 sc->device->lun);
2413
2414 mutex_lock(&session->eh_mutex);
2415 spin_lock_bh(&session->frwd_lock);
2416 /*
2417 * Just check if we are not logged in. We cannot check for
2418 * the phase because the reset could come from an ioctl.
2419 */
2420 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
2421 goto unlock;
2422 conn = session->leadconn;
2423
2424 /* only have one tmf outstanding at a time */
2425 if (session->tmf_state != TMF_INITIAL)
2426 goto unlock;
2427 session->tmf_state = TMF_QUEUED;
2428
2429 hdr = &session->tmhdr;
2430 iscsi_prep_lun_reset_pdu(sc, hdr);
2431
2432 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
2433 session->lu_reset_timeout)) {
2434 rc = FAILED;
2435 goto unlock;
2436 }
2437
2438 switch (session->tmf_state) {
2439 case TMF_SUCCESS:
2440 break;
2441 case TMF_TIMEDOUT:
2442 spin_unlock_bh(&session->frwd_lock);
2443 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2444 goto done;
2445 default:
2446 session->tmf_state = TMF_INITIAL;
2447 goto unlock;
2448 }
2449
2450 rc = SUCCESS;
2451 spin_unlock_bh(&session->frwd_lock);
2452
2453 iscsi_suspend_tx(conn);
2454
2455 spin_lock_bh(&session->frwd_lock);
2456 memset(hdr, 0, sizeof(*hdr));
2457 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
2458 session->tmf_state = TMF_INITIAL;
2459 spin_unlock_bh(&session->frwd_lock);
2460
2461 iscsi_start_tx(conn);
2462 goto done;
2463
2464unlock:
2465 spin_unlock_bh(&session->frwd_lock);
2466done:
2467 ISCSI_DBG_EH(session, "dev reset result = %s\n",
2468 rc == SUCCESS ? "SUCCESS" : "FAILED");
2469 mutex_unlock(&session->eh_mutex);
2470 return rc;
2471}
2472EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
2473
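/**
 * iscsi_session_recovery_timedout - fail session recovery
 * @cls_session: iscsi session whose recovery/replacement timeout expired
 *
 * If the session has not logged back in, this moves it to
 * ISCSI_STATE_RECOVERY_FAILED and wakes up any eh thread waiting on a
 * relogin.
 */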
2474void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
2475{
2476 struct iscsi_session *session = cls_session->dd_data;
2477
2478 spin_lock_bh(&session->frwd_lock);
2479 if (session->state != ISCSI_STATE_LOGGED_IN) {
2480 session->state = ISCSI_STATE_RECOVERY_FAILED;
2481 wake_up(&session->ehwait);
2482 }
2483 spin_unlock_bh(&session->frwd_lock);
2484}
2485EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
2486
2487/**
2488 * iscsi_eh_session_reset - drop session and attempt relogin
2489 * @sc: scsi command
2490 *
2491 * This function will wait for a relogin, session termination from
2492 * userspace, or a recovery/replacement timeout.
2493 */
2494int iscsi_eh_session_reset(struct scsi_cmnd *sc)
2495{
2496 struct iscsi_cls_session *cls_session;
2497 struct iscsi_session *session;
2498 struct iscsi_conn *conn;
2499
2500 cls_session = starget_to_session(scsi_target(sc->device));
2501 session = cls_session->dd_data;
2502
2503 mutex_lock(&session->eh_mutex);
2504 spin_lock_bh(&session->frwd_lock);
2505 if (session->state == ISCSI_STATE_TERMINATE) {
2506failed:
2507 ISCSI_DBG_EH(session,
2508 "failing session reset: Could not log back into "
2509 "%s [age %d]\n", session->targetname,
2510 session->age);
2511 spin_unlock_bh(&session->frwd_lock);
2512 mutex_unlock(&session->eh_mutex);
2513 return FAILED;
2514 }
2515
2516 conn = session->leadconn;
2517 iscsi_get_conn(conn->cls_conn);
2518
2519 spin_unlock_bh(&session->frwd_lock);
2520 mutex_unlock(&session->eh_mutex);
2521
2522 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2523 iscsi_put_conn(conn->cls_conn);
2524
2525 ISCSI_DBG_EH(session, "wait for relogin\n");
2526 wait_event_interruptible(session->ehwait,
2527 session->state == ISCSI_STATE_TERMINATE ||
2528 session->state == ISCSI_STATE_LOGGED_IN ||
2529 session->state == ISCSI_STATE_RECOVERY_FAILED);
2530 if (signal_pending(current))
2531 flush_signals(current);
2532
2533 mutex_lock(&session->eh_mutex);
2534 spin_lock_bh(&session->frwd_lock);
2535 if (session->state == ISCSI_STATE_LOGGED_IN) {
2536 ISCSI_DBG_EH(session,
2537 "session reset succeeded for %s,%s\n",
2538 session->targetname, conn->persistent_address);
2539 } else
2540 goto failed;
2541 spin_unlock_bh(&session->frwd_lock);
2542 mutex_unlock(&session->eh_mutex);
2543 return SUCCESS;
2544}
2545EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
2546
2547static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2548{
2549 memset(hdr, 0, sizeof(*hdr));
2550 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2551 hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2552 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2553 hdr->rtt = RESERVED_ITT;
2554}
2555
2556/**
2557 * iscsi_eh_target_reset - reset target
2558 * @sc: scsi command
2559 *
2560 * This will attempt to send a warm target reset.
2561 */
2562static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2563{
2564 struct iscsi_cls_session *cls_session;
2565 struct iscsi_session *session;
2566 struct iscsi_conn *conn;
2567 struct iscsi_tm *hdr;
2568 int rc = FAILED;
2569
2570 cls_session = starget_to_session(scsi_target(sc->device));
2571 session = cls_session->dd_data;
2572
2573 ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc,
2574 session->targetname);
2575
2576 mutex_lock(&session->eh_mutex);
2577 spin_lock_bh(&session->frwd_lock);
2578 /*
2579 * Just check if we are not logged in. We cannot check for
2580 * the phase because the reset could come from an ioctl.
2581 */
2582 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
2583 goto unlock;
2584 conn = session->leadconn;
2585
2586 /* only have one tmf outstanding at a time */
2587 if (session->tmf_state != TMF_INITIAL)
2588 goto unlock;
2589 session->tmf_state = TMF_QUEUED;
2590
2591 hdr = &session->tmhdr;
2592 iscsi_prep_tgt_reset_pdu(sc, hdr);
2593
2594 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
2595 session->tgt_reset_timeout)) {
2596 rc = FAILED;
2597 goto unlock;
2598 }
2599
2600 switch (session->tmf_state) {
2601 case TMF_SUCCESS:
2602 break;
2603 case TMF_TIMEDOUT:
2604 spin_unlock_bh(&session->frwd_lock);
2605 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2606 goto done;
2607 default:
2608 session->tmf_state = TMF_INITIAL;
2609 goto unlock;
2610 }
2611
2612 rc = SUCCESS;
2613 spin_unlock_bh(&session->frwd_lock);
2614
2615 iscsi_suspend_tx(conn);
2616
2617 spin_lock_bh(&session->frwd_lock);
2618 memset(hdr, 0, sizeof(*hdr));
2619 fail_scsi_tasks(conn, -1, DID_ERROR);
2620 session->tmf_state = TMF_INITIAL;
2621 spin_unlock_bh(&session->frwd_lock);
2622
2623 iscsi_start_tx(conn);
2624 goto done;
2625
2626unlock:
2627 spin_unlock_bh(&session->frwd_lock);
2628done:
2629 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
2630 rc == SUCCESS ? "SUCCESS" : "FAILED");
2631 mutex_unlock(&session->eh_mutex);
2632 return rc;
2633}
2634
2635/**
2636 * iscsi_eh_recover_target - reset target and possibly the session
2637 * @sc: scsi command
2638 *
2639 * This will attempt to send a warm target reset. If that fails,
2640 * we will escalate to ERL0 session recovery.
2641 */
2642int iscsi_eh_recover_target(struct scsi_cmnd *sc)
2643{
2644 int rc;
2645
2646 rc = iscsi_eh_target_reset(sc);
2647 if (rc == FAILED)
2648 rc = iscsi_eh_session_reset(sc);
2649 return rc;
2650}
2651EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
2652
2653/*
2654 * Pre-allocate a pool of @max items of @item_size. By default, the pool
2655 * should be accessed via kfifo_{get,put} on q->queue.
2656 * Optionally, the caller can obtain the array of object pointers
2657 * by passing in a non-NULL @items pointer
2658 */
2659int
2660iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
2661{
2662 int i, num_arrays = 1;
2663
2664 memset(q, 0, sizeof(*q));
2665
2666 q->max = max;
2667
2668 /* If the caller passed an items pointer, they want a copy of
2669 * the array. */
2670 if (items)
2671 num_arrays++;
2672 q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL);
2673 if (q->pool == NULL)
2674 return -ENOMEM;
2675
2676 kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
2677
2678 for (i = 0; i < max; i++) {
2679 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
2680 if (q->pool[i] == NULL) {
2681 q->max = i;
2682 goto enomem;
2683 }
2684 kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
2685 }
2686
2687 if (items) {
2688 *items = q->pool + max;
2689 memcpy(*items, q->pool, max * sizeof(void *));
2690 }
2691
2692 return 0;
2693
2694enomem:
2695 iscsi_pool_free(q);
2696 return -ENOMEM;
2697}
2698EXPORT_SYMBOL_GPL(iscsi_pool_init);
2699
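/**
 * iscsi_pool_free - free a pool allocated with iscsi_pool_init()
 * @q: pool to free
 */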
2700void iscsi_pool_free(struct iscsi_pool *q)
2701{
2702 int i;
2703
2704 for (i = 0; i < q->max; i++)
2705 kfree(q->pool[i]);
2706 kvfree(q->pool);
2707}
2708EXPORT_SYMBOL_GPL(iscsi_pool_free);
2709
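/**
 * iscsi_host_get_max_scsi_cmds - get cmds available for scsi IO
 * @shost: scsi host
 * @requested_cmds_max: total cmds (mgmt + scsi) requested by the driver
 *
 * Validates the request (power of two, min/max limits, host can_queue)
 * and returns the number of cmds left for scsi IO after reserving
 * ISCSI_MGMT_CMDS_MAX, or -EINVAL if the value cannot be used.
 */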
2710int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
2711 uint16_t requested_cmds_max)
2712{
2713 int scsi_cmds, total_cmds = requested_cmds_max;
2714
2715check:
2716 if (!total_cmds)
2717 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
2718 /*
2719 * The iscsi layer needs some tasks for nop handling and tmfs,
2720 * so cmds_max must be at least ISCSI_MGMT_CMDS_MAX plus 1 command
2721 * for scsi IO.
2722 */
2723 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
2724 printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of two that is at least %d.\n",
2725 total_cmds, ISCSI_TOTAL_CMDS_MIN);
2726 return -EINVAL;
2727 }
2728
2729 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
2730 printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. Using %d.\n",
2731 requested_cmds_max, ISCSI_TOTAL_CMDS_MAX,
2732 ISCSI_TOTAL_CMDS_MAX);
2733 total_cmds = ISCSI_TOTAL_CMDS_MAX;
2734 }
2735
2736 if (!is_power_of_2(total_cmds)) {
2737 total_cmds = rounddown_pow_of_two(total_cmds);
2738 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
2739 printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN);
2740 return -EINVAL;
2741 }
2742
2743 printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n",
2744 requested_cmds_max, total_cmds);
2745 }
2746
2747 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
2748 if (shost->can_queue && scsi_cmds > shost->can_queue) {
2749 total_cmds = shost->can_queue;
2750
2751 printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n",
2752 requested_cmds_max, shost->can_queue);
2753 goto check;
2754 }
2755
2756 return scsi_cmds;
2757}
2758EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds);
2759
2760/**
2761 * iscsi_host_add - add host to system
2762 * @shost: scsi host
2763 * @pdev: parent device
2764 *
2765 * This should be called by partial offload and software iscsi drivers
2766 * to add a host to the system.
2767 */
2768int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
2769{
2770 if (!shost->can_queue)
2771 shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
2772
2773 if (!shost->cmd_per_lun)
2774 shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
2775
2776 return scsi_add_host(shost, pdev);
2777}
2778EXPORT_SYMBOL_GPL(iscsi_host_add);
2779
2780/**
2781 * iscsi_host_alloc - allocate a host and driver data
2782 * @sht: scsi host template
2783 * @dd_data_size: driver host data size
2784 * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue
2785 *
2786 * This should be called by partial offload and software iscsi drivers.
2787 * To access the driver specific memory use the iscsi_host_priv() macro.
2788 */
2789struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
2790 int dd_data_size, bool xmit_can_sleep)
2791{
2792 struct Scsi_Host *shost;
2793 struct iscsi_host *ihost;
2794
2795 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
2796 if (!shost)
2797 return NULL;
2798 ihost = shost_priv(shost);
2799
2800 if (xmit_can_sleep) {
2801 snprintf(ihost->workq_name, sizeof(ihost->workq_name),
2802 "iscsi_q_%d", shost->host_no);
2803 ihost->workq = alloc_workqueue("%s",
2804 WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
2805 1, ihost->workq_name);
2806 if (!ihost->workq)
2807 goto free_host;
2808 }
2809
2810 spin_lock_init(&ihost->lock);
2811 ihost->state = ISCSI_HOST_SETUP;
2812 ihost->num_sessions = 0;
2813 init_waitqueue_head(&ihost->session_removal_wq);
2814 return shost;
2815
2816free_host:
2817 scsi_host_put(shost);
2818 return NULL;
2819}
2820EXPORT_SYMBOL_GPL(iscsi_host_alloc);
2821
2822static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
2823{
2824 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST);
2825}
2826
2827/**
2828 * iscsi_host_remove - remove host and sessions
2829 * @shost: scsi host
2830 *
2831 * If there are any sessions left, this will initiate the removal and wait
2832 * for the completion.
2833 */
2834void iscsi_host_remove(struct Scsi_Host *shost)
2835{
2836 struct iscsi_host *ihost = shost_priv(shost);
2837 unsigned long flags;
2838
2839 spin_lock_irqsave(&ihost->lock, flags);
2840 ihost->state = ISCSI_HOST_REMOVED;
2841 spin_unlock_irqrestore(&ihost->lock, flags);
2842
2843 iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
2844 wait_event_interruptible(ihost->session_removal_wq,
2845 ihost->num_sessions == 0);
2846 if (signal_pending(current))
2847 flush_signals(current);
2848
2849 scsi_remove_host(shost);
2850}
2851EXPORT_SYMBOL_GPL(iscsi_host_remove);
2852
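/**
 * iscsi_host_free - free host allocated with iscsi_host_alloc()
 * @shost: scsi host
 *
 * Destroys the xmit workqueue, frees the host's iscsi strings and drops
 * the final host reference.
 */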
2853void iscsi_host_free(struct Scsi_Host *shost)
2854{
2855 struct iscsi_host *ihost = shost_priv(shost);
2856
2857 if (ihost->workq)
2858 destroy_workqueue(ihost->workq);
2859
2860 kfree(ihost->netdev);
2861 kfree(ihost->hwaddress);
2862 kfree(ihost->initiatorname);
2863 scsi_host_put(shost);
2864}
2865EXPORT_SYMBOL_GPL(iscsi_host_free);
2866
2867static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
2868{
2869 struct iscsi_host *ihost = shost_priv(shost);
2870 unsigned long flags;
2871
2872 shost = scsi_host_get(shost);
2873 if (!shost) {
2874 printk(KERN_ERR "Invalid state. Cannot notify host removal "
2875 "of session teardown event because host already "
2876 "removed.\n");
2877 return;
2878 }
2879
2880 spin_lock_irqsave(&ihost->lock, flags);
2881 ihost->num_sessions--;
2882 if (ihost->num_sessions == 0)
2883 wake_up(&ihost->session_removal_wq);
2884 spin_unlock_irqrestore(&ihost->lock, flags);
2885 scsi_host_put(shost);
2886}
2887
2888/**
2889 * iscsi_session_setup - create an iscsi cls session and session
2890 * @iscsit: iscsi transport template
2891 * @shost: scsi host
2892 * @cmds_max: total cmds (mgmt + scsi) the session can queue
2893 * @dd_size: private driver data size, added to session allocation size
2894 * @cmd_task_size: LLD task private data size
2895 * @initial_cmdsn: initial CmdSN
2896 * @id: target ID to add to this session
2897 *
2898 * This can be used by software iscsi_transports that allocate
2899 * a session per scsi host.
2900 *
2901 * Callers should set cmds_max to the largest total number (mgmt + scsi) of
2902 * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
2903 * for nop handling and login/logout requests.
2904 */
2905struct iscsi_cls_session *
2906iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2907 uint16_t cmds_max, int dd_size, int cmd_task_size,
2908 uint32_t initial_cmdsn, unsigned int id)
2909{
2910 struct iscsi_host *ihost = shost_priv(shost);
2911 struct iscsi_session *session;
2912 struct iscsi_cls_session *cls_session;
2913 int cmd_i, scsi_cmds;
2914 unsigned long flags;
2915
2916 spin_lock_irqsave(&ihost->lock, flags);
2917 if (ihost->state == ISCSI_HOST_REMOVED) {
2918 spin_unlock_irqrestore(&ihost->lock, flags);
2919 return NULL;
2920 }
2921 ihost->num_sessions++;
2922 spin_unlock_irqrestore(&ihost->lock, flags);
2923
2924 scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
2925 if (scsi_cmds < 0)
2926 goto dec_session_count;
2927
2928 cls_session = iscsi_alloc_session(shost, iscsit,
2929 sizeof(struct iscsi_session) +
2930 dd_size);
2931 if (!cls_session)
2932 goto dec_session_count;
2933 session = cls_session->dd_data;
2934 session->cls_session = cls_session;
2935 session->host = shost;
2936 session->state = ISCSI_STATE_FREE;
2937 session->fast_abort = 1;
2938 session->tgt_reset_timeout = 30;
2939 session->lu_reset_timeout = 15;
2940 session->abort_timeout = 10;
2941 session->scsi_cmds_max = scsi_cmds;
2942 session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX;
2943 session->queued_cmdsn = session->cmdsn = initial_cmdsn;
2944 session->exp_cmdsn = initial_cmdsn + 1;
2945 session->max_cmdsn = initial_cmdsn + 1;
2946 session->max_r2t = 1;
2947 session->tt = iscsit;
2948 session->dd_data = cls_session->dd_data + sizeof(*session);
2949
2950 session->tmf_state = TMF_INITIAL;
2951 timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
2952 mutex_init(&session->eh_mutex);
2953
2954 spin_lock_init(&session->frwd_lock);
2955 spin_lock_init(&session->back_lock);
2956
2957 /* initialize SCSI PDU commands pool */
2958 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
2959 (void***)&session->cmds,
2960 cmd_task_size + sizeof(struct iscsi_task)))
2961 goto cmdpool_alloc_fail;
2962
2963 /* pre-format cmds pool with ITT */
2964 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
2965 struct iscsi_task *task = session->cmds[cmd_i];
2966
2967 if (cmd_task_size)
2968 task->dd_data = &task[1];
2969 task->itt = cmd_i;
2970 task->state = ISCSI_TASK_FREE;
2971 INIT_LIST_HEAD(&task->running);
2972 }
2973
2974 if (!try_module_get(iscsit->owner))
2975 goto module_get_fail;
2976
2977 if (iscsi_add_session(cls_session, id))
2978 goto cls_session_fail;
2979
2980 return cls_session;
2981
2982cls_session_fail:
2983 module_put(iscsit->owner);
2984module_get_fail:
2985 iscsi_pool_free(&session->cmdpool);
2986cmdpool_alloc_fail:
2987 iscsi_free_session(cls_session);
2988dec_session_count:
2989 iscsi_host_dec_session_cnt(shost);
2990 return NULL;
2991}
2992EXPORT_SYMBOL_GPL(iscsi_session_setup);
2993
2994/**
2995 * iscsi_session_teardown - destroy session, host, and cls_session
2996 * @cls_session: iscsi session
2997 */
2998void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2999{
3000 struct iscsi_session *session = cls_session->dd_data;
3001 struct module *owner = cls_session->transport->owner;
3002 struct Scsi_Host *shost = session->host;
3003
3004 iscsi_remove_session(cls_session);
3005
3006 iscsi_pool_free(&session->cmdpool);
3007 kfree(session->password);
3008 kfree(session->password_in);
3009 kfree(session->username);
3010 kfree(session->username_in);
3011 kfree(session->targetname);
3012 kfree(session->targetalias);
3013 kfree(session->initiatorname);
3014 kfree(session->boot_root);
3015 kfree(session->boot_nic);
3016 kfree(session->boot_target);
3017 kfree(session->ifacename);
3018 kfree(session->portal_type);
3019 kfree(session->discovery_parent_type);
3020
3021 iscsi_free_session(cls_session);
3022
3023 iscsi_host_dec_session_cnt(shost);
3024 module_put(owner);
3025}
3026EXPORT_SYMBOL_GPL(iscsi_session_teardown);
3027
3028/**
3029 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
3030 * @cls_session: iscsi_cls_session
3031 * @dd_size: private driver data size
3032 * @conn_idx: cid
3033 */
3034struct iscsi_cls_conn *
3035iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
3036 uint32_t conn_idx)
3037{
3038 struct iscsi_session *session = cls_session->dd_data;
3039 struct iscsi_conn *conn;
3040 struct iscsi_cls_conn *cls_conn;
3041 char *data;
3042
3043 cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
3044 conn_idx);
3045 if (!cls_conn)
3046 return NULL;
3047 conn = cls_conn->dd_data;
3048 memset(conn, 0, sizeof(*conn) + dd_size);
3049
3050 conn->dd_data = cls_conn->dd_data + sizeof(*conn);
3051 conn->session = session;
3052 conn->cls_conn = cls_conn;
3053 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
3054 conn->id = conn_idx;
3055 conn->exp_statsn = 0;
3056
3057 timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
3058
3059 INIT_LIST_HEAD(&conn->mgmtqueue);
3060 INIT_LIST_HEAD(&conn->cmdqueue);
3061 INIT_LIST_HEAD(&conn->requeue);
3062 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
3063
3064 /* allocate login_task used for the login/text sequences */
3065 spin_lock_bh(&session->frwd_lock);
3066 if (!kfifo_out(&session->cmdpool.queue,
3067 (void*)&conn->login_task,
3068 sizeof(void*))) {
3069 spin_unlock_bh(&session->frwd_lock);
3070 goto login_task_alloc_fail;
3071 }
3072 spin_unlock_bh(&session->frwd_lock);
3073
3074 data = (char *) __get_free_pages(GFP_KERNEL,
3075 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
3076 if (!data)
3077 goto login_task_data_alloc_fail;
3078 conn->login_task->data = conn->data = data;
3079
3080 init_waitqueue_head(&session->ehwait);
3081
3082 return cls_conn;
3083
3084login_task_data_alloc_fail:
3085 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
3086 sizeof(void*));
3087login_task_alloc_fail:
3088 iscsi_destroy_conn(cls_conn);
3089 return NULL;
3090}
3091EXPORT_SYMBOL_GPL(iscsi_conn_setup);
3092
3093/**
3094 * iscsi_conn_teardown - teardown iscsi connection
3095 * @cls_conn: iscsi class connection
3096 *
3097 * TODO: we may need to make this into a two step process
3098 * like scsi-mls remove + put host
3099 */
3100void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
3101{
3102 struct iscsi_conn *conn = cls_conn->dd_data;
3103 struct iscsi_session *session = conn->session;
3104
3105 del_timer_sync(&conn->transport_timer);
3106
3107 mutex_lock(&session->eh_mutex);
3108 spin_lock_bh(&session->frwd_lock);
3109 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
3110 if (session->leadconn == conn) {
3111 /*
3112 * leading connection? then give up on recovery.
3113 */
3114 session->state = ISCSI_STATE_TERMINATE;
3115 wake_up(&session->ehwait);
3116 }
3117 spin_unlock_bh(&session->frwd_lock);
3118
3119 /* flush queued up work because we free the connection below */
3120 iscsi_suspend_tx(conn);
3121
3122 spin_lock_bh(&session->frwd_lock);
3123 free_pages((unsigned long) conn->data,
3124 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
3125 kfree(conn->persistent_address);
3126 kfree(conn->local_ipaddr);
3127 /* regular RX path uses back_lock */
3128 spin_lock_bh(&session->back_lock);
3129 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
3130 sizeof(void*));
3131 spin_unlock_bh(&session->back_lock);
3132 if (session->leadconn == conn)
3133 session->leadconn = NULL;
3134 spin_unlock_bh(&session->frwd_lock);
3135 mutex_unlock(&session->eh_mutex);
3136
3137 iscsi_destroy_conn(cls_conn);
3138}
3139EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
3140
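/**
 * iscsi_conn_start - transition the connection to full feature phase
 * @cls_conn: iscsi class connection that has completed login
 *
 * Validates the negotiated parameters, marks the session logged in, arms
 * the transport timer and unblocks the session so IO can be queued again.
 */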
3141int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
3142{
3143 struct iscsi_conn *conn = cls_conn->dd_data;
3144 struct iscsi_session *session = conn->session;
3145
3146 if (!session) {
3147 iscsi_conn_printk(KERN_ERR, conn,
3148 "can't start unbound connection\n");
3149 return -EPERM;
3150 }
3151
3152 if ((session->imm_data_en || !session->initial_r2t_en) &&
3153 session->first_burst > session->max_burst) {
3154 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
3155 "first_burst %d max_burst %d\n",
3156 session->first_burst, session->max_burst);
3157 return -EINVAL;
3158 }
3159
3160 if (conn->ping_timeout && !conn->recv_timeout) {
3161 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
3162 "zero. Using 5 seconds\n.");
3163 conn->recv_timeout = 5;
3164 }
3165
3166 if (conn->recv_timeout && !conn->ping_timeout) {
3167 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
3168 "zero. Using 5 seconds.\n");
3169 conn->ping_timeout = 5;
3170 }
3171
3172 spin_lock_bh(&session->frwd_lock);
3173 conn->c_stage = ISCSI_CONN_STARTED;
3174 session->state = ISCSI_STATE_LOGGED_IN;
3175 session->queued_cmdsn = session->cmdsn;
3176
3177 conn->last_recv = jiffies;
3178 conn->last_ping = jiffies;
3179 if (conn->recv_timeout && conn->ping_timeout)
3180 mod_timer(&conn->transport_timer,
3181 jiffies + (conn->recv_timeout * HZ));
3182
3183 switch(conn->stop_stage) {
3184 case STOP_CONN_RECOVER:
3185 /*
3186 * unblock eh_abort() if it is blocked. re-try all
3187 * commands after successful recovery
3188 */
3189 conn->stop_stage = 0;
3190 session->tmf_state = TMF_INITIAL;
3191 session->age++;
3192 if (session->age == 16)
3193 session->age = 0;
3194 break;
3195 case STOP_CONN_TERM:
3196 conn->stop_stage = 0;
3197 break;
3198 default:
3199 break;
3200 }
3201 spin_unlock_bh(&session->frwd_lock);
3202
3203 iscsi_unblock_session(session->cls_session);
3204 wake_up(&session->ehwait);
3205 return 0;
3206}
3207EXPORT_SYMBOL_GPL(iscsi_conn_start);
3208
3209static void
3210fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
3211{
3212 struct iscsi_task *task;
3213 int i, state;
3214
3215 for (i = 0; i < conn->session->cmds_max; i++) {
3216 task = conn->session->cmds[i];
3217 if (task->sc)
3218 continue;
3219
3220 if (task->state == ISCSI_TASK_FREE)
3221 continue;
3222
3223 ISCSI_DBG_SESSION(conn->session,
3224 "failing mgmt itt 0x%x state %d\n",
3225 task->itt, task->state);
3226
3227 spin_lock_bh(&session->back_lock);
3228 if (cleanup_queued_task(task)) {
3229 spin_unlock_bh(&session->back_lock);
3230 continue;
3231 }
3232
3233 state = ISCSI_TASK_ABRT_SESS_RECOV;
3234 if (task->state == ISCSI_TASK_PENDING)
3235 state = ISCSI_TASK_COMPLETED;
3236 iscsi_complete_task(task, state);
3237 spin_unlock_bh(&session->back_lock);
3238 }
3239}
3240
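/**
 * iscsi_conn_stop - stop a connection for recovery or termination
 * @cls_conn: iscsi class connection
 * @flag: STOP_CONN_RECOVER or STOP_CONN_TERM
 *
 * Suspends the xmit path and fails all outstanding scsi and mgmt tasks.
 * For recovery the session is also blocked until a relogin completes.
 */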
3241void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
3242{
3243 struct iscsi_conn *conn = cls_conn->dd_data;
3244 struct iscsi_session *session = conn->session;
3245 int old_stop_stage;
3246
3247 mutex_lock(&session->eh_mutex);
3248 spin_lock_bh(&session->frwd_lock);
3249 if (conn->stop_stage == STOP_CONN_TERM) {
3250 spin_unlock_bh(&session->frwd_lock);
3251 mutex_unlock(&session->eh_mutex);
3252 return;
3253 }
3254
3255 /*
3256 * When this is called for the in_login state, we only want to clean
3257 * up the login task and connection. We do not need to block and set
3258 * the recovery state again
3259 */
3260 if (flag == STOP_CONN_TERM)
3261 session->state = ISCSI_STATE_TERMINATE;
3262 else if (conn->stop_stage != STOP_CONN_RECOVER)
3263 session->state = ISCSI_STATE_IN_RECOVERY;
3264
3265 old_stop_stage = conn->stop_stage;
3266 conn->stop_stage = flag;
3267 spin_unlock_bh(&session->frwd_lock);
3268
3269 del_timer_sync(&conn->transport_timer);
3270 iscsi_suspend_tx(conn);
3271
3272 spin_lock_bh(&session->frwd_lock);
3273 conn->c_stage = ISCSI_CONN_STOPPED;
3274 spin_unlock_bh(&session->frwd_lock);
3275
3276 /*
3277 * for connection level recovery we should not calculate
3278 * header digest. conn->hdr_size used for optimization
3279 * in hdr_extract() and will be re-negotiated at
3280 * set_param() time.
3281 */
3282 if (flag == STOP_CONN_RECOVER) {
3283 conn->hdrdgst_en = 0;
3284 conn->datadgst_en = 0;
3285 if (session->state == ISCSI_STATE_IN_RECOVERY &&
3286 old_stop_stage != STOP_CONN_RECOVER) {
3287 ISCSI_DBG_SESSION(session, "blocking session\n");
3288 iscsi_block_session(session->cls_session);
3289 }
3290 }
3291
3292 /*
3293 * flush queues.
3294 */
3295 spin_lock_bh(&session->frwd_lock);
3296 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
3297 fail_mgmt_tasks(session, conn);
3298 memset(&session->tmhdr, 0, sizeof(session->tmhdr));
3299 spin_unlock_bh(&session->frwd_lock);
3300 mutex_unlock(&session->eh_mutex);
3301}
3302EXPORT_SYMBOL_GPL(iscsi_conn_stop);
3303
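/**
 * iscsi_conn_bind - bind a connection to its session
 * @cls_session: iscsi class session
 * @cls_conn: iscsi class connection
 * @is_leading: non-zero if this is the session's leading connection
 *
 * Resets the cmdsn window and clears the suspend bits so the login phase
 * can proceed over this connection.
 */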
3304int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
3305 struct iscsi_cls_conn *cls_conn, int is_leading)
3306{
3307 struct iscsi_session *session = cls_session->dd_data;
3308 struct iscsi_conn *conn = cls_conn->dd_data;
3309
3310 spin_lock_bh(&session->frwd_lock);
3311 if (is_leading)
3312 session->leadconn = conn;
3313 spin_unlock_bh(&session->frwd_lock);
3314
3315 /*
3316 * The target could have reduced its window size between logins, so
3317 * we have to reset max/exp cmdsn so we can see the new values.
3318 */
3319 spin_lock_bh(&session->back_lock);
3320 session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1;
3321 spin_unlock_bh(&session->back_lock);
3322 /*
3323 * Unblock xmitworker() so the Login Phase will pass through.
3324 */
3325 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
3326 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
3327 return 0;
3328}
3329EXPORT_SYMBOL_GPL(iscsi_conn_bind);
3330
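/**
 * iscsi_switch_str_param - replace a string parameter
 * @param: address of the current string, freed if it is replaced
 * @new_val_buf: new value to duplicate
 *
 * Does nothing if the new value matches the current one. Returns 0 on
 * success and -ENOMEM if the new string could not be allocated.
 */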
3331int iscsi_switch_str_param(char **param, char *new_val_buf)
3332{
3333 char *new_val;
3334
3335 if (*param) {
3336 if (!strcmp(*param, new_val_buf))
3337 return 0;
3338 }
3339
3340 new_val = kstrdup(new_val_buf, GFP_NOIO);
3341 if (!new_val)
3342 return -ENOMEM;
3343
3344 kfree(*param);
3345 *param = new_val;
3346 return 0;
3347}
3348EXPORT_SYMBOL_GPL(iscsi_switch_str_param);
3349
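/**
 * iscsi_set_param - set an iscsi connection or session parameter
 * @cls_conn: iscsi class connection
 * @param: parameter to set
 * @buf: string value from userspace
 * @buflen: length of @buf
 *
 * Returns 0 on success, -ENOSYS for parameters not handled by libiscsi,
 * or a negative errno if a string value could not be duplicated.
 */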
3350int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3351 enum iscsi_param param, char *buf, int buflen)
3352{
3353 struct iscsi_conn *conn = cls_conn->dd_data;
3354 struct iscsi_session *session = conn->session;
3355 int val;
3356
3357 switch(param) {
3358 case ISCSI_PARAM_FAST_ABORT:
3359 sscanf(buf, "%d", &session->fast_abort);
3360 break;
3361 case ISCSI_PARAM_ABORT_TMO:
3362 sscanf(buf, "%d", &session->abort_timeout);
3363 break;
3364 case ISCSI_PARAM_LU_RESET_TMO:
3365 sscanf(buf, "%d", &session->lu_reset_timeout);
3366 break;
3367 case ISCSI_PARAM_TGT_RESET_TMO:
3368 sscanf(buf, "%d", &session->tgt_reset_timeout);
3369 break;
3370 case ISCSI_PARAM_PING_TMO:
3371 sscanf(buf, "%d", &conn->ping_timeout);
3372 break;
3373 case ISCSI_PARAM_RECV_TMO:
3374 sscanf(buf, "%d", &conn->recv_timeout);
3375 break;
3376 case ISCSI_PARAM_MAX_RECV_DLENGTH:
3377 sscanf(buf, "%d", &conn->max_recv_dlength);
3378 break;
3379 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3380 sscanf(buf, "%d", &conn->max_xmit_dlength);
3381 break;
3382 case ISCSI_PARAM_HDRDGST_EN:
3383 sscanf(buf, "%d", &conn->hdrdgst_en);
3384 break;
3385 case ISCSI_PARAM_DATADGST_EN:
3386 sscanf(buf, "%d", &conn->datadgst_en);
3387 break;
3388 case ISCSI_PARAM_INITIAL_R2T_EN:
3389 sscanf(buf, "%d", &session->initial_r2t_en);
3390 break;
3391 case ISCSI_PARAM_MAX_R2T:
3392 sscanf(buf, "%hu", &session->max_r2t);
3393 break;
3394 case ISCSI_PARAM_IMM_DATA_EN:
3395 sscanf(buf, "%d", &session->imm_data_en);
3396 break;
3397 case ISCSI_PARAM_FIRST_BURST:
3398 sscanf(buf, "%d", &session->first_burst);
3399 break;
3400 case ISCSI_PARAM_MAX_BURST:
3401 sscanf(buf, "%d", &session->max_burst);
3402 break;
3403 case ISCSI_PARAM_PDU_INORDER_EN:
3404 sscanf(buf, "%d", &session->pdu_inorder_en);
3405 break;
3406 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3407 sscanf(buf, "%d", &session->dataseq_inorder_en);
3408 break;
3409 case ISCSI_PARAM_ERL:
3410 sscanf(buf, "%d", &session->erl);
3411 break;
3412 case ISCSI_PARAM_EXP_STATSN:
3413 sscanf(buf, "%u", &conn->exp_statsn);
3414 break;
3415 case ISCSI_PARAM_USERNAME:
3416 return iscsi_switch_str_param(&session->username, buf);
3417 case ISCSI_PARAM_USERNAME_IN:
3418 return iscsi_switch_str_param(&session->username_in, buf);
3419 case ISCSI_PARAM_PASSWORD:
3420 return iscsi_switch_str_param(&session->password, buf);
3421 case ISCSI_PARAM_PASSWORD_IN:
3422 return iscsi_switch_str_param(&session->password_in, buf);
3423 case ISCSI_PARAM_TARGET_NAME:
3424 return iscsi_switch_str_param(&session->targetname, buf);
3425 case ISCSI_PARAM_TARGET_ALIAS:
3426 return iscsi_switch_str_param(&session->targetalias, buf);
3427 case ISCSI_PARAM_TPGT:
3428 sscanf(buf, "%d", &session->tpgt);
3429 break;
3430 case ISCSI_PARAM_PERSISTENT_PORT:
3431 sscanf(buf, "%d", &conn->persistent_port);
3432 break;
3433 case ISCSI_PARAM_PERSISTENT_ADDRESS:
3434 return iscsi_switch_str_param(&conn->persistent_address, buf);
3435 case ISCSI_PARAM_IFACE_NAME:
3436 return iscsi_switch_str_param(&session->ifacename, buf);
3437 case ISCSI_PARAM_INITIATOR_NAME:
3438 return iscsi_switch_str_param(&session->initiatorname, buf);
3439 case ISCSI_PARAM_BOOT_ROOT:
3440 return iscsi_switch_str_param(&session->boot_root, buf);
3441 case ISCSI_PARAM_BOOT_NIC:
3442 return iscsi_switch_str_param(&session->boot_nic, buf);
3443 case ISCSI_PARAM_BOOT_TARGET:
3444 return iscsi_switch_str_param(&session->boot_target, buf);
3445 case ISCSI_PARAM_PORTAL_TYPE:
3446 return iscsi_switch_str_param(&session->portal_type, buf);
3447 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
3448 return iscsi_switch_str_param(&session->discovery_parent_type,
3449 buf);
3450 case ISCSI_PARAM_DISCOVERY_SESS:
3451 sscanf(buf, "%d", &val);
3452 session->discovery_sess = !!val;
3453 break;
3454 case ISCSI_PARAM_LOCAL_IPADDR:
3455 return iscsi_switch_str_param(&conn->local_ipaddr, buf);
3456 default:
3457 return -ENOSYS;
3458 }
3459
3460 return 0;
3461}
3462EXPORT_SYMBOL_GPL(iscsi_set_param);
3463
3464int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3465 enum iscsi_param param, char *buf)
3466{
3467 struct iscsi_session *session = cls_session->dd_data;
3468 int len;
3469
3470 switch(param) {
3471 case ISCSI_PARAM_FAST_ABORT:
3472 len = sysfs_emit(buf, "%d\n", session->fast_abort);
3473 break;
3474 case ISCSI_PARAM_ABORT_TMO:
3475 len = sysfs_emit(buf, "%d\n", session->abort_timeout);
3476 break;
3477 case ISCSI_PARAM_LU_RESET_TMO:
3478 len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout);
3479 break;
3480 case ISCSI_PARAM_TGT_RESET_TMO:
3481 len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout);
3482 break;
3483 case ISCSI_PARAM_INITIAL_R2T_EN:
3484 len = sysfs_emit(buf, "%d\n", session->initial_r2t_en);
3485 break;
3486 case ISCSI_PARAM_MAX_R2T:
3487 len = sysfs_emit(buf, "%hu\n", session->max_r2t);
3488 break;
3489 case ISCSI_PARAM_IMM_DATA_EN:
3490 len = sysfs_emit(buf, "%d\n", session->imm_data_en);
3491 break;
3492 case ISCSI_PARAM_FIRST_BURST:
3493 len = sysfs_emit(buf, "%u\n", session->first_burst);
3494 break;
3495 case ISCSI_PARAM_MAX_BURST:
3496 len = sysfs_emit(buf, "%u\n", session->max_burst);
3497 break;
3498 case ISCSI_PARAM_PDU_INORDER_EN:
3499 len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en);
3500 break;
3501 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3502 len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en);
3503 break;
3504 case ISCSI_PARAM_DEF_TASKMGMT_TMO:
3505 len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo);
3506 break;
3507 case ISCSI_PARAM_ERL:
3508 len = sysfs_emit(buf, "%d\n", session->erl);
3509 break;
3510 case ISCSI_PARAM_TARGET_NAME:
3511 len = sysfs_emit(buf, "%s\n", session->targetname);
3512 break;
3513 case ISCSI_PARAM_TARGET_ALIAS:
3514 len = sysfs_emit(buf, "%s\n", session->targetalias);
3515 break;
3516 case ISCSI_PARAM_TPGT:
3517 len = sysfs_emit(buf, "%d\n", session->tpgt);
3518 break;
3519 case ISCSI_PARAM_USERNAME:
3520 len = sysfs_emit(buf, "%s\n", session->username);
3521 break;
3522 case ISCSI_PARAM_USERNAME_IN:
3523 len = sysfs_emit(buf, "%s\n", session->username_in);
3524 break;
3525 case ISCSI_PARAM_PASSWORD:
3526 len = sysfs_emit(buf, "%s\n", session->password);
3527 break;
3528 case ISCSI_PARAM_PASSWORD_IN:
3529 len = sysfs_emit(buf, "%s\n", session->password_in);
3530 break;
3531 case ISCSI_PARAM_IFACE_NAME:
3532 len = sysfs_emit(buf, "%s\n", session->ifacename);
3533 break;
3534 case ISCSI_PARAM_INITIATOR_NAME:
3535 len = sysfs_emit(buf, "%s\n", session->initiatorname);
3536 break;
3537 case ISCSI_PARAM_BOOT_ROOT:
3538 len = sysfs_emit(buf, "%s\n", session->boot_root);
3539 break;
3540 case ISCSI_PARAM_BOOT_NIC:
3541 len = sysfs_emit(buf, "%s\n", session->boot_nic);
3542 break;
3543 case ISCSI_PARAM_BOOT_TARGET:
3544 len = sysfs_emit(buf, "%s\n", session->boot_target);
3545 break;
3546 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
3547 len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable);
3548 break;
3549 case ISCSI_PARAM_DISCOVERY_SESS:
3550 len = sysfs_emit(buf, "%u\n", session->discovery_sess);
3551 break;
3552 case ISCSI_PARAM_PORTAL_TYPE:
3553 len = sysfs_emit(buf, "%s\n", session->portal_type);
3554 break;
3555 case ISCSI_PARAM_CHAP_AUTH_EN:
3556 len = sysfs_emit(buf, "%u\n", session->chap_auth_en);
3557 break;
3558 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
3559 len = sysfs_emit(buf, "%u\n", session->discovery_logout_en);
3560 break;
3561 case ISCSI_PARAM_BIDI_CHAP_EN:
3562 len = sysfs_emit(buf, "%u\n", session->bidi_chap_en);
3563 break;
3564 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
3565 len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional);
3566 break;
3567 case ISCSI_PARAM_DEF_TIME2WAIT:
3568 len = sysfs_emit(buf, "%d\n", session->time2wait);
3569 break;
3570 case ISCSI_PARAM_DEF_TIME2RETAIN:
3571 len = sysfs_emit(buf, "%d\n", session->time2retain);
3572 break;
3573 case ISCSI_PARAM_TSID:
3574 len = sysfs_emit(buf, "%u\n", session->tsid);
3575 break;
3576 case ISCSI_PARAM_ISID:
3577 len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
3578 session->isid[0], session->isid[1],
3579 session->isid[2], session->isid[3],
3580 session->isid[4], session->isid[5]);
3581 break;
3582 case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
3583 len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx);
3584 break;
3585 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
3586 if (session->discovery_parent_type)
3587 len = sysfs_emit(buf, "%s\n",
3588 session->discovery_parent_type);
3589 else
3590 len = sysfs_emit(buf, "\n");
3591 break;
3592 default:
3593 return -ENOSYS;
3594 }
3595
3596 return len;
3597}
3598EXPORT_SYMBOL_GPL(iscsi_session_get_param);
3599
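/**
 * iscsi_conn_get_addr_param - print an address or port from a sockaddr
 * @addr: sockaddr storage holding an IPv4 or IPv6 address
 * @param: address or port parameter to print
 * @buf: sysfs buffer to print into
 *
 * Returns the number of bytes written or -EINVAL for an unsupported
 * address family or parameter.
 */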
3600int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
3601 enum iscsi_param param, char *buf)
3602{
3603 struct sockaddr_in6 *sin6 = NULL;
3604 struct sockaddr_in *sin = NULL;
3605 int len;
3606
3607 switch (addr->ss_family) {
3608 case AF_INET:
3609 sin = (struct sockaddr_in *)addr;
3610 break;
3611 case AF_INET6:
3612 sin6 = (struct sockaddr_in6 *)addr;
3613 break;
3614 default:
3615 return -EINVAL;
3616 }
3617
3618 switch (param) {
3619 case ISCSI_PARAM_CONN_ADDRESS:
3620 case ISCSI_HOST_PARAM_IPADDRESS:
3621 if (sin)
3622 len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr);
3623 else
3624 len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr);
3625 break;
3626 case ISCSI_PARAM_CONN_PORT:
3627 case ISCSI_PARAM_LOCAL_PORT:
3628 if (sin)
3629 len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port));
3630 else
3631 len = sysfs_emit(buf, "%hu\n",
3632 be16_to_cpu(sin6->sin6_port));
3633 break;
3634 default:
3635 return -EINVAL;
3636 }
3637
3638 return len;
3639}
3640EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
3641
3642int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
3643 enum iscsi_param param, char *buf)
3644{
3645 struct iscsi_conn *conn = cls_conn->dd_data;
3646 int len;
3647
3648 switch(param) {
3649 case ISCSI_PARAM_PING_TMO:
3650 len = sysfs_emit(buf, "%u\n", conn->ping_timeout);
3651 break;
3652 case ISCSI_PARAM_RECV_TMO:
3653 len = sysfs_emit(buf, "%u\n", conn->recv_timeout);
3654 break;
3655 case ISCSI_PARAM_MAX_RECV_DLENGTH:
3656 len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength);
3657 break;
3658 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3659 len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength);
3660 break;
3661 case ISCSI_PARAM_HDRDGST_EN:
3662 len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en);
3663 break;
3664 case ISCSI_PARAM_DATADGST_EN:
3665 len = sysfs_emit(buf, "%d\n", conn->datadgst_en);
3666 break;
3667 case ISCSI_PARAM_IFMARKER_EN:
3668 len = sysfs_emit(buf, "%d\n", conn->ifmarker_en);
3669 break;
3670 case ISCSI_PARAM_OFMARKER_EN:
3671 len = sysfs_emit(buf, "%d\n", conn->ofmarker_en);
3672 break;
3673 case ISCSI_PARAM_EXP_STATSN:
3674 len = sysfs_emit(buf, "%u\n", conn->exp_statsn);
3675 break;
3676 case ISCSI_PARAM_PERSISTENT_PORT:
3677 len = sysfs_emit(buf, "%d\n", conn->persistent_port);
3678 break;
3679 case ISCSI_PARAM_PERSISTENT_ADDRESS:
3680 len = sysfs_emit(buf, "%s\n", conn->persistent_address);
3681 break;
3682 case ISCSI_PARAM_STATSN:
3683 len = sysfs_emit(buf, "%u\n", conn->statsn);
3684 break;
3685 case ISCSI_PARAM_MAX_SEGMENT_SIZE:
3686 len = sysfs_emit(buf, "%u\n", conn->max_segment_size);
3687 break;
3688 case ISCSI_PARAM_KEEPALIVE_TMO:
3689 len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo);
3690 break;
3691 case ISCSI_PARAM_LOCAL_PORT:
3692 len = sysfs_emit(buf, "%u\n", conn->local_port);
3693 break;
3694 case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
3695 len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat);
3696 break;
3697 case ISCSI_PARAM_TCP_NAGLE_DISABLE:
3698 len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable);
3699 break;
3700 case ISCSI_PARAM_TCP_WSF_DISABLE:
3701 len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable);
3702 break;
3703 case ISCSI_PARAM_TCP_TIMER_SCALE:
3704 len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale);
3705 break;
3706 case ISCSI_PARAM_TCP_TIMESTAMP_EN:
3707 len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en);
3708 break;
3709 case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
3710 len = sysfs_emit(buf, "%u\n", conn->fragment_disable);
3711 break;
3712 case ISCSI_PARAM_IPV4_TOS:
3713 len = sysfs_emit(buf, "%u\n", conn->ipv4_tos);
3714 break;
3715 case ISCSI_PARAM_IPV6_TC:
3716 len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class);
3717 break;
3718 case ISCSI_PARAM_IPV6_FLOW_LABEL:
3719 len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label);
3720 break;
3721 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
3722 len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6);
3723 break;
3724 case ISCSI_PARAM_TCP_XMIT_WSF:
3725 len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf);
3726 break;
3727 case ISCSI_PARAM_TCP_RECV_WSF:
3728 len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf);
3729 break;
3730 case ISCSI_PARAM_LOCAL_IPADDR:
3731 len = sysfs_emit(buf, "%s\n", conn->local_ipaddr);
3732 break;
3733 default:
3734 return -ENOSYS;
3735 }
3736
3737 return len;
3738}
3739EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
3740
int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf)
{
	struct iscsi_host *ihost = shost_priv(shost);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sysfs_emit(buf, "%s\n", ihost->netdev);
		break;
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_emit(buf, "%s\n", ihost->hwaddress);
		break;
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		len = sysfs_emit(buf, "%s\n", ihost->initiatorname);
		break;
	default:
		return -ENOSYS;
	}

	return len;
}
EXPORT_SYMBOL_GPL(iscsi_host_get_param);

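/**
 * iscsi_host_set_param - update an iSCSI host string parameter
 * @shost: scsi host
 * @param: host parameter to set
 * @buf: new value for the parameter
 * @buflen: length of @buf (not used by this function)
 *
 * Swaps the stored netdev name, hardware address or initiator name for the
 * string in @buf via iscsi_switch_str_param(). Returns 0 on success, a
 * negative errno on failure, or -ENOSYS for parameters not handled here.
 */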
int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf, int buflen)
{
	struct iscsi_host *ihost = shost_priv(shost);

	switch (param) {
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return iscsi_switch_str_param(&ihost->netdev, buf);
	case ISCSI_HOST_PARAM_HWADDRESS:
		return iscsi_switch_str_param(&ihost->hwaddress, buf);
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		return iscsi_switch_str_param(&ihost->initiatorname, buf);
	default:
		return -ENOSYS;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_host_set_param);

MODULE_AUTHOR("Mike Christie");
MODULE_DESCRIPTION("iSCSI library functions");
MODULE_LICENSE("GPL");
1/*
2 * iSCSI lib functions
3 *
4 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
5 * Copyright (C) 2004 - 2006 Mike Christie
6 * Copyright (C) 2004 - 2005 Dmitry Yusupov
7 * Copyright (C) 2004 - 2005 Alex Aizman
8 * maintained by open-iscsi@googlegroups.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24#include <linux/types.h>
25#include <linux/kfifo.h>
26#include <linux/delay.h>
27#include <linux/log2.h>
28#include <linux/slab.h>
29#include <asm/unaligned.h>
30#include <net/tcp.h>
31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_eh.h>
34#include <scsi/scsi_tcq.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi.h>
37#include <scsi/iscsi_proto.h>
38#include <scsi/scsi_transport.h>
39#include <scsi/scsi_transport_iscsi.h>
40#include <scsi/libiscsi.h>
41
42static int iscsi_dbg_lib_conn;
43module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
44 S_IRUGO | S_IWUSR);
45MODULE_PARM_DESC(debug_libiscsi_conn,
46 "Turn on debugging for connections in libiscsi module. "
47 "Set to 1 to turn on, and zero to turn off. Default is off.");
48
49static int iscsi_dbg_lib_session;
50module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
51 S_IRUGO | S_IWUSR);
52MODULE_PARM_DESC(debug_libiscsi_session,
53 "Turn on debugging for sessions in libiscsi module. "
54 "Set to 1 to turn on, and zero to turn off. Default is off.");
55
56static int iscsi_dbg_lib_eh;
57module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
58 S_IRUGO | S_IWUSR);
59MODULE_PARM_DESC(debug_libiscsi_eh,
60 "Turn on debugging for error handling in libiscsi module. "
61 "Set to 1 to turn on, and zero to turn off. Default is off.");
62
63#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \
64 do { \
65 if (iscsi_dbg_lib_conn) \
66 iscsi_conn_printk(KERN_INFO, _conn, \
67 "%s " dbg_fmt, \
68 __func__, ##arg); \
69 } while (0);
70
71#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
72 do { \
73 if (iscsi_dbg_lib_session) \
74 iscsi_session_printk(KERN_INFO, _session, \
75 "%s " dbg_fmt, \
76 __func__, ##arg); \
77 } while (0);
78
79#define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \
80 do { \
81 if (iscsi_dbg_lib_eh) \
82 iscsi_session_printk(KERN_INFO, _session, \
83 "%s " dbg_fmt, \
84 __func__, ##arg); \
85 } while (0);
86
87inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
88{
89 struct Scsi_Host *shost = conn->session->host;
90 struct iscsi_host *ihost = shost_priv(shost);
91
92 if (ihost->workq)
93 queue_work(ihost->workq, &conn->xmitwork);
94}
95EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
96
97static void __iscsi_update_cmdsn(struct iscsi_session *session,
98 uint32_t exp_cmdsn, uint32_t max_cmdsn)
99{
100 /*
101 * standard specifies this check for when to update expected and
102 * max sequence numbers
103 */
104 if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
105 return;
106
107 if (exp_cmdsn != session->exp_cmdsn &&
108 !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
109 session->exp_cmdsn = exp_cmdsn;
110
111 if (max_cmdsn != session->max_cmdsn &&
112 !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
113 session->max_cmdsn = max_cmdsn;
114 /*
115 * if the window closed with IO queued, then kick the
116 * xmit thread
117 */
118 if (!list_empty(&session->leadconn->cmdqueue) ||
119 !list_empty(&session->leadconn->mgmtqueue))
120 iscsi_conn_queue_work(session->leadconn);
121 }
122}
123
124void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
125{
126 __iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
127 be32_to_cpu(hdr->max_cmdsn));
128}
129EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
130
131/**
132 * iscsi_prep_data_out_pdu - initialize Data-Out
133 * @task: scsi command task
134 * @r2t: R2T info
135 * @hdr: iscsi data in pdu
136 *
137 * Notes:
138 * Initialize Data-Out within this R2T sequence and finds
139 * proper data_offset within this SCSI command.
140 *
141 * This function is called with connection lock taken.
142 **/
143void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
144 struct iscsi_data *hdr)
145{
146 struct iscsi_conn *conn = task->conn;
147 unsigned int left = r2t->data_length - r2t->sent;
148
149 task->hdr_len = sizeof(struct iscsi_data);
150
151 memset(hdr, 0, sizeof(struct iscsi_data));
152 hdr->ttt = r2t->ttt;
153 hdr->datasn = cpu_to_be32(r2t->datasn);
154 r2t->datasn++;
155 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
156 hdr->lun = task->lun;
157 hdr->itt = task->hdr_itt;
158 hdr->exp_statsn = r2t->exp_statsn;
159 hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
160 if (left > conn->max_xmit_dlength) {
161 hton24(hdr->dlength, conn->max_xmit_dlength);
162 r2t->data_count = conn->max_xmit_dlength;
163 hdr->flags = 0;
164 } else {
165 hton24(hdr->dlength, left);
166 r2t->data_count = left;
167 hdr->flags = ISCSI_FLAG_CMD_FINAL;
168 }
169 conn->dataout_pdus_cnt++;
170}
171EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
172
173static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
174{
175 unsigned exp_len = task->hdr_len + len;
176
177 if (exp_len > task->hdr_max) {
178 WARN_ON(1);
179 return -EINVAL;
180 }
181
182 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
183 task->hdr_len = exp_len;
184 return 0;
185}
186
187/*
188 * make an extended cdb AHS
189 */
190static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
191{
192 struct scsi_cmnd *cmd = task->sc;
193 unsigned rlen, pad_len;
194 unsigned short ahslength;
195 struct iscsi_ecdb_ahdr *ecdb_ahdr;
196 int rc;
197
198 ecdb_ahdr = iscsi_next_hdr(task);
199 rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
200
201 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
202 ahslength = rlen + sizeof(ecdb_ahdr->reserved);
203
204 pad_len = iscsi_padding(rlen);
205
206 rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
207 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
208 if (rc)
209 return rc;
210
211 if (pad_len)
212 memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
213
214 ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
215 ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
216 ecdb_ahdr->reserved = 0;
217 memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
218
219 ISCSI_DBG_SESSION(task->conn->session,
220 "iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
221 "rlen %d pad_len %d ahs_length %d iscsi_headers_size "
222 "%u\n", cmd->cmd_len, rlen, pad_len, ahslength,
223 task->hdr_len);
224 return 0;
225}
226
227static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
228{
229 struct scsi_cmnd *sc = task->sc;
230 struct iscsi_rlength_ahdr *rlen_ahdr;
231 int rc;
232
233 rlen_ahdr = iscsi_next_hdr(task);
234 rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
235 if (rc)
236 return rc;
237
238 rlen_ahdr->ahslength =
239 cpu_to_be16(sizeof(rlen_ahdr->read_length) +
240 sizeof(rlen_ahdr->reserved));
241 rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
242 rlen_ahdr->reserved = 0;
243 rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
244
245 ISCSI_DBG_SESSION(task->conn->session,
246 "bidi-in rlen_ahdr->read_length(%d) "
247 "rlen_ahdr->ahslength(%d)\n",
248 be32_to_cpu(rlen_ahdr->read_length),
249 be16_to_cpu(rlen_ahdr->ahslength));
250 return 0;
251}
252
253/**
254 * iscsi_check_tmf_restrictions - check if a task is affected by TMF
255 * @task: iscsi task
256 * @opcode: opcode to check for
257 *
258 * During TMF a task has to be checked if it's affected.
259 * All unrelated I/O can be passed through, but I/O to the
260 * affected LUN should be restricted.
261 * If 'fast_abort' is set we won't be sending any I/O to the
262 * affected LUN.
263 * Otherwise the target is waiting for all TTTs to be completed,
264 * so we have to send all outstanding Data-Out PDUs to the target.
265 */
266static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
267{
268 struct iscsi_conn *conn = task->conn;
269 struct iscsi_tm *tmf = &conn->tmhdr;
270 unsigned int hdr_lun;
271
272 if (conn->tmf_state == TMF_INITIAL)
273 return 0;
274
275 if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
276 return 0;
277
278 switch (ISCSI_TM_FUNC_VALUE(tmf)) {
279 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
280 /*
281 * Allow PDUs for unrelated LUNs
282 */
283 hdr_lun = scsilun_to_int(&tmf->lun);
284 if (hdr_lun != task->sc->device->lun)
285 return 0;
286 /* fall through */
287 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
288 /*
289 * Fail all SCSI cmd PDUs
290 */
291 if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
292 iscsi_conn_printk(KERN_INFO, conn,
293 "task [op %x/%x itt "
294 "0x%x/0x%x] "
295 "rejected.\n",
296 task->hdr->opcode, opcode,
297 task->itt, task->hdr_itt);
298 return -EACCES;
299 }
300 /*
301 * And also all data-out PDUs in response to R2T
302 * if fast_abort is set.
303 */
304 if (conn->session->fast_abort) {
305 iscsi_conn_printk(KERN_INFO, conn,
306 "task [op %x/%x itt "
307 "0x%x/0x%x] fast abort.\n",
308 task->hdr->opcode, opcode,
309 task->itt, task->hdr_itt);
310 return -EACCES;
311 }
312 break;
313 case ISCSI_TM_FUNC_ABORT_TASK:
314 /*
315 * the caller has already checked if the task
316 * they want to abort was in the pending queue so if
317 * we are here the cmd pdu has gone out already, and
318 * we will only hit this for data-outs
319 */
320 if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
321 task->hdr_itt == tmf->rtt) {
322 ISCSI_DBG_SESSION(conn->session,
323 "Preventing task %x/%x from sending "
324 "data-out due to abort task in "
325 "progress\n", task->itt,
326 task->hdr_itt);
327 return -EACCES;
328 }
329 break;
330 }
331
332 return 0;
333}
334
335/**
336 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
337 * @task: iscsi task
338 *
339 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
340 * fields like dlength or final based on how much data it sends
341 */
342static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
343{
344 struct iscsi_conn *conn = task->conn;
345 struct iscsi_session *session = conn->session;
346 struct scsi_cmnd *sc = task->sc;
347 struct iscsi_scsi_req *hdr;
348 unsigned hdrlength, cmd_len;
349 itt_t itt;
350 int rc;
351
352 rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
353 if (rc)
354 return rc;
355
356 if (conn->session->tt->alloc_pdu) {
357 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
358 if (rc)
359 return rc;
360 }
361 hdr = (struct iscsi_scsi_req *)task->hdr;
362 itt = hdr->itt;
363 memset(hdr, 0, sizeof(*hdr));
364
365 if (session->tt->parse_pdu_itt)
366 hdr->itt = task->hdr_itt = itt;
367 else
368 hdr->itt = task->hdr_itt = build_itt(task->itt,
369 task->conn->session->age);
370 task->hdr_len = 0;
371 rc = iscsi_add_hdr(task, sizeof(*hdr));
372 if (rc)
373 return rc;
374 hdr->opcode = ISCSI_OP_SCSI_CMD;
375 hdr->flags = ISCSI_ATTR_SIMPLE;
376 int_to_scsilun(sc->device->lun, &hdr->lun);
377 task->lun = hdr->lun;
378 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
379 cmd_len = sc->cmd_len;
380 if (cmd_len < ISCSI_CDB_SIZE)
381 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
382 else if (cmd_len > ISCSI_CDB_SIZE) {
383 rc = iscsi_prep_ecdb_ahs(task);
384 if (rc)
385 return rc;
386 cmd_len = ISCSI_CDB_SIZE;
387 }
388 memcpy(hdr->cdb, sc->cmnd, cmd_len);
389
390 task->imm_count = 0;
391 if (scsi_bidi_cmnd(sc)) {
392 hdr->flags |= ISCSI_FLAG_CMD_READ;
393 rc = iscsi_prep_bidi_ahs(task);
394 if (rc)
395 return rc;
396 }
397 if (sc->sc_data_direction == DMA_TO_DEVICE) {
398 unsigned out_len = scsi_out(sc)->length;
399 struct iscsi_r2t_info *r2t = &task->unsol_r2t;
400
401 hdr->data_length = cpu_to_be32(out_len);
402 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
403 /*
404 * Write counters:
405 *
406 * imm_count bytes to be sent right after
407 * SCSI PDU Header
408 *
409 * unsol_count bytes(as Data-Out) to be sent
410 * without R2T ack right after
411 * immediate data
412 *
413 * r2t data_length bytes to be sent via R2T ack's
414 *
415 * pad_count bytes to be sent as zero-padding
416 */
417 memset(r2t, 0, sizeof(*r2t));
418
419 if (session->imm_data_en) {
420 if (out_len >= session->first_burst)
421 task->imm_count = min(session->first_burst,
422 conn->max_xmit_dlength);
423 else
424 task->imm_count = min(out_len,
425 conn->max_xmit_dlength);
426 hton24(hdr->dlength, task->imm_count);
427 } else
428 zero_data(hdr->dlength);
429
430 if (!session->initial_r2t_en) {
431 r2t->data_length = min(session->first_burst, out_len) -
432 task->imm_count;
433 r2t->data_offset = task->imm_count;
434 r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
435 r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
436 }
437
438 if (!task->unsol_r2t.data_length)
439 /* No unsolicit Data-Out's */
440 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
441 } else {
442 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
443 zero_data(hdr->dlength);
444 hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
445
446 if (sc->sc_data_direction == DMA_FROM_DEVICE)
447 hdr->flags |= ISCSI_FLAG_CMD_READ;
448 }
449
450 /* calculate size of additional header segments (AHSs) */
451 hdrlength = task->hdr_len - sizeof(*hdr);
452
453 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
454 hdrlength /= ISCSI_PAD_LEN;
455
456 WARN_ON(hdrlength >= 256);
457 hdr->hlength = hdrlength & 0xFF;
458 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
459
460 if (session->tt->init_task && session->tt->init_task(task))
461 return -EIO;
462
463 task->state = ISCSI_TASK_RUNNING;
464 session->cmdsn++;
465
466 conn->scsicmd_pdus_cnt++;
467 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
468 "itt 0x%x len %d bidi_len %d cmdsn %d win %d]\n",
469 scsi_bidi_cmnd(sc) ? "bidirectional" :
470 sc->sc_data_direction == DMA_TO_DEVICE ?
471 "write" : "read", conn->id, sc, sc->cmnd[0],
472 task->itt, scsi_bufflen(sc),
473 scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
474 session->cmdsn,
475 session->max_cmdsn - session->exp_cmdsn + 1);
476 return 0;
477}
478
479/**
480 * iscsi_free_task - free a task
481 * @task: iscsi cmd task
482 *
483 * Must be called with session lock.
484 * This function returns the scsi command to scsi-ml or cleans
485 * up mgmt tasks then returns the task to the pool.
486 */
487static void iscsi_free_task(struct iscsi_task *task)
488{
489 struct iscsi_conn *conn = task->conn;
490 struct iscsi_session *session = conn->session;
491 struct scsi_cmnd *sc = task->sc;
492 int oldstate = task->state;
493
494 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
495 task->itt, task->state, task->sc);
496
497 session->tt->cleanup_task(task);
498 task->state = ISCSI_TASK_FREE;
499 task->sc = NULL;
500 /*
501 * login task is preallocated so do not free
502 */
503 if (conn->login_task == task)
504 return;
505
506 kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
507
508 if (sc) {
509 task->sc = NULL;
510 /* SCSI eh reuses commands to verify us */
511 sc->SCp.ptr = NULL;
512 /*
513 * queue command may call this to free the task, so
514 * it will decide how to return sc to scsi-ml.
515 */
516 if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
517 sc->scsi_done(sc);
518 }
519}
520
521void __iscsi_get_task(struct iscsi_task *task)
522{
523 atomic_inc(&task->refcount);
524}
525EXPORT_SYMBOL_GPL(__iscsi_get_task);
526
527void __iscsi_put_task(struct iscsi_task *task)
528{
529 if (atomic_dec_and_test(&task->refcount))
530 iscsi_free_task(task);
531}
532EXPORT_SYMBOL_GPL(__iscsi_put_task);
533
534void iscsi_put_task(struct iscsi_task *task)
535{
536 struct iscsi_session *session = task->conn->session;
537
538 spin_lock_bh(&session->lock);
539 __iscsi_put_task(task);
540 spin_unlock_bh(&session->lock);
541}
542EXPORT_SYMBOL_GPL(iscsi_put_task);
543
544/**
545 * iscsi_complete_task - finish a task
546 * @task: iscsi cmd task
547 * @state: state to complete task with
548 *
549 * Must be called with session lock.
550 */
551static void iscsi_complete_task(struct iscsi_task *task, int state)
552{
553 struct iscsi_conn *conn = task->conn;
554
555 ISCSI_DBG_SESSION(conn->session,
556 "complete task itt 0x%x state %d sc %p\n",
557 task->itt, task->state, task->sc);
558 if (task->state == ISCSI_TASK_COMPLETED ||
559 task->state == ISCSI_TASK_ABRT_TMF ||
560 task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
561 task->state == ISCSI_TASK_REQUEUE_SCSIQ)
562 return;
563 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
564 task->state = state;
565
566 if (!list_empty(&task->running))
567 list_del_init(&task->running);
568
569 if (conn->task == task)
570 conn->task = NULL;
571
572 if (conn->ping_task == task)
573 conn->ping_task = NULL;
574
575 /* release get from queueing */
576 __iscsi_put_task(task);
577}
578
579/**
580 * iscsi_complete_scsi_task - finish scsi task normally
581 * @task: iscsi task for scsi cmd
582 * @exp_cmdsn: expected cmd sn in cpu format
583 * @max_cmdsn: max cmd sn in cpu format
584 *
585 * This is used when drivers do not need or cannot perform
586 * lower level pdu processing.
587 *
588 * Called with session lock
589 */
590void iscsi_complete_scsi_task(struct iscsi_task *task,
591 uint32_t exp_cmdsn, uint32_t max_cmdsn)
592{
593 struct iscsi_conn *conn = task->conn;
594
595 ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);
596
597 conn->last_recv = jiffies;
598 __iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
599 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
600}
601EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
602
603
604/*
605 * session lock must be held and if not called for a task that is
606 * still pending or from the xmit thread, then xmit thread must
607 * be suspended.
608 */
609static void fail_scsi_task(struct iscsi_task *task, int err)
610{
611 struct iscsi_conn *conn = task->conn;
612 struct scsi_cmnd *sc;
613 int state;
614
615 /*
616 * if a command completes and we get a successful tmf response
617 * we will hit this because the scsi eh abort code does not take
618 * a ref to the task.
619 */
620 sc = task->sc;
621 if (!sc)
622 return;
623
624 if (task->state == ISCSI_TASK_PENDING) {
625 /*
626 * cmd never made it to the xmit thread, so we should not count
627 * the cmd in the sequencing
628 */
629 conn->session->queued_cmdsn--;
630 /* it was never sent so just complete like normal */
631 state = ISCSI_TASK_COMPLETED;
632 } else if (err == DID_TRANSPORT_DISRUPTED)
633 state = ISCSI_TASK_ABRT_SESS_RECOV;
634 else
635 state = ISCSI_TASK_ABRT_TMF;
636
637 sc->result = err << 16;
638 if (!scsi_bidi_cmnd(sc))
639 scsi_set_resid(sc, scsi_bufflen(sc));
640 else {
641 scsi_out(sc)->resid = scsi_out(sc)->length;
642 scsi_in(sc)->resid = scsi_in(sc)->length;
643 }
644
645 iscsi_complete_task(task, state);
646}
647
648static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
649 struct iscsi_task *task)
650{
651 struct iscsi_session *session = conn->session;
652 struct iscsi_hdr *hdr = task->hdr;
653 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
654 uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
655
656 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
657 return -ENOTCONN;
658
659 if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
660 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
661 /*
662 * pre-format CmdSN for outgoing PDU.
663 */
664 nop->cmdsn = cpu_to_be32(session->cmdsn);
665 if (hdr->itt != RESERVED_ITT) {
666 /*
667 * TODO: We always use immediate for normal session pdus.
668 * If we start to send tmfs or nops as non-immediate then
669 * we should start checking the cmdsn numbers for mgmt tasks.
670 *
671 * During discovery sessions iscsid sends TEXT as non immediate,
672 * but we always only send one PDU at a time.
673 */
674 if (conn->c_stage == ISCSI_CONN_STARTED &&
675 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
676 session->queued_cmdsn++;
677 session->cmdsn++;
678 }
679 }
680
681 if (session->tt->init_task && session->tt->init_task(task))
682 return -EIO;
683
684 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
685 session->state = ISCSI_STATE_LOGGING_OUT;
686
687 task->state = ISCSI_TASK_RUNNING;
688 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
689 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
690 hdr->itt, task->data_count);
691 return 0;
692}
693
694static struct iscsi_task *
695__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
696 char *data, uint32_t data_size)
697{
698 struct iscsi_session *session = conn->session;
699 struct iscsi_host *ihost = shost_priv(session->host);
700 uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
701 struct iscsi_task *task;
702 itt_t itt;
703
704 if (session->state == ISCSI_STATE_TERMINATE)
705 return NULL;
706
707 if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
708 /*
709 * Login and Text are sent serially, in
710 * request-followed-by-response sequence.
711 * Same task can be used. Same ITT must be used.
712 * Note that login_task is preallocated at conn_create().
713 */
714 if (conn->login_task->state != ISCSI_TASK_FREE) {
715 iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
716 "progress. Cannot start new task.\n");
717 return NULL;
718 }
719
720 task = conn->login_task;
721 } else {
722 if (session->state != ISCSI_STATE_LOGGED_IN)
723 return NULL;
724
725 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
726 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
727
728 if (!kfifo_out(&session->cmdpool.queue,
729 (void*)&task, sizeof(void*)))
730 return NULL;
731 }
732 /*
733 * released in complete pdu for task we expect a response for, and
734 * released by the lld when it has transmitted the task for
735 * pdus we do not expect a response for.
736 */
737 atomic_set(&task->refcount, 1);
738 task->conn = conn;
739 task->sc = NULL;
740 INIT_LIST_HEAD(&task->running);
741 task->state = ISCSI_TASK_PENDING;
742
743 if (data_size) {
744 memcpy(task->data, data, data_size);
745 task->data_count = data_size;
746 } else
747 task->data_count = 0;
748
749 if (conn->session->tt->alloc_pdu) {
750 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
751 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
752 "pdu for mgmt task.\n");
753 goto free_task;
754 }
755 }
756
757 itt = task->hdr->itt;
758 task->hdr_len = sizeof(struct iscsi_hdr);
759 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
760
761 if (hdr->itt != RESERVED_ITT) {
762 if (session->tt->parse_pdu_itt)
763 task->hdr->itt = itt;
764 else
765 task->hdr->itt = build_itt(task->itt,
766 task->conn->session->age);
767 }
768
769 if (!ihost->workq) {
770 if (iscsi_prep_mgmt_task(conn, task))
771 goto free_task;
772
773 if (session->tt->xmit_task(task))
774 goto free_task;
775 } else {
776 list_add_tail(&task->running, &conn->mgmtqueue);
777 iscsi_conn_queue_work(conn);
778 }
779
780 return task;
781
782free_task:
783 __iscsi_put_task(task);
784 return NULL;
785}
786
787int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
788 char *data, uint32_t data_size)
789{
790 struct iscsi_conn *conn = cls_conn->dd_data;
791 struct iscsi_session *session = conn->session;
792 int err = 0;
793
794 spin_lock_bh(&session->lock);
795 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
796 err = -EPERM;
797 spin_unlock_bh(&session->lock);
798 return err;
799}
800EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
801
802/**
803 * iscsi_cmd_rsp - SCSI Command Response processing
804 * @conn: iscsi connection
805 * @hdr: iscsi header
806 * @task: scsi command task
807 * @data: cmd data buffer
808 * @datalen: len of buffer
809 *
810 * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
811 * then completes the command and task.
812 **/
813static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
814 struct iscsi_task *task, char *data,
815 int datalen)
816{
817 struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
818 struct iscsi_session *session = conn->session;
819 struct scsi_cmnd *sc = task->sc;
820
821 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
822 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
823
824 sc->result = (DID_OK << 16) | rhdr->cmd_status;
825
826 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
827 sc->result = DID_ERROR << 16;
828 goto out;
829 }
830
831 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
832 uint16_t senselen;
833
834 if (datalen < 2) {
835invalid_datalen:
836 iscsi_conn_printk(KERN_ERR, conn,
837 "Got CHECK_CONDITION but invalid data "
838 "buffer size of %d\n", datalen);
839 sc->result = DID_BAD_TARGET << 16;
840 goto out;
841 }
842
843 senselen = get_unaligned_be16(data);
844 if (datalen < senselen)
845 goto invalid_datalen;
846
847 memcpy(sc->sense_buffer, data + 2,
848 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
849 ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n",
850 min_t(uint16_t, senselen,
851 SCSI_SENSE_BUFFERSIZE));
852 }
853
854 if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
855 ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
856 int res_count = be32_to_cpu(rhdr->bi_residual_count);
857
858 if (scsi_bidi_cmnd(sc) && res_count > 0 &&
859 (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
860 res_count <= scsi_in(sc)->length))
861 scsi_in(sc)->resid = res_count;
862 else
863 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
864 }
865
866 if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
867 ISCSI_FLAG_CMD_OVERFLOW)) {
868 int res_count = be32_to_cpu(rhdr->residual_count);
869
870 if (res_count > 0 &&
871 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
872 res_count <= scsi_bufflen(sc)))
873 /* write side for bidi or uni-io set_resid */
874 scsi_set_resid(sc, res_count);
875 else
876 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
877 }
878out:
879 ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
880 sc, sc->result, task->itt);
881 conn->scsirsp_pdus_cnt++;
882 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
883}
884
885/**
886 * iscsi_data_in_rsp - SCSI Data-In Response processing
887 * @conn: iscsi connection
888 * @hdr: iscsi pdu
889 * @task: scsi command task
890 **/
891static void
892iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
893 struct iscsi_task *task)
894{
895 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
896 struct scsi_cmnd *sc = task->sc;
897
898 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
899 return;
900
901 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
902 sc->result = (DID_OK << 16) | rhdr->cmd_status;
903 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
904 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
905 ISCSI_FLAG_DATA_OVERFLOW)) {
906 int res_count = be32_to_cpu(rhdr->residual_count);
907
908 if (res_count > 0 &&
909 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
910 res_count <= scsi_in(sc)->length))
911 scsi_in(sc)->resid = res_count;
912 else
913 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
914 }
915
916 ISCSI_DBG_SESSION(conn->session, "data in with status done "
917 "[sc %p res %d itt 0x%x]\n",
918 sc, sc->result, task->itt);
919 conn->scsirsp_pdus_cnt++;
920 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
921}
922
923static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
924{
925 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
926
927 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
928 conn->tmfrsp_pdus_cnt++;
929
930 if (conn->tmf_state != TMF_QUEUED)
931 return;
932
933 if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
934 conn->tmf_state = TMF_SUCCESS;
935 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
936 conn->tmf_state = TMF_NOT_FOUND;
937 else
938 conn->tmf_state = TMF_FAILED;
939 wake_up(&conn->ehwait);
940}
941
942static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
943{
944 struct iscsi_nopout hdr;
945 struct iscsi_task *task;
946
947 if (!rhdr && conn->ping_task)
948 return;
949
950 memset(&hdr, 0, sizeof(struct iscsi_nopout));
951 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
952 hdr.flags = ISCSI_FLAG_CMD_FINAL;
953
954 if (rhdr) {
955 hdr.lun = rhdr->lun;
956 hdr.ttt = rhdr->ttt;
957 hdr.itt = RESERVED_ITT;
958 } else
959 hdr.ttt = RESERVED_ITT;
960
961 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
962 if (!task)
963 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
964 else if (!rhdr) {
965 /* only track our nops */
966 conn->ping_task = task;
967 conn->last_ping = jiffies;
968 }
969}
970
971static int iscsi_nop_out_rsp(struct iscsi_task *task,
972 struct iscsi_nopin *nop, char *data, int datalen)
973{
974 struct iscsi_conn *conn = task->conn;
975 int rc = 0;
976
977 if (conn->ping_task != task) {
978 /*
979 * If this is not in response to one of our
980 * nops then it must be from userspace.
981 */
982 if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
983 data, datalen))
984 rc = ISCSI_ERR_CONN_FAILED;
985 } else
986 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
987 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
988 return rc;
989}
990
991static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
992 char *data, int datalen)
993{
994 struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
995 struct iscsi_hdr rejected_pdu;
996 int opcode, rc = 0;
997
998 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
999
1000 if (ntoh24(reject->dlength) > datalen ||
1001 ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
1002 iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
1003 "pdu. Invalid data length (pdu dlength "
1004 "%u, datalen %d\n", ntoh24(reject->dlength),
1005 datalen);
1006 return ISCSI_ERR_PROTO;
1007 }
1008 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
1009 opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;
1010
1011 switch (reject->reason) {
1012 case ISCSI_REASON_DATA_DIGEST_ERROR:
1013 iscsi_conn_printk(KERN_ERR, conn,
1014 "pdu (op 0x%x itt 0x%x) rejected "
1015 "due to DataDigest error.\n",
1016 rejected_pdu.itt, opcode);
1017 break;
1018 case ISCSI_REASON_IMM_CMD_REJECT:
1019 iscsi_conn_printk(KERN_ERR, conn,
1020 "pdu (op 0x%x itt 0x%x) rejected. Too many "
1021 "immediate commands.\n",
1022 rejected_pdu.itt, opcode);
1023 /*
1024 * We only send one TMF at a time so if the target could not
1025 * handle it, then it should get fixed (RFC mandates that
1026 * a target can handle one immediate TMF per conn).
1027 *
1028 * For nops-outs, we could have sent more than one if
1029 * the target is sending us lots of nop-ins
1030 */
1031 if (opcode != ISCSI_OP_NOOP_OUT)
1032 return 0;
1033
1034 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
1035 /*
1036 * nop-out in response to target's nop-out rejected.
1037 * Just resend.
1038 */
1039 iscsi_send_nopout(conn,
1040 (struct iscsi_nopin*)&rejected_pdu);
1041 else {
1042 struct iscsi_task *task;
1043 /*
1044 * Our nop as ping got dropped. We know the target
1045 * and transport are ok so just clean up
1046 */
1047 task = iscsi_itt_to_task(conn, rejected_pdu.itt);
1048 if (!task) {
1049 iscsi_conn_printk(KERN_ERR, conn,
1050 "Invalid pdu reject. Could "
1051 "not lookup rejected task.\n");
1052 rc = ISCSI_ERR_BAD_ITT;
1053 } else
1054 rc = iscsi_nop_out_rsp(task,
1055 (struct iscsi_nopin*)&rejected_pdu,
1056 NULL, 0);
1057 }
1058 break;
1059 default:
1060 iscsi_conn_printk(KERN_ERR, conn,
1061 "pdu (op 0x%x itt 0x%x) rejected. Reason "
1062 "code 0x%x\n", rejected_pdu.itt,
1063 rejected_pdu.opcode, reject->reason);
1064 break;
1065 }
1066 return rc;
1067}
1068
1069/**
1070 * iscsi_itt_to_task - look up task by itt
1071 * @conn: iscsi connection
1072 * @itt: itt
1073 *
1074 * This should be used for mgmt tasks like login and nops, or if
1075 * the LDD's itt space does not include the session age.
1076 *
1077 * The session lock must be held.
1078 */
1079struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
1080{
1081 struct iscsi_session *session = conn->session;
1082 int i;
1083
1084 if (itt == RESERVED_ITT)
1085 return NULL;
1086
1087 if (session->tt->parse_pdu_itt)
1088 session->tt->parse_pdu_itt(conn, itt, &i, NULL);
1089 else
1090 i = get_itt(itt);
1091 if (i >= session->cmds_max)
1092 return NULL;
1093
1094 return session->cmds[i];
1095}
1096EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
1097
1098/**
1099 * __iscsi_complete_pdu - complete pdu
1100 * @conn: iscsi conn
1101 * @hdr: iscsi header
1102 * @data: data buffer
1103 * @datalen: len of data buffer
1104 *
1105 * Completes pdu processing by freeing any resources allocated at
1106 * queuecommand or send generic. session lock must be held and verify
1107 * itt must have been called.
1108 */
1109int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1110 char *data, int datalen)
1111{
1112 struct iscsi_session *session = conn->session;
1113 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
1114 struct iscsi_task *task;
1115 uint32_t itt;
1116
1117 conn->last_recv = jiffies;
1118 rc = iscsi_verify_itt(conn, hdr->itt);
1119 if (rc)
1120 return rc;
1121
1122 if (hdr->itt != RESERVED_ITT)
1123 itt = get_itt(hdr->itt);
1124 else
1125 itt = ~0U;
1126
1127 ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n",
1128 opcode, conn->id, itt, datalen);
1129
1130 if (itt == ~0U) {
1131 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1132
1133 switch(opcode) {
1134 case ISCSI_OP_NOOP_IN:
1135 if (datalen) {
1136 rc = ISCSI_ERR_PROTO;
1137 break;
1138 }
1139
1140 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
1141 break;
1142
1143 iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
1144 break;
1145 case ISCSI_OP_REJECT:
1146 rc = iscsi_handle_reject(conn, hdr, data, datalen);
1147 break;
1148 case ISCSI_OP_ASYNC_EVENT:
1149 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1150 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
1151 rc = ISCSI_ERR_CONN_FAILED;
1152 break;
1153 default:
1154 rc = ISCSI_ERR_BAD_OPCODE;
1155 break;
1156 }
1157 goto out;
1158 }
1159
1160 switch(opcode) {
1161 case ISCSI_OP_SCSI_CMD_RSP:
1162 case ISCSI_OP_SCSI_DATA_IN:
1163 task = iscsi_itt_to_ctask(conn, hdr->itt);
1164 if (!task)
1165 return ISCSI_ERR_BAD_ITT;
1166 task->last_xfer = jiffies;
1167 break;
1168 case ISCSI_OP_R2T:
1169 /*
1170 * LLD handles R2Ts if they need to.
1171 */
1172 return 0;
1173 case ISCSI_OP_LOGOUT_RSP:
1174 case ISCSI_OP_LOGIN_RSP:
1175 case ISCSI_OP_TEXT_RSP:
1176 case ISCSI_OP_SCSI_TMFUNC_RSP:
1177 case ISCSI_OP_NOOP_IN:
1178 task = iscsi_itt_to_task(conn, hdr->itt);
1179 if (!task)
1180 return ISCSI_ERR_BAD_ITT;
1181 break;
1182 default:
1183 return ISCSI_ERR_BAD_OPCODE;
1184 }
1185
1186 switch(opcode) {
1187 case ISCSI_OP_SCSI_CMD_RSP:
1188 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
1189 break;
1190 case ISCSI_OP_SCSI_DATA_IN:
1191 iscsi_data_in_rsp(conn, hdr, task);
1192 break;
1193 case ISCSI_OP_LOGOUT_RSP:
1194 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1195 if (datalen) {
1196 rc = ISCSI_ERR_PROTO;
1197 break;
1198 }
1199 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1200 goto recv_pdu;
1201 case ISCSI_OP_LOGIN_RSP:
1202 case ISCSI_OP_TEXT_RSP:
1203 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1204 /*
1205 * login related PDU's exp_statsn is handled in
1206 * userspace
1207 */
1208 goto recv_pdu;
1209 case ISCSI_OP_SCSI_TMFUNC_RSP:
1210 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1211 if (datalen) {
1212 rc = ISCSI_ERR_PROTO;
1213 break;
1214 }
1215
1216 iscsi_tmf_rsp(conn, hdr);
1217 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1218 break;
1219 case ISCSI_OP_NOOP_IN:
1220 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1221 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
1222 rc = ISCSI_ERR_PROTO;
1223 break;
1224 }
1225 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1226
1227 rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
1228 data, datalen);
1229 break;
1230 default:
1231 rc = ISCSI_ERR_BAD_OPCODE;
1232 break;
1233 }
1234
1235out:
1236 return rc;
1237recv_pdu:
1238 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
1239 rc = ISCSI_ERR_CONN_FAILED;
1240 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1241 return rc;
1242}
1243EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
1244
1245int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1246 char *data, int datalen)
1247{
1248 int rc;
1249
1250 spin_lock(&conn->session->lock);
1251 rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
1252 spin_unlock(&conn->session->lock);
1253 return rc;
1254}
1255EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
1256
1257int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
1258{
1259 struct iscsi_session *session = conn->session;
1260 int age = 0, i = 0;
1261
1262 if (itt == RESERVED_ITT)
1263 return 0;
1264
1265 if (session->tt->parse_pdu_itt)
1266 session->tt->parse_pdu_itt(conn, itt, &i, &age);
1267 else {
1268 i = get_itt(itt);
1269 age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
1270 }
1271
1272 if (age != session->age) {
1273 iscsi_conn_printk(KERN_ERR, conn,
1274 "received itt %x expected session age (%x)\n",
1275 (__force u32)itt, session->age);
1276 return ISCSI_ERR_BAD_ITT;
1277 }
1278
1279 if (i >= session->cmds_max) {
1280 iscsi_conn_printk(KERN_ERR, conn,
1281 "received invalid itt index %u (max cmds "
1282 "%u.\n", i, session->cmds_max);
1283 return ISCSI_ERR_BAD_ITT;
1284 }
1285 return 0;
1286}
1287EXPORT_SYMBOL_GPL(iscsi_verify_itt);
1288
1289/**
1290 * iscsi_itt_to_ctask - look up ctask by itt
1291 * @conn: iscsi connection
1292 * @itt: itt
1293 *
1294 * This should be used for cmd tasks.
1295 *
1296 * The session lock must be held.
1297 */
1298struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
1299{
1300 struct iscsi_task *task;
1301
1302 if (iscsi_verify_itt(conn, itt))
1303 return NULL;
1304
1305 task = iscsi_itt_to_task(conn, itt);
1306 if (!task || !task->sc)
1307 return NULL;
1308
1309 if (task->sc->SCp.phase != conn->session->age) {
1310 iscsi_session_printk(KERN_ERR, conn->session,
1311 "task's session age %d, expected %d\n",
1312 task->sc->SCp.phase, conn->session->age);
1313 return NULL;
1314 }
1315
1316 return task;
1317}
1318EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
1319
1320void iscsi_session_failure(struct iscsi_session *session,
1321 enum iscsi_err err)
1322{
1323 struct iscsi_conn *conn;
1324 struct device *dev;
1325
1326 spin_lock_bh(&session->lock);
1327 conn = session->leadconn;
1328 if (session->state == ISCSI_STATE_TERMINATE || !conn) {
1329 spin_unlock_bh(&session->lock);
1330 return;
1331 }
1332
1333 dev = get_device(&conn->cls_conn->dev);
1334 spin_unlock_bh(&session->lock);
1335 if (!dev)
1336 return;
1337 /*
1338 * if the host is being removed bypass the connection
1339 * recovery initialization because we are going to kill
1340 * the session.
1341 */
1342 if (err == ISCSI_ERR_INVALID_HOST)
1343 iscsi_conn_error_event(conn->cls_conn, err);
1344 else
1345 iscsi_conn_failure(conn, err);
1346 put_device(dev);
1347}
1348EXPORT_SYMBOL_GPL(iscsi_session_failure);
1349
1350void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
1351{
1352 struct iscsi_session *session = conn->session;
1353
1354 spin_lock_bh(&session->lock);
1355 if (session->state == ISCSI_STATE_FAILED) {
1356 spin_unlock_bh(&session->lock);
1357 return;
1358 }
1359
1360 if (conn->stop_stage == 0)
1361 session->state = ISCSI_STATE_FAILED;
1362 spin_unlock_bh(&session->lock);
1363
1364 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1365 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1366 iscsi_conn_error_event(conn->cls_conn, err);
1367}
1368EXPORT_SYMBOL_GPL(iscsi_conn_failure);
1369
1370static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
1371{
1372 struct iscsi_session *session = conn->session;
1373
1374 /*
1375 * Check for iSCSI window and take care of CmdSN wrap-around
1376 */
1377 if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
1378 ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn "
1379 "%u MaxCmdSN %u CmdSN %u/%u\n",
1380 session->exp_cmdsn, session->max_cmdsn,
1381 session->cmdsn, session->queued_cmdsn);
1382 return -ENOSPC;
1383 }
1384 return 0;
1385}
1386
1387static int iscsi_xmit_task(struct iscsi_conn *conn)
1388{
1389 struct iscsi_task *task = conn->task;
1390 int rc;
1391
1392 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
1393 return -ENODATA;
1394
1395 __iscsi_get_task(task);
1396 spin_unlock_bh(&conn->session->lock);
1397 rc = conn->session->tt->xmit_task(task);
1398 spin_lock_bh(&conn->session->lock);
1399 if (!rc) {
1400 /* done with this task */
1401 task->last_xfer = jiffies;
1402 conn->task = NULL;
1403 }
1404 __iscsi_put_task(task);
1405 return rc;
1406}
1407
1408/**
1409 * iscsi_requeue_task - requeue task to run from session workqueue
1410 * @task: task to requeue
1411 *
1412 * LLDs that need to run a task from the session workqueue should call
1413 * this. The session lock must be held. This should only be called
1414 * by software drivers.
1415 */
1416void iscsi_requeue_task(struct iscsi_task *task)
1417{
1418 struct iscsi_conn *conn = task->conn;
1419
1420 /*
1421 * this may be on the requeue list already if the xmit_task callout
1422 * is handling the r2ts while we are adding new ones
1423 */
1424 if (list_empty(&task->running))
1425 list_add_tail(&task->running, &conn->requeue);
1426 iscsi_conn_queue_work(conn);
1427}
1428EXPORT_SYMBOL_GPL(iscsi_requeue_task);
1429
1430/**
1431 * iscsi_data_xmit - xmit any command into the scheduled connection
1432 * @conn: iscsi connection
1433 *
1434 * Notes:
1435 * The function can return -EAGAIN in which case the caller must
1436 * re-schedule it again later or recover. '0' return code means
1437 * successful xmit.
1438 **/
1439static int iscsi_data_xmit(struct iscsi_conn *conn)
1440{
1441 struct iscsi_task *task;
1442 int rc = 0;
1443
1444 spin_lock_bh(&conn->session->lock);
1445 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1446 ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
1447 spin_unlock_bh(&conn->session->lock);
1448 return -ENODATA;
1449 }
1450
1451 if (conn->task) {
1452 rc = iscsi_xmit_task(conn);
1453 if (rc)
1454 goto done;
1455 }
1456
1457 /*
1458 * process mgmt pdus like nops before commands since we should
1459 * only have one nop-out as a ping from us and targets should not
1460 * overflow us with nop-ins
1461 */
1462check_mgmt:
1463 while (!list_empty(&conn->mgmtqueue)) {
1464 conn->task = list_entry(conn->mgmtqueue.next,
1465 struct iscsi_task, running);
1466 list_del_init(&conn->task->running);
1467 if (iscsi_prep_mgmt_task(conn, conn->task)) {
1468 __iscsi_put_task(conn->task);
1469 conn->task = NULL;
1470 continue;
1471 }
1472 rc = iscsi_xmit_task(conn);
1473 if (rc)
1474 goto done;
1475 }
1476
1477 /* process pending command queue */
1478 while (!list_empty(&conn->cmdqueue)) {
1479 conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
1480 running);
1481 list_del_init(&conn->task->running);
1482 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1483 fail_scsi_task(conn->task, DID_IMM_RETRY);
1484 continue;
1485 }
1486 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1487 if (rc) {
1488 if (rc == -ENOMEM || rc == -EACCES) {
1489 list_add_tail(&conn->task->running,
1490 &conn->cmdqueue);
1491 conn->task = NULL;
1492 goto done;
1493 } else
1494 fail_scsi_task(conn->task, DID_ABORT);
1495 continue;
1496 }
1497 rc = iscsi_xmit_task(conn);
1498 if (rc)
1499 goto done;
1500 /*
1501 * we could continuously get new task requests so
1502 * we need to check the mgmt queue for nops that need to
1503 * be sent to aviod starvation
1504 */
1505 if (!list_empty(&conn->mgmtqueue))
1506 goto check_mgmt;
1507 }
1508
1509 while (!list_empty(&conn->requeue)) {
1510 /*
1511 * we always do fastlogout - conn stop code will clean up.
1512 */
1513 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
1514 break;
1515
1516 task = list_entry(conn->requeue.next, struct iscsi_task,
1517 running);
1518 if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
1519 break;
1520
1521 conn->task = task;
1522 list_del_init(&conn->task->running);
1523 conn->task->state = ISCSI_TASK_RUNNING;
1524 rc = iscsi_xmit_task(conn);
1525 if (rc)
1526 goto done;
1527 if (!list_empty(&conn->mgmtqueue))
1528 goto check_mgmt;
1529 }
1530 spin_unlock_bh(&conn->session->lock);
1531 return -ENODATA;
1532
1533done:
1534 spin_unlock_bh(&conn->session->lock);
1535 return rc;
1536}
1537
1538static void iscsi_xmitworker(struct work_struct *work)
1539{
1540 struct iscsi_conn *conn =
1541 container_of(work, struct iscsi_conn, xmitwork);
1542 int rc;
1543 /*
1544 * serialize Xmit worker on a per-connection basis.
1545 */
1546 do {
1547 rc = iscsi_data_xmit(conn);
1548 } while (rc >= 0 || rc == -EAGAIN);
1549}
1550
1551static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
1552 struct scsi_cmnd *sc)
1553{
1554 struct iscsi_task *task;
1555
1556 if (!kfifo_out(&conn->session->cmdpool.queue,
1557 (void *) &task, sizeof(void *)))
1558 return NULL;
1559
1560 sc->SCp.phase = conn->session->age;
1561 sc->SCp.ptr = (char *) task;
1562
1563 atomic_set(&task->refcount, 1);
1564 task->state = ISCSI_TASK_PENDING;
1565 task->conn = conn;
1566 task->sc = sc;
1567 task->have_checked_conn = false;
1568 task->last_timeout = jiffies;
1569 task->last_xfer = jiffies;
1570 INIT_LIST_HEAD(&task->running);
1571 return task;
1572}
1573
1574enum {
1575 FAILURE_BAD_HOST = 1,
1576 FAILURE_SESSION_FAILED,
1577 FAILURE_SESSION_FREED,
1578 FAILURE_WINDOW_CLOSED,
1579 FAILURE_OOM,
1580 FAILURE_SESSION_TERMINATE,
1581 FAILURE_SESSION_IN_RECOVERY,
1582 FAILURE_SESSION_RECOVERY_TIMEOUT,
1583 FAILURE_SESSION_LOGGING_OUT,
1584 FAILURE_SESSION_NOT_READY,
1585};
1586
1587int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1588{
1589 struct iscsi_cls_session *cls_session;
1590 struct iscsi_host *ihost;
1591 int reason = 0;
1592 struct iscsi_session *session;
1593 struct iscsi_conn *conn;
1594 struct iscsi_task *task = NULL;
1595
1596 sc->result = 0;
1597 sc->SCp.ptr = NULL;
1598
1599 ihost = shost_priv(host);
1600
1601 cls_session = starget_to_session(scsi_target(sc->device));
1602 session = cls_session->dd_data;
1603 spin_lock_bh(&session->lock);
1604
1605 reason = iscsi_session_chkready(cls_session);
1606 if (reason) {
1607 sc->result = reason;
1608 goto fault;
1609 }
1610
1611 if (session->state != ISCSI_STATE_LOGGED_IN) {
1612 /*
1613 * to handle the race between when we set the recovery state
1614 * and block the session we requeue here (commands could
1615 * be entering our queuecommand while a block is starting
1616 * up because the block code is not locked)
1617 */
1618 switch (session->state) {
1619 case ISCSI_STATE_FAILED:
1620 case ISCSI_STATE_IN_RECOVERY:
1621 reason = FAILURE_SESSION_IN_RECOVERY;
1622 sc->result = DID_IMM_RETRY << 16;
1623 break;
1624 case ISCSI_STATE_LOGGING_OUT:
1625 reason = FAILURE_SESSION_LOGGING_OUT;
1626 sc->result = DID_IMM_RETRY << 16;
1627 break;
1628 case ISCSI_STATE_RECOVERY_FAILED:
1629 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1630 sc->result = DID_TRANSPORT_FAILFAST << 16;
1631 break;
1632 case ISCSI_STATE_TERMINATE:
1633 reason = FAILURE_SESSION_TERMINATE;
1634 sc->result = DID_NO_CONNECT << 16;
1635 break;
1636 default:
1637 reason = FAILURE_SESSION_FREED;
1638 sc->result = DID_NO_CONNECT << 16;
1639 }
1640 goto fault;
1641 }
1642
1643 conn = session->leadconn;
1644 if (!conn) {
1645 reason = FAILURE_SESSION_FREED;
1646 sc->result = DID_NO_CONNECT << 16;
1647 goto fault;
1648 }
1649
1650 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1651 reason = FAILURE_SESSION_IN_RECOVERY;
1652 sc->result = DID_REQUEUE;
1653 goto fault;
1654 }
1655
1656 if (iscsi_check_cmdsn_window_closed(conn)) {
1657 reason = FAILURE_WINDOW_CLOSED;
1658 goto reject;
1659 }
1660
1661 task = iscsi_alloc_task(conn, sc);
1662 if (!task) {
1663 reason = FAILURE_OOM;
1664 goto reject;
1665 }
1666
1667 if (!ihost->workq) {
1668 reason = iscsi_prep_scsi_cmd_pdu(task);
1669 if (reason) {
1670 if (reason == -ENOMEM || reason == -EACCES) {
1671 reason = FAILURE_OOM;
1672 goto prepd_reject;
1673 } else {
1674 sc->result = DID_ABORT << 16;
1675 goto prepd_fault;
1676 }
1677 }
1678 if (session->tt->xmit_task(task)) {
1679 session->cmdsn--;
1680 reason = FAILURE_SESSION_NOT_READY;
1681 goto prepd_reject;
1682 }
1683 } else {
1684 list_add_tail(&task->running, &conn->cmdqueue);
1685 iscsi_conn_queue_work(conn);
1686 }
1687
1688 session->queued_cmdsn++;
1689 spin_unlock_bh(&session->lock);
1690 return 0;
1691
1692prepd_reject:
1693 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1694reject:
1695 spin_unlock_bh(&session->lock);
1696 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
1697 sc->cmnd[0], reason);
1698 return SCSI_MLQUEUE_TARGET_BUSY;
1699
1700prepd_fault:
1701 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1702fault:
1703 spin_unlock_bh(&session->lock);
1704 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
1705 sc->cmnd[0], reason);
1706 if (!scsi_bidi_cmnd(sc))
1707 scsi_set_resid(sc, scsi_bufflen(sc));
1708 else {
1709 scsi_out(sc)->resid = scsi_out(sc)->length;
1710 scsi_in(sc)->resid = scsi_in(sc)->length;
1711 }
1712 sc->scsi_done(sc);
1713 return 0;
1714}
1715EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1716
1717int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
1718{
1719 switch (reason) {
1720 case SCSI_QDEPTH_DEFAULT:
1721 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1722 break;
1723 case SCSI_QDEPTH_QFULL:
1724 scsi_track_queue_full(sdev, depth);
1725 break;
1726 case SCSI_QDEPTH_RAMP_UP:
1727 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1728 break;
1729 default:
1730 return -EOPNOTSUPP;
1731 }
1732 return sdev->queue_depth;
1733}
1734EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
1735
1736int iscsi_target_alloc(struct scsi_target *starget)
1737{
1738 struct iscsi_cls_session *cls_session = starget_to_session(starget);
1739 struct iscsi_session *session = cls_session->dd_data;
1740
1741 starget->can_queue = session->scsi_cmds_max;
1742 return 0;
1743}
1744EXPORT_SYMBOL_GPL(iscsi_target_alloc);
1745
1746static void iscsi_tmf_timedout(unsigned long data)
1747{
1748 struct iscsi_conn *conn = (struct iscsi_conn *)data;
1749 struct iscsi_session *session = conn->session;
1750
1751 spin_lock(&session->lock);
1752 if (conn->tmf_state == TMF_QUEUED) {
1753 conn->tmf_state = TMF_TIMEDOUT;
1754 ISCSI_DBG_EH(session, "tmf timedout\n");
1755 /* unblock eh_abort() */
1756 wake_up(&conn->ehwait);
1757 }
1758 spin_unlock(&session->lock);
1759}
1760
1761static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1762 struct iscsi_tm *hdr, int age,
1763 int timeout)
1764{
1765 struct iscsi_session *session = conn->session;
1766 struct iscsi_task *task;
1767
1768 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1769 NULL, 0);
1770 if (!task) {
1771 spin_unlock_bh(&session->lock);
1772 iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
1773 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1774 spin_lock_bh(&session->lock);
1775 return -EPERM;
1776 }
1777 conn->tmfcmd_pdus_cnt++;
1778 conn->tmf_timer.expires = timeout * HZ + jiffies;
1779 conn->tmf_timer.function = iscsi_tmf_timedout;
1780 conn->tmf_timer.data = (unsigned long)conn;
1781 add_timer(&conn->tmf_timer);
1782 ISCSI_DBG_EH(session, "tmf set timeout\n");
1783
1784 spin_unlock_bh(&session->lock);
1785 mutex_unlock(&session->eh_mutex);
1786
1787 /*
1788 * block eh thread until:
1789 *
1790 * 1) tmf response
1791 * 2) tmf timeout
1792 * 3) session is terminated or restarted or userspace has
1793 * given up on recovery
1794 */
1795 wait_event_interruptible(conn->ehwait, age != session->age ||
1796 session->state != ISCSI_STATE_LOGGED_IN ||
1797 conn->tmf_state != TMF_QUEUED);
1798 if (signal_pending(current))
1799 flush_signals(current);
1800 del_timer_sync(&conn->tmf_timer);
1801
1802 mutex_lock(&session->eh_mutex);
1803 spin_lock_bh(&session->lock);
1804 /* if the session drops it will clean up the task */
1805 if (age != session->age ||
1806 session->state != ISCSI_STATE_LOGGED_IN)
1807 return -ENOTCONN;
1808 return 0;
1809}
1810
1811/*
1812 * Fail commands. session lock held and recv side suspended and xmit
1813 * thread flushed
1814 */
1815static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
1816 int error)
1817{
1818 struct iscsi_task *task;
1819 int i;
1820
1821 for (i = 0; i < conn->session->cmds_max; i++) {
1822 task = conn->session->cmds[i];
1823 if (!task->sc || task->state == ISCSI_TASK_FREE)
1824 continue;
1825
1826 if (lun != -1 && lun != task->sc->device->lun)
1827 continue;
1828
1829 ISCSI_DBG_SESSION(conn->session,
1830 "failing sc %p itt 0x%x state %d\n",
1831 task->sc, task->itt, task->state);
1832 fail_scsi_task(task, error);
1833 }
1834}
1835
1836/**
1837 * iscsi_suspend_queue - suspend iscsi_queuecommand
1838 * @conn: iscsi conn to stop queueing IO on
1839 *
1840 * This grabs the session lock to make sure no one is in
1841 * xmit_task/queuecommand, and then sets suspend to prevent
1842 * new commands from being queued. This only needs to be called
1843 * by offload drivers that need to sync a path like ep disconnect
1844 * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi
1845 * will call iscsi_start_tx and iscsi_unblock_session when in FFP.
1846 */
1847void iscsi_suspend_queue(struct iscsi_conn *conn)
1848{
1849 spin_lock_bh(&conn->session->lock);
1850 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1851 spin_unlock_bh(&conn->session->lock);
1852}
1853EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
1854
1855/**
1856 * iscsi_suspend_tx - suspend iscsi_data_xmit
1857 * @conn: iscsi conn tp stop processing IO on.
1858 *
1859 * This function sets the suspend bit to prevent iscsi_data_xmit
1860 * from sending new IO, and if work is queued on the xmit thread
1861 * it will wait for it to be completed.
1862 */
1863void iscsi_suspend_tx(struct iscsi_conn *conn)
1864{
1865 struct Scsi_Host *shost = conn->session->host;
1866 struct iscsi_host *ihost = shost_priv(shost);
1867
1868 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1869 if (ihost->workq)
1870 flush_workqueue(ihost->workq);
1871}
1872EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1873
1874static void iscsi_start_tx(struct iscsi_conn *conn)
1875{
1876 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1877 iscsi_conn_queue_work(conn);
1878}
1879
1880/*
1881 * We want to make sure a ping is in flight. It has timed out.
1882 * And we are not busy processing a pdu that is making
1883 * progress but got started before the ping and is taking a while
1884 * to complete so the ping is just stuck behind it in a queue.
1885 */
1886static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1887{
1888 if (conn->ping_task &&
1889 time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1890 (conn->ping_timeout * HZ), jiffies))
1891 return 1;
1892 else
1893 return 0;
1894}
1895
1896static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1897{
1898 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1899 struct iscsi_task *task = NULL, *running_task;
1900 struct iscsi_cls_session *cls_session;
1901 struct iscsi_session *session;
1902 struct iscsi_conn *conn;
1903 int i;
1904
1905 cls_session = starget_to_session(scsi_target(sc->device));
1906 session = cls_session->dd_data;
1907
1908 ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
1909
1910 spin_lock(&session->lock);
1911 if (session->state != ISCSI_STATE_LOGGED_IN) {
1912 /*
1913 * We are probably in the middle of iscsi recovery so let
1914 * that complete and handle the error.
1915 */
1916 rc = BLK_EH_RESET_TIMER;
1917 goto done;
1918 }
1919
1920 conn = session->leadconn;
1921 if (!conn) {
		/* In the middle of shutting down */
1923 rc = BLK_EH_RESET_TIMER;
1924 goto done;
1925 }
1926
1927 task = (struct iscsi_task *)sc->SCp.ptr;
1928 if (!task) {
1929 /*
1930 * Raced with completion. Just reset timer, and let it
1931 * complete normally
1932 */
1933 rc = BLK_EH_RESET_TIMER;
1934 goto done;
1935 }
1936
	/*
	 * If we have sent (at least queued to the network layer) a pdu or
	 * received one for this task since the last timeout, ask for more
	 * time. If on the next timeout we still have not made progress, we
	 * can check whether it is the task or the connection when we send
	 * the nop as a ping.
	 */
1944 if (time_after(task->last_xfer, task->last_timeout)) {
		ISCSI_DBG_EH(session, "Command making progress. Asking "
			     "scsi-ml for more time to complete. "
			     "Last data xfer at %lu. Last timeout was at "
			     "%lu.\n", task->last_xfer, task->last_timeout);
1949 task->have_checked_conn = false;
1950 rc = BLK_EH_RESET_TIMER;
1951 goto done;
1952 }
1953
1954 if (!conn->recv_timeout && !conn->ping_timeout)
1955 goto done;
	/*
	 * If the ping timed out, we are in the middle of cleaning up and
	 * can let the iscsi eh handle it.
	 */
1960 if (iscsi_has_ping_timed_out(conn)) {
1961 rc = BLK_EH_RESET_TIMER;
1962 goto done;
1963 }
1964
1965 for (i = 0; i < conn->session->cmds_max; i++) {
1966 running_task = conn->session->cmds[i];
1967 if (!running_task->sc || running_task == task ||
1968 running_task->state != ISCSI_TASK_RUNNING)
1969 continue;
1970
		/*
		 * Only check whether cmds started before this one have made
		 * progress; otherwise this check could never fail.
		 */
1975 if (time_after(running_task->sc->jiffies_at_alloc,
1976 task->sc->jiffies_at_alloc))
1977 continue;
1978
1979 if (time_after(running_task->last_xfer, task->last_timeout)) {
1980 /*
1981 * This task has not made progress, but a task
1982 * started before us has transferred data since
1983 * we started/last-checked. We could be queueing
1984 * too many tasks or the LU is bad.
1985 *
1986 * If the device is bad the cmds ahead of us on
1987 * other devs will complete, and this loop will
1988 * eventually fail starting the scsi eh.
1989 */
1990 ISCSI_DBG_EH(session, "Command has not made progress "
1991 "but commands ahead of it have. "
1992 "Asking scsi-ml for more time to "
1993 "complete. Our last xfer vs running task "
1994 "last xfer %lu/%lu. Last check %lu.\n",
1995 task->last_xfer, running_task->last_xfer,
1996 task->last_timeout);
1997 rc = BLK_EH_RESET_TIMER;
1998 goto done;
1999 }
2000 }
2001
2002 /* Assumes nop timeout is shorter than scsi cmd timeout */
2003 if (task->have_checked_conn)
2004 goto done;
2005
	/*
	 * We are already checking the transport, or a nop from a previous
	 * cmd timeout is still outstanding.
	 */
2010 if (conn->ping_task) {
2011 task->have_checked_conn = true;
2012 rc = BLK_EH_RESET_TIMER;
2013 goto done;
2014 }
2015
2016 /* Make sure there is a transport check done */
2017 iscsi_send_nopout(conn, NULL);
2018 task->have_checked_conn = true;
2019 rc = BLK_EH_RESET_TIMER;
2020
2021done:
2022 if (task)
2023 task->last_timeout = jiffies;
2024 spin_unlock(&session->lock);
2025 ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
2026 "timer reset" : "nh");
2027 return rc;
2028}
2029
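/*
 * Transport timer callback. If nothing has been received within
 * recv_timeout, send a nopout as a ping; if a previously sent ping has
 * itself timed out, fail the connection so recovery can start.
 */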
2030static void iscsi_check_transport_timeouts(unsigned long data)
2031{
2032 struct iscsi_conn *conn = (struct iscsi_conn *)data;
2033 struct iscsi_session *session = conn->session;
2034 unsigned long recv_timeout, next_timeout = 0, last_recv;
2035
2036 spin_lock(&session->lock);
2037 if (session->state != ISCSI_STATE_LOGGED_IN)
2038 goto done;
2039
2040 recv_timeout = conn->recv_timeout;
2041 if (!recv_timeout)
2042 goto done;
2043
2044 recv_timeout *= HZ;
2045 last_recv = conn->last_recv;
2046
2047 if (iscsi_has_ping_timed_out(conn)) {
2048 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
2049 "expired, recv timeout %d, last rx %lu, "
2050 "last ping %lu, now %lu\n",
2051 conn->ping_timeout, conn->recv_timeout,
2052 last_recv, conn->last_ping, jiffies);
2053 spin_unlock(&session->lock);
2054 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
2055 return;
2056 }
2057
2058 if (time_before_eq(last_recv + recv_timeout, jiffies)) {
2059 /* send a ping to try to provoke some traffic */
2060 ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
2061 iscsi_send_nopout(conn, NULL);
2062 next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
2063 } else
2064 next_timeout = last_recv + recv_timeout;
2065
2066 ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
2067 mod_timer(&conn->transport_timer, next_timeout);
2068done:
2069 spin_unlock(&session->lock);
2070}
2071
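/* Build an immediate ABORT TASK TMF PDU referencing the task's ITT and CmdSN. */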
2072static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
2073 struct iscsi_tm *hdr)
2074{
2075 memset(hdr, 0, sizeof(*hdr));
2076 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2077 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
2078 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2079 hdr->lun = task->lun;
2080 hdr->rtt = task->hdr_itt;
2081 hdr->refcmdsn = task->cmdsn;
2082}
2083
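/**
 * iscsi_eh_abort - attempt to abort the iscsi task behind a scsi command
 * @sc: scsi command to abort
 *
 * If the task has not been sent yet it is failed locally; otherwise an
 * ABORT TASK TMF is sent and its result decides between SUCCESS and FAILED.
 */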
2084int iscsi_eh_abort(struct scsi_cmnd *sc)
2085{
2086 struct iscsi_cls_session *cls_session;
2087 struct iscsi_session *session;
2088 struct iscsi_conn *conn;
2089 struct iscsi_task *task;
2090 struct iscsi_tm *hdr;
2091 int rc, age;
2092
2093 cls_session = starget_to_session(scsi_target(sc->device));
2094 session = cls_session->dd_data;
2095
2096 ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
2097
2098 mutex_lock(&session->eh_mutex);
2099 spin_lock_bh(&session->lock);
2100 /*
2101 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
2102 * got the command.
2103 */
2104 if (!sc->SCp.ptr) {
2105 ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
2106 "it completed.\n");
2107 spin_unlock_bh(&session->lock);
2108 mutex_unlock(&session->eh_mutex);
2109 return SUCCESS;
2110 }
2111
2112 /*
2113 * If we are not logged in or we have started a new session
2114 * then let the host reset code handle this
2115 */
2116 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
2117 sc->SCp.phase != session->age) {
2118 spin_unlock_bh(&session->lock);
2119 mutex_unlock(&session->eh_mutex);
2120 ISCSI_DBG_EH(session, "failing abort due to dropped "
2121 "session.\n");
2122 return FAILED;
2123 }
2124
2125 conn = session->leadconn;
2126 conn->eh_abort_cnt++;
2127 age = session->age;
2128
2129 task = (struct iscsi_task *)sc->SCp.ptr;
2130 ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n",
2131 sc, task->itt);
2132
2133 /* task completed before time out */
2134 if (!task->sc) {
2135 ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
2136 goto success;
2137 }
2138
2139 if (task->state == ISCSI_TASK_PENDING) {
2140 fail_scsi_task(task, DID_ABORT);
2141 goto success;
2142 }
2143
2144 /* only have one tmf outstanding at a time */
2145 if (conn->tmf_state != TMF_INITIAL)
2146 goto failed;
2147 conn->tmf_state = TMF_QUEUED;
2148
2149 hdr = &conn->tmhdr;
2150 iscsi_prep_abort_task_pdu(task, hdr);
2151
2152 if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
2153 rc = FAILED;
2154 goto failed;
2155 }
2156
2157 switch (conn->tmf_state) {
2158 case TMF_SUCCESS:
2159 spin_unlock_bh(&session->lock);
		/*
		 * Stop the tx side in case the target has sent an abort rsp
		 * but the initiator is still writing out data.
		 */
2164 iscsi_suspend_tx(conn);
		/*
		 * We do not stop the recv side because targets have been
		 * well behaved: none has sent a successful tmf response and
		 * then sent more data for the cmd.
		 */
2170 spin_lock_bh(&session->lock);
2171 fail_scsi_task(task, DID_ABORT);
2172 conn->tmf_state = TMF_INITIAL;
2173 memset(hdr, 0, sizeof(*hdr));
2174 spin_unlock_bh(&session->lock);
2175 iscsi_start_tx(conn);
2176 goto success_unlocked;
2177 case TMF_TIMEDOUT:
2178 spin_unlock_bh(&session->lock);
2179 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2180 goto failed_unlocked;
2181 case TMF_NOT_FOUND:
2182 if (!sc->SCp.ptr) {
2183 conn->tmf_state = TMF_INITIAL;
2184 memset(hdr, 0, sizeof(*hdr));
2185 /* task completed before tmf abort response */
2186 ISCSI_DBG_EH(session, "sc completed while abort in "
2187 "progress\n");
2188 goto success;
2189 }
2190 /* fall through */
2191 default:
2192 conn->tmf_state = TMF_INITIAL;
2193 goto failed;
2194 }
2195
2196success:
2197 spin_unlock_bh(&session->lock);
2198success_unlocked:
2199 ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
2200 sc, task->itt);
2201 mutex_unlock(&session->eh_mutex);
2202 return SUCCESS;
2203
2204failed:
2205 spin_unlock_bh(&session->lock);
2206failed_unlocked:
2207 ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
2208 task ? task->itt : 0);
2209 mutex_unlock(&session->eh_mutex);
2210 return FAILED;
2211}
2212EXPORT_SYMBOL_GPL(iscsi_eh_abort);
2213
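/* Build an immediate LOGICAL UNIT RESET TMF PDU for the command's LUN. */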
2214static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2215{
2216 memset(hdr, 0, sizeof(*hdr));
2217 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2218 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2219 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2220 int_to_scsilun(sc->device->lun, &hdr->lun);
2221 hdr->rtt = RESERVED_ITT;
2222}
2223
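/**
 * iscsi_eh_device_reset - reset the LUN a command was sent to
 * @sc: scsi command
 *
 * This sends a LOGICAL UNIT RESET TMF and, on success, fails all
 * outstanding tasks for that LUN with DID_ERROR.
 */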
2224int iscsi_eh_device_reset(struct scsi_cmnd *sc)
2225{
2226 struct iscsi_cls_session *cls_session;
2227 struct iscsi_session *session;
2228 struct iscsi_conn *conn;
2229 struct iscsi_tm *hdr;
2230 int rc = FAILED;
2231
2232 cls_session = starget_to_session(scsi_target(sc->device));
2233 session = cls_session->dd_data;
2234
2235 ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
2236
2237 mutex_lock(&session->eh_mutex);
2238 spin_lock_bh(&session->lock);
2239 /*
2240 * Just check if we are not logged in. We cannot check for
	 * the phase because the reset could come from an ioctl.
2242 */
2243 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
2244 goto unlock;
2245 conn = session->leadconn;
2246
2247 /* only have one tmf outstanding at a time */
2248 if (conn->tmf_state != TMF_INITIAL)
2249 goto unlock;
2250 conn->tmf_state = TMF_QUEUED;
2251
2252 hdr = &conn->tmhdr;
2253 iscsi_prep_lun_reset_pdu(sc, hdr);
2254
2255 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
2256 session->lu_reset_timeout)) {
2257 rc = FAILED;
2258 goto unlock;
2259 }
2260
2261 switch (conn->tmf_state) {
2262 case TMF_SUCCESS:
2263 break;
2264 case TMF_TIMEDOUT:
2265 spin_unlock_bh(&session->lock);
2266 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2267 goto done;
2268 default:
2269 conn->tmf_state = TMF_INITIAL;
2270 goto unlock;
2271 }
2272
2273 rc = SUCCESS;
2274 spin_unlock_bh(&session->lock);
2275
2276 iscsi_suspend_tx(conn);
2277
2278 spin_lock_bh(&session->lock);
2279 memset(hdr, 0, sizeof(*hdr));
2280 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
2281 conn->tmf_state = TMF_INITIAL;
2282 spin_unlock_bh(&session->lock);
2283
2284 iscsi_start_tx(conn);
2285 goto done;
2286
2287unlock:
2288 spin_unlock_bh(&session->lock);
2289done:
2290 ISCSI_DBG_EH(session, "dev reset result = %s\n",
2291 rc == SUCCESS ? "SUCCESS" : "FAILED");
2292 mutex_unlock(&session->eh_mutex);
2293 return rc;
2294}
2295EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
2296
2297void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
2298{
2299 struct iscsi_session *session = cls_session->dd_data;
2300
2301 spin_lock_bh(&session->lock);
2302 if (session->state != ISCSI_STATE_LOGGED_IN) {
2303 session->state = ISCSI_STATE_RECOVERY_FAILED;
2304 if (session->leadconn)
2305 wake_up(&session->leadconn->ehwait);
2306 }
2307 spin_unlock_bh(&session->lock);
2308}
2309EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
2310
2311/**
2312 * iscsi_eh_session_reset - drop session and attempt relogin
2313 * @sc: scsi command
2314 *
2315 * This function will wait for a relogin, session termination from
2316 * userspace, or a recovery/replacement timeout.
2317 */
2318int iscsi_eh_session_reset(struct scsi_cmnd *sc)
2319{
2320 struct iscsi_cls_session *cls_session;
2321 struct iscsi_session *session;
2322 struct iscsi_conn *conn;
2323
2324 cls_session = starget_to_session(scsi_target(sc->device));
2325 session = cls_session->dd_data;
2326 conn = session->leadconn;
2327
2328 mutex_lock(&session->eh_mutex);
2329 spin_lock_bh(&session->lock);
2330 if (session->state == ISCSI_STATE_TERMINATE) {
2331failed:
2332 ISCSI_DBG_EH(session,
2333 "failing session reset: Could not log back into "
2334 "%s, %s [age %d]\n", session->targetname,
2335 conn->persistent_address, session->age);
2336 spin_unlock_bh(&session->lock);
2337 mutex_unlock(&session->eh_mutex);
2338 return FAILED;
2339 }
2340
2341 spin_unlock_bh(&session->lock);
2342 mutex_unlock(&session->eh_mutex);
	/*
	 * We drop the lock here, but the leadconn cannot be destroyed while
	 * we are in the scsi eh.
	 */
2347 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2348
2349 ISCSI_DBG_EH(session, "wait for relogin\n");
2350 wait_event_interruptible(conn->ehwait,
2351 session->state == ISCSI_STATE_TERMINATE ||
2352 session->state == ISCSI_STATE_LOGGED_IN ||
2353 session->state == ISCSI_STATE_RECOVERY_FAILED);
2354 if (signal_pending(current))
2355 flush_signals(current);
2356
2357 mutex_lock(&session->eh_mutex);
2358 spin_lock_bh(&session->lock);
2359 if (session->state == ISCSI_STATE_LOGGED_IN) {
2360 ISCSI_DBG_EH(session,
2361 "session reset succeeded for %s,%s\n",
2362 session->targetname, conn->persistent_address);
2363 } else
2364 goto failed;
2365 spin_unlock_bh(&session->lock);
2366 mutex_unlock(&session->eh_mutex);
2367 return SUCCESS;
2368}
2369EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
2370
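/* Build an immediate TARGET WARM RESET TMF PDU. */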
2371static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2372{
2373 memset(hdr, 0, sizeof(*hdr));
2374 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2375 hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2376 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2377 hdr->rtt = RESERVED_ITT;
2378}
2379
2380/**
2381 * iscsi_eh_target_reset - reset target
2382 * @sc: scsi command
2383 *
2384 * This will attempt to send a warm target reset.
2385 */
2386int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2387{
2388 struct iscsi_cls_session *cls_session;
2389 struct iscsi_session *session;
2390 struct iscsi_conn *conn;
2391 struct iscsi_tm *hdr;
2392 int rc = FAILED;
2393
2394 cls_session = starget_to_session(scsi_target(sc->device));
2395 session = cls_session->dd_data;
2396
2397 ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc,
2398 session->targetname);
2399
2400 mutex_lock(&session->eh_mutex);
2401 spin_lock_bh(&session->lock);
2402 /*
2403 * Just check if we are not logged in. We cannot check for
	 * the phase because the reset could come from an ioctl.
2405 */
2406 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
2407 goto unlock;
2408 conn = session->leadconn;
2409
2410 /* only have one tmf outstanding at a time */
2411 if (conn->tmf_state != TMF_INITIAL)
2412 goto unlock;
2413 conn->tmf_state = TMF_QUEUED;
2414
2415 hdr = &conn->tmhdr;
2416 iscsi_prep_tgt_reset_pdu(sc, hdr);
2417
2418 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
2419 session->tgt_reset_timeout)) {
2420 rc = FAILED;
2421 goto unlock;
2422 }
2423
2424 switch (conn->tmf_state) {
2425 case TMF_SUCCESS:
2426 break;
2427 case TMF_TIMEDOUT:
2428 spin_unlock_bh(&session->lock);
2429 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2430 goto done;
2431 default:
2432 conn->tmf_state = TMF_INITIAL;
2433 goto unlock;
2434 }
2435
2436 rc = SUCCESS;
2437 spin_unlock_bh(&session->lock);
2438
2439 iscsi_suspend_tx(conn);
2440
2441 spin_lock_bh(&session->lock);
2442 memset(hdr, 0, sizeof(*hdr));
2443 fail_scsi_tasks(conn, -1, DID_ERROR);
2444 conn->tmf_state = TMF_INITIAL;
2445 spin_unlock_bh(&session->lock);
2446
2447 iscsi_start_tx(conn);
2448 goto done;
2449
2450unlock:
2451 spin_unlock_bh(&session->lock);
2452done:
2453 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
2454 rc == SUCCESS ? "SUCCESS" : "FAILED");
2455 mutex_unlock(&session->eh_mutex);
2456 return rc;
2457}
2458EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
2459
2460/**
2461 * iscsi_eh_recover_target - reset target and possibly the session
2462 * @sc: scsi command
2463 *
2464 * This will attempt to send a warm target reset. If that fails,
2465 * we will escalate to ERL0 session recovery.
2466 */
2467int iscsi_eh_recover_target(struct scsi_cmnd *sc)
2468{
2469 int rc;
2470
2471 rc = iscsi_eh_target_reset(sc);
2472 if (rc == FAILED)
2473 rc = iscsi_eh_session_reset(sc);
2474 return rc;
2475}
2476EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
2477
2478/*
2479 * Pre-allocate a pool of @max items of @item_size. By default, the pool
 * should be accessed via kfifo_{in,out} on q->queue.
 * Optionally, the caller can obtain the array of object pointers
 * by passing in a non-NULL @items pointer.
2483 */
2484int
2485iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
2486{
2487 int i, num_arrays = 1;
2488
2489 memset(q, 0, sizeof(*q));
2490
2491 q->max = max;
2492
	/* If the caller passed an items pointer, they want a copy of
	 * the array. */
2495 if (items)
2496 num_arrays++;
2497 q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
2498 if (q->pool == NULL)
2499 return -ENOMEM;
2500
2501 kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
2502
2503 for (i = 0; i < max; i++) {
2504 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
2505 if (q->pool[i] == NULL) {
2506 q->max = i;
2507 goto enomem;
2508 }
2509 kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
2510 }
2511
2512 if (items) {
2513 *items = q->pool + max;
2514 memcpy(*items, q->pool, max * sizeof(void *));
2515 }
2516
2517 return 0;
2518
2519enomem:
2520 iscsi_pool_free(q);
2521 return -ENOMEM;
2522}
2523EXPORT_SYMBOL_GPL(iscsi_pool_init);
2524
2525void iscsi_pool_free(struct iscsi_pool *q)
2526{
2527 int i;
2528
2529 for (i = 0; i < q->max; i++)
2530 kfree(q->pool[i]);
2531 kfree(q->pool);
2532}
2533EXPORT_SYMBOL_GPL(iscsi_pool_free);
2534
2535/**
2536 * iscsi_host_add - add host to system
2537 * @shost: scsi host
2538 * @pdev: parent device
2539 *
2540 * This should be called by partial offload and software iscsi drivers
2541 * to add a host to the system.
2542 */
2543int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
2544{
2545 if (!shost->can_queue)
2546 shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
2547
2548 if (!shost->cmd_per_lun)
2549 shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
2550
2551 if (!shost->transportt->eh_timed_out)
2552 shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
2553 return scsi_add_host(shost, pdev);
2554}
2555EXPORT_SYMBOL_GPL(iscsi_host_add);
2556
2557/**
2558 * iscsi_host_alloc - allocate a host and driver data
2559 * @sht: scsi host template
2560 * @dd_data_size: driver host data size
2561 * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue
2562 *
2563 * This should be called by partial offload and software iscsi drivers.
2564 * To access the driver specific memory use the iscsi_host_priv() macro.
2565 */
2566struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
2567 int dd_data_size, bool xmit_can_sleep)
2568{
2569 struct Scsi_Host *shost;
2570 struct iscsi_host *ihost;
2571
2572 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
2573 if (!shost)
2574 return NULL;
2575 ihost = shost_priv(shost);
2576
2577 if (xmit_can_sleep) {
2578 snprintf(ihost->workq_name, sizeof(ihost->workq_name),
2579 "iscsi_q_%d", shost->host_no);
2580 ihost->workq = create_singlethread_workqueue(ihost->workq_name);
2581 if (!ihost->workq)
2582 goto free_host;
2583 }
2584
2585 spin_lock_init(&ihost->lock);
2586 ihost->state = ISCSI_HOST_SETUP;
2587 ihost->num_sessions = 0;
2588 init_waitqueue_head(&ihost->session_removal_wq);
2589 return shost;
2590
2591free_host:
2592 scsi_host_put(shost);
2593 return NULL;
2594}
2595EXPORT_SYMBOL_GPL(iscsi_host_alloc);
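/*
 * A rough usage sketch (the foo_* names are hypothetical): a software iscsi
 * driver typically allocates the host with room for its private data and
 * then registers it:
 *
 *	shost = iscsi_host_alloc(&foo_sht, sizeof(struct foo_host), true);
 *	if (!shost)
 *		return -ENOMEM;
 *	if (iscsi_host_add(shost, parent_dev))
 *		goto free_host;
 *
 * On teardown the driver calls iscsi_host_remove() and then
 * iscsi_host_free().
 */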
2596
2597static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
2598{
2599 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST);
2600}
2601
2602/**
2603 * iscsi_host_remove - remove host and sessions
2604 * @shost: scsi host
2605 *
2606 * If there are any sessions left, this will initiate the removal and wait
2607 * for the completion.
2608 */
2609void iscsi_host_remove(struct Scsi_Host *shost)
2610{
2611 struct iscsi_host *ihost = shost_priv(shost);
2612 unsigned long flags;
2613
2614 spin_lock_irqsave(&ihost->lock, flags);
2615 ihost->state = ISCSI_HOST_REMOVED;
2616 spin_unlock_irqrestore(&ihost->lock, flags);
2617
2618 iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
2619 wait_event_interruptible(ihost->session_removal_wq,
2620 ihost->num_sessions == 0);
2621 if (signal_pending(current))
2622 flush_signals(current);
2623
2624 scsi_remove_host(shost);
2625 if (ihost->workq)
2626 destroy_workqueue(ihost->workq);
2627}
2628EXPORT_SYMBOL_GPL(iscsi_host_remove);
2629
2630void iscsi_host_free(struct Scsi_Host *shost)
2631{
2632 struct iscsi_host *ihost = shost_priv(shost);
2633
2634 kfree(ihost->netdev);
2635 kfree(ihost->hwaddress);
2636 kfree(ihost->initiatorname);
2637 scsi_host_put(shost);
2638}
2639EXPORT_SYMBOL_GPL(iscsi_host_free);
2640
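/*
 * Drop the host's session count and wake up iscsi_host_remove(), which
 * waits for the count to reach zero before tearing the host down.
 */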
2641static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
2642{
2643 struct iscsi_host *ihost = shost_priv(shost);
2644 unsigned long flags;
2645
2646 shost = scsi_host_get(shost);
2647 if (!shost) {
2648 printk(KERN_ERR "Invalid state. Cannot notify host removal "
2649 "of session teardown event because host already "
2650 "removed.\n");
2651 return;
2652 }
2653
2654 spin_lock_irqsave(&ihost->lock, flags);
2655 ihost->num_sessions--;
2656 if (ihost->num_sessions == 0)
2657 wake_up(&ihost->session_removal_wq);
2658 spin_unlock_irqrestore(&ihost->lock, flags);
2659 scsi_host_put(shost);
2660}
2661
2662/**
 * iscsi_session_setup - create an iscsi cls session and iscsi session
 * @iscsit: iscsi transport template
 * @shost: scsi host
 * @cmds_max: total number of tasks the session can queue
 * @dd_size: private driver data size
 * @cmd_task_size: LLD task private data size
 * @initial_cmdsn: initial CmdSN
 * @id: target ID to add to this session
 *
 * This can be used by software iscsi_transports that allocate
 * a session per scsi host.
 *
 * Callers should set cmds_max to the largest total number (mgmt + scsi) of
2674 * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
2675 * for nop handling and login/logout requests.
2676 */
2677struct iscsi_cls_session *
2678iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2679 uint16_t cmds_max, int dd_size, int cmd_task_size,
2680 uint32_t initial_cmdsn, unsigned int id)
2681{
2682 struct iscsi_host *ihost = shost_priv(shost);
2683 struct iscsi_session *session;
2684 struct iscsi_cls_session *cls_session;
2685 int cmd_i, scsi_cmds, total_cmds = cmds_max;
2686 unsigned long flags;
2687
2688 spin_lock_irqsave(&ihost->lock, flags);
2689 if (ihost->state == ISCSI_HOST_REMOVED) {
2690 spin_unlock_irqrestore(&ihost->lock, flags);
2691 return NULL;
2692 }
2693 ihost->num_sessions++;
2694 spin_unlock_irqrestore(&ihost->lock, flags);
2695
2696 if (!total_cmds)
2697 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
2698 /*
2699 * The iscsi layer needs some tasks for nop handling and tmfs,
	 * so cmds_max must at least be ISCSI_MGMT_CMDS_MAX plus 1 command
	 * for scsi IO.
2702 */
2703 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
2704 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
2705 "must be a power of two that is at least %d.\n",
2706 total_cmds, ISCSI_TOTAL_CMDS_MIN);
2707 goto dec_session_count;
2708 }
2709
2710 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
2711 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
2712 "must be a power of 2 less than or equal to %d.\n",
2713 cmds_max, ISCSI_TOTAL_CMDS_MAX);
2714 total_cmds = ISCSI_TOTAL_CMDS_MAX;
2715 }
2716
2717 if (!is_power_of_2(total_cmds)) {
2718 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
2719 "must be a power of 2.\n", total_cmds);
2720 total_cmds = rounddown_pow_of_two(total_cmds);
2721 if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
2722 return NULL;
2723 printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
2724 total_cmds);
2725 }
2726 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
2727
2728 cls_session = iscsi_alloc_session(shost, iscsit,
2729 sizeof(struct iscsi_session) +
2730 dd_size);
2731 if (!cls_session)
2732 goto dec_session_count;
2733 session = cls_session->dd_data;
2734 session->cls_session = cls_session;
2735 session->host = shost;
2736 session->state = ISCSI_STATE_FREE;
2737 session->fast_abort = 1;
2738 session->tgt_reset_timeout = 30;
2739 session->lu_reset_timeout = 15;
2740 session->abort_timeout = 10;
2741 session->scsi_cmds_max = scsi_cmds;
2742 session->cmds_max = total_cmds;
2743 session->queued_cmdsn = session->cmdsn = initial_cmdsn;
2744 session->exp_cmdsn = initial_cmdsn + 1;
2745 session->max_cmdsn = initial_cmdsn + 1;
2746 session->max_r2t = 1;
2747 session->tt = iscsit;
2748 session->dd_data = cls_session->dd_data + sizeof(*session);
2749 mutex_init(&session->eh_mutex);
2750 spin_lock_init(&session->lock);
2751
2752 /* initialize SCSI PDU commands pool */
2753 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
2754 (void***)&session->cmds,
2755 cmd_task_size + sizeof(struct iscsi_task)))
2756 goto cmdpool_alloc_fail;
2757
2758 /* pre-format cmds pool with ITT */
2759 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
2760 struct iscsi_task *task = session->cmds[cmd_i];
2761
2762 if (cmd_task_size)
2763 task->dd_data = &task[1];
2764 task->itt = cmd_i;
2765 task->state = ISCSI_TASK_FREE;
2766 INIT_LIST_HEAD(&task->running);
2767 }
2768
2769 if (!try_module_get(iscsit->owner))
2770 goto module_get_fail;
2771
2772 if (iscsi_add_session(cls_session, id))
2773 goto cls_session_fail;
2774
2775 return cls_session;
2776
2777cls_session_fail:
2778 module_put(iscsit->owner);
2779module_get_fail:
2780 iscsi_pool_free(&session->cmdpool);
2781cmdpool_alloc_fail:
2782 iscsi_free_session(cls_session);
2783dec_session_count:
2784 iscsi_host_dec_session_cnt(shost);
2785 return NULL;
2786}
2787EXPORT_SYMBOL_GPL(iscsi_session_setup);
2788
2789/**
2790 * iscsi_session_teardown - destroy session, host, and cls_session
2791 * @cls_session: iscsi session
2792 *
2793 * The driver must have called iscsi_remove_session before
2794 * calling this.
2795 */
2796void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2797{
2798 struct iscsi_session *session = cls_session->dd_data;
2799 struct module *owner = cls_session->transport->owner;
2800 struct Scsi_Host *shost = session->host;
2801
2802 iscsi_pool_free(&session->cmdpool);
2803
2804 kfree(session->password);
2805 kfree(session->password_in);
2806 kfree(session->username);
2807 kfree(session->username_in);
2808 kfree(session->targetname);
2809 kfree(session->initiatorname);
2810 kfree(session->ifacename);
2811
2812 iscsi_destroy_session(cls_session);
2813 iscsi_host_dec_session_cnt(shost);
2814 module_put(owner);
2815}
2816EXPORT_SYMBOL_GPL(iscsi_session_teardown);
2817
2818/**
2819 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
2820 * @cls_session: iscsi_cls_session
2821 * @dd_size: private driver data size
2822 * @conn_idx: cid
2823 */
2824struct iscsi_cls_conn *
2825iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2826 uint32_t conn_idx)
2827{
2828 struct iscsi_session *session = cls_session->dd_data;
2829 struct iscsi_conn *conn;
2830 struct iscsi_cls_conn *cls_conn;
2831 char *data;
2832
2833 cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
2834 conn_idx);
2835 if (!cls_conn)
2836 return NULL;
2837 conn = cls_conn->dd_data;
2838 memset(conn, 0, sizeof(*conn) + dd_size);
2839
2840 conn->dd_data = cls_conn->dd_data + sizeof(*conn);
2841 conn->session = session;
2842 conn->cls_conn = cls_conn;
2843 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2844 conn->id = conn_idx;
2845 conn->exp_statsn = 0;
2846 conn->tmf_state = TMF_INITIAL;
2847
2848 init_timer(&conn->transport_timer);
2849 conn->transport_timer.data = (unsigned long)conn;
2850 conn->transport_timer.function = iscsi_check_transport_timeouts;
2851
2852 INIT_LIST_HEAD(&conn->mgmtqueue);
2853 INIT_LIST_HEAD(&conn->cmdqueue);
2854 INIT_LIST_HEAD(&conn->requeue);
2855 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2856
2857 /* allocate login_task used for the login/text sequences */
2858 spin_lock_bh(&session->lock);
2859 if (!kfifo_out(&session->cmdpool.queue,
2860 (void*)&conn->login_task,
2861 sizeof(void*))) {
2862 spin_unlock_bh(&session->lock);
2863 goto login_task_alloc_fail;
2864 }
2865 spin_unlock_bh(&session->lock);
2866
2867 data = (char *) __get_free_pages(GFP_KERNEL,
2868 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
2869 if (!data)
2870 goto login_task_data_alloc_fail;
2871 conn->login_task->data = conn->data = data;
2872
2873 init_timer(&conn->tmf_timer);
2874 init_waitqueue_head(&conn->ehwait);
2875
2876 return cls_conn;
2877
2878login_task_data_alloc_fail:
2879 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
2880 sizeof(void*));
2881login_task_alloc_fail:
2882 iscsi_destroy_conn(cls_conn);
2883 return NULL;
2884}
2885EXPORT_SYMBOL_GPL(iscsi_conn_setup);
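/*
 * Rough setup order as seen from an LLD (the foo_* names are hypothetical):
 *
 *	cls_session = iscsi_session_setup(&foo_transport, shost, cmds_max,
 *					  sizeof(struct foo_session),
 *					  sizeof(struct foo_task),
 *					  initial_cmdsn, id);
 *	cls_conn = iscsi_conn_setup(cls_session, sizeof(struct foo_conn), cid);
 *
 * Userspace then binds and starts the connection through iscsi_conn_bind()
 * and iscsi_conn_start() once login has completed.
 */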
2886
2887/**
2888 * iscsi_conn_teardown - teardown iscsi connection
 * @cls_conn: iscsi class connection
2890 *
 * TODO: we may need to make this into a two-step process
 * like scsi-ml's remove + put host.
2893 */
2894void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2895{
2896 struct iscsi_conn *conn = cls_conn->dd_data;
2897 struct iscsi_session *session = conn->session;
2898 unsigned long flags;
2899
2900 del_timer_sync(&conn->transport_timer);
2901
2902 spin_lock_bh(&session->lock);
2903 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2904 if (session->leadconn == conn) {
2905 /*
2906 * leading connection? then give up on recovery.
2907 */
2908 session->state = ISCSI_STATE_TERMINATE;
2909 wake_up(&conn->ehwait);
2910 }
2911 spin_unlock_bh(&session->lock);
2912
2913 /*
2914 * Block until all in-progress commands for this connection
2915 * time out or fail.
2916 */
2917 for (;;) {
2918 spin_lock_irqsave(session->host->host_lock, flags);
2919 if (!session->host->host_busy) { /* OK for ERL == 0 */
2920 spin_unlock_irqrestore(session->host->host_lock, flags);
2921 break;
2922 }
2923 spin_unlock_irqrestore(session->host->host_lock, flags);
2924 msleep_interruptible(500);
2925 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
2926 "host_busy %d host_failed %d\n",
2927 session->host->host_busy,
2928 session->host->host_failed);
2929 /*
2930 * force eh_abort() to unblock
2931 */
2932 wake_up(&conn->ehwait);
2933 }
2934
2935 /* flush queued up work because we free the connection below */
2936 iscsi_suspend_tx(conn);
2937
2938 spin_lock_bh(&session->lock);
2939 free_pages((unsigned long) conn->data,
2940 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
2941 kfree(conn->persistent_address);
2942 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
2943 sizeof(void*));
2944 if (session->leadconn == conn)
2945 session->leadconn = NULL;
2946 spin_unlock_bh(&session->lock);
2947
2948 iscsi_destroy_conn(cls_conn);
2949}
2950EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
2951
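/**
 * iscsi_conn_start - move the connection and session into full feature phase
 * @cls_conn: iscsi class connection that has completed login
 *
 * Validates the negotiated parameters, marks the session as logged in,
 * arms the transport timer and unblocks the session so IO can flow.
 */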
2952int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2953{
2954 struct iscsi_conn *conn = cls_conn->dd_data;
2955 struct iscsi_session *session = conn->session;
2956
2957 if (!session) {
2958 iscsi_conn_printk(KERN_ERR, conn,
2959 "can't start unbound connection\n");
2960 return -EPERM;
2961 }
2962
2963 if ((session->imm_data_en || !session->initial_r2t_en) &&
2964 session->first_burst > session->max_burst) {
2965 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
2966 "first_burst %d max_burst %d\n",
2967 session->first_burst, session->max_burst);
2968 return -EINVAL;
2969 }
2970
2971 if (conn->ping_timeout && !conn->recv_timeout) {
2972 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
				  "zero. Using 5 seconds.\n");
2974 conn->recv_timeout = 5;
2975 }
2976
2977 if (conn->recv_timeout && !conn->ping_timeout) {
2978 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
2979 "zero. Using 5 seconds.\n");
2980 conn->ping_timeout = 5;
2981 }
2982
2983 spin_lock_bh(&session->lock);
2984 conn->c_stage = ISCSI_CONN_STARTED;
2985 session->state = ISCSI_STATE_LOGGED_IN;
2986 session->queued_cmdsn = session->cmdsn;
2987
2988 conn->last_recv = jiffies;
2989 conn->last_ping = jiffies;
2990 if (conn->recv_timeout && conn->ping_timeout)
2991 mod_timer(&conn->transport_timer,
2992 jiffies + (conn->recv_timeout * HZ));
2993
2994 switch(conn->stop_stage) {
2995 case STOP_CONN_RECOVER:
2996 /*
2997 * unblock eh_abort() if it is blocked. re-try all
2998 * commands after successful recovery
2999 */
3000 conn->stop_stage = 0;
3001 conn->tmf_state = TMF_INITIAL;
3002 session->age++;
3003 if (session->age == 16)
3004 session->age = 0;
3005 break;
3006 case STOP_CONN_TERM:
3007 conn->stop_stage = 0;
3008 break;
3009 default:
3010 break;
3011 }
3012 spin_unlock_bh(&session->lock);
3013
3014 iscsi_unblock_session(session->cls_session);
3015 wake_up(&conn->ehwait);
3016 return 0;
3017}
3018EXPORT_SYMBOL_GPL(iscsi_conn_start);
3019
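/*
 * Fail all outstanding mgmt (non-SCSI) tasks. Tasks that were never sent
 * are simply completed; the rest are marked as aborted by session recovery.
 */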
3020static void
3021fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
3022{
3023 struct iscsi_task *task;
3024 int i, state;
3025
3026 for (i = 0; i < conn->session->cmds_max; i++) {
3027 task = conn->session->cmds[i];
3028 if (task->sc)
3029 continue;
3030
3031 if (task->state == ISCSI_TASK_FREE)
3032 continue;
3033
3034 ISCSI_DBG_SESSION(conn->session,
3035 "failing mgmt itt 0x%x state %d\n",
3036 task->itt, task->state);
3037 state = ISCSI_TASK_ABRT_SESS_RECOV;
3038 if (task->state == ISCSI_TASK_PENDING)
3039 state = ISCSI_TASK_COMPLETED;
3040 iscsi_complete_task(task, state);
3041
3042 }
3043}
3044
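/*
 * Common connection stop path: suspend tx, mark the connection stopped and
 * fail all outstanding scsi and mgmt tasks. For STOP_CONN_TERM the session
 * is terminated; otherwise it is placed into recovery and blocked if it is
 * newly entering recovery.
 */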
3045static void iscsi_start_session_recovery(struct iscsi_session *session,
3046 struct iscsi_conn *conn, int flag)
3047{
3048 int old_stop_stage;
3049
3050 mutex_lock(&session->eh_mutex);
3051 spin_lock_bh(&session->lock);
3052 if (conn->stop_stage == STOP_CONN_TERM) {
3053 spin_unlock_bh(&session->lock);
3054 mutex_unlock(&session->eh_mutex);
3055 return;
3056 }
3057
3058 /*
3059 * When this is called for the in_login state, we only want to clean
3060 * up the login task and connection. We do not need to block and set
	 * the recovery state again.
3062 */
3063 if (flag == STOP_CONN_TERM)
3064 session->state = ISCSI_STATE_TERMINATE;
3065 else if (conn->stop_stage != STOP_CONN_RECOVER)
3066 session->state = ISCSI_STATE_IN_RECOVERY;
3067
3068 old_stop_stage = conn->stop_stage;
3069 conn->stop_stage = flag;
3070 spin_unlock_bh(&session->lock);
3071
3072 del_timer_sync(&conn->transport_timer);
3073 iscsi_suspend_tx(conn);
3074
3075 spin_lock_bh(&session->lock);
3076 conn->c_stage = ISCSI_CONN_STOPPED;
3077 spin_unlock_bh(&session->lock);
3078
	/*
	 * For connection-level recovery we should not calculate the header
	 * digest. conn->hdr_size is used for optimization in hdr_extract()
	 * and will be re-negotiated at set_param() time.
	 */
3085 if (flag == STOP_CONN_RECOVER) {
3086 conn->hdrdgst_en = 0;
3087 conn->datadgst_en = 0;
3088 if (session->state == ISCSI_STATE_IN_RECOVERY &&
3089 old_stop_stage != STOP_CONN_RECOVER) {
3090 ISCSI_DBG_SESSION(session, "blocking session\n");
3091 iscsi_block_session(session->cls_session);
3092 }
3093 }
3094
3095 /*
3096 * flush queues.
3097 */
3098 spin_lock_bh(&session->lock);
3099 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
3100 fail_mgmt_tasks(session, conn);
3101 memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
3102 spin_unlock_bh(&session->lock);
3103 mutex_unlock(&session->eh_mutex);
3104}
3105
3106void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
3107{
3108 struct iscsi_conn *conn = cls_conn->dd_data;
3109 struct iscsi_session *session = conn->session;
3110
3111 switch (flag) {
3112 case STOP_CONN_RECOVER:
3113 case STOP_CONN_TERM:
3114 iscsi_start_session_recovery(session, conn, flag);
3115 break;
3116 default:
3117 iscsi_conn_printk(KERN_ERR, conn,
3118 "invalid stop flag %d\n", flag);
3119 }
3120}
3121EXPORT_SYMBOL_GPL(iscsi_conn_stop);
3122
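/**
 * iscsi_conn_bind - bind a connection to its session
 * @cls_session: iscsi class session
 * @cls_conn: iscsi class connection
 * @is_leading: non-zero if this is the session's leading connection
 *
 * If @is_leading is set the connection becomes the session's leadconn.
 * The rx/tx suspend bits are cleared so the xmit worker can run during
 * the login phase.
 */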
3123int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
3124 struct iscsi_cls_conn *cls_conn, int is_leading)
3125{
3126 struct iscsi_session *session = cls_session->dd_data;
3127 struct iscsi_conn *conn = cls_conn->dd_data;
3128
3129 spin_lock_bh(&session->lock);
3130 if (is_leading)
3131 session->leadconn = conn;
3132 spin_unlock_bh(&session->lock);
3133
	/*
	 * Unblock xmitworker() so the Login Phase can pass through.
	 */
3137 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
3138 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
3139 return 0;
3140}
3141EXPORT_SYMBOL_GPL(iscsi_conn_bind);
3142
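/*
 * Replace a string parameter with a copy of @new_val_buf, freeing the old
 * value. If the new value matches the current one, nothing is changed.
 */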
3143static int iscsi_switch_str_param(char **param, char *new_val_buf)
3144{
3145 char *new_val;
3146
3147 if (*param) {
3148 if (!strcmp(*param, new_val_buf))
3149 return 0;
3150 }
3151
3152 new_val = kstrdup(new_val_buf, GFP_NOIO);
3153 if (!new_val)
3154 return -ENOMEM;
3155
3156 kfree(*param);
3157 *param = new_val;
3158 return 0;
3159}
3160
3161int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3162 enum iscsi_param param, char *buf, int buflen)
3163{
3164 struct iscsi_conn *conn = cls_conn->dd_data;
3165 struct iscsi_session *session = conn->session;
3166 uint32_t value;
3167
3168 switch(param) {
3169 case ISCSI_PARAM_FAST_ABORT:
3170 sscanf(buf, "%d", &session->fast_abort);
3171 break;
3172 case ISCSI_PARAM_ABORT_TMO:
3173 sscanf(buf, "%d", &session->abort_timeout);
3174 break;
3175 case ISCSI_PARAM_LU_RESET_TMO:
3176 sscanf(buf, "%d", &session->lu_reset_timeout);
3177 break;
3178 case ISCSI_PARAM_TGT_RESET_TMO:
3179 sscanf(buf, "%d", &session->tgt_reset_timeout);
3180 break;
3181 case ISCSI_PARAM_PING_TMO:
3182 sscanf(buf, "%d", &conn->ping_timeout);
3183 break;
3184 case ISCSI_PARAM_RECV_TMO:
3185 sscanf(buf, "%d", &conn->recv_timeout);
3186 break;
3187 case ISCSI_PARAM_MAX_RECV_DLENGTH:
3188 sscanf(buf, "%d", &conn->max_recv_dlength);
3189 break;
3190 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3191 sscanf(buf, "%d", &conn->max_xmit_dlength);
3192 break;
3193 case ISCSI_PARAM_HDRDGST_EN:
3194 sscanf(buf, "%d", &conn->hdrdgst_en);
3195 break;
3196 case ISCSI_PARAM_DATADGST_EN:
3197 sscanf(buf, "%d", &conn->datadgst_en);
3198 break;
3199 case ISCSI_PARAM_INITIAL_R2T_EN:
3200 sscanf(buf, "%d", &session->initial_r2t_en);
3201 break;
3202 case ISCSI_PARAM_MAX_R2T:
3203 sscanf(buf, "%d", &session->max_r2t);
3204 break;
3205 case ISCSI_PARAM_IMM_DATA_EN:
3206 sscanf(buf, "%d", &session->imm_data_en);
3207 break;
3208 case ISCSI_PARAM_FIRST_BURST:
3209 sscanf(buf, "%d", &session->first_burst);
3210 break;
3211 case ISCSI_PARAM_MAX_BURST:
3212 sscanf(buf, "%d", &session->max_burst);
3213 break;
3214 case ISCSI_PARAM_PDU_INORDER_EN:
3215 sscanf(buf, "%d", &session->pdu_inorder_en);
3216 break;
3217 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3218 sscanf(buf, "%d", &session->dataseq_inorder_en);
3219 break;
3220 case ISCSI_PARAM_ERL:
3221 sscanf(buf, "%d", &session->erl);
3222 break;
3223 case ISCSI_PARAM_IFMARKER_EN:
3224 sscanf(buf, "%d", &value);
3225 BUG_ON(value);
3226 break;
3227 case ISCSI_PARAM_OFMARKER_EN:
3228 sscanf(buf, "%d", &value);
3229 BUG_ON(value);
3230 break;
3231 case ISCSI_PARAM_EXP_STATSN:
3232 sscanf(buf, "%u", &conn->exp_statsn);
3233 break;
3234 case ISCSI_PARAM_USERNAME:
3235 return iscsi_switch_str_param(&session->username, buf);
3236 case ISCSI_PARAM_USERNAME_IN:
3237 return iscsi_switch_str_param(&session->username_in, buf);
3238 case ISCSI_PARAM_PASSWORD:
3239 return iscsi_switch_str_param(&session->password, buf);
3240 case ISCSI_PARAM_PASSWORD_IN:
3241 return iscsi_switch_str_param(&session->password_in, buf);
3242 case ISCSI_PARAM_TARGET_NAME:
3243 return iscsi_switch_str_param(&session->targetname, buf);
3244 case ISCSI_PARAM_TPGT:
3245 sscanf(buf, "%d", &session->tpgt);
3246 break;
3247 case ISCSI_PARAM_PERSISTENT_PORT:
3248 sscanf(buf, "%d", &conn->persistent_port);
3249 break;
3250 case ISCSI_PARAM_PERSISTENT_ADDRESS:
3251 return iscsi_switch_str_param(&conn->persistent_address, buf);
3252 case ISCSI_PARAM_IFACE_NAME:
3253 return iscsi_switch_str_param(&session->ifacename, buf);
3254 case ISCSI_PARAM_INITIATOR_NAME:
3255 return iscsi_switch_str_param(&session->initiatorname, buf);
3256 default:
3257 return -ENOSYS;
3258 }
3259
3260 return 0;
3261}
3262EXPORT_SYMBOL_GPL(iscsi_set_param);
3263
3264int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3265 enum iscsi_param param, char *buf)
3266{
3267 struct iscsi_session *session = cls_session->dd_data;
3268 int len;
3269
3270 switch(param) {
3271 case ISCSI_PARAM_FAST_ABORT:
3272 len = sprintf(buf, "%d\n", session->fast_abort);
3273 break;
3274 case ISCSI_PARAM_ABORT_TMO:
3275 len = sprintf(buf, "%d\n", session->abort_timeout);
3276 break;
3277 case ISCSI_PARAM_LU_RESET_TMO:
3278 len = sprintf(buf, "%d\n", session->lu_reset_timeout);
3279 break;
3280 case ISCSI_PARAM_TGT_RESET_TMO:
3281 len = sprintf(buf, "%d\n", session->tgt_reset_timeout);
3282 break;
3283 case ISCSI_PARAM_INITIAL_R2T_EN:
3284 len = sprintf(buf, "%d\n", session->initial_r2t_en);
3285 break;
3286 case ISCSI_PARAM_MAX_R2T:
3287 len = sprintf(buf, "%hu\n", session->max_r2t);
3288 break;
3289 case ISCSI_PARAM_IMM_DATA_EN:
3290 len = sprintf(buf, "%d\n", session->imm_data_en);
3291 break;
3292 case ISCSI_PARAM_FIRST_BURST:
3293 len = sprintf(buf, "%u\n", session->first_burst);
3294 break;
3295 case ISCSI_PARAM_MAX_BURST:
3296 len = sprintf(buf, "%u\n", session->max_burst);
3297 break;
3298 case ISCSI_PARAM_PDU_INORDER_EN:
3299 len = sprintf(buf, "%d\n", session->pdu_inorder_en);
3300 break;
3301 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3302 len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
3303 break;
3304 case ISCSI_PARAM_ERL:
3305 len = sprintf(buf, "%d\n", session->erl);
3306 break;
3307 case ISCSI_PARAM_TARGET_NAME:
3308 len = sprintf(buf, "%s\n", session->targetname);
3309 break;
3310 case ISCSI_PARAM_TPGT:
3311 len = sprintf(buf, "%d\n", session->tpgt);
3312 break;
3313 case ISCSI_PARAM_USERNAME:
3314 len = sprintf(buf, "%s\n", session->username);
3315 break;
3316 case ISCSI_PARAM_USERNAME_IN:
3317 len = sprintf(buf, "%s\n", session->username_in);
3318 break;
3319 case ISCSI_PARAM_PASSWORD:
3320 len = sprintf(buf, "%s\n", session->password);
3321 break;
3322 case ISCSI_PARAM_PASSWORD_IN:
3323 len = sprintf(buf, "%s\n", session->password_in);
3324 break;
3325 case ISCSI_PARAM_IFACE_NAME:
3326 len = sprintf(buf, "%s\n", session->ifacename);
3327 break;
3328 case ISCSI_PARAM_INITIATOR_NAME:
3329 len = sprintf(buf, "%s\n", session->initiatorname);
3330 break;
3331 default:
3332 return -ENOSYS;
3333 }
3334
3335 return len;
3336}
3337EXPORT_SYMBOL_GPL(iscsi_session_get_param);
3338
3339int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
3340 enum iscsi_param param, char *buf)
3341{
3342 struct sockaddr_in6 *sin6 = NULL;
3343 struct sockaddr_in *sin = NULL;
3344 int len;
3345
3346 switch (addr->ss_family) {
3347 case AF_INET:
3348 sin = (struct sockaddr_in *)addr;
3349 break;
3350 case AF_INET6:
3351 sin6 = (struct sockaddr_in6 *)addr;
3352 break;
3353 default:
3354 return -EINVAL;
3355 }
3356
3357 switch (param) {
3358 case ISCSI_PARAM_CONN_ADDRESS:
3359 case ISCSI_HOST_PARAM_IPADDRESS:
3360 if (sin)
3361 len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
3362 else
3363 len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
3364 break;
3365 case ISCSI_PARAM_CONN_PORT:
3366 if (sin)
3367 len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
3368 else
3369 len = sprintf(buf, "%hu\n",
3370 be16_to_cpu(sin6->sin6_port));
3371 break;
3372 default:
3373 return -EINVAL;
3374 }
3375
3376 return len;
3377}
3378EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
3379
3380int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
3381 enum iscsi_param param, char *buf)
3382{
3383 struct iscsi_conn *conn = cls_conn->dd_data;
3384 int len;
3385
3386 switch(param) {
3387 case ISCSI_PARAM_PING_TMO:
3388 len = sprintf(buf, "%u\n", conn->ping_timeout);
3389 break;
3390 case ISCSI_PARAM_RECV_TMO:
3391 len = sprintf(buf, "%u\n", conn->recv_timeout);
3392 break;
3393 case ISCSI_PARAM_MAX_RECV_DLENGTH:
3394 len = sprintf(buf, "%u\n", conn->max_recv_dlength);
3395 break;
3396 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3397 len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
3398 break;
3399 case ISCSI_PARAM_HDRDGST_EN:
3400 len = sprintf(buf, "%d\n", conn->hdrdgst_en);
3401 break;
3402 case ISCSI_PARAM_DATADGST_EN:
3403 len = sprintf(buf, "%d\n", conn->datadgst_en);
3404 break;
3405 case ISCSI_PARAM_IFMARKER_EN:
3406 len = sprintf(buf, "%d\n", conn->ifmarker_en);
3407 break;
3408 case ISCSI_PARAM_OFMARKER_EN:
3409 len = sprintf(buf, "%d\n", conn->ofmarker_en);
3410 break;
3411 case ISCSI_PARAM_EXP_STATSN:
3412 len = sprintf(buf, "%u\n", conn->exp_statsn);
3413 break;
3414 case ISCSI_PARAM_PERSISTENT_PORT:
3415 len = sprintf(buf, "%d\n", conn->persistent_port);
3416 break;
3417 case ISCSI_PARAM_PERSISTENT_ADDRESS:
3418 len = sprintf(buf, "%s\n", conn->persistent_address);
3419 break;
3420 default:
3421 return -ENOSYS;
3422 }
3423
3424 return len;
3425}
3426EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
3427
3428int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
3429 char *buf)
3430{
3431 struct iscsi_host *ihost = shost_priv(shost);
3432 int len;
3433
3434 switch (param) {
3435 case ISCSI_HOST_PARAM_NETDEV_NAME:
3436 len = sprintf(buf, "%s\n", ihost->netdev);
3437 break;
3438 case ISCSI_HOST_PARAM_HWADDRESS:
3439 len = sprintf(buf, "%s\n", ihost->hwaddress);
3440 break;
3441 case ISCSI_HOST_PARAM_INITIATOR_NAME:
3442 len = sprintf(buf, "%s\n", ihost->initiatorname);
3443 break;
3444 default:
3445 return -ENOSYS;
3446 }
3447
3448 return len;
3449}
3450EXPORT_SYMBOL_GPL(iscsi_host_get_param);
3451
3452int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
3453 char *buf, int buflen)
3454{
3455 struct iscsi_host *ihost = shost_priv(shost);
3456
3457 switch (param) {
3458 case ISCSI_HOST_PARAM_NETDEV_NAME:
3459 return iscsi_switch_str_param(&ihost->netdev, buf);
3460 case ISCSI_HOST_PARAM_HWADDRESS:
3461 return iscsi_switch_str_param(&ihost->hwaddress, buf);
3462 case ISCSI_HOST_PARAM_INITIATOR_NAME:
3463 return iscsi_switch_str_param(&ihost->initiatorname, buf);
3464 default:
3465 return -ENOSYS;
3466 }
3467
3468 return 0;
3469}
3470EXPORT_SYMBOL_GPL(iscsi_host_set_param);
3471
3472MODULE_AUTHOR("Mike Christie");
3473MODULE_DESCRIPTION("iSCSI library functions");
3474MODULE_LICENSE("GPL");