1/*******************************************************************************
2 *
3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
4 * for emulated SAS initiator ports
5 *
6 * © Copyright 2011-2013 Datera, Inc.
7 *
8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 ****************************************************************************/
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/types.h>
28#include <linux/configfs.h>
29#include <scsi/scsi.h>
30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_cmnd.h>
34
35#include <target/target_core_base.h>
36#include <target/target_core_fabric.h>
37
38#include "tcm_loop.h"
39
40#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
41
42static struct workqueue_struct *tcm_loop_workqueue;
43static struct kmem_cache *tcm_loop_cmd_cache;
44
45static int tcm_loop_hba_no_cnt;
46
47static int tcm_loop_queue_status(struct se_cmd *se_cmd);
48
49/*
50 * Called from struct target_core_fabric_ops->check_stop_free()
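 * The return value of transport_generic_free_cmd() reports whether the
 * command was released here, and is handed straight back to target core.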
51 */
52static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
53{
54 return transport_generic_free_cmd(se_cmd, 0);
55}
56
57static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
58{
59 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
60 struct tcm_loop_cmd, tl_se_cmd);
61
62 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
63}
64
65static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
66{
67 seq_puts(m, "tcm_loop_proc_info()\n");
68 return 0;
69}
70
71static int tcm_loop_driver_probe(struct device *);
72static int tcm_loop_driver_remove(struct device *);
73
74static int pseudo_lld_bus_match(struct device *dev,
75 struct device_driver *dev_driver)
76{
77 return 1;
78}
79
80static struct bus_type tcm_loop_lld_bus = {
81 .name = "tcm_loop_bus",
82 .match = pseudo_lld_bus_match,
83 .probe = tcm_loop_driver_probe,
84 .remove = tcm_loop_driver_remove,
85};
86
87static struct device_driver tcm_loop_driverfs = {
88 .name = "tcm_loop",
89 .bus = &tcm_loop_lld_bus,
90};
91/*
92 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
93 */
94static struct device *tcm_loop_primary;
95
96static void tcm_loop_submission_work(struct work_struct *work)
97{
98 struct tcm_loop_cmd *tl_cmd =
99 container_of(work, struct tcm_loop_cmd, work);
100 struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
101 struct scsi_cmnd *sc = tl_cmd->sc;
102 struct tcm_loop_nexus *tl_nexus;
103 struct tcm_loop_hba *tl_hba;
104 struct tcm_loop_tpg *tl_tpg;
105 struct scatterlist *sgl_bidi = NULL;
106 u32 sgl_bidi_count = 0, transfer_length;
107 int rc;
108
109 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
110 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
111
112 /*
113 * Ensure that this tl_tpg reference from the incoming sc->device->id
114 * has already been configured via tcm_loop_make_naa_tpg().
115 */
116 if (!tl_tpg->tl_hba) {
117 set_host_byte(sc, DID_NO_CONNECT);
118 goto out_done;
119 }
120 if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
121 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
122 goto out_done;
123 }
124 tl_nexus = tl_tpg->tl_nexus;
125 if (!tl_nexus) {
126 scmd_printk(KERN_ERR, sc,
127 "TCM_Loop I_T Nexus does not exist\n");
128 set_host_byte(sc, DID_ERROR);
129 goto out_done;
130 }
131 if (scsi_bidi_cmnd(sc)) {
132 struct scsi_data_buffer *sdb = scsi_in(sc);
133
134 sgl_bidi = sdb->table.sgl;
135 sgl_bidi_count = sdb->table.nents;
136 se_cmd->se_cmd_flags |= SCF_BIDI;
137
138 }
139
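	/*
	 * scsi_transfer_length() includes any T10-PI bytes in the expected
	 * transfer length when a protection operation is in effect.
	 */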
140 transfer_length = scsi_transfer_length(sc);
141 if (!scsi_prot_sg_count(sc) &&
142 scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
143 se_cmd->prot_pto = true;
144 /*
145 * loopback transport doesn't support
146 * WRITE_GENERATE, READ_STRIP protection
147 * information operations, go ahead unprotected.
148 */
149 transfer_length = scsi_bufflen(sc);
150 }
151
152 se_cmd->tag = tl_cmd->sc_cmd_tag;
153 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
154 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
155 transfer_length, TCM_SIMPLE_TAG,
156 sc->sc_data_direction, 0,
157 scsi_sglist(sc), scsi_sg_count(sc),
158 sgl_bidi, sgl_bidi_count,
159 scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
160 if (rc < 0) {
161 set_host_byte(sc, DID_NO_CONNECT);
162 goto out_done;
163 }
164 return;
165
166out_done:
167 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
168 sc->scsi_done(sc);
169}
170
171/*
172 * ->queuecommand can be and usually is called from interrupt context, so
173 * defer the actual submission to a workqueue.
174 */
175static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
176{
177 struct tcm_loop_cmd *tl_cmd;
178
179 pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
180 __func__, sc->device->host->host_no, sc->device->id,
181 sc->device->channel, sc->device->lun, sc->cmnd[0],
182 scsi_bufflen(sc));
183
184 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
185 if (!tl_cmd) {
186 set_host_byte(sc, DID_ERROR);
187 sc->scsi_done(sc);
188 return 0;
189 }
190
191 tl_cmd->sc = sc;
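	/*
	 * Save the block layer request tag; tcm_loop_abort_task() uses the
	 * same tag to reference this command for ABORT_TASK.
	 */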
192 tl_cmd->sc_cmd_tag = sc->request->tag;
193 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
194 queue_work(tcm_loop_workqueue, &tl_cmd->work);
195 return 0;
196}
197
198/*
199 * Called from SCSI EH process context to issue a LUN_RESET TMR
200 * to struct scsi_device
201 */
202static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
203 u64 lun, int task, enum tcm_tmreq_table tmr)
204{
205 struct se_cmd *se_cmd;
206 struct se_session *se_sess;
207 struct tcm_loop_nexus *tl_nexus;
208 struct tcm_loop_cmd *tl_cmd;
209 int ret = TMR_FUNCTION_FAILED, rc;
210
211 /*
212 * Locate the tl_nexus and se_sess pointers
213 */
214 tl_nexus = tl_tpg->tl_nexus;
215 if (!tl_nexus) {
216 pr_err("Unable to perform device reset without active I_T Nexus\n");
217 return ret;
218 }
219
220 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
221 if (!tl_cmd)
222 return ret;
223
224 init_completion(&tl_cmd->tmr_done);
225
226 se_cmd = &tl_cmd->tl_se_cmd;
227 se_sess = tl_tpg->tl_nexus->se_sess;
228
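	/*
	 * TARGET_SCF_ACK_KREF keeps an extra reference across the TMR; it is
	 * dropped below via target_put_sess_cmd() once tmr_done completes.
	 */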
229 rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
230 NULL, tmr, GFP_KERNEL, task,
231 TARGET_SCF_ACK_KREF);
232 if (rc < 0)
233 goto release;
234 wait_for_completion(&tl_cmd->tmr_done);
235 ret = se_cmd->se_tmr_req->response;
236 target_put_sess_cmd(se_cmd);
237
238out:
239 return ret;
240
241release:
242 if (se_cmd)
243 transport_generic_free_cmd(se_cmd, 0);
244 else
245 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
246 goto out;
247}
248
249static int tcm_loop_abort_task(struct scsi_cmnd *sc)
250{
251 struct tcm_loop_hba *tl_hba;
252 struct tcm_loop_tpg *tl_tpg;
253 int ret = FAILED;
254
255 /*
256 * Locate the tcm_loop_hba_t pointer
257 */
258 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
259 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
260 ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
261 sc->request->tag, TMR_ABORT_TASK);
262 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
263}
264
265/*
266 * Called from SCSI EH process context to issue a LUN_RESET TMR
267 * to struct scsi_device
268 */
269static int tcm_loop_device_reset(struct scsi_cmnd *sc)
270{
271 struct tcm_loop_hba *tl_hba;
272 struct tcm_loop_tpg *tl_tpg;
273 int ret = FAILED;
274
275 /*
276 * Locate the tcm_loop_hba_t pointer
277 */
278 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
279 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
280
281 ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
282 0, TMR_LUN_RESET);
283 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
284}
285
286static int tcm_loop_target_reset(struct scsi_cmnd *sc)
287{
288 struct tcm_loop_hba *tl_hba;
289 struct tcm_loop_tpg *tl_tpg;
290
291 /*
292 * Locate the tcm_loop_hba_t pointer
293 */
294 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
295 if (!tl_hba) {
		pr_err("Unable to perform target reset without an active HBA\n");
297 return FAILED;
298 }
299 /*
300 * Locate the tl_tpg pointer from TargetID in sc->device->id
301 */
302 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
303 if (tl_tpg) {
304 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
305 return SUCCESS;
306 }
307 return FAILED;
308}
309
310static int tcm_loop_slave_alloc(struct scsi_device *sd)
311{
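	/* Allow BIDI commands on this virtual SCSI device's request queue. */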
312 blk_queue_flag_set(QUEUE_FLAG_BIDI, sd->request_queue);
313 return 0;
314}
315
316static struct scsi_host_template tcm_loop_driver_template = {
317 .show_info = tcm_loop_show_info,
318 .proc_name = "tcm_loopback",
319 .name = "TCM_Loopback",
320 .queuecommand = tcm_loop_queuecommand,
321 .change_queue_depth = scsi_change_queue_depth,
322 .eh_abort_handler = tcm_loop_abort_task,
323 .eh_device_reset_handler = tcm_loop_device_reset,
324 .eh_target_reset_handler = tcm_loop_target_reset,
325 .can_queue = 1024,
326 .this_id = -1,
327 .sg_tablesize = 256,
328 .cmd_per_lun = 1024,
329 .max_sectors = 0xFFFF,
330 .use_clustering = DISABLE_CLUSTERING,
331 .slave_alloc = tcm_loop_slave_alloc,
332 .module = THIS_MODULE,
333 .track_queue_depth = 1,
334};
335
336static int tcm_loop_driver_probe(struct device *dev)
337{
338 struct tcm_loop_hba *tl_hba;
339 struct Scsi_Host *sh;
340 int error, host_prot;
341
342 tl_hba = to_tcm_loop_hba(dev);
343
344 sh = scsi_host_alloc(&tcm_loop_driver_template,
345 sizeof(struct tcm_loop_hba));
346 if (!sh) {
347 pr_err("Unable to allocate struct scsi_host\n");
348 return -ENODEV;
349 }
350 tl_hba->sh = sh;
351
352 /*
353 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
354 */
355 *((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
356 /*
357 * Setup single ID, Channel and LUN for now..
358 */
359 sh->max_id = 2;
360 sh->max_lun = 0;
361 sh->max_channel = 0;
362 sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
363
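	/*
	 * Advertise all DIF/DIX protection types so T10-PI capable backends
	 * can be exercised through this virtual LLD.
	 */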
364 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
365 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
366 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
367
368 scsi_host_set_prot(sh, host_prot);
369 scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
370
371 error = scsi_add_host(sh, &tl_hba->dev);
372 if (error) {
373 pr_err("%s: scsi_add_host failed\n", __func__);
374 scsi_host_put(sh);
375 return -ENODEV;
376 }
377 return 0;
378}
379
380static int tcm_loop_driver_remove(struct device *dev)
381{
382 struct tcm_loop_hba *tl_hba;
383 struct Scsi_Host *sh;
384
385 tl_hba = to_tcm_loop_hba(dev);
386 sh = tl_hba->sh;
387
388 scsi_remove_host(sh);
389 scsi_host_put(sh);
390 return 0;
391}
392
393static void tcm_loop_release_adapter(struct device *dev)
394{
395 struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
396
397 kfree(tl_hba);
398}
399
400/*
 * Called from tcm_loop_make_scsi_hba() below
402 */
403static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
404{
405 int ret;
406
407 tl_hba->dev.bus = &tcm_loop_lld_bus;
408 tl_hba->dev.parent = tcm_loop_primary;
409 tl_hba->dev.release = &tcm_loop_release_adapter;
410 dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
411
412 ret = device_register(&tl_hba->dev);
413 if (ret) {
414 pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
415 return -ENODEV;
416 }
417
418 return 0;
419}
420
421/*
 * Called from tcm_loop_fabric_init() below to load the emulated
423 * tcm_loop SCSI bus.
424 */
425static int tcm_loop_alloc_core_bus(void)
426{
427 int ret;
428
429 tcm_loop_primary = root_device_register("tcm_loop_0");
430 if (IS_ERR(tcm_loop_primary)) {
431 pr_err("Unable to allocate tcm_loop_primary\n");
432 return PTR_ERR(tcm_loop_primary);
433 }
434
435 ret = bus_register(&tcm_loop_lld_bus);
436 if (ret) {
437 pr_err("bus_register() failed for tcm_loop_lld_bus\n");
438 goto dev_unreg;
439 }
440
441 ret = driver_register(&tcm_loop_driverfs);
442 if (ret) {
443 pr_err("driver_register() failed for tcm_loop_driverfs\n");
444 goto bus_unreg;
445 }
446
447 pr_debug("Initialized TCM Loop Core Bus\n");
448 return ret;
449
450bus_unreg:
451 bus_unregister(&tcm_loop_lld_bus);
452dev_unreg:
453 root_device_unregister(tcm_loop_primary);
454 return ret;
455}
456
457static void tcm_loop_release_core_bus(void)
458{
459 driver_unregister(&tcm_loop_driverfs);
460 bus_unregister(&tcm_loop_lld_bus);
461 root_device_unregister(tcm_loop_primary);
462
463 pr_debug("Releasing TCM Loop Core BUS\n");
464}
465
466static char *tcm_loop_get_fabric_name(void)
467{
468 return "loopback";
469}
470
471static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
472{
473 return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
474}
475
476static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
477{
478 /*
479 * Return the passed NAA identifier for the Target Port
480 */
481 return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
482}
483
484static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
485{
486 /*
 * This tag is used when forming the SCSI Name identifier in the
 * INQUIRY EVPD=1 page 0x83 (Device Identification) response, to
 * represent the SCSI Target Port.
489 */
490 return tl_tpg(se_tpg)->tl_tpgt;
491}
492
493/*
 * Returning 1 here allows target_core_mod to generate a struct se_node_acl
 * on demand for the incoming, fabric-dependent SCSI Initiator Port.
496 */
497static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
498{
499 return 1;
500}
501
502static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
503{
504 return 0;
505}
506
507/*
 * Allow the I_T Nexus full READ-WRITE access without explicit Initiator Node
 * ACLs for the local virtual Linux/SCSI LLD passthrough into a VM hypervisor guest.
510 */
511static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
512{
513 return 0;
514}
515
516/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
518 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
519 * It has been added here as a nop for target_fabric_tf_ops_check()
520 */
521static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
522{
523 return 0;
524}
525
526static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
527{
528 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
529 tl_se_tpg);
530 return tl_tpg->tl_fabric_prot_type;
531}
532
533static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
534{
535 return 1;
536}
537
538static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
539{
540 return 1;
541}
542
543static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
544{
545 return;
546}
547
548static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
549{
550 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
551 struct tcm_loop_cmd, tl_se_cmd);
552
553 return tl_cmd->sc_cmd_state;
554}
555
556static int tcm_loop_write_pending(struct se_cmd *se_cmd)
557{
558 /*
559 * Since Linux/SCSI has already sent down a struct scsi_cmnd
560 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
561 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
562 * format with transport_generic_map_mem_to_cmd().
563 *
564 * We now tell TCM to add this WRITE CDB directly into the TCM storage
565 * object execution queue.
566 */
567 target_execute_cmd(se_cmd);
568 return 0;
569}
570
571static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
572{
573 return 0;
574}
575
576static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
577{
578 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
579 struct tcm_loop_cmd, tl_se_cmd);
580 struct scsi_cmnd *sc = tl_cmd->sc;
581
582 pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
583 __func__, sc, sc->cmnd[0]);
584
585 sc->result = SAM_STAT_GOOD;
586 set_host_byte(sc, DID_OK);
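	/* Report any residual so the midlayer sees over/underflow transfers. */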
587 if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
588 (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
589 scsi_set_resid(sc, se_cmd->residual_count);
590 sc->scsi_done(sc);
591 return 0;
592}
593
594static int tcm_loop_queue_status(struct se_cmd *se_cmd)
595{
596 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
597 struct tcm_loop_cmd, tl_se_cmd);
598 struct scsi_cmnd *sc = tl_cmd->sc;
599
600 pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
601 __func__, sc, sc->cmnd[0]);
602
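	/*
	 * If sense data is present, copy it back and flag DRIVER_SENSE along
	 * with CHECK CONDITION so the midlayer surfaces it to the issuer.
	 */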
603 if (se_cmd->sense_buffer &&
604 ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
605 (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
606
607 memcpy(sc->sense_buffer, se_cmd->sense_buffer,
608 SCSI_SENSE_BUFFERSIZE);
609 sc->result = SAM_STAT_CHECK_CONDITION;
610 set_driver_byte(sc, DRIVER_SENSE);
611 } else
612 sc->result = se_cmd->scsi_status;
613
614 set_host_byte(sc, DID_OK);
615 if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
616 (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
617 scsi_set_resid(sc, se_cmd->residual_count);
618 sc->scsi_done(sc);
619 return 0;
620}
621
622static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
623{
624 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
625 struct tcm_loop_cmd, tl_se_cmd);
626
627 /* Wake up tcm_loop_issue_tmr(). */
628 complete(&tl_cmd->tmr_done);
629}
630
631static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
632{
633 return;
634}
635
636static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
637{
638 switch (tl_hba->tl_proto_id) {
639 case SCSI_PROTOCOL_SAS:
640 return "SAS";
641 case SCSI_PROTOCOL_FCP:
642 return "FCP";
643 case SCSI_PROTOCOL_ISCSI:
644 return "iSCSI";
645 default:
646 break;
647 }
648
649 return "Unknown";
650}
651
652/* Start items for tcm_loop_port_cit */
653
654static int tcm_loop_port_link(
655 struct se_portal_group *se_tpg,
656 struct se_lun *lun)
657{
658 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
659 struct tcm_loop_tpg, tl_se_tpg);
660 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
661
662 atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
663 /*
664 * Add Linux/SCSI struct scsi_device by HCTL
665 */
666 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
667
668 pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
669 return 0;
670}
671
672static void tcm_loop_port_unlink(
673 struct se_portal_group *se_tpg,
674 struct se_lun *se_lun)
675{
676 struct scsi_device *sd;
677 struct tcm_loop_hba *tl_hba;
678 struct tcm_loop_tpg *tl_tpg;
679
680 tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
681 tl_hba = tl_tpg->tl_hba;
682
683 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
684 se_lun->unpacked_lun);
685 if (!sd) {
686 pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
687 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
688 return;
689 }
690 /*
691 * Remove Linux/SCSI struct scsi_device by HCTL
692 */
693 scsi_remove_device(sd);
694 scsi_device_put(sd);
695
696 atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
697
698 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
699}
700
701/* End items for tcm_loop_port_cit */
702
703static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
704 struct config_item *item, char *page)
705{
706 struct se_portal_group *se_tpg = attrib_to_tpg(item);
707 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
708 tl_se_tpg);
709
710 return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
711}
712
713static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
714 struct config_item *item, const char *page, size_t count)
715{
716 struct se_portal_group *se_tpg = attrib_to_tpg(item);
717 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
718 tl_se_tpg);
719 unsigned long val;
720 int ret = kstrtoul(page, 0, &val);
721
722 if (ret) {
723 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
724 return ret;
725 }
726 if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
728 return -EINVAL;
729 }
730 tl_tpg->tl_fabric_prot_type = val;
731
732 return count;
733}
734
735CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
736
737static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
738 &tcm_loop_tpg_attrib_attr_fabric_prot_type,
739 NULL,
740};
741
742/* Start items for tcm_loop_nexus_cit */
743
744static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
745 struct se_session *se_sess, void *p)
746{
747 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
748 struct tcm_loop_tpg, tl_se_tpg);
749
750 tl_tpg->tl_nexus = p;
751 return 0;
752}
753
754static int tcm_loop_make_nexus(
755 struct tcm_loop_tpg *tl_tpg,
756 const char *name)
757{
758 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
759 struct tcm_loop_nexus *tl_nexus;
760 int ret;
761
762 if (tl_tpg->tl_nexus) {
763 pr_debug("tl_tpg->tl_nexus already exists\n");
764 return -EEXIST;
765 }
766
767 tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
768 if (!tl_nexus)
769 return -ENOMEM;
770
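	/*
	 * target_alloc_session() registers the session with target core and
	 * invokes tcm_loop_alloc_sess_cb() to publish tl_nexus on the TPG.
	 */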
771 tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
772 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
773 name, tl_nexus, tcm_loop_alloc_sess_cb);
774 if (IS_ERR(tl_nexus->se_sess)) {
775 ret = PTR_ERR(tl_nexus->se_sess);
776 kfree(tl_nexus);
777 return ret;
778 }
779
780 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
781 tcm_loop_dump_proto_id(tl_hba), name);
782 return 0;
783}
784
785static int tcm_loop_drop_nexus(
786 struct tcm_loop_tpg *tpg)
787{
788 struct se_session *se_sess;
789 struct tcm_loop_nexus *tl_nexus;
790
791 tl_nexus = tpg->tl_nexus;
792 if (!tl_nexus)
793 return -ENODEV;
794
795 se_sess = tl_nexus->se_sess;
796 if (!se_sess)
797 return -ENODEV;
798
799 if (atomic_read(&tpg->tl_tpg_port_count)) {
800 pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
801 atomic_read(&tpg->tl_tpg_port_count));
802 return -EPERM;
803 }
804
805 pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
806 tcm_loop_dump_proto_id(tpg->tl_hba),
807 tl_nexus->se_sess->se_node_acl->initiatorname);
808 /*
809 * Release the SCSI I_T Nexus to the emulated Target Port
810 */
811 transport_deregister_session(tl_nexus->se_sess);
812 tpg->tl_nexus = NULL;
813 kfree(tl_nexus);
814 return 0;
815}
816
817/* End items for tcm_loop_nexus_cit */
818
819static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
820{
821 struct se_portal_group *se_tpg = to_tpg(item);
822 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
823 struct tcm_loop_tpg, tl_se_tpg);
824 struct tcm_loop_nexus *tl_nexus;
825 ssize_t ret;
826
827 tl_nexus = tl_tpg->tl_nexus;
828 if (!tl_nexus)
829 return -ENODEV;
830
831 ret = snprintf(page, PAGE_SIZE, "%s\n",
832 tl_nexus->se_sess->se_node_acl->initiatorname);
833
834 return ret;
835}
836
837static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
838 const char *page, size_t count)
839{
840 struct se_portal_group *se_tpg = to_tpg(item);
841 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
842 struct tcm_loop_tpg, tl_se_tpg);
843 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
844 unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
845 int ret;
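	/*
	 * Example (sketch): echo naa.<initiator wwn> > .../tpgt_N/nexus
	 * establishes the virtual I_T nexus for this TPG.
	 */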
846 /*
847 * Shutdown the active I_T nexus if 'NULL' is passed..
848 */
849 if (!strncmp(page, "NULL", 4)) {
850 ret = tcm_loop_drop_nexus(tl_tpg);
851 return (!ret) ? count : ret;
852 }
853 /*
854 * Otherwise make sure the passed virtual Initiator port WWN matches
855 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
856 * tcm_loop_make_nexus()
857 */
858 if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
860 page, TL_WWN_ADDR_LEN);
861 return -EINVAL;
862 }
863 snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
864
865 ptr = strstr(i_port, "naa.");
866 if (ptr) {
867 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
868 pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
869 i_port, tcm_loop_dump_proto_id(tl_hba));
870 return -EINVAL;
871 }
872 port_ptr = &i_port[0];
873 goto check_newline;
874 }
875 ptr = strstr(i_port, "fc.");
876 if (ptr) {
877 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
878 pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
879 i_port, tcm_loop_dump_proto_id(tl_hba));
880 return -EINVAL;
881 }
882 port_ptr = &i_port[3]; /* Skip over "fc." */
883 goto check_newline;
884 }
885 ptr = strstr(i_port, "iqn.");
886 if (ptr) {
887 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
888 pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
889 i_port, tcm_loop_dump_proto_id(tl_hba));
890 return -EINVAL;
891 }
892 port_ptr = &i_port[0];
893 goto check_newline;
894 }
895 pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
896 i_port);
897 return -EINVAL;
898 /*
899 * Clear any trailing newline for the NAA WWN
900 */
901check_newline:
902 if (i_port[strlen(i_port)-1] == '\n')
903 i_port[strlen(i_port)-1] = '\0';
904
905 ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
906 if (ret < 0)
907 return ret;
908
909 return count;
910}
911
912static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
913 char *page)
914{
915 struct se_portal_group *se_tpg = to_tpg(item);
916 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
917 struct tcm_loop_tpg, tl_se_tpg);
918 const char *status = NULL;
919 ssize_t ret = -EINVAL;
920
921 switch (tl_tpg->tl_transport_status) {
922 case TCM_TRANSPORT_ONLINE:
923 status = "online";
924 break;
925 case TCM_TRANSPORT_OFFLINE:
926 status = "offline";
927 break;
928 default:
929 break;
930 }
931
932 if (status)
933 ret = snprintf(page, PAGE_SIZE, "%s\n", status);
934
935 return ret;
936}
937
938static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
939 const char *page, size_t count)
940{
941 struct se_portal_group *se_tpg = to_tpg(item);
942 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
943 struct tcm_loop_tpg, tl_se_tpg);
944
945 if (!strncmp(page, "online", 6)) {
946 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
947 return count;
948 }
949 if (!strncmp(page, "offline", 7)) {
950 tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
951 if (tl_tpg->tl_nexus) {
952 struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
953
954 core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
955 }
956 return count;
957 }
958 return -EINVAL;
959}
960
961static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
962 char *page)
963{
964 struct se_portal_group *se_tpg = to_tpg(item);
965 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
966 struct tcm_loop_tpg, tl_se_tpg);
967 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
968
969 return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
970 tl_hba->sh->host_no, tl_tpg->tl_tpgt);
971}
972
973CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
974CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
975CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
976
977static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
978 &tcm_loop_tpg_attr_nexus,
979 &tcm_loop_tpg_attr_transport_status,
980 &tcm_loop_tpg_attr_address,
981 NULL,
982};
983
984/* Start items for tcm_loop_naa_cit */
985
986static struct se_portal_group *tcm_loop_make_naa_tpg(
987 struct se_wwn *wwn,
988 struct config_group *group,
989 const char *name)
990{
991 struct tcm_loop_hba *tl_hba = container_of(wwn,
992 struct tcm_loop_hba, tl_hba_wwn);
993 struct tcm_loop_tpg *tl_tpg;
994 int ret;
995 unsigned long tpgt;
996
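	/*
	 * The configfs group name must be of the form "tpgt_<n>"; <n> selects
	 * the TPG slot on this HBA (0 .. TL_TPGS_PER_HBA - 1).
	 */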
997 if (strstr(name, "tpgt_") != name) {
998 pr_err("Unable to locate \"tpgt_#\" directory group\n");
999 return ERR_PTR(-EINVAL);
1000 }
1001 if (kstrtoul(name+5, 10, &tpgt))
1002 return ERR_PTR(-EINVAL);
1003
1004 if (tpgt >= TL_TPGS_PER_HBA) {
1005 pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
1006 tpgt, TL_TPGS_PER_HBA);
1007 return ERR_PTR(-EINVAL);
1008 }
1009 tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1010 tl_tpg->tl_hba = tl_hba;
1011 tl_tpg->tl_tpgt = tpgt;
1012 /*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
1014 */
1015 ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
1016 if (ret < 0)
1017 return ERR_PTR(-ENOMEM);
1018
1019 pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
1020 tcm_loop_dump_proto_id(tl_hba),
1021 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1022 return &tl_tpg->tl_se_tpg;
1023}
1024
1025static void tcm_loop_drop_naa_tpg(
1026 struct se_portal_group *se_tpg)
1027{
1028 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1029 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1030 struct tcm_loop_tpg, tl_se_tpg);
1031 struct tcm_loop_hba *tl_hba;
1032 unsigned short tpgt;
1033
1034 tl_hba = tl_tpg->tl_hba;
1035 tpgt = tl_tpg->tl_tpgt;
1036 /*
1037 * Release the I_T Nexus for the Virtual target link if present
1038 */
1039 tcm_loop_drop_nexus(tl_tpg);
1040 /*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
1042 */
1043 core_tpg_deregister(se_tpg);
1044
1045 tl_tpg->tl_hba = NULL;
1046 tl_tpg->tl_tpgt = 0;
1047
1048 pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
1049 tcm_loop_dump_proto_id(tl_hba),
1050 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1051}
1052
1053/* End items for tcm_loop_naa_cit */
1054
1055/* Start items for tcm_loop_cit */
1056
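/*
 * Example configfs flow (sketch; normally driven by targetcli):
 *
 *   mkdir -p /sys/kernel/config/target/loopback/naa.60014054c1441891/tpgt_1
 *
 * The prefix of the WWN directory name (naa., fc. or iqn.) selects the
 * emulated protocol identifier in tcm_loop_make_scsi_hba() below.
 */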
1057static struct se_wwn *tcm_loop_make_scsi_hba(
1058 struct target_fabric_configfs *tf,
1059 struct config_group *group,
1060 const char *name)
1061{
1062 struct tcm_loop_hba *tl_hba;
1063 struct Scsi_Host *sh;
1064 char *ptr;
1065 int ret, off = 0;
1066
1067 tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
1068 if (!tl_hba)
1069 return ERR_PTR(-ENOMEM);
1070
1071 /*
1072 * Determine the emulated Protocol Identifier and Target Port Name
1073 * based on the incoming configfs directory name.
1074 */
1075 ptr = strstr(name, "naa.");
1076 if (ptr) {
1077 tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1078 goto check_len;
1079 }
1080 ptr = strstr(name, "fc.");
1081 if (ptr) {
1082 tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1083 off = 3; /* Skip over "fc." */
1084 goto check_len;
1085 }
1086 ptr = strstr(name, "iqn.");
1087 if (!ptr) {
1088 pr_err("Unable to locate prefix for emulated Target Port: %s\n",
1089 name);
1090 ret = -EINVAL;
1091 goto out;
1092 }
1093 tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1094
1095check_len:
1096 if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
			tcm_loop_dump_proto_id(tl_hba), name, TL_WWN_ADDR_LEN);
1099 ret = -EINVAL;
1100 goto out;
1101 }
1102 snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1103
1104 /*
	 * tcm_loop_setup_hba_bus() calls device_register(&tl_hba->dev), which
	 * triggers the tcm_loop_driver_probe() callback that allocates the
	 * emulated Linux/SCSI struct Scsi_Host at tl_hba->sh.
1108 */
1109 ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1110 if (ret)
1111 goto out;
1112
1113 sh = tl_hba->sh;
1114 tcm_loop_hba_no_cnt++;
1115 pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
1116 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1117 return &tl_hba->tl_hba_wwn;
1118out:
1119 kfree(tl_hba);
1120 return ERR_PTR(ret);
1121}
1122
1123static void tcm_loop_drop_scsi_hba(
1124 struct se_wwn *wwn)
1125{
1126 struct tcm_loop_hba *tl_hba = container_of(wwn,
1127 struct tcm_loop_hba, tl_hba_wwn);
1128
1129 pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
1130 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1131 tl_hba->sh->host_no);
1132 /*
	 * Call device_unregister() on the original tl_hba->dev;
	 * tcm_loop_release_adapter() will then free *tl_hba.
1136 */
1137 device_unregister(&tl_hba->dev);
1138}
1139
1140/* Start items for tcm_loop_cit */
1141static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1142{
1143 return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1144}
1145
1146CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
1147
1148static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1149 &tcm_loop_wwn_attr_version,
1150 NULL,
1151};
1152
1153/* End items for tcm_loop_cit */
1154
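/*
 * Fabric template registered with target core in tcm_loop_fabric_init();
 * once registered it appears under /sys/kernel/config/target/loopback/.
 */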
1155static const struct target_core_fabric_ops loop_ops = {
1156 .module = THIS_MODULE,
1157 .name = "loopback",
1158 .get_fabric_name = tcm_loop_get_fabric_name,
1159 .tpg_get_wwn = tcm_loop_get_endpoint_wwn,
1160 .tpg_get_tag = tcm_loop_get_tag,
1161 .tpg_check_demo_mode = tcm_loop_check_demo_mode,
1162 .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
1163 .tpg_check_demo_mode_write_protect =
1164 tcm_loop_check_demo_mode_write_protect,
1165 .tpg_check_prod_mode_write_protect =
1166 tcm_loop_check_prod_mode_write_protect,
1167 .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
1168 .tpg_get_inst_index = tcm_loop_get_inst_index,
1169 .check_stop_free = tcm_loop_check_stop_free,
1170 .release_cmd = tcm_loop_release_cmd,
1171 .sess_get_index = tcm_loop_sess_get_index,
1172 .write_pending = tcm_loop_write_pending,
1173 .write_pending_status = tcm_loop_write_pending_status,
1174 .set_default_node_attributes = tcm_loop_set_default_node_attributes,
1175 .get_cmd_state = tcm_loop_get_cmd_state,
1176 .queue_data_in = tcm_loop_queue_data_in,
1177 .queue_status = tcm_loop_queue_status,
1178 .queue_tm_rsp = tcm_loop_queue_tm_rsp,
1179 .aborted_task = tcm_loop_aborted_task,
1180 .fabric_make_wwn = tcm_loop_make_scsi_hba,
1181 .fabric_drop_wwn = tcm_loop_drop_scsi_hba,
1182 .fabric_make_tpg = tcm_loop_make_naa_tpg,
1183 .fabric_drop_tpg = tcm_loop_drop_naa_tpg,
1184 .fabric_post_link = tcm_loop_port_link,
1185 .fabric_pre_unlink = tcm_loop_port_unlink,
1186 .tfc_wwn_attrs = tcm_loop_wwn_attrs,
1187 .tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
1188 .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
1189};
1190
1191static int __init tcm_loop_fabric_init(void)
1192{
1193 int ret = -ENOMEM;
1194
1195 tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1196 if (!tcm_loop_workqueue)
1197 goto out;
1198
1199 tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1200 sizeof(struct tcm_loop_cmd),
1201 __alignof__(struct tcm_loop_cmd),
1202 0, NULL);
1203 if (!tcm_loop_cmd_cache) {
1204 pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
1205 goto out_destroy_workqueue;
1206 }
1207
1208 ret = tcm_loop_alloc_core_bus();
1209 if (ret)
1210 goto out_destroy_cache;
1211
1212 ret = target_register_template(&loop_ops);
1213 if (ret)
1214 goto out_release_core_bus;
1215
1216 return 0;
1217
1218out_release_core_bus:
1219 tcm_loop_release_core_bus();
1220out_destroy_cache:
1221 kmem_cache_destroy(tcm_loop_cmd_cache);
1222out_destroy_workqueue:
1223 destroy_workqueue(tcm_loop_workqueue);
1224out:
1225 return ret;
1226}
1227
1228static void __exit tcm_loop_fabric_exit(void)
1229{
1230 target_unregister_template(&loop_ops);
1231 tcm_loop_release_core_bus();
1232 kmem_cache_destroy(tcm_loop_cmd_cache);
1233 destroy_workqueue(tcm_loop_workqueue);
1234}
1235
1236MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1237MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1238MODULE_LICENSE("GPL");
1239module_init(tcm_loop_fabric_init);
1240module_exit(tcm_loop_fabric_exit);
1/*******************************************************************************
2 *
3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
4 * for emulated SAS initiator ports
5 *
6 * © Copyright 2011-2013 Datera, Inc.
7 *
8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 ****************************************************************************/
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/types.h>
28#include <linux/configfs.h>
29#include <scsi/scsi.h>
30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_cmnd.h>
34
35#include <target/target_core_base.h>
36#include <target/target_core_fabric.h>
37#include <target/target_core_fabric_configfs.h>
38#include <target/target_core_configfs.h>
39
40#include "tcm_loop.h"
41
42#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
43
44/* Local pointer to allocated TCM configfs fabric module */
45static struct target_fabric_configfs *tcm_loop_fabric_configfs;
46
47static struct workqueue_struct *tcm_loop_workqueue;
48static struct kmem_cache *tcm_loop_cmd_cache;
49
50static int tcm_loop_hba_no_cnt;
51
52static int tcm_loop_queue_status(struct se_cmd *se_cmd);
53
54/*
55 * Called from struct target_core_fabric_ops->check_stop_free()
56 */
57static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
58{
59 /*
60 * Do not release struct se_cmd's containing a valid TMR
61 * pointer. These will be released directly in tcm_loop_device_reset()
62 * with transport_generic_free_cmd().
63 */
64 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
65 return 0;
66 /*
67 * Release the struct se_cmd, which will make a callback to release
68 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
69 */
70 transport_generic_free_cmd(se_cmd, 0);
71 return 1;
72}
73
74static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
75{
76 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
77 struct tcm_loop_cmd, tl_se_cmd);
78
79 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
80}
81
82static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
83{
84 seq_printf(m, "tcm_loop_proc_info()\n");
85 return 0;
86}
87
88static int tcm_loop_driver_probe(struct device *);
89static int tcm_loop_driver_remove(struct device *);
90
91static int pseudo_lld_bus_match(struct device *dev,
92 struct device_driver *dev_driver)
93{
94 return 1;
95}
96
97static struct bus_type tcm_loop_lld_bus = {
98 .name = "tcm_loop_bus",
99 .match = pseudo_lld_bus_match,
100 .probe = tcm_loop_driver_probe,
101 .remove = tcm_loop_driver_remove,
102};
103
104static struct device_driver tcm_loop_driverfs = {
105 .name = "tcm_loop",
106 .bus = &tcm_loop_lld_bus,
107};
108/*
109 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
110 */
111struct device *tcm_loop_primary;
112
113/*
114 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
115 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
116 */
117static int tcm_loop_change_queue_depth(
118 struct scsi_device *sdev,
119 int depth,
120 int reason)
121{
122 switch (reason) {
123 case SCSI_QDEPTH_DEFAULT:
124 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
125 break;
126 case SCSI_QDEPTH_QFULL:
127 scsi_track_queue_full(sdev, depth);
128 break;
129 case SCSI_QDEPTH_RAMP_UP:
130 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
131 break;
132 default:
133 return -EOPNOTSUPP;
134 }
135 return sdev->queue_depth;
136}
137
138static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
139{
140 if (sdev->tagged_supported) {
141 scsi_set_tag_type(sdev, tag);
142
143 if (tag)
144 scsi_activate_tcq(sdev, sdev->queue_depth);
145 else
146 scsi_deactivate_tcq(sdev, sdev->queue_depth);
147 } else
148 tag = 0;
149
150 return tag;
151}
152
153/*
154 * Locate the SAM Task Attr from struct scsi_cmnd *
155 */
156static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
157{
158 if (sc->device->tagged_supported) {
159 switch (sc->tag) {
160 case HEAD_OF_QUEUE_TAG:
161 return MSG_HEAD_TAG;
162 case ORDERED_QUEUE_TAG:
163 return MSG_ORDERED_TAG;
164 default:
165 break;
166 }
167 }
168
169 return MSG_SIMPLE_TAG;
170}
171
172static void tcm_loop_submission_work(struct work_struct *work)
173{
174 struct tcm_loop_cmd *tl_cmd =
175 container_of(work, struct tcm_loop_cmd, work);
176 struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
177 struct scsi_cmnd *sc = tl_cmd->sc;
178 struct tcm_loop_nexus *tl_nexus;
179 struct tcm_loop_hba *tl_hba;
180 struct tcm_loop_tpg *tl_tpg;
181 struct scatterlist *sgl_bidi = NULL;
182 u32 sgl_bidi_count = 0;
183 int rc;
184
185 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
186 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
187
188 /*
189 * Ensure that this tl_tpg reference from the incoming sc->device->id
190 * has already been configured via tcm_loop_make_naa_tpg().
191 */
192 if (!tl_tpg->tl_hba) {
193 set_host_byte(sc, DID_NO_CONNECT);
194 goto out_done;
195 }
196 if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
197 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
198 goto out_done;
199 }
200 tl_nexus = tl_hba->tl_nexus;
201 if (!tl_nexus) {
202 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
203 " does not exist\n");
204 set_host_byte(sc, DID_ERROR);
205 goto out_done;
206 }
207 if (scsi_bidi_cmnd(sc)) {
208 struct scsi_data_buffer *sdb = scsi_in(sc);
209
210 sgl_bidi = sdb->table.sgl;
211 sgl_bidi_count = sdb->table.nents;
212 se_cmd->se_cmd_flags |= SCF_BIDI;
213
214 }
215
216 if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
217 se_cmd->prot_pto = true;
218
219 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
220 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
221 scsi_bufflen(sc), tcm_loop_sam_attr(sc),
222 sc->sc_data_direction, 0,
223 scsi_sglist(sc), scsi_sg_count(sc),
224 sgl_bidi, sgl_bidi_count,
225 scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
226 if (rc < 0) {
227 set_host_byte(sc, DID_NO_CONNECT);
228 goto out_done;
229 }
230 return;
231
232out_done:
233 sc->scsi_done(sc);
234 return;
235}
236
237/*
238 * ->queuecommand can be and usually is called from interrupt context, so
239 * defer the actual submission to a workqueue.
240 */
241static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
242{
243 struct tcm_loop_cmd *tl_cmd;
244
245 pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
246 " scsi_buf_len: %u\n", sc->device->host->host_no,
247 sc->device->id, sc->device->channel, sc->device->lun,
248 sc->cmnd[0], scsi_bufflen(sc));
249
250 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
251 if (!tl_cmd) {
252 pr_err("Unable to allocate struct tcm_loop_cmd\n");
253 set_host_byte(sc, DID_ERROR);
254 sc->scsi_done(sc);
255 return 0;
256 }
257
258 tl_cmd->sc = sc;
259 tl_cmd->sc_cmd_tag = sc->tag;
260 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
261 queue_work(tcm_loop_workqueue, &tl_cmd->work);
262 return 0;
263}
264
265/*
266 * Called from SCSI EH process context to issue a LUN_RESET TMR
267 * to struct scsi_device
268 */
269static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
270 struct tcm_loop_nexus *tl_nexus,
271 int lun, int task, enum tcm_tmreq_table tmr)
272{
273 struct se_cmd *se_cmd = NULL;
274 struct se_session *se_sess;
275 struct se_portal_group *se_tpg;
276 struct tcm_loop_cmd *tl_cmd = NULL;
277 struct tcm_loop_tmr *tl_tmr = NULL;
278 int ret = TMR_FUNCTION_FAILED, rc;
279
280 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
281 if (!tl_cmd) {
282 pr_err("Unable to allocate memory for tl_cmd\n");
283 return ret;
284 }
285
286 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
287 if (!tl_tmr) {
288 pr_err("Unable to allocate memory for tl_tmr\n");
289 goto release;
290 }
291 init_waitqueue_head(&tl_tmr->tl_tmr_wait);
292
293 se_cmd = &tl_cmd->tl_se_cmd;
294 se_tpg = &tl_tpg->tl_se_tpg;
295 se_sess = tl_nexus->se_sess;
296 /*
297 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
298 */
299 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
300 DMA_NONE, MSG_SIMPLE_TAG,
301 &tl_cmd->tl_sense_buf[0]);
302
303 rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
304 if (rc < 0)
305 goto release;
306
307 if (tmr == TMR_ABORT_TASK)
308 se_cmd->se_tmr_req->ref_task_tag = task;
309
310 /*
311 * Locate the underlying TCM struct se_lun
312 */
313 if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
314 ret = TMR_LUN_DOES_NOT_EXIST;
315 goto release;
316 }
317 /*
318 * Queue the TMR to TCM Core and sleep waiting for
319 * tcm_loop_queue_tm_rsp() to wake us up.
320 */
321 transport_generic_handle_tmr(se_cmd);
322 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
323 /*
324 * The TMR LUN_RESET has completed, check the response status and
325 * then release allocations.
326 */
327 ret = se_cmd->se_tmr_req->response;
328release:
329 if (se_cmd)
330 transport_generic_free_cmd(se_cmd, 1);
331 else
332 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
333 kfree(tl_tmr);
334 return ret;
335}
336
337static int tcm_loop_abort_task(struct scsi_cmnd *sc)
338{
339 struct tcm_loop_hba *tl_hba;
340 struct tcm_loop_nexus *tl_nexus;
341 struct tcm_loop_tpg *tl_tpg;
342 int ret = FAILED;
343
344 /*
345 * Locate the tcm_loop_hba_t pointer
346 */
347 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
348 /*
349 * Locate the tl_nexus and se_sess pointers
350 */
351 tl_nexus = tl_hba->tl_nexus;
352 if (!tl_nexus) {
353 pr_err("Unable to perform device reset without"
354 " active I_T Nexus\n");
355 return FAILED;
356 }
357
358 /*
359 * Locate the tl_tpg pointer from TargetID in sc->device->id
360 */
361 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
362 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
363 sc->tag, TMR_ABORT_TASK);
364 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
365}
366
367/*
368 * Called from SCSI EH process context to issue a LUN_RESET TMR
369 * to struct scsi_device
370 */
371static int tcm_loop_device_reset(struct scsi_cmnd *sc)
372{
373 struct tcm_loop_hba *tl_hba;
374 struct tcm_loop_nexus *tl_nexus;
375 struct tcm_loop_tpg *tl_tpg;
376 int ret = FAILED;
377
378 /*
379 * Locate the tcm_loop_hba_t pointer
380 */
381 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
382 /*
383 * Locate the tl_nexus and se_sess pointers
384 */
385 tl_nexus = tl_hba->tl_nexus;
386 if (!tl_nexus) {
387 pr_err("Unable to perform device reset without"
388 " active I_T Nexus\n");
389 return FAILED;
390 }
391 /*
392 * Locate the tl_tpg pointer from TargetID in sc->device->id
393 */
394 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
395 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
396 0, TMR_LUN_RESET);
397 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
398}
399
400static int tcm_loop_target_reset(struct scsi_cmnd *sc)
401{
402 struct tcm_loop_hba *tl_hba;
403 struct tcm_loop_tpg *tl_tpg;
404
405 /*
406 * Locate the tcm_loop_hba_t pointer
407 */
408 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
409 if (!tl_hba) {
410 pr_err("Unable to perform device reset without"
411 " active I_T Nexus\n");
412 return FAILED;
413 }
414 /*
415 * Locate the tl_tpg pointer from TargetID in sc->device->id
416 */
417 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
418 if (tl_tpg) {
419 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
420 return SUCCESS;
421 }
422 return FAILED;
423}
424
425static int tcm_loop_slave_alloc(struct scsi_device *sd)
426{
427 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
428 return 0;
429}
430
431static int tcm_loop_slave_configure(struct scsi_device *sd)
432{
433 if (sd->tagged_supported) {
434 scsi_activate_tcq(sd, sd->queue_depth);
435 scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
436 sd->host->cmd_per_lun);
437 } else {
438 scsi_adjust_queue_depth(sd, 0,
439 sd->host->cmd_per_lun);
440 }
441
442 return 0;
443}
444
445static struct scsi_host_template tcm_loop_driver_template = {
446 .show_info = tcm_loop_show_info,
447 .proc_name = "tcm_loopback",
448 .name = "TCM_Loopback",
449 .queuecommand = tcm_loop_queuecommand,
450 .change_queue_depth = tcm_loop_change_queue_depth,
451 .change_queue_type = tcm_loop_change_queue_type,
452 .eh_abort_handler = tcm_loop_abort_task,
453 .eh_device_reset_handler = tcm_loop_device_reset,
454 .eh_target_reset_handler = tcm_loop_target_reset,
455 .can_queue = 1024,
456 .this_id = -1,
457 .sg_tablesize = 256,
458 .cmd_per_lun = 1024,
459 .max_sectors = 0xFFFF,
460 .use_clustering = DISABLE_CLUSTERING,
461 .slave_alloc = tcm_loop_slave_alloc,
462 .slave_configure = tcm_loop_slave_configure,
463 .module = THIS_MODULE,
464};
465
466static int tcm_loop_driver_probe(struct device *dev)
467{
468 struct tcm_loop_hba *tl_hba;
469 struct Scsi_Host *sh;
470 int error, host_prot;
471
472 tl_hba = to_tcm_loop_hba(dev);
473
474 sh = scsi_host_alloc(&tcm_loop_driver_template,
475 sizeof(struct tcm_loop_hba));
476 if (!sh) {
477 pr_err("Unable to allocate struct scsi_host\n");
478 return -ENODEV;
479 }
480 tl_hba->sh = sh;
481
482 /*
483 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
484 */
485 *((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
486 /*
487 * Setup single ID, Channel and LUN for now..
488 */
489 sh->max_id = 2;
490 sh->max_lun = 0;
491 sh->max_channel = 0;
492 sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
493
494 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
495 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
496 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
497
498 scsi_host_set_prot(sh, host_prot);
499 scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
500
501 error = scsi_add_host(sh, &tl_hba->dev);
502 if (error) {
503 pr_err("%s: scsi_add_host failed\n", __func__);
504 scsi_host_put(sh);
505 return -ENODEV;
506 }
507 return 0;
508}
509
510static int tcm_loop_driver_remove(struct device *dev)
511{
512 struct tcm_loop_hba *tl_hba;
513 struct Scsi_Host *sh;
514
515 tl_hba = to_tcm_loop_hba(dev);
516 sh = tl_hba->sh;
517
518 scsi_remove_host(sh);
519 scsi_host_put(sh);
520 return 0;
521}
522
523static void tcm_loop_release_adapter(struct device *dev)
524{
525 struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
526
527 kfree(tl_hba);
528}
529
530/*
531 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
532 */
533static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
534{
535 int ret;
536
537 tl_hba->dev.bus = &tcm_loop_lld_bus;
538 tl_hba->dev.parent = tcm_loop_primary;
539 tl_hba->dev.release = &tcm_loop_release_adapter;
540 dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
541
542 ret = device_register(&tl_hba->dev);
543 if (ret) {
544 pr_err("device_register() failed for"
545 " tl_hba->dev: %d\n", ret);
546 return -ENODEV;
547 }
548
549 return 0;
550}
551
552/*
553 * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated
554 * tcm_loop SCSI bus.
555 */
556static int tcm_loop_alloc_core_bus(void)
557{
558 int ret;
559
560 tcm_loop_primary = root_device_register("tcm_loop_0");
561 if (IS_ERR(tcm_loop_primary)) {
562 pr_err("Unable to allocate tcm_loop_primary\n");
563 return PTR_ERR(tcm_loop_primary);
564 }
565
566 ret = bus_register(&tcm_loop_lld_bus);
567 if (ret) {
568 pr_err("bus_register() failed for tcm_loop_lld_bus\n");
569 goto dev_unreg;
570 }
571
572 ret = driver_register(&tcm_loop_driverfs);
573 if (ret) {
574 pr_err("driver_register() failed for"
575 "tcm_loop_driverfs\n");
576 goto bus_unreg;
577 }
578
579 pr_debug("Initialized TCM Loop Core Bus\n");
580 return ret;
581
582bus_unreg:
583 bus_unregister(&tcm_loop_lld_bus);
584dev_unreg:
585 root_device_unregister(tcm_loop_primary);
586 return ret;
587}
588
589static void tcm_loop_release_core_bus(void)
590{
591 driver_unregister(&tcm_loop_driverfs);
592 bus_unregister(&tcm_loop_lld_bus);
593 root_device_unregister(tcm_loop_primary);
594
595 pr_debug("Releasing TCM Loop Core BUS\n");
596}
597
598static char *tcm_loop_get_fabric_name(void)
599{
600 return "loopback";
601}
602
603static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
604{
605 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
606 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
607 /*
608 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
609 * time based on the protocol dependent prefix of the passed configfs group.
610 *
611 * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
612 * ProtocolID using target_core_fabric_lib.c symbols.
613 */
614 switch (tl_hba->tl_proto_id) {
615 case SCSI_PROTOCOL_SAS:
616 return sas_get_fabric_proto_ident(se_tpg);
617 case SCSI_PROTOCOL_FCP:
618 return fc_get_fabric_proto_ident(se_tpg);
619 case SCSI_PROTOCOL_ISCSI:
620 return iscsi_get_fabric_proto_ident(se_tpg);
621 default:
622 pr_err("Unknown tl_proto_id: 0x%02x, using"
623 " SAS emulation\n", tl_hba->tl_proto_id);
624 break;
625 }
626
627 return sas_get_fabric_proto_ident(se_tpg);
628}
629
630static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
631{
632 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
633 /*
634 * Return the passed NAA identifier for the SAS Target Port
635 */
636 return &tl_tpg->tl_hba->tl_wwn_address[0];
637}
638
639static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
640{
641 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
642 /*
643 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
644 * to represent the SCSI Target Port.
645 */
646 return tl_tpg->tl_tpgt;
647}
648
649static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
650{
651 return 1;
652}
653
654static u32 tcm_loop_get_pr_transport_id(
655 struct se_portal_group *se_tpg,
656 struct se_node_acl *se_nacl,
657 struct t10_pr_registration *pr_reg,
658 int *format_code,
659 unsigned char *buf)
660{
661 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
662 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
663
664 switch (tl_hba->tl_proto_id) {
665 case SCSI_PROTOCOL_SAS:
666 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
667 format_code, buf);
668 case SCSI_PROTOCOL_FCP:
669 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
670 format_code, buf);
671 case SCSI_PROTOCOL_ISCSI:
672 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
673 format_code, buf);
674 default:
675 pr_err("Unknown tl_proto_id: 0x%02x, using"
676 " SAS emulation\n", tl_hba->tl_proto_id);
677 break;
678 }
679
680 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
681 format_code, buf);
682}
683
684static u32 tcm_loop_get_pr_transport_id_len(
685 struct se_portal_group *se_tpg,
686 struct se_node_acl *se_nacl,
687 struct t10_pr_registration *pr_reg,
688 int *format_code)
689{
690 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
691 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
692
693 switch (tl_hba->tl_proto_id) {
694 case SCSI_PROTOCOL_SAS:
695 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
696 format_code);
697 case SCSI_PROTOCOL_FCP:
698 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
699 format_code);
700 case SCSI_PROTOCOL_ISCSI:
701 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
702 format_code);
703 default:
704 pr_err("Unknown tl_proto_id: 0x%02x, using"
705 " SAS emulation\n", tl_hba->tl_proto_id);
706 break;
707 }
708
709 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
710 format_code);
711}
712
713/*
714 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
715 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
716 */
717static char *tcm_loop_parse_pr_out_transport_id(
718 struct se_portal_group *se_tpg,
719 const char *buf,
720 u32 *out_tid_len,
721 char **port_nexus_ptr)
722{
723 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
724 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
725
726 switch (tl_hba->tl_proto_id) {
727 case SCSI_PROTOCOL_SAS:
728 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
729 port_nexus_ptr);
730 case SCSI_PROTOCOL_FCP:
731 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
732 port_nexus_ptr);
733 case SCSI_PROTOCOL_ISCSI:
734 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
735 port_nexus_ptr);
736 default:
737 pr_err("Unknown tl_proto_id: 0x%02x, using"
738 " SAS emulation\n", tl_hba->tl_proto_id);
739 break;
740 }
741
742 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
743 port_nexus_ptr);
744}
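
/*
 * Illustrative sketch only (not part of this driver): given an iSCSI
 * TransportID carrying "iqn.1993-08.org.example:initiator01,i,0x00023d000001"
 * (an arbitrary example name with a format-code-1 ISID qualifier), the
 * returned pointer addresses the initiator name inside the TransportID
 * buffer, *port_nexus_ptr is pointed at the ISID qualifier, and
 * *out_tid_len reflects the TransportID length:
 *
 *	u32 tid_len;
 *	char *isid;
 *	char *i_name = tcm_loop_parse_pr_out_transport_id(se_tpg, buf,
 *						&tid_len, &isid);
 */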
745
746/*
747 * Returning 1 here allows target_core_mod to dynamically generate a struct
748 * se_node_acl based upon the incoming fabric-dependent SCSI Initiator Port name.
749 */
750static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
751{
752 return 1;
753}
754
755static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
756{
757 return 0;
758}
759
760/*
761 * Allow the I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs
762 * for local virtual Linux/SCSI LLD passthrough into a VM hypervisor guest.
763 */
764static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
765{
766 return 0;
767}
768
769/*
770 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
771 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
772 * It has been added here as a nop for target_fabric_tf_ops_check().
773 */
774static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
775{
776 return 0;
777}
778
779static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
780 struct se_portal_group *se_tpg)
781{
782 struct tcm_loop_nacl *tl_nacl;
783
784 tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
785 if (!tl_nacl) {
786 pr_err("Unable to allocate struct tcm_loop_nacl\n");
787 return NULL;
788 }
789
790 return &tl_nacl->se_node_acl;
791}
792
793static void tcm_loop_tpg_release_fabric_acl(
794 struct se_portal_group *se_tpg,
795 struct se_node_acl *se_nacl)
796{
797 struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
798 struct tcm_loop_nacl, se_node_acl);
799
800 kfree(tl_nacl);
801}
802
803static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
804{
805 return 1;
806}
807
808static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
809{
810 return 1;
811}
812
813static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
814{
815 return;
816}
817
818static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
819{
820 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
821 struct tcm_loop_cmd, tl_se_cmd);
822
823 return tl_cmd->sc_cmd_tag;
824}
825
826static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
827{
828 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
829 struct tcm_loop_cmd, tl_se_cmd);
830
831 return tl_cmd->sc_cmd_state;
832}
833
834static int tcm_loop_shutdown_session(struct se_session *se_sess)
835{
836 return 0;
837}
838
839static void tcm_loop_close_session(struct se_session *se_sess)
840{
841 return;
842}
843
844static int tcm_loop_write_pending(struct se_cmd *se_cmd)
845{
846 /*
847	 * Since Linux/SCSI has already sent down a struct scsi_cmnd with
848	 * sc->sc_data_direction of DMA_TO_DEVICE and its struct scatterlist
849	 * payload, that memory has already been mapped to se_cmd->t_data_sg
850	 * when the command was submitted to the target core.
851 *
852 * We now tell TCM to add this WRITE CDB directly into the TCM storage
853 * object execution queue.
854 */
855 target_execute_cmd(se_cmd);
856 return 0;
857}
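
/*
 * A minimal sketch (not from this driver) of how a fabric that actually has
 * to fetch WRITE data would typically use ->write_pending(): start the
 * data-out transfer here and call target_execute_cmd() only once the
 * payload has arrived, e.g. from its completion path:
 *
 *	static int example_fabric_write_pending(struct se_cmd *se_cmd)
 *	{
 *		return example_start_data_out(se_cmd);
 *	}
 *
 *	static void example_data_out_done(struct se_cmd *se_cmd)
 *	{
 *		target_execute_cmd(se_cmd);
 *	}
 *
 * where example_start_data_out()/example_data_out_done() are hypothetical
 * names standing in for the fabric's data-out machinery.  tcm_loop can skip
 * that step because the payload already sits in the initiator-side
 * scatterlist.
 */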
858
859static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
860{
861 return 0;
862}
863
864static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
865{
866 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
867 struct tcm_loop_cmd, tl_se_cmd);
868 struct scsi_cmnd *sc = tl_cmd->sc;
869
870 pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
871 " cdb: 0x%02x\n", sc, sc->cmnd[0]);
872
873 sc->result = SAM_STAT_GOOD;
874 set_host_byte(sc, DID_OK);
875 if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
876 (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
877 scsi_set_resid(sc, se_cmd->residual_count);
878 sc->scsi_done(sc);
879 return 0;
880}
881
882static int tcm_loop_queue_status(struct se_cmd *se_cmd)
883{
884 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
885 struct tcm_loop_cmd, tl_se_cmd);
886 struct scsi_cmnd *sc = tl_cmd->sc;
887
888 pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
889 " cdb: 0x%02x\n", sc, sc->cmnd[0]);
890
891 if (se_cmd->sense_buffer &&
892 ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
893 (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
894
895 memcpy(sc->sense_buffer, se_cmd->sense_buffer,
896 SCSI_SENSE_BUFFERSIZE);
897 sc->result = SAM_STAT_CHECK_CONDITION;
898 set_driver_byte(sc, DRIVER_SENSE);
899 } else
900 sc->result = se_cmd->scsi_status;
901
902 set_host_byte(sc, DID_OK);
903 if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
904 (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
905 scsi_set_resid(sc, se_cmd->residual_count);
906 sc->scsi_done(sc);
907 return 0;
908}
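
/*
 * Worked example of the completion above: for a CHECK CONDITION with valid
 * sense data the initiator-side midlayer sees
 *
 *	sc->result == (DRIVER_SENSE << 24) | (DID_OK << 16) |
 *		      SAM_STAT_CHECK_CONDITION  ==  0x08000002
 *
 * plus the copied sense buffer, while a successful command is reported as
 * DID_OK with the target's SCSI status in the low byte.
 */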
909
910static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
911{
912 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
913 struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
914 /*
915	 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, so go ahead
916	 * and wake up the wait_queue_head_t in tcm_loop_device_reset().
917 */
918 atomic_set(&tl_tmr->tmr_complete, 1);
919 wake_up(&tl_tmr->tl_tmr_wait);
920}
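
/*
 * A minimal sketch of the waiting side referenced above, assuming the
 * struct tcm_loop_tmr fields used here (the real code lives in the SCSI
 * error-handling paths elsewhere in this file):
 *
 *	atomic_set(&tl_tmr->tmr_complete, 0);
 *	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
 *	...submit the TMR to target core...
 *	wait_event_interruptible(tl_tmr->tl_tmr_wait,
 *				 atomic_read(&tl_tmr->tmr_complete));
 */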
921
922static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
923{
924 return;
925}
926
927static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
928{
929 switch (tl_hba->tl_proto_id) {
930 case SCSI_PROTOCOL_SAS:
931 return "SAS";
932 case SCSI_PROTOCOL_FCP:
933 return "FCP";
934 case SCSI_PROTOCOL_ISCSI:
935 return "iSCSI";
936 default:
937 break;
938 }
939
940 return "Unknown";
941}
942
943/* Start items for tcm_loop_port_cit */
944
945static int tcm_loop_port_link(
946 struct se_portal_group *se_tpg,
947 struct se_lun *lun)
948{
949 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
950 struct tcm_loop_tpg, tl_se_tpg);
951 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
952
953 atomic_inc(&tl_tpg->tl_tpg_port_count);
954 smp_mb__after_atomic_inc();
955 /*
956 * Add Linux/SCSI struct scsi_device by HCTL
957 */
958 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
959
960 pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
961 return 0;
962}
963
964static void tcm_loop_port_unlink(
965 struct se_portal_group *se_tpg,
966 struct se_lun *se_lun)
967{
968 struct scsi_device *sd;
969 struct tcm_loop_hba *tl_hba;
970 struct tcm_loop_tpg *tl_tpg;
971
972 tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
973 tl_hba = tl_tpg->tl_hba;
974
975 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
976 se_lun->unpacked_lun);
977 if (!sd) {
978 pr_err("Unable to locate struct scsi_device for %d:%d:"
979 "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
980 return;
981 }
982 /*
983 * Remove Linux/SCSI struct scsi_device by HCTL
984 */
985 scsi_remove_device(sd);
986 scsi_device_put(sd);
987
988 atomic_dec(&tl_tpg->tl_tpg_port_count);
989 smp_mb__after_atomic_dec();
990
991 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
992}
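
/*
 * For illustration: both callbacks above address the emulated device by the
 * same Host/Channel/Target/LUN coordinates:
 *
 *	host    = tl_hba->sh->host_no  (assigned by the SCSI midlayer)
 *	channel = 0
 *	target  = tl_tpg->tl_tpgt
 *	lun     = se_lun->unpacked_lun
 *
 * so LUN 0 of tpgt_1 on an HBA registered as host 7 appears on the
 * initiator side as SCSI device 7:0:1:0 (example host number only).
 */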
993
994/* End items for tcm_loop_port_cit */
995
996/* Start items for tcm_loop_nexus_cit */
997
998static int tcm_loop_make_nexus(
999 struct tcm_loop_tpg *tl_tpg,
1000 const char *name)
1001{
1002 struct se_portal_group *se_tpg;
1003 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1004 struct tcm_loop_nexus *tl_nexus;
1005 int ret = -ENOMEM;
1006
1007 if (tl_tpg->tl_hba->tl_nexus) {
1008 pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
1009 return -EEXIST;
1010 }
1011 se_tpg = &tl_tpg->tl_se_tpg;
1012
1013 tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
1014 if (!tl_nexus) {
1015 pr_err("Unable to allocate struct tcm_loop_nexus\n");
1016 return -ENOMEM;
1017 }
1018 /*
1019 * Initialize the struct se_session pointer
1020 */
1021 tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
1022 if (IS_ERR(tl_nexus->se_sess)) {
1023 ret = PTR_ERR(tl_nexus->se_sess);
1024 goto out;
1025 }
1026 /*
1027	 * Since we are running in 'demo mode' this call will generate a
1028 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
1029 * Initiator port name of the passed configfs group 'name'.
1030 */
1031 tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1032 se_tpg, (unsigned char *)name);
1033 if (!tl_nexus->se_sess->se_node_acl) {
1034 transport_free_session(tl_nexus->se_sess);
1035 goto out;
1036 }
1037 /*
1038	 * Now register the I_T Nexus as active with the call to
1039	 * __transport_register_session().
1040 */
1041 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
1042 tl_nexus->se_sess, tl_nexus);
1043 tl_tpg->tl_hba->tl_nexus = tl_nexus;
1044 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
1045 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
1046 name);
1047 return 0;
1048
1049out:
1050 kfree(tl_nexus);
1051 return ret;
1052}
1053
1054static int tcm_loop_drop_nexus(
1055 struct tcm_loop_tpg *tpg)
1056{
1057 struct se_session *se_sess;
1058 struct tcm_loop_nexus *tl_nexus;
1059 struct tcm_loop_hba *tl_hba = tpg->tl_hba;
1060
1061 if (!tl_hba)
1062 return -ENODEV;
1063
1064 tl_nexus = tl_hba->tl_nexus;
1065 if (!tl_nexus)
1066 return -ENODEV;
1067
1068 se_sess = tl_nexus->se_sess;
1069 if (!se_sess)
1070 return -ENODEV;
1071
1072 if (atomic_read(&tpg->tl_tpg_port_count)) {
1073 pr_err("Unable to remove TCM_Loop I_T Nexus with"
1074 " active TPG port count: %d\n",
1075 atomic_read(&tpg->tl_tpg_port_count));
1076 return -EPERM;
1077 }
1078
1079 pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
1080 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
1081 tl_nexus->se_sess->se_node_acl->initiatorname);
1082 /*
1083 * Release the SCSI I_T Nexus to the emulated SAS Target Port
1084 */
1085 transport_deregister_session(tl_nexus->se_sess);
1086 tpg->tl_hba->tl_nexus = NULL;
1087 kfree(tl_nexus);
1088 return 0;
1089}
1090
1091/* End items for tcm_loop_nexus_cit */
1092
1093static ssize_t tcm_loop_tpg_show_nexus(
1094 struct se_portal_group *se_tpg,
1095 char *page)
1096{
1097 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1098 struct tcm_loop_tpg, tl_se_tpg);
1099 struct tcm_loop_nexus *tl_nexus;
1100 ssize_t ret;
1101
1102 tl_nexus = tl_tpg->tl_hba->tl_nexus;
1103 if (!tl_nexus)
1104 return -ENODEV;
1105
1106 ret = snprintf(page, PAGE_SIZE, "%s\n",
1107 tl_nexus->se_sess->se_node_acl->initiatorname);
1108
1109 return ret;
1110}
1111
1112static ssize_t tcm_loop_tpg_store_nexus(
1113 struct se_portal_group *se_tpg,
1114 const char *page,
1115 size_t count)
1116{
1117 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1118 struct tcm_loop_tpg, tl_se_tpg);
1119 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1120 unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
1121 int ret;
1122 /*
1123	 * Shut down the active I_T nexus if 'NULL' is passed.
1124 */
1125 if (!strncmp(page, "NULL", 4)) {
1126 ret = tcm_loop_drop_nexus(tl_tpg);
1127 return (!ret) ? count : ret;
1128 }
1129 /*
1130 * Otherwise make sure the passed virtual Initiator port WWN matches
1131 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
1132 * tcm_loop_make_nexus()
1133 */
1134 if (strlen(page) >= TL_WWN_ADDR_LEN) {
1135		pr_err("Emulated initiator port WWN: %s exceeds"
1136 " max: %d\n", page, TL_WWN_ADDR_LEN);
1137 return -EINVAL;
1138 }
1139 snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
1140
1141 ptr = strstr(i_port, "naa.");
1142 if (ptr) {
1143 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
1144 pr_err("Passed SAS Initiator Port %s does not"
1145 " match target port protoid: %s\n", i_port,
1146 tcm_loop_dump_proto_id(tl_hba));
1147 return -EINVAL;
1148 }
1149 port_ptr = &i_port[0];
1150 goto check_newline;
1151 }
1152 ptr = strstr(i_port, "fc.");
1153 if (ptr) {
1154 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
1155 pr_err("Passed FCP Initiator Port %s does not"
1156 " match target port protoid: %s\n", i_port,
1157 tcm_loop_dump_proto_id(tl_hba));
1158 return -EINVAL;
1159 }
1160 port_ptr = &i_port[3]; /* Skip over "fc." */
1161 goto check_newline;
1162 }
1163 ptr = strstr(i_port, "iqn.");
1164 if (ptr) {
1165 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
1166 pr_err("Passed iSCSI Initiator Port %s does not"
1167 " match target port protoid: %s\n", i_port,
1168 tcm_loop_dump_proto_id(tl_hba));
1169 return -EINVAL;
1170 }
1171 port_ptr = &i_port[0];
1172 goto check_newline;
1173 }
1174 pr_err("Unable to locate prefix for emulated Initiator Port:"
1175 " %s\n", i_port);
1176 return -EINVAL;
1177 /*
1178 * Clear any trailing newline for the NAA WWN
1179 */
1180check_newline:
1181 if (i_port[strlen(i_port)-1] == '\n')
1182 i_port[strlen(i_port)-1] = '\0';
1183
1184 ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
1185 if (ret < 0)
1186 return ret;
1187
1188 return count;
1189}
1190
1191TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
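
/*
 * Usage sketch (path is illustrative, assuming the default configfs mount
 * point): writing an initiator port name whose prefix matches the endpoint
 * protocol, e.g. the string "naa.60014051234567b9", into
 *
 *	/sys/kernel/config/target/loopback/<target_wwn>/tpgt_1/nexus
 *
 * invokes tcm_loop_tpg_store_nexus() above and establishes the I_T nexus
 * via tcm_loop_make_nexus(); writing "NULL" tears it down again through
 * tcm_loop_drop_nexus().
 */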
1192
1193static ssize_t tcm_loop_tpg_show_transport_status(
1194 struct se_portal_group *se_tpg,
1195 char *page)
1196{
1197 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1198 struct tcm_loop_tpg, tl_se_tpg);
1199 const char *status = NULL;
1200 ssize_t ret = -EINVAL;
1201
1202 switch (tl_tpg->tl_transport_status) {
1203 case TCM_TRANSPORT_ONLINE:
1204 status = "online";
1205 break;
1206 case TCM_TRANSPORT_OFFLINE:
1207 status = "offline";
1208 break;
1209 default:
1210 break;
1211 }
1212
1213 if (status)
1214 ret = snprintf(page, PAGE_SIZE, "%s\n", status);
1215
1216 return ret;
1217}
1218
1219static ssize_t tcm_loop_tpg_store_transport_status(
1220 struct se_portal_group *se_tpg,
1221 const char *page,
1222 size_t count)
1223{
1224 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1225 struct tcm_loop_tpg, tl_se_tpg);
1226
1227 if (!strncmp(page, "online", 6)) {
1228 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1229 return count;
1230 }
1231 if (!strncmp(page, "offline", 7)) {
1232 tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1233 return count;
1234 }
1235 return -EINVAL;
1236}
1237
1238TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
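
/*
 * Usage note: only the literal strings "online" and "offline" are accepted
 * by the store handler above (anything else returns -EINVAL); the resulting
 * tl_tpg->tl_transport_status value lets the rest of the driver distinguish
 * an administratively offline virtual port from an online one.
 */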
1239
1240static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1241 &tcm_loop_tpg_nexus.attr,
1242 &tcm_loop_tpg_transport_status.attr,
1243 NULL,
1244};
1245
1246/* Start items for tcm_loop_naa_cit */
1247
1248static struct se_portal_group *tcm_loop_make_naa_tpg(
1249 struct se_wwn *wwn,
1250 struct config_group *group,
1251 const char *name)
1252{
1253 struct tcm_loop_hba *tl_hba = container_of(wwn,
1254 struct tcm_loop_hba, tl_hba_wwn);
1255 struct tcm_loop_tpg *tl_tpg;
1256 char *tpgt_str, *end_ptr;
1257 int ret;
1258 unsigned short int tpgt;
1259
1260 tpgt_str = strstr(name, "tpgt_");
1261 if (!tpgt_str) {
1262 pr_err("Unable to locate \"tpgt_#\" directory"
1263 " group\n");
1264 return ERR_PTR(-EINVAL);
1265 }
1266 tpgt_str += 5; /* Skip ahead of "tpgt_" */
1267 tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
1268
1269 if (tpgt >= TL_TPGS_PER_HBA) {
1270 pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
1271 " %u\n", tpgt, TL_TPGS_PER_HBA);
1272 return ERR_PTR(-EINVAL);
1273 }
1274 tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1275 tl_tpg->tl_hba = tl_hba;
1276 tl_tpg->tl_tpgt = tpgt;
1277 /*
1278	 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
1279 */
1280 ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
1281 wwn, &tl_tpg->tl_se_tpg, tl_tpg,
1282 TRANSPORT_TPG_TYPE_NORMAL);
1283 if (ret < 0)
1284		return ERR_PTR(ret);
1285
1286 pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1287 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1288 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1289
1290 return &tl_tpg->tl_se_tpg;
1291}
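
/*
 * For illustration: creating the configfs group "tpgt_1" beneath an existing
 * loopback endpoint directory invokes this callback with name == "tpgt_1",
 * which parses out tpgt == 1; values of TL_TPGS_PER_HBA and above are
 * rejected with -EINVAL.
 */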
1292
1293static void tcm_loop_drop_naa_tpg(
1294 struct se_portal_group *se_tpg)
1295{
1296 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1297 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1298 struct tcm_loop_tpg, tl_se_tpg);
1299 struct tcm_loop_hba *tl_hba;
1300 unsigned short tpgt;
1301
1302 tl_hba = tl_tpg->tl_hba;
1303 tpgt = tl_tpg->tl_tpgt;
1304 /*
1305 * Release the I_T Nexus for the Virtual SAS link if present
1306 */
1307 tcm_loop_drop_nexus(tl_tpg);
1308 /*
1309	 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
1310 */
1311 core_tpg_deregister(se_tpg);
1312
1313 tl_tpg->tl_hba = NULL;
1314 tl_tpg->tl_tpgt = 0;
1315
1316 pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1317 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1318 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1319}
1320
1321/* End items for tcm_loop_naa_cit */
1322
1323/* Start items for tcm_loop_cit */
1324
1325static struct se_wwn *tcm_loop_make_scsi_hba(
1326 struct target_fabric_configfs *tf,
1327 struct config_group *group,
1328 const char *name)
1329{
1330 struct tcm_loop_hba *tl_hba;
1331 struct Scsi_Host *sh;
1332 char *ptr;
1333 int ret, off = 0;
1334
1335 tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1336 if (!tl_hba) {
1337 pr_err("Unable to allocate struct tcm_loop_hba\n");
1338 return ERR_PTR(-ENOMEM);
1339 }
1340 /*
1341 * Determine the emulated Protocol Identifier and Target Port Name
1342 * based on the incoming configfs directory name.
1343 */
1344 ptr = strstr(name, "naa.");
1345 if (ptr) {
1346 tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1347 goto check_len;
1348 }
1349 ptr = strstr(name, "fc.");
1350 if (ptr) {
1351 tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1352 off = 3; /* Skip over "fc." */
1353 goto check_len;
1354 }
1355 ptr = strstr(name, "iqn.");
1356 if (!ptr) {
1357 pr_err("Unable to locate prefix for emulated Target "
1358 "Port: %s\n", name);
1359 ret = -EINVAL;
1360 goto out;
1361 }
1362 tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1363
1364check_len:
1365 if (strlen(name) >= TL_WWN_ADDR_LEN) {
1366		pr_err("Emulated NAA %s Address: %s exceeds"
1367			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
1368 TL_WWN_ADDR_LEN);
1369 ret = -EINVAL;
1370 goto out;
1371 }
1372 snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1373
1374 /*
1375	 * tcm_loop_setup_hba_bus() calls device_register() on tl_hba->dev; the
1376	 * bus probe callback tcm_loop_driver_probe() then sets up the emulated
1377	 * Linux/SCSI LLD struct Scsi_Host at tl_hba->sh.
1378 */
1379 ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1380 if (ret)
1381 goto out;
1382
1383 sh = tl_hba->sh;
1384 tcm_loop_hba_no_cnt++;
1385 pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1386 " %s Address: %s at Linux/SCSI Host ID: %d\n",
1387 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1388
1389 return &tl_hba->tl_hba_wwn;
1390out:
1391 kfree(tl_hba);
1392 return ERR_PTR(ret);
1393}
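
/*
 * For illustration, the emulated protocol is chosen purely from the configfs
 * directory name used to create the endpoint (example names only):
 *
 *	"naa.60014051234567b9"            -> SCSI_PROTOCOL_SAS
 *	"fc.20000000c9a1b2c3"             -> SCSI_PROTOCOL_FCP   ("fc." prefix stripped)
 *	"iqn.2003-01.org.example:target0" -> SCSI_PROTOCOL_ISCSI
 */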
1394
1395static void tcm_loop_drop_scsi_hba(
1396 struct se_wwn *wwn)
1397{
1398 struct tcm_loop_hba *tl_hba = container_of(wwn,
1399 struct tcm_loop_hba, tl_hba_wwn);
1400
1401	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1402		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1403		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address, tl_hba->sh->host_no);
1404 /*
1405 * Call device_unregister() on the original tl_hba->dev.
1406	 * The device release callback, tcm_loop_release_adapter(), will
1407	 * then free *tl_hba.
1408 */
1409 device_unregister(&tl_hba->dev);
1410}
1411
1413static ssize_t tcm_loop_wwn_show_attr_version(
1414 struct target_fabric_configfs *tf,
1415 char *page)
1416{
1417 return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1418}
1419
1420TF_WWN_ATTR_RO(tcm_loop, version);
1421
1422static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1423 &tcm_loop_wwn_version.attr,
1424 NULL,
1425};
1426
1427/* End items for tcm_loop_cit */
1428
1429static int tcm_loop_register_configfs(void)
1430{
1431 struct target_fabric_configfs *fabric;
1432 int ret;
1433 /*
1434 * Set the TCM Loop HBA counter to zero
1435 */
1436 tcm_loop_hba_no_cnt = 0;
1437 /*
1438 * Register the top level struct config_item_type with TCM core
1439 */
1440 fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
1441 if (IS_ERR(fabric)) {
1442 pr_err("tcm_loop_register_configfs() failed!\n");
1443 return PTR_ERR(fabric);
1444 }
1445 /*
1446 * Setup the fabric API of function pointers used by target_core_mod
1447 */
1448 fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
1449 fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
1450 fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
1451 fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
1452 fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
1453 fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
1454 fabric->tf_ops.tpg_get_pr_transport_id_len =
1455 &tcm_loop_get_pr_transport_id_len;
1456 fabric->tf_ops.tpg_parse_pr_out_transport_id =
1457 &tcm_loop_parse_pr_out_transport_id;
1458 fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
1459 fabric->tf_ops.tpg_check_demo_mode_cache =
1460 &tcm_loop_check_demo_mode_cache;
1461 fabric->tf_ops.tpg_check_demo_mode_write_protect =
1462 &tcm_loop_check_demo_mode_write_protect;
1463 fabric->tf_ops.tpg_check_prod_mode_write_protect =
1464 &tcm_loop_check_prod_mode_write_protect;
1465 /*
1466 * The TCM loopback fabric module runs in demo-mode to a local
1467	 * virtual SCSI device, so fabric-dependent initiator ACLs are
1468 * not required.
1469 */
1470 fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
1471 fabric->tf_ops.tpg_release_fabric_acl =
1472 &tcm_loop_tpg_release_fabric_acl;
1473 fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
1474 /*
1475 * Used for setting up remaining TCM resources in process context
1476 */
1477 fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
1478 fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
1479 fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
1480 fabric->tf_ops.close_session = &tcm_loop_close_session;
1481 fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
1482 fabric->tf_ops.sess_get_initiator_sid = NULL;
1483 fabric->tf_ops.write_pending = &tcm_loop_write_pending;
1484 fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
1485 /*
1486 * Not used for TCM loopback
1487 */
1488 fabric->tf_ops.set_default_node_attributes =
1489 &tcm_loop_set_default_node_attributes;
1490 fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
1491 fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
1492 fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
1493 fabric->tf_ops.queue_status = &tcm_loop_queue_status;
1494 fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
1495 fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
1496
1497 /*
1498 * Setup function pointers for generic logic in target_core_fabric_configfs.c
1499 */
1500 fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
1501 fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
1502 fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
1503 fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
1504 /*
1505 * fabric_post_link() and fabric_pre_unlink() are used for
1506 * registration and release of TCM Loop Virtual SCSI LUNs.
1507 */
1508 fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
1509 fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
1510 fabric->tf_ops.fabric_make_np = NULL;
1511 fabric->tf_ops.fabric_drop_np = NULL;
1512 /*
1513 * Setup default attribute lists for various fabric->tf_cit_tmpl
1514 */
1515 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
1516 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
1517 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
1518 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
1519 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
1520 /*
1521	 * Once fabric->tf_ops has been set up, register the fabric for
1522 * use within TCM
1523 */
1524 ret = target_fabric_configfs_register(fabric);
1525 if (ret < 0) {
1526 pr_err("target_fabric_configfs_register() for"
1527 " TCM_Loop failed!\n");
1528 target_fabric_configfs_free(fabric);
1529		return ret;
1530 }
1531 /*
1532 * Setup our local pointer to *fabric.
1533 */
1534 tcm_loop_fabric_configfs = fabric;
1535 pr_debug("TCM_LOOP[0] - Set fabric ->"
1536 " tcm_loop_fabric_configfs\n");
1537 return 0;
1538}
1539
1540static void tcm_loop_deregister_configfs(void)
1541{
1542 if (!tcm_loop_fabric_configfs)
1543 return;
1544
1545 target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
1546 tcm_loop_fabric_configfs = NULL;
1547 pr_debug("TCM_LOOP[0] - Cleared"
1548 " tcm_loop_fabric_configfs\n");
1549}
1550
1551static int __init tcm_loop_fabric_init(void)
1552{
1553 int ret = -ENOMEM;
1554
1555 tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1556 if (!tcm_loop_workqueue)
1557 goto out;
1558
1559 tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1560 sizeof(struct tcm_loop_cmd),
1561 __alignof__(struct tcm_loop_cmd),
1562 0, NULL);
1563 if (!tcm_loop_cmd_cache) {
1564 pr_debug("kmem_cache_create() for"
1565 " tcm_loop_cmd_cache failed\n");
1566 goto out_destroy_workqueue;
1567 }
1568
1569 ret = tcm_loop_alloc_core_bus();
1570 if (ret)
1571 goto out_destroy_cache;
1572
1573 ret = tcm_loop_register_configfs();
1574 if (ret)
1575 goto out_release_core_bus;
1576
1577 return 0;
1578
1579out_release_core_bus:
1580 tcm_loop_release_core_bus();
1581out_destroy_cache:
1582 kmem_cache_destroy(tcm_loop_cmd_cache);
1583out_destroy_workqueue:
1584 destroy_workqueue(tcm_loop_workqueue);
1585out:
1586 return ret;
1587}
1588
1589static void __exit tcm_loop_fabric_exit(void)
1590{
1591 tcm_loop_deregister_configfs();
1592 tcm_loop_release_core_bus();
1593 kmem_cache_destroy(tcm_loop_cmd_cache);
1594 destroy_workqueue(tcm_loop_workqueue);
1595}
1596
1597MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1598MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1599MODULE_LICENSE("GPL");
1600module_init(tcm_loop_fabric_init);
1601module_exit(tcm_loop_fabric_exit);