v5.9
   1/*******************************************************************************
   2 *
   3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
   4 * for emulated SAS initiator ports
   5 *
   6 * © Copyright 2011-2013 Datera, Inc.
   7 *
   8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   9 *
  10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 ****************************************************************************/
  22
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/init.h>
  26#include <linux/slab.h>
  27#include <linux/types.h>
  28#include <linux/configfs.h>
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_tcq.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_cmnd.h>
  34
  35#include <target/target_core_base.h>
  36#include <target/target_core_fabric.h>
  37
  38#include "tcm_loop.h"
  39
  40#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
  41
  42static struct workqueue_struct *tcm_loop_workqueue;
  43static struct kmem_cache *tcm_loop_cmd_cache;
  44
  45static int tcm_loop_hba_no_cnt;
  46
  47static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  48
  49/*
  50 * Called from struct target_core_fabric_ops->check_stop_free()
  51 */
  52static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
  53{
  54	return transport_generic_free_cmd(se_cmd, 0);
  55}
  56
  57static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
  58{
  59	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
  60				struct tcm_loop_cmd, tl_se_cmd);
  61
  62	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  63}
  64
  65static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
  66{
  67	seq_puts(m, "tcm_loop_proc_info()\n");
  68	return 0;
  69}
  70
  71static int tcm_loop_driver_probe(struct device *);
  72static int tcm_loop_driver_remove(struct device *);
  73
  74static int pseudo_lld_bus_match(struct device *dev,
  75				struct device_driver *dev_driver)
  76{
  77	return 1;
  78}
  79
  80static struct bus_type tcm_loop_lld_bus = {
  81	.name			= "tcm_loop_bus",
  82	.match			= pseudo_lld_bus_match,
  83	.probe			= tcm_loop_driver_probe,
  84	.remove			= tcm_loop_driver_remove,
  85};
  86
  87static struct device_driver tcm_loop_driverfs = {
  88	.name			= "tcm_loop",
  89	.bus			= &tcm_loop_lld_bus,
  90};
  91/*
  92 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
  93 */
  94static struct device *tcm_loop_primary;
  95
  96static void tcm_loop_submission_work(struct work_struct *work)
  97{
  98	struct tcm_loop_cmd *tl_cmd =
  99		container_of(work, struct tcm_loop_cmd, work);
 100	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
 101	struct scsi_cmnd *sc = tl_cmd->sc;
 102	struct tcm_loop_nexus *tl_nexus;
 103	struct tcm_loop_hba *tl_hba;
 104	struct tcm_loop_tpg *tl_tpg;
 105	struct scatterlist *sgl_bidi = NULL;
 106	u32 sgl_bidi_count = 0, transfer_length;
 107	int rc;
 108
 109	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 110	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 111
 112	/*
 113	 * Ensure that this tl_tpg reference from the incoming sc->device->id
 114	 * has already been configured via tcm_loop_make_naa_tpg().
 115	 */
 116	if (!tl_tpg->tl_hba) {
 117		set_host_byte(sc, DID_NO_CONNECT);
 118		goto out_done;
 119	}
 120	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
 121		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
 122		goto out_done;
 123	}
 124	tl_nexus = tl_tpg->tl_nexus;
 125	if (!tl_nexus) {
 126		scmd_printk(KERN_ERR, sc,
 127			    "TCM_Loop I_T Nexus does not exist\n");
 128		set_host_byte(sc, DID_ERROR);
 129		goto out_done;
 130	}
 131
 132	transfer_length = scsi_transfer_length(sc);
 133	if (!scsi_prot_sg_count(sc) &&
 134	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
 135		se_cmd->prot_pto = true;
 136		/*
 137		 * loopback transport doesn't support
 138		 * WRITE_GENERATE, READ_STRIP protection
 139		 * information operations, go ahead unprotected.
 140		 */
 141		transfer_length = scsi_bufflen(sc);
 142	}
 143
 144	se_cmd->tag = tl_cmd->sc_cmd_tag;
 145	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 146			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
 147			transfer_length, TCM_SIMPLE_TAG,
 148			sc->sc_data_direction, 0,
 149			scsi_sglist(sc), scsi_sg_count(sc),
 150			sgl_bidi, sgl_bidi_count,
 151			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
 152	if (rc < 0) {
 153		set_host_byte(sc, DID_NO_CONNECT);
 154		goto out_done;
 155	}
 156	return;
 157
 158out_done:
 159	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 160	sc->scsi_done(sc);
 161}
 162
 163/*
 164 * ->queuecommand can be and usually is called from interrupt context, so
 165 * defer the actual submission to a workqueue.
 166 */
 167static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 168{
 169	struct tcm_loop_cmd *tl_cmd;
 170
 171	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
 172		 __func__, sc->device->host->host_no, sc->device->id,
 173		 sc->device->channel, sc->device->lun, sc->cmnd[0],
 174		 scsi_bufflen(sc));
 175
 176	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 177	if (!tl_cmd) {
 178		set_host_byte(sc, DID_ERROR);
 179		sc->scsi_done(sc);
 180		return 0;
 181	}
 182
 183	tl_cmd->sc = sc;
 184	tl_cmd->sc_cmd_tag = sc->request->tag;
 185	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
 186	queue_work(tcm_loop_workqueue, &tl_cmd->work);
 187	return 0;
 188}
 189
 190/*
 191 * Called from SCSI EH process context to issue a LUN_RESET TMR
 192 * to struct scsi_device
 193 */
 194static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 195			      u64 lun, int task, enum tcm_tmreq_table tmr)
 196{
 197	struct se_cmd *se_cmd;
 198	struct se_session *se_sess;
 199	struct tcm_loop_nexus *tl_nexus;
 200	struct tcm_loop_cmd *tl_cmd;
 201	int ret = TMR_FUNCTION_FAILED, rc;
 202
 203	/*
 204	 * Locate the tl_nexus and se_sess pointers
 205	 */
 206	tl_nexus = tl_tpg->tl_nexus;
 207	if (!tl_nexus) {
 208		pr_err("Unable to perform device reset without active I_T Nexus\n");
 209		return ret;
 210	}
 211
 212	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 213	if (!tl_cmd)
 214		return ret;
 215
 216	init_completion(&tl_cmd->tmr_done);
 217
 218	se_cmd = &tl_cmd->tl_se_cmd;
 219	se_sess = tl_tpg->tl_nexus->se_sess;
 220
 221	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
 222			       NULL, tmr, GFP_KERNEL, task,
 223			       TARGET_SCF_ACK_KREF);
 224	if (rc < 0)
 225		goto release;
 226	wait_for_completion(&tl_cmd->tmr_done);
 227	ret = se_cmd->se_tmr_req->response;
 228	target_put_sess_cmd(se_cmd);
 229
 230out:
 231	return ret;
 232
 233release:
 234	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 235	goto out;
 236}
 237
 238static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 239{
 240	struct tcm_loop_hba *tl_hba;
 241	struct tcm_loop_tpg *tl_tpg;
 242	int ret = FAILED;
 243
 244	/*
 245	 * Locate the tcm_loop_hba_t pointer
 246	 */
 247	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 248	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 249	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 250				 sc->request->tag, TMR_ABORT_TASK);
 251	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 252}
 253
 254/*
 255 * Called from SCSI EH process context to issue a LUN_RESET TMR
 256 * to struct scsi_device
 257 */
 258static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 259{
 260	struct tcm_loop_hba *tl_hba;
 261	struct tcm_loop_tpg *tl_tpg;
 262	int ret = FAILED;
 263
 264	/*
 265	 * Locate the tcm_loop_hba_t pointer
 266	 */
 267	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 268	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 269
 270	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 271				 0, TMR_LUN_RESET);
 272	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 273}
 274
 275static int tcm_loop_target_reset(struct scsi_cmnd *sc)
 276{
 277	struct tcm_loop_hba *tl_hba;
 278	struct tcm_loop_tpg *tl_tpg;
 279
 280	/*
 281	 * Locate the tcm_loop_hba_t pointer
 282	 */
 283	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 284	if (!tl_hba) {
 285		pr_err("Unable to perform device reset without active I_T Nexus\n");
 286		return FAILED;
 287	}
 288	/*
 289	 * Locate the tl_tpg pointer from TargetID in sc->device->id
 290	 */
 291	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 292	if (tl_tpg) {
 293		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 294		return SUCCESS;
 295	}
 296	return FAILED;
 297}
 298
 299static struct scsi_host_template tcm_loop_driver_template = {
 300	.show_info		= tcm_loop_show_info,
 301	.proc_name		= "tcm_loopback",
 302	.name			= "TCM_Loopback",
 303	.queuecommand		= tcm_loop_queuecommand,
 304	.change_queue_depth	= scsi_change_queue_depth,
 305	.eh_abort_handler = tcm_loop_abort_task,
 306	.eh_device_reset_handler = tcm_loop_device_reset,
 307	.eh_target_reset_handler = tcm_loop_target_reset,
 308	.can_queue		= 1024,
 309	.this_id		= -1,
 310	.sg_tablesize		= 256,
 311	.cmd_per_lun		= 1024,
 312	.max_sectors		= 0xFFFF,
 313	.dma_boundary		= PAGE_SIZE - 1,
 314	.module			= THIS_MODULE,
 315	.track_queue_depth	= 1,
 316};
 317
 318static int tcm_loop_driver_probe(struct device *dev)
 319{
 320	struct tcm_loop_hba *tl_hba;
 321	struct Scsi_Host *sh;
 322	int error, host_prot;
 323
 324	tl_hba = to_tcm_loop_hba(dev);
 325
 326	sh = scsi_host_alloc(&tcm_loop_driver_template,
 327			sizeof(struct tcm_loop_hba));
 328	if (!sh) {
 329		pr_err("Unable to allocate struct scsi_host\n");
 330		return -ENODEV;
 331	}
 332	tl_hba->sh = sh;
 333
 334	/*
 335	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
 336	 */
 337	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
 338	/*
 339	 * Setup single ID, Channel and LUN for now..
 340	 */
 341	sh->max_id = 2;
 342	sh->max_lun = 0;
 343	sh->max_channel = 0;
 344	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 345
 346	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
 347		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
 348		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
 349
 350	scsi_host_set_prot(sh, host_prot);
 351	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
 352
 353	error = scsi_add_host(sh, &tl_hba->dev);
 354	if (error) {
 355		pr_err("%s: scsi_add_host failed\n", __func__);
 356		scsi_host_put(sh);
 357		return -ENODEV;
 358	}
 359	return 0;
 360}
 361
 362static int tcm_loop_driver_remove(struct device *dev)
 363{
 364	struct tcm_loop_hba *tl_hba;
 365	struct Scsi_Host *sh;
 366
 367	tl_hba = to_tcm_loop_hba(dev);
 368	sh = tl_hba->sh;
 369
 370	scsi_remove_host(sh);
 371	scsi_host_put(sh);
 372	return 0;
 373}
 374
 375static void tcm_loop_release_adapter(struct device *dev)
 376{
 377	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
 378
 379	kfree(tl_hba);
 380}
 381
 382/*
 383 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
 384 */
 385static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
 386{
 387	int ret;
 388
 389	tl_hba->dev.bus = &tcm_loop_lld_bus;
 390	tl_hba->dev.parent = tcm_loop_primary;
 391	tl_hba->dev.release = &tcm_loop_release_adapter;
 392	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
 393
 394	ret = device_register(&tl_hba->dev);
 395	if (ret) {
 396		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
 397		return -ENODEV;
 398	}
 399
 400	return 0;
 401}
 402
 403/*
  404 * Called from tcm_loop_fabric_init() in tcm_loop_fabric.c to load the emulated
 405 * tcm_loop SCSI bus.
 406 */
 407static int tcm_loop_alloc_core_bus(void)
 408{
 409	int ret;
 410
 411	tcm_loop_primary = root_device_register("tcm_loop_0");
 412	if (IS_ERR(tcm_loop_primary)) {
 413		pr_err("Unable to allocate tcm_loop_primary\n");
 414		return PTR_ERR(tcm_loop_primary);
 415	}
 416
 417	ret = bus_register(&tcm_loop_lld_bus);
 418	if (ret) {
 419		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 420		goto dev_unreg;
 421	}
 422
 423	ret = driver_register(&tcm_loop_driverfs);
 424	if (ret) {
 425		pr_err("driver_register() failed for tcm_loop_driverfs\n");
 426		goto bus_unreg;
 427	}
 428
 429	pr_debug("Initialized TCM Loop Core Bus\n");
 430	return ret;
 431
 432bus_unreg:
 433	bus_unregister(&tcm_loop_lld_bus);
 434dev_unreg:
 435	root_device_unregister(tcm_loop_primary);
 436	return ret;
 437}
 438
 439static void tcm_loop_release_core_bus(void)
 440{
 441	driver_unregister(&tcm_loop_driverfs);
 442	bus_unregister(&tcm_loop_lld_bus);
 443	root_device_unregister(tcm_loop_primary);
 444
 445	pr_debug("Releasing TCM Loop Core BUS\n");
 446}
 447
 448static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
 449{
 450	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 451}
 452
 453static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 454{
 455	/*
 456	 * Return the passed NAA identifier for the Target Port
 457	 */
 458	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
 459}
 460
 461static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 462{
 463	/*
 464	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
 465	 * to represent the SCSI Target Port.
 466	 */
 467	return tl_tpg(se_tpg)->tl_tpgt;
 468}
 469
 470/*
 471 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 472 * based upon the incoming fabric dependent SCSI Initiator Port
 473 */
 474static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
 475{
 476	return 1;
 477}
 478
 479static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
 480{
 481	return 0;
 482}
 483
 484/*
  485 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 486 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 487 */
 488static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
 489{
 490	return 0;
 491}
 492
 493/*
  494 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 495 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 496 * It has been added here as a nop for target_fabric_tf_ops_check()
 497 */
 498static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
 499{
 500	return 0;
 501}
 502
 503static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
 504{
 505	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 506						   tl_se_tpg);
 507	return tl_tpg->tl_fabric_prot_type;
 508}
 509
 510static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 511{
 512	return 1;
 513}
 514
 515static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
 516{
 517	return 1;
 518}
 519
 520static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
 521{
 522	return;
 523}
 524
 525static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
 526{
 527	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 528			struct tcm_loop_cmd, tl_se_cmd);
 529
 530	return tl_cmd->sc_cmd_state;
 531}
 532
 533static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 534{
 535	/*
 536	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
 537	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
 538	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
 539	 * format with transport_generic_map_mem_to_cmd().
 540	 *
 541	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 542	 * object execution queue.
 543	 */
 544	target_execute_cmd(se_cmd);
 545	return 0;
 546}
 547
 548static int tcm_loop_queue_data_or_status(const char *func,
 549		struct se_cmd *se_cmd, u8 scsi_status)
 550{
 551	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 552				struct tcm_loop_cmd, tl_se_cmd);
 553	struct scsi_cmnd *sc = tl_cmd->sc;
 554
 555	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
 556		 func, sc, sc->cmnd[0]);
 557
 558	if (se_cmd->sense_buffer &&
 559	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 560	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 561
 562		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 563				SCSI_SENSE_BUFFERSIZE);
 564		sc->result = SAM_STAT_CHECK_CONDITION;
 565		set_driver_byte(sc, DRIVER_SENSE);
 566	} else
 567		sc->result = scsi_status;
 568
 569	set_host_byte(sc, DID_OK);
 570	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 571	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 572		scsi_set_resid(sc, se_cmd->residual_count);
 573	sc->scsi_done(sc);
 574	return 0;
 575}
 576
 577static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 578{
 579	return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
 580}
 581
 582static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 583{
 584	return tcm_loop_queue_data_or_status(__func__,
 585					     se_cmd, se_cmd->scsi_status);
 586}
 587
 588static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 589{
 590	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 591				struct tcm_loop_cmd, tl_se_cmd);
 592
 593	/* Wake up tcm_loop_issue_tmr(). */
 594	complete(&tl_cmd->tmr_done);
 595}
 596
 597static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
 598{
 599	return;
 600}
 601
 602static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 603{
 604	switch (tl_hba->tl_proto_id) {
 605	case SCSI_PROTOCOL_SAS:
 606		return "SAS";
 607	case SCSI_PROTOCOL_FCP:
 608		return "FCP";
 609	case SCSI_PROTOCOL_ISCSI:
 610		return "iSCSI";
 611	default:
 612		break;
 613	}
 614
 615	return "Unknown";
 616}
 617
 618/* Start items for tcm_loop_port_cit */
 619
 620static int tcm_loop_port_link(
 621	struct se_portal_group *se_tpg,
 622	struct se_lun *lun)
 623{
 624	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 625				struct tcm_loop_tpg, tl_se_tpg);
 626	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 627
 628	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
 629	/*
 630	 * Add Linux/SCSI struct scsi_device by HCTL
 631	 */
 632	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 633
 634	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 635	return 0;
 636}
 637
 638static void tcm_loop_port_unlink(
 639	struct se_portal_group *se_tpg,
 640	struct se_lun *se_lun)
 641{
 642	struct scsi_device *sd;
 643	struct tcm_loop_hba *tl_hba;
 644	struct tcm_loop_tpg *tl_tpg;
 645
 646	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 647	tl_hba = tl_tpg->tl_hba;
 648
 649	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 650				se_lun->unpacked_lun);
 651	if (!sd) {
 652		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
 653		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 654		return;
 655	}
 656	/*
 657	 * Remove Linux/SCSI struct scsi_device by HCTL
 658	 */
 659	scsi_remove_device(sd);
 660	scsi_device_put(sd);
 661
 662	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
 663
 664	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 665}
 666
 667/* End items for tcm_loop_port_cit */
 668
 669static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
 670		struct config_item *item, char *page)
 671{
 672	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 673	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 674						   tl_se_tpg);
 675
 676	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
 677}
 678
 679static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
 680		struct config_item *item, const char *page, size_t count)
 681{
 682	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 683	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 684						   tl_se_tpg);
 685	unsigned long val;
 686	int ret = kstrtoul(page, 0, &val);
 687
 688	if (ret) {
 689		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
 690		return ret;
 691	}
 692	if (val != 0 && val != 1 && val != 3) {
  693		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
 694		return -EINVAL;
 695	}
 696	tl_tpg->tl_fabric_prot_type = val;
 697
 698	return count;
 699}
 700
 701CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
 702
 703static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 704	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
 705	NULL,
 706};
 707
 708/* Start items for tcm_loop_nexus_cit */
 709
 710static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
 711				  struct se_session *se_sess, void *p)
 712{
 713	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 714					struct tcm_loop_tpg, tl_se_tpg);
 715
 716	tl_tpg->tl_nexus = p;
 717	return 0;
 718}
 719
 720static int tcm_loop_make_nexus(
 721	struct tcm_loop_tpg *tl_tpg,
 722	const char *name)
 723{
 724	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 725	struct tcm_loop_nexus *tl_nexus;
 726	int ret;
 727
 728	if (tl_tpg->tl_nexus) {
 729		pr_debug("tl_tpg->tl_nexus already exists\n");
 730		return -EEXIST;
 731	}
 732
 733	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
 734	if (!tl_nexus)
 735		return -ENOMEM;
 736
 737	tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
 738					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
 739					name, tl_nexus, tcm_loop_alloc_sess_cb);
 740	if (IS_ERR(tl_nexus->se_sess)) {
 741		ret = PTR_ERR(tl_nexus->se_sess);
 742		kfree(tl_nexus);
 743		return ret;
 744	}
 745
 746	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
 747		 tcm_loop_dump_proto_id(tl_hba), name);
 748	return 0;
 749}
 750
 751static int tcm_loop_drop_nexus(
 752	struct tcm_loop_tpg *tpg)
 753{
 754	struct se_session *se_sess;
 755	struct tcm_loop_nexus *tl_nexus;
 756
 757	tl_nexus = tpg->tl_nexus;
 758	if (!tl_nexus)
 759		return -ENODEV;
 760
 761	se_sess = tl_nexus->se_sess;
 762	if (!se_sess)
 763		return -ENODEV;
 764
 765	if (atomic_read(&tpg->tl_tpg_port_count)) {
 766		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
 767		       atomic_read(&tpg->tl_tpg_port_count));
 768		return -EPERM;
 769	}
 770
 771	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
 772		 tcm_loop_dump_proto_id(tpg->tl_hba),
 773		 tl_nexus->se_sess->se_node_acl->initiatorname);
 774	/*
 775	 * Release the SCSI I_T Nexus to the emulated Target Port
 776	 */
 777	target_remove_session(se_sess);
 778	tpg->tl_nexus = NULL;
 779	kfree(tl_nexus);
 780	return 0;
 781}
 782
 783/* End items for tcm_loop_nexus_cit */
 784
 785static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
 786{
 787	struct se_portal_group *se_tpg = to_tpg(item);
 788	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 789			struct tcm_loop_tpg, tl_se_tpg);
 790	struct tcm_loop_nexus *tl_nexus;
 791	ssize_t ret;
 792
 793	tl_nexus = tl_tpg->tl_nexus;
 794	if (!tl_nexus)
 795		return -ENODEV;
 796
 797	ret = snprintf(page, PAGE_SIZE, "%s\n",
 798		tl_nexus->se_sess->se_node_acl->initiatorname);
 799
 800	return ret;
 801}
 802
 803static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
 804		const char *page, size_t count)
 805{
 806	struct se_portal_group *se_tpg = to_tpg(item);
 807	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 808			struct tcm_loop_tpg, tl_se_tpg);
 809	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 810	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
 811	int ret;
 812	/*
 813	 * Shutdown the active I_T nexus if 'NULL' is passed..
 814	 */
 815	if (!strncmp(page, "NULL", 4)) {
 816		ret = tcm_loop_drop_nexus(tl_tpg);
 817		return (!ret) ? count : ret;
 818	}
 819	/*
 820	 * Otherwise make sure the passed virtual Initiator port WWN matches
 821	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
 822	 * tcm_loop_make_nexus()
 823	 */
 824	if (strlen(page) >= TL_WWN_ADDR_LEN) {
  825		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
 826		       page, TL_WWN_ADDR_LEN);
 827		return -EINVAL;
 828	}
 829	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
 830
 831	ptr = strstr(i_port, "naa.");
 832	if (ptr) {
 833		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
 834			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
 835			       i_port, tcm_loop_dump_proto_id(tl_hba));
 836			return -EINVAL;
 837		}
 838		port_ptr = &i_port[0];
 839		goto check_newline;
 840	}
 841	ptr = strstr(i_port, "fc.");
 842	if (ptr) {
 843		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
 844			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
 845			       i_port, tcm_loop_dump_proto_id(tl_hba));
 846			return -EINVAL;
 847		}
 848		port_ptr = &i_port[3]; /* Skip over "fc." */
 849		goto check_newline;
 850	}
 851	ptr = strstr(i_port, "iqn.");
 852	if (ptr) {
 853		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
 854			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
 855			       i_port, tcm_loop_dump_proto_id(tl_hba));
 856			return -EINVAL;
 857		}
 858		port_ptr = &i_port[0];
 859		goto check_newline;
 860	}
 861	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
 862	       i_port);
 863	return -EINVAL;
 864	/*
 865	 * Clear any trailing newline for the NAA WWN
 866	 */
 867check_newline:
 868	if (i_port[strlen(i_port)-1] == '\n')
 869		i_port[strlen(i_port)-1] = '\0';
 870
 871	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
 872	if (ret < 0)
 873		return ret;
 874
 875	return count;
 876}
 877
 878static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
 879		char *page)
 880{
 881	struct se_portal_group *se_tpg = to_tpg(item);
 882	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 883			struct tcm_loop_tpg, tl_se_tpg);
 884	const char *status = NULL;
 885	ssize_t ret = -EINVAL;
 886
 887	switch (tl_tpg->tl_transport_status) {
 888	case TCM_TRANSPORT_ONLINE:
 889		status = "online";
 890		break;
 891	case TCM_TRANSPORT_OFFLINE:
 892		status = "offline";
 893		break;
 894	default:
 895		break;
 896	}
 897
 898	if (status)
 899		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
 900
 901	return ret;
 902}
 903
 904static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
 905		const char *page, size_t count)
 906{
 907	struct se_portal_group *se_tpg = to_tpg(item);
 908	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 909			struct tcm_loop_tpg, tl_se_tpg);
 910
 911	if (!strncmp(page, "online", 6)) {
 912		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 913		return count;
 914	}
 915	if (!strncmp(page, "offline", 7)) {
 916		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
 917		if (tl_tpg->tl_nexus) {
 918			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
 919
 920			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
 921		}
 922		return count;
 923	}
 924	return -EINVAL;
 925}
 926
 927static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
 928					 char *page)
 929{
 930	struct se_portal_group *se_tpg = to_tpg(item);
 931	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 932			struct tcm_loop_tpg, tl_se_tpg);
 933	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 934
 935	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
 936			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
 937}
 938
 939CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
 940CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
 941CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
 942
 943static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
 944	&tcm_loop_tpg_attr_nexus,
 945	&tcm_loop_tpg_attr_transport_status,
 946	&tcm_loop_tpg_attr_address,
 947	NULL,
 948};
 949
 950/* Start items for tcm_loop_naa_cit */
 951
 952static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
 953						     const char *name)
 954{
 955	struct tcm_loop_hba *tl_hba = container_of(wwn,
 956			struct tcm_loop_hba, tl_hba_wwn);
 957	struct tcm_loop_tpg *tl_tpg;
 958	int ret;
 959	unsigned long tpgt;
 960
 961	if (strstr(name, "tpgt_") != name) {
 962		pr_err("Unable to locate \"tpgt_#\" directory group\n");
 963		return ERR_PTR(-EINVAL);
 964	}
 965	if (kstrtoul(name+5, 10, &tpgt))
 966		return ERR_PTR(-EINVAL);
 967
 968	if (tpgt >= TL_TPGS_PER_HBA) {
 969		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
 970		       tpgt, TL_TPGS_PER_HBA);
 971		return ERR_PTR(-EINVAL);
 972	}
 973	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
 974	tl_tpg->tl_hba = tl_hba;
 975	tl_tpg->tl_tpgt = tpgt;
 976	/*
  977	 * Register the tl_tpg as an emulated TCM Target Endpoint
 978	 */
 979	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
 980	if (ret < 0)
 981		return ERR_PTR(-ENOMEM);
 982
 983	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
 984		 tcm_loop_dump_proto_id(tl_hba),
 985		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
 986	return &tl_tpg->tl_se_tpg;
 987}
 988
 989static void tcm_loop_drop_naa_tpg(
 990	struct se_portal_group *se_tpg)
 991{
 992	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
 993	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 994				struct tcm_loop_tpg, tl_se_tpg);
 995	struct tcm_loop_hba *tl_hba;
 996	unsigned short tpgt;
 997
 998	tl_hba = tl_tpg->tl_hba;
 999	tpgt = tl_tpg->tl_tpgt;
1000	/*
1001	 * Release the I_T Nexus for the Virtual target link if present
1002	 */
1003	tcm_loop_drop_nexus(tl_tpg);
1004	/*
 1005	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
1006	 */
1007	core_tpg_deregister(se_tpg);
1008
1009	tl_tpg->tl_hba = NULL;
1010	tl_tpg->tl_tpgt = 0;
1011
1012	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
1013		 tcm_loop_dump_proto_id(tl_hba),
1014		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1015}
1016
1017/* End items for tcm_loop_naa_cit */
1018
1019/* Start items for tcm_loop_cit */
1020
1021static struct se_wwn *tcm_loop_make_scsi_hba(
1022	struct target_fabric_configfs *tf,
1023	struct config_group *group,
1024	const char *name)
1025{
1026	struct tcm_loop_hba *tl_hba;
1027	struct Scsi_Host *sh;
1028	char *ptr;
1029	int ret, off = 0;
1030
1031	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
1032	if (!tl_hba)
1033		return ERR_PTR(-ENOMEM);
1034
1035	/*
1036	 * Determine the emulated Protocol Identifier and Target Port Name
1037	 * based on the incoming configfs directory name.
1038	 */
1039	ptr = strstr(name, "naa.");
1040	if (ptr) {
1041		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1042		goto check_len;
1043	}
1044	ptr = strstr(name, "fc.");
1045	if (ptr) {
1046		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1047		off = 3; /* Skip over "fc." */
1048		goto check_len;
1049	}
1050	ptr = strstr(name, "iqn.");
1051	if (!ptr) {
1052		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
1053		       name);
1054		ret = -EINVAL;
1055		goto out;
1056	}
1057	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1058
1059check_len:
1060	if (strlen(name) >= TL_WWN_ADDR_LEN) {
1061		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
1062		       name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN);
1063		ret = -EINVAL;
1064		goto out;
1065	}
1066	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1067
1068	/*
1069	 * Call device_register(tl_hba->dev) to register the emulated
1070	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
1071	 * device_register() callbacks in tcm_loop_driver_probe()
1072	 */
1073	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1074	if (ret)
1075		goto out;
1076
1077	sh = tl_hba->sh;
1078	tcm_loop_hba_no_cnt++;
1079	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
1080		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1081	return &tl_hba->tl_hba_wwn;
1082out:
1083	kfree(tl_hba);
1084	return ERR_PTR(ret);
1085}
1086
1087static void tcm_loop_drop_scsi_hba(
1088	struct se_wwn *wwn)
1089{
1090	struct tcm_loop_hba *tl_hba = container_of(wwn,
1091				struct tcm_loop_hba, tl_hba_wwn);
1092
1093	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
1094		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1095		 tl_hba->sh->host_no);
1096	/*
1097	 * Call device_unregister() on the original tl_hba->dev.
1098	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
1099	 * release *tl_hba;
1100	 */
1101	device_unregister(&tl_hba->dev);
1102}
1103
1104/* Start items for tcm_loop_cit */
1105static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1106{
1107	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1108}
1109
1110CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
1111
1112static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1113	&tcm_loop_wwn_attr_version,
1114	NULL,
1115};
1116
1117/* End items for tcm_loop_cit */
1118
1119static const struct target_core_fabric_ops loop_ops = {
1120	.module				= THIS_MODULE,
1121	.fabric_name			= "loopback",
1122	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
1123	.tpg_get_tag			= tcm_loop_get_tag,
1124	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
1125	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
1126	.tpg_check_demo_mode_write_protect =
1127				tcm_loop_check_demo_mode_write_protect,
1128	.tpg_check_prod_mode_write_protect =
1129				tcm_loop_check_prod_mode_write_protect,
1130	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
1131	.tpg_get_inst_index		= tcm_loop_get_inst_index,
1132	.check_stop_free		= tcm_loop_check_stop_free,
1133	.release_cmd			= tcm_loop_release_cmd,
1134	.sess_get_index			= tcm_loop_sess_get_index,
1135	.write_pending			= tcm_loop_write_pending,
1136	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
1137	.get_cmd_state			= tcm_loop_get_cmd_state,
1138	.queue_data_in			= tcm_loop_queue_data_in,
1139	.queue_status			= tcm_loop_queue_status,
1140	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
1141	.aborted_task			= tcm_loop_aborted_task,
1142	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
1143	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
1144	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
1145	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
1146	.fabric_post_link		= tcm_loop_port_link,
1147	.fabric_pre_unlink		= tcm_loop_port_unlink,
1148	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
1149	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
1150	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
1151};
1152
1153static int __init tcm_loop_fabric_init(void)
1154{
1155	int ret = -ENOMEM;
1156
1157	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1158	if (!tcm_loop_workqueue)
1159		goto out;
1160
1161	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1162				sizeof(struct tcm_loop_cmd),
1163				__alignof__(struct tcm_loop_cmd),
1164				0, NULL);
1165	if (!tcm_loop_cmd_cache) {
1166		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
1167		goto out_destroy_workqueue;
1168	}
1169
1170	ret = tcm_loop_alloc_core_bus();
1171	if (ret)
1172		goto out_destroy_cache;
1173
1174	ret = target_register_template(&loop_ops);
1175	if (ret)
1176		goto out_release_core_bus;
1177
1178	return 0;
1179
1180out_release_core_bus:
1181	tcm_loop_release_core_bus();
1182out_destroy_cache:
1183	kmem_cache_destroy(tcm_loop_cmd_cache);
1184out_destroy_workqueue:
1185	destroy_workqueue(tcm_loop_workqueue);
1186out:
1187	return ret;
1188}
1189
1190static void __exit tcm_loop_fabric_exit(void)
1191{
1192	target_unregister_template(&loop_ops);
1193	tcm_loop_release_core_bus();
1194	kmem_cache_destroy(tcm_loop_cmd_cache);
1195	destroy_workqueue(tcm_loop_workqueue);
1196}
1197
1198MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1199MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1200MODULE_LICENSE("GPL");
1201module_init(tcm_loop_fabric_init);
1202module_exit(tcm_loop_fabric_exit);
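
A note on the container_of() idiom used throughout the driver (to_tcm_loop_hba(), tcm_loop_release_cmd(), tl_tpg(), and friends): target core hands each fabric callback only the embedded target-core structure, and the loopback code maps that pointer back to its private tcm_loop_* wrapper before touching the SCSI midlayer objects. Below is a minimal, self-contained userspace sketch of that idiom; the struct and field names here are invented for illustration and are not the real kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative wrapper: a fabric-private command embedding a core
 * descriptor, mirroring how struct tcm_loop_cmd embeds struct se_cmd.
 * The fields are placeholders, not the real kernel layout. */
struct se_cmd_stub {
	int tag;
};

struct loop_cmd_stub {
	int sc_cmd_state;
	struct se_cmd_stub tl_se_cmd;	/* embedded core structure */
};

/* Callback that receives only the embedded member, like ->release_cmd(). */
static void release_cmd_stub(struct se_cmd_stub *se_cmd)
{
	struct loop_cmd_stub *tl_cmd =
		container_of(se_cmd, struct loop_cmd_stub, tl_se_cmd);

	printf("recovered wrapper: state=%d tag=%d\n",
	       tl_cmd->sc_cmd_state, se_cmd->tag);
}

int main(void)
{
	struct loop_cmd_stub tl_cmd = {
		.sc_cmd_state = 1,
		.tl_se_cmd = { .tag = 7 },
	};

	/* The core layer would pass back only &tl_cmd.tl_se_cmd. */
	release_cmd_stub(&tl_cmd.tl_se_cmd);
	return 0;
}

The same recovery step appears in nearly every fabric op above: container_of() subtracts the member offset from the embedded pointer, so the wrapper needs no extra back-pointer to its private state.
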
v4.6
   1/*******************************************************************************
   2 *
   3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
   4 * for emulated SAS initiator ports
   5 *
   6 * © Copyright 2011-2013 Datera, Inc.
   7 *
   8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   9 *
  10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 ****************************************************************************/
  22
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/init.h>
  26#include <linux/slab.h>
  27#include <linux/types.h>
  28#include <linux/configfs.h>
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_tcq.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_cmnd.h>
  34
  35#include <target/target_core_base.h>
  36#include <target/target_core_fabric.h>
  37
  38#include "tcm_loop.h"
  39
  40#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
  41
  42static struct workqueue_struct *tcm_loop_workqueue;
  43static struct kmem_cache *tcm_loop_cmd_cache;
  44
  45static int tcm_loop_hba_no_cnt;
  46
  47static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  48
  49/*
  50 * Called from struct target_core_fabric_ops->check_stop_free()
  51 */
  52static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
  53{
  54	/*
  55	 * Do not release struct se_cmd's containing a valid TMR
  56	 * pointer.  These will be released directly in tcm_loop_device_reset()
  57	 * with transport_generic_free_cmd().
  58	 */
  59	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
  60		return 0;
  61	/*
  62	 * Release the struct se_cmd, which will make a callback to release
  63	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
  64	 */
  65	transport_generic_free_cmd(se_cmd, 0);
  66	return 1;
  67}
  68
  69static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
  70{
  71	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
  72				struct tcm_loop_cmd, tl_se_cmd);
  73
  74	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  75}
  76
  77static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
  78{
  79	seq_printf(m, "tcm_loop_proc_info()\n");
  80	return 0;
  81}
  82
  83static int tcm_loop_driver_probe(struct device *);
  84static int tcm_loop_driver_remove(struct device *);
  85
  86static int pseudo_lld_bus_match(struct device *dev,
  87				struct device_driver *dev_driver)
  88{
  89	return 1;
  90}
  91
  92static struct bus_type tcm_loop_lld_bus = {
  93	.name			= "tcm_loop_bus",
  94	.match			= pseudo_lld_bus_match,
  95	.probe			= tcm_loop_driver_probe,
  96	.remove			= tcm_loop_driver_remove,
  97};
  98
  99static struct device_driver tcm_loop_driverfs = {
 100	.name			= "tcm_loop",
 101	.bus			= &tcm_loop_lld_bus,
 102};
 103/*
 104 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 105 */
 106static struct device *tcm_loop_primary;
 107
 108static void tcm_loop_submission_work(struct work_struct *work)
 109{
 110	struct tcm_loop_cmd *tl_cmd =
 111		container_of(work, struct tcm_loop_cmd, work);
 112	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
 113	struct scsi_cmnd *sc = tl_cmd->sc;
 114	struct tcm_loop_nexus *tl_nexus;
 115	struct tcm_loop_hba *tl_hba;
 116	struct tcm_loop_tpg *tl_tpg;
 117	struct scatterlist *sgl_bidi = NULL;
 118	u32 sgl_bidi_count = 0, transfer_length;
 119	int rc;
 120
 121	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 122	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 123
 124	/*
 125	 * Ensure that this tl_tpg reference from the incoming sc->device->id
 126	 * has already been configured via tcm_loop_make_naa_tpg().
 127	 */
 128	if (!tl_tpg->tl_hba) {
 129		set_host_byte(sc, DID_NO_CONNECT);
 130		goto out_done;
 131	}
 132	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
 133		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
 134		goto out_done;
 135	}
 136	tl_nexus = tl_tpg->tl_nexus;
 137	if (!tl_nexus) {
 138		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
 139				" does not exist\n");
 140		set_host_byte(sc, DID_ERROR);
 141		goto out_done;
 142	}
 143	if (scsi_bidi_cmnd(sc)) {
 144		struct scsi_data_buffer *sdb = scsi_in(sc);
 145
 146		sgl_bidi = sdb->table.sgl;
 147		sgl_bidi_count = sdb->table.nents;
 148		se_cmd->se_cmd_flags |= SCF_BIDI;
 149
 150	}
 151
 152	transfer_length = scsi_transfer_length(sc);
 153	if (!scsi_prot_sg_count(sc) &&
 154	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
 155		se_cmd->prot_pto = true;
 156		/*
 157		 * loopback transport doesn't support
 158		 * WRITE_GENERATE, READ_STRIP protection
 159		 * information operations, go ahead unprotected.
 160		 */
 161		transfer_length = scsi_bufflen(sc);
 162	}
 163
 164	se_cmd->tag = tl_cmd->sc_cmd_tag;
 165	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 166			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
 167			transfer_length, TCM_SIMPLE_TAG,
 168			sc->sc_data_direction, 0,
 169			scsi_sglist(sc), scsi_sg_count(sc),
 170			sgl_bidi, sgl_bidi_count,
 171			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
 172	if (rc < 0) {
 173		set_host_byte(sc, DID_NO_CONNECT);
 174		goto out_done;
 175	}
 176	return;
 177
 178out_done:
 179	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 180	sc->scsi_done(sc);
 181	return;
 182}
 183
 184/*
 185 * ->queuecommand can be and usually is called from interrupt context, so
 186 * defer the actual submission to a workqueue.
 187 */
 188static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 189{
 190	struct tcm_loop_cmd *tl_cmd;
 191
 192	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
 193		" scsi_buf_len: %u\n", sc->device->host->host_no,
 194		sc->device->id, sc->device->channel, sc->device->lun,
 195		sc->cmnd[0], scsi_bufflen(sc));
 196
 197	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 198	if (!tl_cmd) {
 199		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 200		set_host_byte(sc, DID_ERROR);
 201		sc->scsi_done(sc);
 202		return 0;
 203	}
 204
 205	tl_cmd->sc = sc;
 206	tl_cmd->sc_cmd_tag = sc->request->tag;
 207	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
 208	queue_work(tcm_loop_workqueue, &tl_cmd->work);
 209	return 0;
 210}
 211
 212/*
 213 * Called from SCSI EH process context to issue a LUN_RESET TMR
 214 * to struct scsi_device
 215 */
 216static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 217			      u64 lun, int task, enum tcm_tmreq_table tmr)
 218{
 219	struct se_cmd *se_cmd = NULL;
 220	struct se_session *se_sess;
 221	struct se_portal_group *se_tpg;
 222	struct tcm_loop_nexus *tl_nexus;
 223	struct tcm_loop_cmd *tl_cmd = NULL;
 224	struct tcm_loop_tmr *tl_tmr = NULL;
 225	int ret = TMR_FUNCTION_FAILED, rc;
 226
 227	/*
 228	 * Locate the tl_nexus and se_sess pointers
 229	 */
 230	tl_nexus = tl_tpg->tl_nexus;
 231	if (!tl_nexus) {
 232		pr_err("Unable to perform device reset without"
 233				" active I_T Nexus\n");
 234		return ret;
 235	}
 236
 237	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 238	if (!tl_cmd) {
 239		pr_err("Unable to allocate memory for tl_cmd\n");
 240		return ret;
 241	}
 242
 243	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 244	if (!tl_tmr) {
 245		pr_err("Unable to allocate memory for tl_tmr\n");
 246		goto release;
 247	}
 248	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
 249
 250	se_cmd = &tl_cmd->tl_se_cmd;
 251	se_tpg = &tl_tpg->tl_se_tpg;
 252	se_sess = tl_tpg->tl_nexus->se_sess;
 253	/*
 254	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
 255	 */
 256	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
 257				DMA_NONE, TCM_SIMPLE_TAG,
 258				&tl_cmd->tl_sense_buf[0]);
 259
 260	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
 261	if (rc < 0)
 262		goto release;
 263
 264	if (tmr == TMR_ABORT_TASK)
 265		se_cmd->se_tmr_req->ref_task_tag = task;
 266
 267	/*
 268	 * Locate the underlying TCM struct se_lun
 269	 */
 270	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
 271		ret = TMR_LUN_DOES_NOT_EXIST;
 272		goto release;
 273	}
 274	/*
 275	 * Queue the TMR to TCM Core and sleep waiting for
 276	 * tcm_loop_queue_tm_rsp() to wake us up.
 277	 */
 278	transport_generic_handle_tmr(se_cmd);
 279	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
 280	/*
 281	 * The TMR LUN_RESET has completed, check the response status and
 282	 * then release allocations.
 283	 */
 284	ret = se_cmd->se_tmr_req->response;
 285release:
 286	if (se_cmd)
 287		transport_generic_free_cmd(se_cmd, 1);
 288	else
 289		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 290	kfree(tl_tmr);
 291	return ret;
 292}
 293
 294static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 295{
 296	struct tcm_loop_hba *tl_hba;
 297	struct tcm_loop_tpg *tl_tpg;
 298	int ret = FAILED;
 299
 300	/*
 301	 * Locate the tcm_loop_hba_t pointer
 302	 */
 303	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 304	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 305	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 306				 sc->request->tag, TMR_ABORT_TASK);
 307	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 308}
 309
 310/*
 311 * Called from SCSI EH process context to issue a LUN_RESET TMR
 312 * to struct scsi_device
 313 */
 314static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 315{
 316	struct tcm_loop_hba *tl_hba;
 317	struct tcm_loop_tpg *tl_tpg;
 318	int ret = FAILED;
 319
 320	/*
 321	 * Locate the tcm_loop_hba_t pointer
 322	 */
 323	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 324	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 325
 326	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 327				 0, TMR_LUN_RESET);
 328	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 329}
 330
 331static int tcm_loop_target_reset(struct scsi_cmnd *sc)
 332{
 333	struct tcm_loop_hba *tl_hba;
 334	struct tcm_loop_tpg *tl_tpg;
 335
 336	/*
 337	 * Locate the tcm_loop_hba_t pointer
 338	 */
 339	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 340	if (!tl_hba) {
 341		pr_err("Unable to perform device reset without"
 342				" active I_T Nexus\n");
 343		return FAILED;
 344	}
 345	/*
 346	 * Locate the tl_tpg pointer from TargetID in sc->device->id
 347	 */
 348	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 349	if (tl_tpg) {
 350		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 351		return SUCCESS;
 352	}
 353	return FAILED;
 354}
 355
 356static int tcm_loop_slave_alloc(struct scsi_device *sd)
 357{
 358	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
 359	return 0;
 360}
 361
 362static struct scsi_host_template tcm_loop_driver_template = {
 363	.show_info		= tcm_loop_show_info,
 364	.proc_name		= "tcm_loopback",
 365	.name			= "TCM_Loopback",
 366	.queuecommand		= tcm_loop_queuecommand,
 367	.change_queue_depth	= scsi_change_queue_depth,
 368	.eh_abort_handler = tcm_loop_abort_task,
 369	.eh_device_reset_handler = tcm_loop_device_reset,
 370	.eh_target_reset_handler = tcm_loop_target_reset,
 371	.can_queue		= 1024,
 372	.this_id		= -1,
 373	.sg_tablesize		= 256,
 374	.cmd_per_lun		= 1024,
 375	.max_sectors		= 0xFFFF,
 376	.use_clustering		= DISABLE_CLUSTERING,
 377	.slave_alloc		= tcm_loop_slave_alloc,
 378	.module			= THIS_MODULE,
 379	.track_queue_depth	= 1,
 380};
 381
 382static int tcm_loop_driver_probe(struct device *dev)
 383{
 384	struct tcm_loop_hba *tl_hba;
 385	struct Scsi_Host *sh;
 386	int error, host_prot;
 387
 388	tl_hba = to_tcm_loop_hba(dev);
 389
 390	sh = scsi_host_alloc(&tcm_loop_driver_template,
 391			sizeof(struct tcm_loop_hba));
 392	if (!sh) {
 393		pr_err("Unable to allocate struct scsi_host\n");
 394		return -ENODEV;
 395	}
 396	tl_hba->sh = sh;
 397
 398	/*
 399	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
 400	 */
 401	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
 402	/*
 403	 * Setup single ID, Channel and LUN for now..
 404	 */
 405	sh->max_id = 2;
 406	sh->max_lun = 0;
 407	sh->max_channel = 0;
 408	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 409
 410	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
 411		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
 412		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
 413
 414	scsi_host_set_prot(sh, host_prot);
 415	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
 416
 417	error = scsi_add_host(sh, &tl_hba->dev);
 418	if (error) {
 419		pr_err("%s: scsi_add_host failed\n", __func__);
 420		scsi_host_put(sh);
 421		return -ENODEV;
 422	}
 423	return 0;
 424}
 425
 426static int tcm_loop_driver_remove(struct device *dev)
 427{
 428	struct tcm_loop_hba *tl_hba;
 429	struct Scsi_Host *sh;
 430
 431	tl_hba = to_tcm_loop_hba(dev);
 432	sh = tl_hba->sh;
 433
 434	scsi_remove_host(sh);
 435	scsi_host_put(sh);
 436	return 0;
 437}
 438
 439static void tcm_loop_release_adapter(struct device *dev)
 440{
 441	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
 442
 443	kfree(tl_hba);
 444}
 445
 446/*
 447 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
 448 */
 449static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
 450{
 451	int ret;
 452
 453	tl_hba->dev.bus = &tcm_loop_lld_bus;
 454	tl_hba->dev.parent = tcm_loop_primary;
 455	tl_hba->dev.release = &tcm_loop_release_adapter;
 456	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
 457
 458	ret = device_register(&tl_hba->dev);
 459	if (ret) {
 460		pr_err("device_register() failed for"
 461				" tl_hba->dev: %d\n", ret);
 462		return -ENODEV;
 463	}
 464
 465	return 0;
 466}
 467
 468/*
  469 * Called from tcm_loop_fabric_init() in tcm_loop_fabric.c to load the emulated
 470 * tcm_loop SCSI bus.
 471 */
 472static int tcm_loop_alloc_core_bus(void)
 473{
 474	int ret;
 475
 476	tcm_loop_primary = root_device_register("tcm_loop_0");
 477	if (IS_ERR(tcm_loop_primary)) {
 478		pr_err("Unable to allocate tcm_loop_primary\n");
 479		return PTR_ERR(tcm_loop_primary);
 480	}
 481
 482	ret = bus_register(&tcm_loop_lld_bus);
 483	if (ret) {
 484		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 485		goto dev_unreg;
 486	}
 487
 488	ret = driver_register(&tcm_loop_driverfs);
 489	if (ret) {
 490		pr_err("driver_register() failed for"
  491				" tcm_loop_driverfs\n");
 492		goto bus_unreg;
 493	}
 494
 495	pr_debug("Initialized TCM Loop Core Bus\n");
 496	return ret;
 497
 498bus_unreg:
 499	bus_unregister(&tcm_loop_lld_bus);
 500dev_unreg:
 501	root_device_unregister(tcm_loop_primary);
 502	return ret;
 503}
 504
 505static void tcm_loop_release_core_bus(void)
 506{
 507	driver_unregister(&tcm_loop_driverfs);
 508	bus_unregister(&tcm_loop_lld_bus);
 509	root_device_unregister(tcm_loop_primary);
 510
 511	pr_debug("Releasing TCM Loop Core BUS\n");
 512}
 513
 514static char *tcm_loop_get_fabric_name(void)
 515{
 516	return "loopback";
 517}
 518
 519static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
 520{
 521	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 522}
 523
 524static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 525{
 526	/*
 527	 * Return the passed NAA identifier for the Target Port
 528	 */
 529	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
 530}
 531
 532static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 533{
 534	/*
 535	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
 536	 * to represent the SCSI Target Port.
 537	 */
 538	return tl_tpg(se_tpg)->tl_tpgt;
 539}
 540
 541/*
 542 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 543 * based upon the incoming fabric dependent SCSI Initiator Port
 544 */
 545static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
 546{
 547	return 1;
 548}
 549
 550static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
 551{
 552	return 0;
 553}
 554
 555/*
  556 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 557 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 558 */
 559static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
 560{
 561	return 0;
 562}
 563
 564/*
  565 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 566 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 567 * It has been added here as a nop for target_fabric_tf_ops_check()
 568 */
 569static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
 570{
 571	return 0;
 572}
 573
 574static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
 575{
 576	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 577						   tl_se_tpg);
 578	return tl_tpg->tl_fabric_prot_type;
 579}
 580
 581static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 582{
 583	return 1;
 584}
 585
 586static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
 587{
 588	return 1;
 589}
 590
 591static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
 592{
 593	return;
 594}
 595
 596static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
 597{
 598	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 599			struct tcm_loop_cmd, tl_se_cmd);
 600
 601	return tl_cmd->sc_cmd_state;
 602}
 603
 604static int tcm_loop_shutdown_session(struct se_session *se_sess)
 605{
 606	return 0;
 607}
 608
 609static void tcm_loop_close_session(struct se_session *se_sess)
 610{
 611	return;
 612};
 613
 614static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 615{
 616	/*
 617	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
 618	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
 619	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
 620	 * format with transport_generic_map_mem_to_cmd().
 621	 *
 622	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 623	 * object execution queue.
 624	 */
 625	target_execute_cmd(se_cmd);
 626	return 0;
 627}
 628
 629static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
 630{
 631	return 0;
 632}
 633
 634static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 635{
 636	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 637				struct tcm_loop_cmd, tl_se_cmd);
 638	struct scsi_cmnd *sc = tl_cmd->sc;
 639
 640	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 641		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
 642
 643	sc->result = SAM_STAT_GOOD;
 644	set_host_byte(sc, DID_OK);
 645	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 646	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 647		scsi_set_resid(sc, se_cmd->residual_count);
 648	sc->scsi_done(sc);
 649	return 0;
 650}
 651
 652static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 653{
 654	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 655				struct tcm_loop_cmd, tl_se_cmd);
 656	struct scsi_cmnd *sc = tl_cmd->sc;
 657
 658	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 659			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 660
 661	if (se_cmd->sense_buffer &&
 662	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 663	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 664
 665		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 666				SCSI_SENSE_BUFFERSIZE);
 667		sc->result = SAM_STAT_CHECK_CONDITION;
 668		set_driver_byte(sc, DRIVER_SENSE);
 669	} else {
 670		sc->result = se_cmd->scsi_status;
	}
 671
 672	set_host_byte(sc, DID_OK);
 673	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 674	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 675		scsi_set_resid(sc, se_cmd->residual_count);
 676	sc->scsi_done(sc);
 677	return 0;
 678}
 679
 680static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 681{
 682	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 683	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
 684	/*
 685	 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, so go
 686	 * ahead and wake up the wait_queue_head_t in tcm_loop_device_reset()
 687	 */
 688	atomic_set(&tl_tmr->tmr_complete, 1);
 689	wake_up(&tl_tmr->tl_tmr_wait);
 690}
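
/*
 * For reference, the waiting side in tcm_loop_device_reset() (not part of
 * this hunk) is expected to block on the same wait queue; a minimal sketch,
 * assuming the struct tcm_loop_tmr layout used above, would be:
 *
 *	wait_event(tl_tmr->tl_tmr_wait,
 *		   atomic_read(&tl_tmr->tmr_complete));
 *
 * so the SCSI error handler sleeps until this handler fires.
 */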
 691
 692static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
 693{
 694}
 696
 697static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 698{
 699	switch (tl_hba->tl_proto_id) {
 700	case SCSI_PROTOCOL_SAS:
 701		return "SAS";
 702	case SCSI_PROTOCOL_FCP:
 703		return "FCP";
 704	case SCSI_PROTOCOL_ISCSI:
 705		return "iSCSI";
 706	default:
 707		break;
 708	}
 709
 710	return "Unknown";
 711}
 712
 713/* Start items for tcm_loop_port_cit */
 714
 715static int tcm_loop_port_link(
 716	struct se_portal_group *se_tpg,
 717	struct se_lun *lun)
 718{
 719	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 720				struct tcm_loop_tpg, tl_se_tpg);
 721	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 722
 723	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
 724	/*
 725	 * Add Linux/SCSI struct scsi_device by HCTL
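	 * (Host:Channel:Target:LUN): the channel is always 0, the target ID is
	 * the TPG tag and the LUN is the se_lun's unpacked_lun, so the device
	 * shows up at <host_no>:0:<tl_tpgt>:<unpacked_lun>.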
 726	 */
 727	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 728
 729	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 730	return 0;
 731}
 732
 733static void tcm_loop_port_unlink(
 734	struct se_portal_group *se_tpg,
 735	struct se_lun *se_lun)
 736{
 737	struct scsi_device *sd;
 738	struct tcm_loop_hba *tl_hba;
 739	struct tcm_loop_tpg *tl_tpg;
 740
 741	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 742	tl_hba = tl_tpg->tl_hba;
 743
 744	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 745				se_lun->unpacked_lun);
 746	if (!sd) {
 747		pr_err("Unable to locate struct scsi_device for %d:%d:"
 748			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 749		return;
 750	}
 751	/*
 752	 * Remove Linux/SCSI struct scsi_device by HCTL
 753	 */
 754	scsi_remove_device(sd);
 755	scsi_device_put(sd);
 756
 757	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
 758
 759	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 760}
 761
 762/* End items for tcm_loop_port_cit */
 763
 764static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
 765		struct config_item *item, char *page)
 766{
 767	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 768	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 769						   tl_se_tpg);
 770
 771	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
 772}
 773
 774static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
 775		struct config_item *item, const char *page, size_t count)
 776{
 777	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 778	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 779						   tl_se_tpg);
 780	unsigned long val;
 781	int ret = kstrtoul(page, 0, &val);
 782
 783	if (ret) {
 784		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
 785		return ret;
 786	}
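	/*
	 * Only 0 (no fabric protection), 1 and 3 are meaningful here; these
	 * correspond to the T10-PI DIF TYPE1/TYPE3 settings understood by the
	 * target core, which does not implement DIF TYPE2.
	 */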
 787	if (val != 0 && val != 1 && val != 3) {
 788		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
 789		return -EINVAL;
 790	}
 791	tl_tpg->tl_fabric_prot_type = val;
 792
 793	return count;
 794}
 795
 796CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
 797
 798static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 799	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
 800	NULL,
 801};
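
/*
 * The attribute above is exposed under the TPG's attrib/ group in configfs,
 * e.g. (mount point and WWN shown purely for illustration):
 *
 *   /sys/kernel/config/target/loopback/<naa wwn>/tpgt_<N>/attrib/fabric_prot_type
 */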
 802
 803/* Start items for tcm_loop_nexus_cit */
 804
 805static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
 806				  struct se_session *se_sess, void *p)
 807{
 808	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 809					struct tcm_loop_tpg, tl_se_tpg);
 810
 811	tl_tpg->tl_nexus = p;
 812	return 0;
 813}
 814
 815static int tcm_loop_make_nexus(
 816	struct tcm_loop_tpg *tl_tpg,
 817	const char *name)
 818{
 819	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 820	struct tcm_loop_nexus *tl_nexus;
 821	int ret;
 822
 823	if (tl_tpg->tl_nexus) {
 824		pr_debug("tl_tpg->tl_nexus already exists\n");
 825		return -EEXIST;
 826	}
 827
 828	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 829	if (!tl_nexus) {
 830		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 831		return -ENOMEM;
 832	}
 833
 834	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
 835					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
 836					name, tl_nexus, tcm_loop_alloc_sess_cb);
 837	if (IS_ERR(tl_nexus->se_sess)) {
 838		ret = PTR_ERR(tl_nexus->se_sess);
 839		kfree(tl_nexus);
 840		return ret;
 841	}
 842
 843	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 844		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 845		name);
 846	return 0;
 847}
 848
 849static int tcm_loop_drop_nexus(
 850	struct tcm_loop_tpg *tpg)
 851{
 852	struct se_session *se_sess;
 853	struct tcm_loop_nexus *tl_nexus;
 854
 855	tl_nexus = tpg->tl_nexus;
 856	if (!tl_nexus)
 857		return -ENODEV;
 858
 859	se_sess = tl_nexus->se_sess;
 860	if (!se_sess)
 861		return -ENODEV;
 862
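	/*
	 * tl_tpg_port_count is incremented in tcm_loop_port_link() and
	 * decremented in tcm_loop_port_unlink(); refuse to drop the nexus
	 * while any virtual SCSI port (LUN) is still linked to this TPG.
	 */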
 863	if (atomic_read(&tpg->tl_tpg_port_count)) {
 864		pr_err("Unable to remove TCM_Loop I_T Nexus with"
 865			" active TPG port count: %d\n",
 866			atomic_read(&tpg->tl_tpg_port_count));
 867		return -EPERM;
 868	}
 869
 870	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
 871		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
 872		tl_nexus->se_sess->se_node_acl->initiatorname);
 873	/*
 874	 * Release the SCSI I_T Nexus to the emulated Target Port
 875	 */
 876	transport_deregister_session(tl_nexus->se_sess);
 877	tpg->tl_nexus = NULL;
 878	kfree(tl_nexus);
 879	return 0;
 880}
 881
 882/* End items for tcm_loop_nexus_cit */
 883
 884static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
 885{
 886	struct se_portal_group *se_tpg = to_tpg(item);
 887	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 888			struct tcm_loop_tpg, tl_se_tpg);
 889	struct tcm_loop_nexus *tl_nexus;
 890	ssize_t ret;
 891
 892	tl_nexus = tl_tpg->tl_nexus;
 893	if (!tl_nexus)
 894		return -ENODEV;
 895
 896	ret = snprintf(page, PAGE_SIZE, "%s\n",
 897		tl_nexus->se_sess->se_node_acl->initiatorname);
 898
 899	return ret;
 900}
 901
 902static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
 903		const char *page, size_t count)
 904{
 905	struct se_portal_group *se_tpg = to_tpg(item);
 906	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 907			struct tcm_loop_tpg, tl_se_tpg);
 908	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 909	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
 910	int ret;
 911	/*
 912	 * Shutdown the active I_T nexus if 'NULL' is passed.
 913	 */
 914	if (!strncmp(page, "NULL", 4)) {
 915		ret = tcm_loop_drop_nexus(tl_tpg);
 916		return (!ret) ? count : ret;
 917	}
 918	/*
 919	 * Otherwise make sure the passed virtual Initiator port WWN matches
 920	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
 921	 * tcm_loop_make_nexus()
 922	 */
 923	if (strlen(page) >= TL_WWN_ADDR_LEN) {
 924		pr_err("Emulated NAA SAS Address: %s, exceeds"
 925				" max: %d\n", page, TL_WWN_ADDR_LEN);
 926		return -EINVAL;
 927	}
 928	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
 929
 930	ptr = strstr(i_port, "naa.");
 931	if (ptr) {
 932		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
 933			pr_err("Passed SAS Initiator Port %s does not"
 934				" match target port protoid: %s\n", i_port,
 935				tcm_loop_dump_proto_id(tl_hba));
 936			return -EINVAL;
 937		}
 938		port_ptr = &i_port[0];
 939		goto check_newline;
 940	}
 941	ptr = strstr(i_port, "fc.");
 942	if (ptr) {
 943		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
 944			pr_err("Passed FCP Initiator Port %s does not"
 945				" match target port protoid: %s\n", i_port,
 946				tcm_loop_dump_proto_id(tl_hba));
 947			return -EINVAL;
 948		}
 949		port_ptr = &i_port[3]; /* Skip over "fc." */
 950		goto check_newline;
 951	}
 952	ptr = strstr(i_port, "iqn.");
 953	if (ptr) {
 954		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
 955			pr_err("Passed iSCSI Initiator Port %s does not"
 956				" match target port protoid: %s\n", i_port,
 957				tcm_loop_dump_proto_id(tl_hba));
 958			return -EINVAL;
 959		}
 960		port_ptr = &i_port[0];
 961		goto check_newline;
 962	}
 963	pr_err("Unable to locate prefix for emulated Initiator Port:"
 964			" %s\n", i_port);
 965	return -EINVAL;
 966	/*
 967	 * Clear any trailing newline for the NAA WWN
 968	 */
 969check_newline:
 970	if (i_port[strlen(i_port)-1] == '\n')
 971		i_port[strlen(i_port)-1] = '\0';
 972
 973	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
 974	if (ret < 0)
 975		return ret;
 976
 977	return count;
 978}
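
/*
 * Example usage (the WWN is purely illustrative): writing
 * "naa.60014051234567890" into the TPG's nexus attribute creates the I_T
 * nexus via tcm_loop_make_nexus(), while writing "NULL" tears it back down
 * through tcm_loop_drop_nexus().
 */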
 979
 980static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
 981		char *page)
 982{
 983	struct se_portal_group *se_tpg = to_tpg(item);
 984	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 985			struct tcm_loop_tpg, tl_se_tpg);
 986	const char *status = NULL;
 987	ssize_t ret = -EINVAL;
 988
 989	switch (tl_tpg->tl_transport_status) {
 990	case TCM_TRANSPORT_ONLINE:
 991		status = "online";
 992		break;
 993	case TCM_TRANSPORT_OFFLINE:
 994		status = "offline";
 995		break;
 996	default:
 997		break;
 998	}
 999
1000	if (status)
1001		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
1002
1003	return ret;
1004}
1005
1006static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
1007		const char *page, size_t count)
1008{
1009	struct se_portal_group *se_tpg = to_tpg(item);
1010	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1011			struct tcm_loop_tpg, tl_se_tpg);
1012
1013	if (!strncmp(page, "online", 6)) {
1014		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1015		return count;
1016	}
1017	if (!strncmp(page, "offline", 7)) {
1018		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1019		if (tl_tpg->tl_nexus) {
1020			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
1021
1022			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
1023		}
1024		return count;
1025	}
1026	return -EINVAL;
1027}
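
/*
 * Writing "offline" posts a nexus-loss Unit Attention on any active session
 * via core_allocate_nexus_loss_ua() above, while "online" simply marks the
 * transport usable again; any other value is rejected with -EINVAL.
 */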
1028
1029static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
1030					 char *page)
1031{
1032	struct se_portal_group *se_tpg = to_tpg(item);
1033	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1034			struct tcm_loop_tpg, tl_se_tpg);
1035	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1036
1037	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
1038			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
1039}
1040
1041CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
1042CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
1043CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
1044
1045static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1046	&tcm_loop_tpg_attr_nexus,
1047	&tcm_loop_tpg_attr_transport_status,
1048	&tcm_loop_tpg_attr_address,
1049	NULL,
1050};
1051
1052/* Start items for tcm_loop_naa_cit */
1053
1054static struct se_portal_group *tcm_loop_make_naa_tpg(
1055	struct se_wwn *wwn,
1056	struct config_group *group,
1057	const char *name)
1058{
1059	struct tcm_loop_hba *tl_hba = container_of(wwn,
1060			struct tcm_loop_hba, tl_hba_wwn);
1061	struct tcm_loop_tpg *tl_tpg;
1062	int ret;
1063	unsigned long tpgt;
1064
1065	if (strstr(name, "tpgt_") != name) {
1066		pr_err("Unable to locate \"tpgt_#\" directory"
1067				" group\n");
1068		return ERR_PTR(-EINVAL);
1069	}
1070	if (kstrtoul(name+5, 10, &tpgt))
1071		return ERR_PTR(-EINVAL);
1072
1073	if (tpgt >= TL_TPGS_PER_HBA) {
1074		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
1075				" %u\n", tpgt, TL_TPGS_PER_HBA);
1076		return ERR_PTR(-EINVAL);
1077	}
1078	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1079	tl_tpg->tl_hba = tl_hba;
1080	tl_tpg->tl_tpgt = tpgt;
1081	/*
1082	 * Register the tl_tpg as an emulated TCM Target Endpoint
1083	 */
1084	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
1085	if (ret < 0)
1086		return ERR_PTR(ret);
1087
1088	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1089		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
1090		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1091
1092	return &tl_tpg->tl_se_tpg;
1093}
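
/*
 * The "name" handled above is the configfs directory created beneath the
 * fabric WWN, e.g. mkdir /sys/kernel/config/target/loopback/<wwn>/tpgt_1
 * (path shown only for illustration); the numeric suffix becomes tl_tpgt.
 */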
1094
1095static void tcm_loop_drop_naa_tpg(
1096	struct se_portal_group *se_tpg)
1097{
1098	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1099	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1100				struct tcm_loop_tpg, tl_se_tpg);
1101	struct tcm_loop_hba *tl_hba;
1102	unsigned short tpgt;
1103
1104	tl_hba = tl_tpg->tl_hba;
1105	tpgt = tl_tpg->tl_tpgt;
1106	/*
1107	 * Release the I_T Nexus for the Virtual target link if present
1108	 */
1109	tcm_loop_drop_nexus(tl_tpg);
1110	/*
1111	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
1112	 */
1113	core_tpg_deregister(se_tpg);
1114
1115	tl_tpg->tl_hba = NULL;
1116	tl_tpg->tl_tpgt = 0;
1117
1118	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1119		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1120		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1121}
1122
1123/* End items for tcm_loop_naa_cit */
1124
1125/* Start items for tcm_loop_cit */
1126
1127static struct se_wwn *tcm_loop_make_scsi_hba(
1128	struct target_fabric_configfs *tf,
1129	struct config_group *group,
1130	const char *name)
1131{
1132	struct tcm_loop_hba *tl_hba;
1133	struct Scsi_Host *sh;
1134	char *ptr;
1135	int ret, off = 0;
1136
1137	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1138	if (!tl_hba) {
1139		pr_err("Unable to allocate struct tcm_loop_hba\n");
1140		return ERR_PTR(-ENOMEM);
1141	}
1142	/*
1143	 * Determine the emulated Protocol Identifier and Target Port Name
1144	 * based on the incoming configfs directory name.
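	 *
	 * Accepted prefixes (the name bodies are illustrative only):
	 *   naa.<hex digits>  -> SCSI_PROTOCOL_SAS
	 *   fc.<wwpn>         -> SCSI_PROTOCOL_FCP  (the "fc." prefix is stripped)
	 *   iqn.<iqn string>  -> SCSI_PROTOCOL_ISCSI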
1145	 */
1146	ptr = strstr(name, "naa.");
1147	if (ptr) {
1148		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1149		goto check_len;
1150	}
1151	ptr = strstr(name, "fc.");
1152	if (ptr) {
1153		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1154		off = 3; /* Skip over "fc." */
1155		goto check_len;
1156	}
1157	ptr = strstr(name, "iqn.");
1158	if (!ptr) {
1159		pr_err("Unable to locate prefix for emulated Target "
1160				"Port: %s\n", name);
1161		ret = -EINVAL;
1162		goto out;
1163	}
1164	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1165
1166check_len:
1167	if (strlen(name) >= TL_WWN_ADDR_LEN) {
1168		pr_err("Emulated %s Address: %s, exceeds"
1169			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
1170			TL_WWN_ADDR_LEN);
1171		ret = -EINVAL;
1172		goto out;
1173	}
1174	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1175
1176	/*
1177	 * Call tcm_loop_setup_hba_bus() to device_register() the emulated
1178	 * Linux/SCSI LLD; the resulting tcm_loop_driver_probe() callback
1179	 * allocates the struct Scsi_Host that is stored at tl_hba->sh.
1180	 */
1181	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1182	if (ret)
1183		goto out;
1184
1185	sh = tl_hba->sh;
1186	tcm_loop_hba_no_cnt++;
1187	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1188		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1189		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1190
1191	return &tl_hba->tl_hba_wwn;
1192out:
1193	kfree(tl_hba);
1194	return ERR_PTR(ret);
1195}
1196
1197static void tcm_loop_drop_scsi_hba(
1198	struct se_wwn *wwn)
1199{
1200	struct tcm_loop_hba *tl_hba = container_of(wwn,
1201				struct tcm_loop_hba, tl_hba_wwn);
1202
1203	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1204		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1205		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1206		tl_hba->sh->host_no);
1207	/*
1208	 * Call device_unregister() on the original tl_hba->dev.
1209	 * The device release callback, tcm_loop_release_adapter(), will
1210	 * then free *tl_hba.
1211	 */
1212	device_unregister(&tl_hba->dev);
1213}
1214
1216static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1217{
1218	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1219}
1220
1221CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
1222
1223static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1224	&tcm_loop_wwn_attr_version,
1225	NULL,
1226};
1227
1228/* End items for tcm_loop_cit */
1229
1230static const struct target_core_fabric_ops loop_ops = {
1231	.module				= THIS_MODULE,
1232	.name				= "loopback",
1233	.get_fabric_name		= tcm_loop_get_fabric_name,
1234	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
1235	.tpg_get_tag			= tcm_loop_get_tag,
1236	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
1237	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
1238	.tpg_check_demo_mode_write_protect =
1239				tcm_loop_check_demo_mode_write_protect,
1240	.tpg_check_prod_mode_write_protect =
1241				tcm_loop_check_prod_mode_write_protect,
1242	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
1243	.tpg_get_inst_index		= tcm_loop_get_inst_index,
1244	.check_stop_free		= tcm_loop_check_stop_free,
1245	.release_cmd			= tcm_loop_release_cmd,
1246	.shutdown_session		= tcm_loop_shutdown_session,
1247	.close_session			= tcm_loop_close_session,
1248	.sess_get_index			= tcm_loop_sess_get_index,
1249	.write_pending			= tcm_loop_write_pending,
1250	.write_pending_status		= tcm_loop_write_pending_status,
1251	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
1252	.get_cmd_state			= tcm_loop_get_cmd_state,
1253	.queue_data_in			= tcm_loop_queue_data_in,
1254	.queue_status			= tcm_loop_queue_status,
1255	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
1256	.aborted_task			= tcm_loop_aborted_task,
1257	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
1258	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
1259	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
1260	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
1261	.fabric_post_link		= tcm_loop_port_link,
1262	.fabric_pre_unlink		= tcm_loop_port_unlink,
1263	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
1264	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
1265	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
1266};
1267
1268static int __init tcm_loop_fabric_init(void)
1269{
1270	int ret = -ENOMEM;
1271
1272	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1273	if (!tcm_loop_workqueue)
1274		goto out;
1275
1276	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1277				sizeof(struct tcm_loop_cmd),
1278				__alignof__(struct tcm_loop_cmd),
1279				0, NULL);
1280	if (!tcm_loop_cmd_cache) {
1281		pr_debug("kmem_cache_create() for"
1282			" tcm_loop_cmd_cache failed\n");
1283		goto out_destroy_workqueue;
1284	}
1285
1286	ret = tcm_loop_alloc_core_bus();
1287	if (ret)
1288		goto out_destroy_cache;
1289
1290	ret = target_register_template(&loop_ops);
1291	if (ret)
1292		goto out_release_core_bus;
1293
1294	return 0;
1295
1296out_release_core_bus:
1297	tcm_loop_release_core_bus();
1298out_destroy_cache:
1299	kmem_cache_destroy(tcm_loop_cmd_cache);
1300out_destroy_workqueue:
1301	destroy_workqueue(tcm_loop_workqueue);
1302out:
1303	return ret;
1304}
1305
1306static void __exit tcm_loop_fabric_exit(void)
1307{
1308	target_unregister_template(&loop_ops);
1309	tcm_loop_release_core_bus();
1310	kmem_cache_destroy(tcm_loop_cmd_cache);
1311	destroy_workqueue(tcm_loop_workqueue);
1312}
1313
1314MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1315MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1316MODULE_LICENSE("GPL");
1317module_init(tcm_loop_fabric_init);
1318module_exit(tcm_loop_fabric_exit);