drivers/target/loopback/tcm_loop.c (v4.17)
   1/*******************************************************************************
   2 *
   3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
   4 * for emulated SAS initiator ports
   5 *
   6 * © Copyright 2011-2013 Datera, Inc.
   7 *
   8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   9 *
  10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 ****************************************************************************/
  22
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/init.h>
  26#include <linux/slab.h>
  27#include <linux/types.h>
  28#include <linux/configfs.h>
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_tcq.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_cmnd.h>
  34
  35#include <target/target_core_base.h>
  36#include <target/target_core_fabric.h>
  37
  38#include "tcm_loop.h"
  39
  40#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
  41
  42static struct workqueue_struct *tcm_loop_workqueue;
  43static struct kmem_cache *tcm_loop_cmd_cache;
  44
  45static int tcm_loop_hba_no_cnt;
  46
  47static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  48
  49/*
  50 * Called from struct target_core_fabric_ops->check_stop_free()
  51 */
  52static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
  53{
   54	return transport_generic_free_cmd(se_cmd, 0);
  55}
  56
  57static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
  58{
  59	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
  60				struct tcm_loop_cmd, tl_se_cmd);
  61
  62	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  63}
  64
  65static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
  66{
  67	seq_puts(m, "tcm_loop_proc_info()\n");
  68	return 0;
  69}
  70
  71static int tcm_loop_driver_probe(struct device *);
  72static int tcm_loop_driver_remove(struct device *);
  73
  74static int pseudo_lld_bus_match(struct device *dev,
  75				struct device_driver *dev_driver)
  76{
  77	return 1;
  78}
  79
  80static struct bus_type tcm_loop_lld_bus = {
  81	.name			= "tcm_loop_bus",
  82	.match			= pseudo_lld_bus_match,
  83	.probe			= tcm_loop_driver_probe,
  84	.remove			= tcm_loop_driver_remove,
  85};
  86
  87static struct device_driver tcm_loop_driverfs = {
  88	.name			= "tcm_loop",
  89	.bus			= &tcm_loop_lld_bus,
  90};
  91/*
  92 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
  93 */
  94static struct device *tcm_loop_primary;
  95
  96static void tcm_loop_submission_work(struct work_struct *work)
  97{
  98	struct tcm_loop_cmd *tl_cmd =
  99		container_of(work, struct tcm_loop_cmd, work);
 100	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
 101	struct scsi_cmnd *sc = tl_cmd->sc;
 102	struct tcm_loop_nexus *tl_nexus;
 103	struct tcm_loop_hba *tl_hba;
 104	struct tcm_loop_tpg *tl_tpg;
 105	struct scatterlist *sgl_bidi = NULL;
 106	u32 sgl_bidi_count = 0, transfer_length;
 107	int rc;
 108
 109	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 110	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 111
 112	/*
 113	 * Ensure that this tl_tpg reference from the incoming sc->device->id
 114	 * has already been configured via tcm_loop_make_naa_tpg().
 115	 */
 116	if (!tl_tpg->tl_hba) {
 117		set_host_byte(sc, DID_NO_CONNECT);
 118		goto out_done;
 119	}
 120	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
 121		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
 122		goto out_done;
 123	}
 124	tl_nexus = tl_tpg->tl_nexus;
 125	if (!tl_nexus) {
 126		scmd_printk(KERN_ERR, sc,
 127			    "TCM_Loop I_T Nexus does not exist\n");
 128		set_host_byte(sc, DID_ERROR);
 129		goto out_done;
 130	}
 131	if (scsi_bidi_cmnd(sc)) {
 132		struct scsi_data_buffer *sdb = scsi_in(sc);
 133
 134		sgl_bidi = sdb->table.sgl;
 135		sgl_bidi_count = sdb->table.nents;
 136		se_cmd->se_cmd_flags |= SCF_BIDI;
 137
 138	}
 139
 140	transfer_length = scsi_transfer_length(sc);
 141	if (!scsi_prot_sg_count(sc) &&
 142	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
 143		se_cmd->prot_pto = true;
 144		/*
 145		 * loopback transport doesn't support
 146		 * WRITE_GENERATE, READ_STRIP protection
 147		 * information operations, go ahead unprotected.
 148		 */
 149		transfer_length = scsi_bufflen(sc);
 150	}
 151
 152	se_cmd->tag = tl_cmd->sc_cmd_tag;
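     	/*
     	 * Hand the SCSI command to target core, mapping the data, BIDI and
     	 * protection scatterlists directly from the struct scsi_cmnd.
     	 */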
 153	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 154			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
 155			transfer_length, TCM_SIMPLE_TAG,
 156			sc->sc_data_direction, 0,
 157			scsi_sglist(sc), scsi_sg_count(sc),
 158			sgl_bidi, sgl_bidi_count,
 159			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
 160	if (rc < 0) {
 161		set_host_byte(sc, DID_NO_CONNECT);
 162		goto out_done;
 163	}
 164	return;
 165
 166out_done:
 167	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  168	sc->scsi_done(sc);
 169}
 170
 171/*
 172 * ->queuecommand can be and usually is called from interrupt context, so
 173 * defer the actual submission to a workqueue.
 174 */
 175static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 176{
 177	struct tcm_loop_cmd *tl_cmd;
 178
 179	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
 180		 __func__, sc->device->host->host_no, sc->device->id,
 181		 sc->device->channel, sc->device->lun, sc->cmnd[0],
 182		 scsi_bufflen(sc));
 183
 184	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
  185	if (!tl_cmd) {
 186		set_host_byte(sc, DID_ERROR);
 187		sc->scsi_done(sc);
 188		return 0;
 189	}
 190
 191	tl_cmd->sc = sc;
 192	tl_cmd->sc_cmd_tag = sc->request->tag;
 193	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
 194	queue_work(tcm_loop_workqueue, &tl_cmd->work);
 195	return 0;
 196}
 197
 198/*
 199 * Called from SCSI EH process context to issue a LUN_RESET TMR
 200 * to struct scsi_device
 201 */
 202static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 203			      u64 lun, int task, enum tcm_tmreq_table tmr)
 204{
 205	struct se_cmd *se_cmd;
  206	struct se_session *se_sess;
  207	struct tcm_loop_nexus *tl_nexus;
  208	struct tcm_loop_cmd *tl_cmd;
 209	int ret = TMR_FUNCTION_FAILED, rc;
 210
 211	/*
 212	 * Locate the tl_nexus and se_sess pointers
 213	 */
 214	tl_nexus = tl_tpg->tl_nexus;
 215	if (!tl_nexus) {
  216		pr_err("Unable to perform device reset without active I_T Nexus\n");
 217		return ret;
 218	}
 219
 220	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
  221	if (!tl_cmd)
  222		return ret;
  223
  224	init_completion(&tl_cmd->tmr_done);
  225
  226	se_cmd = &tl_cmd->tl_se_cmd;
  227	se_sess = tl_tpg->tl_nexus->se_sess;
 228
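     	/*
     	 * Submit the TMR to target core; tcm_loop_queue_tm_rsp() will
     	 * complete ->tmr_done once the response is available.
     	 */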
 229	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
 230			       NULL, tmr, GFP_KERNEL, task,
 231			       TARGET_SCF_ACK_KREF);
 232	if (rc < 0)
 233		goto release;
 234	wait_for_completion(&tl_cmd->tmr_done);
 235	ret = se_cmd->se_tmr_req->response;
 236	target_put_sess_cmd(se_cmd);
 237
 238out:
  239	return ret;
  240
  241release:
  242	if (se_cmd)
  243		transport_generic_free_cmd(se_cmd, 0);
  244	else
  245		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  246	goto out;
 247}
 248
 249static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 250{
 251	struct tcm_loop_hba *tl_hba;
 252	struct tcm_loop_tpg *tl_tpg;
 253	int ret = FAILED;
 254
 255	/*
 256	 * Locate the tcm_loop_hba_t pointer
 257	 */
 258	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 259	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 260	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 261				 sc->request->tag, TMR_ABORT_TASK);
 262	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 263}
 264
 265/*
 266 * Called from SCSI EH process context to issue a LUN_RESET TMR
 267 * to struct scsi_device
 268 */
 269static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 270{
 271	struct tcm_loop_hba *tl_hba;
 272	struct tcm_loop_tpg *tl_tpg;
 273	int ret = FAILED;
 274
 275	/*
 276	 * Locate the tcm_loop_hba_t pointer
 277	 */
 278	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 279	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 280
 281	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 282				 0, TMR_LUN_RESET);
 283	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 284}
 285
 286static int tcm_loop_target_reset(struct scsi_cmnd *sc)
 287{
 288	struct tcm_loop_hba *tl_hba;
 289	struct tcm_loop_tpg *tl_tpg;
 290
 291	/*
 292	 * Locate the tcm_loop_hba_t pointer
 293	 */
 294	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 295	if (!tl_hba) {
  296		pr_err("Unable to perform device reset without active I_T Nexus\n");
 297		return FAILED;
 298	}
 299	/*
 300	 * Locate the tl_tpg pointer from TargetID in sc->device->id
 301	 */
 302	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 303	if (tl_tpg) {
 304		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 305		return SUCCESS;
 306	}
 307	return FAILED;
 308}
 309
 310static int tcm_loop_slave_alloc(struct scsi_device *sd)
 311{
 312	blk_queue_flag_set(QUEUE_FLAG_BIDI, sd->request_queue);
 313	return 0;
 314}
 315
 316static struct scsi_host_template tcm_loop_driver_template = {
 317	.show_info		= tcm_loop_show_info,
 318	.proc_name		= "tcm_loopback",
 319	.name			= "TCM_Loopback",
 320	.queuecommand		= tcm_loop_queuecommand,
 321	.change_queue_depth	= scsi_change_queue_depth,
 322	.eh_abort_handler = tcm_loop_abort_task,
 323	.eh_device_reset_handler = tcm_loop_device_reset,
 324	.eh_target_reset_handler = tcm_loop_target_reset,
 325	.can_queue		= 1024,
 326	.this_id		= -1,
 327	.sg_tablesize		= 256,
 328	.cmd_per_lun		= 1024,
 329	.max_sectors		= 0xFFFF,
 330	.use_clustering		= DISABLE_CLUSTERING,
 331	.slave_alloc		= tcm_loop_slave_alloc,
 332	.module			= THIS_MODULE,
 333	.track_queue_depth	= 1,
 334};
 335
 336static int tcm_loop_driver_probe(struct device *dev)
 337{
 338	struct tcm_loop_hba *tl_hba;
 339	struct Scsi_Host *sh;
 340	int error, host_prot;
 341
 342	tl_hba = to_tcm_loop_hba(dev);
 343
 344	sh = scsi_host_alloc(&tcm_loop_driver_template,
 345			sizeof(struct tcm_loop_hba));
 346	if (!sh) {
 347		pr_err("Unable to allocate struct scsi_host\n");
 348		return -ENODEV;
 349	}
 350	tl_hba->sh = sh;
 351
 352	/*
 353	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
 354	 */
 355	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
 356	/*
 357	 * Setup single ID, Channel and LUN for now..
 358	 */
 359	sh->max_id = 2;
 360	sh->max_lun = 0;
 361	sh->max_channel = 0;
 362	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 363
 364	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
 365		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
 366		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
 367
 368	scsi_host_set_prot(sh, host_prot);
 369	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
 370
 371	error = scsi_add_host(sh, &tl_hba->dev);
 372	if (error) {
 373		pr_err("%s: scsi_add_host failed\n", __func__);
 374		scsi_host_put(sh);
 375		return -ENODEV;
 376	}
 377	return 0;
 378}
 379
 380static int tcm_loop_driver_remove(struct device *dev)
 381{
 382	struct tcm_loop_hba *tl_hba;
 383	struct Scsi_Host *sh;
 384
 385	tl_hba = to_tcm_loop_hba(dev);
 386	sh = tl_hba->sh;
 387
 388	scsi_remove_host(sh);
 389	scsi_host_put(sh);
 390	return 0;
 391}
 392
 393static void tcm_loop_release_adapter(struct device *dev)
 394{
 395	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
 396
 397	kfree(tl_hba);
 398}
 399
 400/*
 401 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
 402 */
 403static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
 404{
 405	int ret;
 406
 407	tl_hba->dev.bus = &tcm_loop_lld_bus;
 408	tl_hba->dev.parent = tcm_loop_primary;
 409	tl_hba->dev.release = &tcm_loop_release_adapter;
 410	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
 411
 412	ret = device_register(&tl_hba->dev);
 413	if (ret) {
  414		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
 415		return -ENODEV;
 416	}
 417
 418	return 0;
 419}
 420
 421/*
 422 * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated
 423 * tcm_loop SCSI bus.
 424 */
 425static int tcm_loop_alloc_core_bus(void)
 426{
 427	int ret;
 428
 429	tcm_loop_primary = root_device_register("tcm_loop_0");
 430	if (IS_ERR(tcm_loop_primary)) {
 431		pr_err("Unable to allocate tcm_loop_primary\n");
 432		return PTR_ERR(tcm_loop_primary);
 433	}
 434
 435	ret = bus_register(&tcm_loop_lld_bus);
 436	if (ret) {
 437		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 438		goto dev_unreg;
 439	}
 440
 441	ret = driver_register(&tcm_loop_driverfs);
 442	if (ret) {
  443		pr_err("driver_register() failed for tcm_loop_driverfs\n");
 444		goto bus_unreg;
 445	}
 446
 447	pr_debug("Initialized TCM Loop Core Bus\n");
 448	return ret;
 449
 450bus_unreg:
 451	bus_unregister(&tcm_loop_lld_bus);
 452dev_unreg:
 453	root_device_unregister(tcm_loop_primary);
 454	return ret;
 455}
 456
 457static void tcm_loop_release_core_bus(void)
 458{
 459	driver_unregister(&tcm_loop_driverfs);
 460	bus_unregister(&tcm_loop_lld_bus);
 461	root_device_unregister(tcm_loop_primary);
 462
 463	pr_debug("Releasing TCM Loop Core BUS\n");
 464}
 465
 466static char *tcm_loop_get_fabric_name(void)
 467{
 468	return "loopback";
 469}
 470
 471static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
 472{
 473	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 474}
 475
 476static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 477{
 478	/*
 479	 * Return the passed NAA identifier for the Target Port
 480	 */
 481	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
 482}
 483
 484static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 485{
 486	/*
 487	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
 488	 * to represent the SCSI Target Port.
 489	 */
 490	return tl_tpg(se_tpg)->tl_tpgt;
 491}
 492
 493/*
 494 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 495 * based upon the incoming fabric dependent SCSI Initiator Port
 496 */
 497static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
 498{
 499	return 1;
 500}
 501
 502static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
 503{
 504	return 0;
 505}
 506
 507/*
  508 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 509 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 510 */
 511static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
 512{
 513	return 0;
 514}
 515
 516/*
  517 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 518 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 519 * It has been added here as a nop for target_fabric_tf_ops_check()
 520 */
 521static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
 522{
 523	return 0;
 524}
 525
 526static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
 527{
 528	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 529						   tl_se_tpg);
 530	return tl_tpg->tl_fabric_prot_type;
 531}
 532
 533static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 534{
 535	return 1;
 536}
 537
 538static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
 539{
 540	return 1;
 541}
 542
 543static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
 544{
 545	return;
 546}
 547
 548static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
 549{
 550	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 551			struct tcm_loop_cmd, tl_se_cmd);
 552
 553	return tl_cmd->sc_cmd_state;
 554}
 555
 556static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 557{
 558	/*
 559	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
 560	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
 561	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
 562	 * format with transport_generic_map_mem_to_cmd().
 563	 *
 564	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 565	 * object execution queue.
 566	 */
 567	target_execute_cmd(se_cmd);
 568	return 0;
 569}
 570
 571static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
 572{
 573	return 0;
 574}
 575
 576static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 577{
 578	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 579				struct tcm_loop_cmd, tl_se_cmd);
 580	struct scsi_cmnd *sc = tl_cmd->sc;
 581
 582	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
 583		 __func__, sc, sc->cmnd[0]);
 584
 585	sc->result = SAM_STAT_GOOD;
 586	set_host_byte(sc, DID_OK);
 587	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 588	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 589		scsi_set_resid(sc, se_cmd->residual_count);
 590	sc->scsi_done(sc);
 591	return 0;
 592}
 593
 594static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 595{
 596	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 597				struct tcm_loop_cmd, tl_se_cmd);
 598	struct scsi_cmnd *sc = tl_cmd->sc;
 599
 600	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
 601		 __func__, sc, sc->cmnd[0]);
 602
 603	if (se_cmd->sense_buffer &&
 604	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 605	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 606
 607		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 608				SCSI_SENSE_BUFFERSIZE);
 609		sc->result = SAM_STAT_CHECK_CONDITION;
 610		set_driver_byte(sc, DRIVER_SENSE);
 611	} else
 612		sc->result = se_cmd->scsi_status;
 613
 614	set_host_byte(sc, DID_OK);
 615	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 616	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 617		scsi_set_resid(sc, se_cmd->residual_count);
 618	sc->scsi_done(sc);
 619	return 0;
 620}
 621
 622static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 623{
 624	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 625				struct tcm_loop_cmd, tl_se_cmd);
 626
 627	/* Wake up tcm_loop_issue_tmr(). */
  628	complete(&tl_cmd->tmr_done);
 629}
 630
 631static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
 632{
 633	return;
 634}
 635
 636static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 637{
 638	switch (tl_hba->tl_proto_id) {
 639	case SCSI_PROTOCOL_SAS:
 640		return "SAS";
 641	case SCSI_PROTOCOL_FCP:
 642		return "FCP";
 643	case SCSI_PROTOCOL_ISCSI:
 644		return "iSCSI";
 645	default:
 646		break;
 647	}
 648
 649	return "Unknown";
 650}
 651
 652/* Start items for tcm_loop_port_cit */
 653
 654static int tcm_loop_port_link(
 655	struct se_portal_group *se_tpg,
 656	struct se_lun *lun)
 657{
 658	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 659				struct tcm_loop_tpg, tl_se_tpg);
 660	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 661
 662	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
 663	/*
 664	 * Add Linux/SCSI struct scsi_device by HCTL
 665	 */
 666	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 667
 668	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 669	return 0;
 670}
 671
 672static void tcm_loop_port_unlink(
 673	struct se_portal_group *se_tpg,
 674	struct se_lun *se_lun)
 675{
 676	struct scsi_device *sd;
 677	struct tcm_loop_hba *tl_hba;
 678	struct tcm_loop_tpg *tl_tpg;
 679
 680	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 681	tl_hba = tl_tpg->tl_hba;
 682
 683	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 684				se_lun->unpacked_lun);
 685	if (!sd) {
 686		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
 687		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 688		return;
 689	}
 690	/*
 691	 * Remove Linux/SCSI struct scsi_device by HCTL
 692	 */
 693	scsi_remove_device(sd);
 694	scsi_device_put(sd);
 695
 696	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
 697
 698	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 699}
 700
 701/* End items for tcm_loop_port_cit */
 702
 703static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
 704		struct config_item *item, char *page)
 705{
 706	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 707	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 708						   tl_se_tpg);
 709
 710	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
 711}
 712
 713static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
 714		struct config_item *item, const char *page, size_t count)
 715{
 716	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 717	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 718						   tl_se_tpg);
 719	unsigned long val;
 720	int ret = kstrtoul(page, 0, &val);
 721
 722	if (ret) {
 723		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
 724		return ret;
 725	}
 726	if (val != 0 && val != 1 && val != 3) {
 727		pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
 728		return -EINVAL;
 729	}
 730	tl_tpg->tl_fabric_prot_type = val;
 731
 732	return count;
 733}
 734
 735CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
 736
 737static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 738	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
 739	NULL,
 740};
 741
 742/* Start items for tcm_loop_nexus_cit */
 743
 744static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
 745				  struct se_session *se_sess, void *p)
 746{
 747	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 748					struct tcm_loop_tpg, tl_se_tpg);
 749
 750	tl_tpg->tl_nexus = p;
 751	return 0;
 752}
 753
 754static int tcm_loop_make_nexus(
 755	struct tcm_loop_tpg *tl_tpg,
 756	const char *name)
 757{
 758	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 759	struct tcm_loop_nexus *tl_nexus;
 760	int ret;
 761
 762	if (tl_tpg->tl_nexus) {
 763		pr_debug("tl_tpg->tl_nexus already exists\n");
 764		return -EEXIST;
 765	}
 766
 767	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
  768	if (!tl_nexus)
  769		return -ENOMEM;
 770
 771	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
 772					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
 773					name, tl_nexus, tcm_loop_alloc_sess_cb);
 774	if (IS_ERR(tl_nexus->se_sess)) {
 775		ret = PTR_ERR(tl_nexus->se_sess);
 776		kfree(tl_nexus);
 777		return ret;
 778	}
 779
 780	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
  781		 tcm_loop_dump_proto_id(tl_hba), name);
 782	return 0;
 783}
 784
 785static int tcm_loop_drop_nexus(
 786	struct tcm_loop_tpg *tpg)
 787{
 788	struct se_session *se_sess;
 789	struct tcm_loop_nexus *tl_nexus;
 790
 791	tl_nexus = tpg->tl_nexus;
 792	if (!tl_nexus)
 793		return -ENODEV;
 794
 795	se_sess = tl_nexus->se_sess;
 796	if (!se_sess)
 797		return -ENODEV;
 798
 799	if (atomic_read(&tpg->tl_tpg_port_count)) {
 800		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
  801		       atomic_read(&tpg->tl_tpg_port_count));
 802		return -EPERM;
 803	}
 804
 805	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
 806		 tcm_loop_dump_proto_id(tpg->tl_hba),
 807		 tl_nexus->se_sess->se_node_acl->initiatorname);
 808	/*
 809	 * Release the SCSI I_T Nexus to the emulated Target Port
 810	 */
 811	transport_deregister_session(tl_nexus->se_sess);
 812	tpg->tl_nexus = NULL;
 813	kfree(tl_nexus);
 814	return 0;
 815}
 816
 817/* End items for tcm_loop_nexus_cit */
 818
 819static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
 820{
 821	struct se_portal_group *se_tpg = to_tpg(item);
 822	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 823			struct tcm_loop_tpg, tl_se_tpg);
 824	struct tcm_loop_nexus *tl_nexus;
 825	ssize_t ret;
 826
 827	tl_nexus = tl_tpg->tl_nexus;
 828	if (!tl_nexus)
 829		return -ENODEV;
 830
 831	ret = snprintf(page, PAGE_SIZE, "%s\n",
 832		tl_nexus->se_sess->se_node_acl->initiatorname);
 833
 834	return ret;
 835}
 836
 837static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
 838		const char *page, size_t count)
 839{
 840	struct se_portal_group *se_tpg = to_tpg(item);
 841	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 842			struct tcm_loop_tpg, tl_se_tpg);
 843	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 844	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
 845	int ret;
 846	/*
 847	 * Shutdown the active I_T nexus if 'NULL' is passed..
 848	 */
 849	if (!strncmp(page, "NULL", 4)) {
 850		ret = tcm_loop_drop_nexus(tl_tpg);
 851		return (!ret) ? count : ret;
 852	}
 853	/*
 854	 * Otherwise make sure the passed virtual Initiator port WWN matches
 855	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
 856	 * tcm_loop_make_nexus()
 857	 */
 858	if (strlen(page) >= TL_WWN_ADDR_LEN) {
  859		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
 860		       page, TL_WWN_ADDR_LEN);
 861		return -EINVAL;
 862	}
 863	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
 864
 865	ptr = strstr(i_port, "naa.");
 866	if (ptr) {
 867		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
 868			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
  869			       i_port, tcm_loop_dump_proto_id(tl_hba));
 870			return -EINVAL;
 871		}
 872		port_ptr = &i_port[0];
 873		goto check_newline;
 874	}
 875	ptr = strstr(i_port, "fc.");
 876	if (ptr) {
 877		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
 878			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
  879			       i_port, tcm_loop_dump_proto_id(tl_hba));
 880			return -EINVAL;
 881		}
 882		port_ptr = &i_port[3]; /* Skip over "fc." */
 883		goto check_newline;
 884	}
 885	ptr = strstr(i_port, "iqn.");
 886	if (ptr) {
 887		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
 888			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
  889			       i_port, tcm_loop_dump_proto_id(tl_hba));
 890			return -EINVAL;
 891		}
 892		port_ptr = &i_port[0];
 893		goto check_newline;
 894	}
 895	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
 896	       i_port);
 897	return -EINVAL;
 898	/*
 899	 * Clear any trailing newline for the NAA WWN
 900	 */
 901check_newline:
 902	if (i_port[strlen(i_port)-1] == '\n')
 903		i_port[strlen(i_port)-1] = '\0';
 904
 905	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
 906	if (ret < 0)
 907		return ret;
 908
 909	return count;
 910}
 911
 912static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
 913		char *page)
 914{
 915	struct se_portal_group *se_tpg = to_tpg(item);
 916	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 917			struct tcm_loop_tpg, tl_se_tpg);
 918	const char *status = NULL;
 919	ssize_t ret = -EINVAL;
 920
 921	switch (tl_tpg->tl_transport_status) {
 922	case TCM_TRANSPORT_ONLINE:
 923		status = "online";
 924		break;
 925	case TCM_TRANSPORT_OFFLINE:
 926		status = "offline";
 927		break;
 928	default:
 929		break;
 930	}
 931
 932	if (status)
 933		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
 934
 935	return ret;
 936}
 937
 938static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
 939		const char *page, size_t count)
 940{
 941	struct se_portal_group *se_tpg = to_tpg(item);
 942	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 943			struct tcm_loop_tpg, tl_se_tpg);
 944
 945	if (!strncmp(page, "online", 6)) {
 946		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 947		return count;
 948	}
 949	if (!strncmp(page, "offline", 7)) {
 950		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
 951		if (tl_tpg->tl_nexus) {
 952			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
 953
 954			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
 955		}
 956		return count;
 957	}
 958	return -EINVAL;
 959}
 960
 961static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
 962					 char *page)
 963{
 964	struct se_portal_group *se_tpg = to_tpg(item);
 965	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 966			struct tcm_loop_tpg, tl_se_tpg);
 967	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 968
 969	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
 970			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
 971}
 972
 973CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
 974CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
 975CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
 976
 977static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
 978	&tcm_loop_tpg_attr_nexus,
 979	&tcm_loop_tpg_attr_transport_status,
 980	&tcm_loop_tpg_attr_address,
 981	NULL,
 982};
 983
 984/* Start items for tcm_loop_naa_cit */
 985
 986static struct se_portal_group *tcm_loop_make_naa_tpg(
 987	struct se_wwn *wwn,
 988	struct config_group *group,
 989	const char *name)
 990{
 991	struct tcm_loop_hba *tl_hba = container_of(wwn,
 992			struct tcm_loop_hba, tl_hba_wwn);
 993	struct tcm_loop_tpg *tl_tpg;
 994	int ret;
 995	unsigned long tpgt;
 996
 997	if (strstr(name, "tpgt_") != name) {
  998		pr_err("Unable to locate \"tpgt_#\" directory group\n");
 999		return ERR_PTR(-EINVAL);
1000	}
1001	if (kstrtoul(name+5, 10, &tpgt))
1002		return ERR_PTR(-EINVAL);
1003
1004	if (tpgt >= TL_TPGS_PER_HBA) {
1005		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
1006		       tpgt, TL_TPGS_PER_HBA);
1007		return ERR_PTR(-EINVAL);
1008	}
1009	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1010	tl_tpg->tl_hba = tl_hba;
1011	tl_tpg->tl_tpgt = tpgt;
1012	/*
 1013	 * Register the tl_tpg as an emulated TCM Target Endpoint
1014	 */
1015	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
1016	if (ret < 0)
1017		return ERR_PTR(-ENOMEM);
1018
1019	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
1020		 tcm_loop_dump_proto_id(tl_hba),
 1021		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1022	return &tl_tpg->tl_se_tpg;
1023}
1024
1025static void tcm_loop_drop_naa_tpg(
1026	struct se_portal_group *se_tpg)
1027{
1028	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1029	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1030				struct tcm_loop_tpg, tl_se_tpg);
1031	struct tcm_loop_hba *tl_hba;
1032	unsigned short tpgt;
1033
1034	tl_hba = tl_tpg->tl_hba;
1035	tpgt = tl_tpg->tl_tpgt;
1036	/*
1037	 * Release the I_T Nexus for the Virtual target link if present
1038	 */
1039	tcm_loop_drop_nexus(tl_tpg);
1040	/*
 1041	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
1042	 */
1043	core_tpg_deregister(se_tpg);
1044
1045	tl_tpg->tl_hba = NULL;
1046	tl_tpg->tl_tpgt = 0;
1047
1048	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
1049		 tcm_loop_dump_proto_id(tl_hba),
1050		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1051}
1052
1053/* End items for tcm_loop_naa_cit */
1054
1055/* Start items for tcm_loop_cit */
1056
1057static struct se_wwn *tcm_loop_make_scsi_hba(
1058	struct target_fabric_configfs *tf,
1059	struct config_group *group,
1060	const char *name)
1061{
1062	struct tcm_loop_hba *tl_hba;
1063	struct Scsi_Host *sh;
1064	char *ptr;
1065	int ret, off = 0;
1066
1067	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
 1068	if (!tl_hba)
1069		return ERR_PTR(-ENOMEM);
1070
1071	/*
1072	 * Determine the emulated Protocol Identifier and Target Port Name
1073	 * based on the incoming configfs directory name.
1074	 */
1075	ptr = strstr(name, "naa.");
1076	if (ptr) {
1077		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1078		goto check_len;
1079	}
1080	ptr = strstr(name, "fc.");
1081	if (ptr) {
1082		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1083		off = 3; /* Skip over "fc." */
1084		goto check_len;
1085	}
1086	ptr = strstr(name, "iqn.");
1087	if (!ptr) {
1088		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
1089		       name);
1090		ret = -EINVAL;
1091		goto out;
1092	}
1093	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1094
1095check_len:
1096	if (strlen(name) >= TL_WWN_ADDR_LEN) {
1097		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
 1098		       name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN);
1099		ret = -EINVAL;
1100		goto out;
1101	}
1102	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1103
1104	/*
1105	 * Call device_register(tl_hba->dev) to register the emulated
1106	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
1107	 * device_register() callbacks in tcm_loop_driver_probe()
1108	 */
1109	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1110	if (ret)
1111		goto out;
1112
1113	sh = tl_hba->sh;
1114	tcm_loop_hba_no_cnt++;
1115	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
 1116		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1117	return &tl_hba->tl_hba_wwn;
1118out:
1119	kfree(tl_hba);
1120	return ERR_PTR(ret);
1121}
1122
1123static void tcm_loop_drop_scsi_hba(
1124	struct se_wwn *wwn)
1125{
1126	struct tcm_loop_hba *tl_hba = container_of(wwn,
1127				struct tcm_loop_hba, tl_hba_wwn);
1128
1129	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
1130		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
 1131		 tl_hba->sh->host_no);
1132	/*
1133	 * Call device_unregister() on the original tl_hba->dev.
1134	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
1135	 * release *tl_hba;
1136	 */
1137	device_unregister(&tl_hba->dev);
1138}
1139
1140/* Start items for tcm_loop_cit */
1141static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1142{
1143	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1144}
1145
1146CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
1147
1148static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1149	&tcm_loop_wwn_attr_version,
1150	NULL,
1151};
1152
1153/* End items for tcm_loop_cit */
1154
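     /*
      * Fabric API callbacks registered with target core via
      * target_register_template() in tcm_loop_fabric_init() below.
      */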
1155static const struct target_core_fabric_ops loop_ops = {
1156	.module				= THIS_MODULE,
1157	.name				= "loopback",
1158	.get_fabric_name		= tcm_loop_get_fabric_name,
1159	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
1160	.tpg_get_tag			= tcm_loop_get_tag,
1161	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
1162	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
1163	.tpg_check_demo_mode_write_protect =
1164				tcm_loop_check_demo_mode_write_protect,
1165	.tpg_check_prod_mode_write_protect =
1166				tcm_loop_check_prod_mode_write_protect,
1167	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
1168	.tpg_get_inst_index		= tcm_loop_get_inst_index,
1169	.check_stop_free		= tcm_loop_check_stop_free,
1170	.release_cmd			= tcm_loop_release_cmd,
1171	.sess_get_index			= tcm_loop_sess_get_index,
1172	.write_pending			= tcm_loop_write_pending,
1173	.write_pending_status		= tcm_loop_write_pending_status,
1174	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
1175	.get_cmd_state			= tcm_loop_get_cmd_state,
1176	.queue_data_in			= tcm_loop_queue_data_in,
1177	.queue_status			= tcm_loop_queue_status,
1178	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
1179	.aborted_task			= tcm_loop_aborted_task,
1180	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
1181	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
1182	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
1183	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
1184	.fabric_post_link		= tcm_loop_port_link,
1185	.fabric_pre_unlink		= tcm_loop_port_unlink,
1186	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
1187	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
1188	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
1189};
1190
1191static int __init tcm_loop_fabric_init(void)
1192{
1193	int ret = -ENOMEM;
1194
1195	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1196	if (!tcm_loop_workqueue)
1197		goto out;
1198
1199	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1200				sizeof(struct tcm_loop_cmd),
1201				__alignof__(struct tcm_loop_cmd),
1202				0, NULL);
1203	if (!tcm_loop_cmd_cache) {
 1204		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
1205		goto out_destroy_workqueue;
1206	}
1207
1208	ret = tcm_loop_alloc_core_bus();
1209	if (ret)
1210		goto out_destroy_cache;
1211
1212	ret = target_register_template(&loop_ops);
1213	if (ret)
1214		goto out_release_core_bus;
1215
1216	return 0;
1217
1218out_release_core_bus:
1219	tcm_loop_release_core_bus();
1220out_destroy_cache:
1221	kmem_cache_destroy(tcm_loop_cmd_cache);
1222out_destroy_workqueue:
1223	destroy_workqueue(tcm_loop_workqueue);
1224out:
1225	return ret;
1226}
1227
1228static void __exit tcm_loop_fabric_exit(void)
1229{
1230	target_unregister_template(&loop_ops);
1231	tcm_loop_release_core_bus();
1232	kmem_cache_destroy(tcm_loop_cmd_cache);
1233	destroy_workqueue(tcm_loop_workqueue);
1234}
1235
1236MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1237MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1238MODULE_LICENSE("GPL");
1239module_init(tcm_loop_fabric_init);
1240module_exit(tcm_loop_fabric_exit);
v4.10.11
   1/*******************************************************************************
   2 *
   3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
   4 * for emulated SAS initiator ports
   5 *
   6 * © Copyright 2011-2013 Datera, Inc.
   7 *
   8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   9 *
  10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 ****************************************************************************/
  22
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/init.h>
  26#include <linux/slab.h>
  27#include <linux/types.h>
  28#include <linux/configfs.h>
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_tcq.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_cmnd.h>
  34
  35#include <target/target_core_base.h>
  36#include <target/target_core_fabric.h>
  37
  38#include "tcm_loop.h"
  39
  40#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
  41
  42static struct workqueue_struct *tcm_loop_workqueue;
  43static struct kmem_cache *tcm_loop_cmd_cache;
  44
  45static int tcm_loop_hba_no_cnt;
  46
  47static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  48
  49/*
  50 * Called from struct target_core_fabric_ops->check_stop_free()
  51 */
  52static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
  53{
  54	/*
  55	 * Do not release struct se_cmd's containing a valid TMR
  56	 * pointer.  These will be released directly in tcm_loop_device_reset()
  57	 * with transport_generic_free_cmd().
  58	 */
  59	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
  60		return 0;
  61	/*
  62	 * Release the struct se_cmd, which will make a callback to release
  63	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
  64	 */
  65	transport_generic_free_cmd(se_cmd, 0);
  66	return 1;
  67}
  68
  69static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
  70{
  71	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
  72				struct tcm_loop_cmd, tl_se_cmd);
  73
  74	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  75}
  76
  77static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
  78{
  79	seq_printf(m, "tcm_loop_proc_info()\n");
  80	return 0;
  81}
  82
  83static int tcm_loop_driver_probe(struct device *);
  84static int tcm_loop_driver_remove(struct device *);
  85
  86static int pseudo_lld_bus_match(struct device *dev,
  87				struct device_driver *dev_driver)
  88{
  89	return 1;
  90}
  91
  92static struct bus_type tcm_loop_lld_bus = {
  93	.name			= "tcm_loop_bus",
  94	.match			= pseudo_lld_bus_match,
  95	.probe			= tcm_loop_driver_probe,
  96	.remove			= tcm_loop_driver_remove,
  97};
  98
  99static struct device_driver tcm_loop_driverfs = {
 100	.name			= "tcm_loop",
 101	.bus			= &tcm_loop_lld_bus,
 102};
 103/*
 104 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 105 */
 106static struct device *tcm_loop_primary;
 107
 108static void tcm_loop_submission_work(struct work_struct *work)
 109{
 110	struct tcm_loop_cmd *tl_cmd =
 111		container_of(work, struct tcm_loop_cmd, work);
 112	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
 113	struct scsi_cmnd *sc = tl_cmd->sc;
 114	struct tcm_loop_nexus *tl_nexus;
 115	struct tcm_loop_hba *tl_hba;
 116	struct tcm_loop_tpg *tl_tpg;
 117	struct scatterlist *sgl_bidi = NULL;
 118	u32 sgl_bidi_count = 0, transfer_length;
 119	int rc;
 120
 121	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 122	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 123
 124	/*
 125	 * Ensure that this tl_tpg reference from the incoming sc->device->id
 126	 * has already been configured via tcm_loop_make_naa_tpg().
 127	 */
 128	if (!tl_tpg->tl_hba) {
 129		set_host_byte(sc, DID_NO_CONNECT);
 130		goto out_done;
 131	}
 132	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
 133		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
 134		goto out_done;
 135	}
 136	tl_nexus = tl_tpg->tl_nexus;
 137	if (!tl_nexus) {
 138		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
 139				" does not exist\n");
 140		set_host_byte(sc, DID_ERROR);
 141		goto out_done;
 142	}
 143	if (scsi_bidi_cmnd(sc)) {
 144		struct scsi_data_buffer *sdb = scsi_in(sc);
 145
 146		sgl_bidi = sdb->table.sgl;
 147		sgl_bidi_count = sdb->table.nents;
 148		se_cmd->se_cmd_flags |= SCF_BIDI;
 149
 150	}
 151
 152	transfer_length = scsi_transfer_length(sc);
 153	if (!scsi_prot_sg_count(sc) &&
 154	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
 155		se_cmd->prot_pto = true;
 156		/*
 157		 * loopback transport doesn't support
 158		 * WRITE_GENERATE, READ_STRIP protection
 159		 * information operations, go ahead unprotected.
 160		 */
 161		transfer_length = scsi_bufflen(sc);
 162	}
 163
 164	se_cmd->tag = tl_cmd->sc_cmd_tag;
 165	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 166			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
 167			transfer_length, TCM_SIMPLE_TAG,
 168			sc->sc_data_direction, 0,
 169			scsi_sglist(sc), scsi_sg_count(sc),
 170			sgl_bidi, sgl_bidi_count,
 171			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
 172	if (rc < 0) {
 173		set_host_byte(sc, DID_NO_CONNECT);
 174		goto out_done;
 175	}
 176	return;
 177
 178out_done:
 179	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 180	sc->scsi_done(sc);
 181	return;
 182}
 183
 184/*
 185 * ->queuecommand can be and usually is called from interrupt context, so
 186 * defer the actual submission to a workqueue.
 187 */
 188static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 189{
 190	struct tcm_loop_cmd *tl_cmd;
 191
 192	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
 193		" scsi_buf_len: %u\n", sc->device->host->host_no,
 194		sc->device->id, sc->device->channel, sc->device->lun,
 195		sc->cmnd[0], scsi_bufflen(sc));
 196
 197	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 198	if (!tl_cmd) {
 199		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 200		set_host_byte(sc, DID_ERROR);
 201		sc->scsi_done(sc);
 202		return 0;
 203	}
 204
 205	tl_cmd->sc = sc;
 206	tl_cmd->sc_cmd_tag = sc->request->tag;
 207	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
 208	queue_work(tcm_loop_workqueue, &tl_cmd->work);
 209	return 0;
 210}
 211
 212/*
 213 * Called from SCSI EH process context to issue a LUN_RESET TMR
 214 * to struct scsi_device
 215 */
 216static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 217			      u64 lun, int task, enum tcm_tmreq_table tmr)
 218{
 219	struct se_cmd *se_cmd = NULL;
 220	struct se_session *se_sess;
 221	struct se_portal_group *se_tpg;
 222	struct tcm_loop_nexus *tl_nexus;
 223	struct tcm_loop_cmd *tl_cmd = NULL;
 224	struct tcm_loop_tmr *tl_tmr = NULL;
 225	int ret = TMR_FUNCTION_FAILED, rc;
 226
 227	/*
 228	 * Locate the tl_nexus and se_sess pointers
 229	 */
 230	tl_nexus = tl_tpg->tl_nexus;
 231	if (!tl_nexus) {
 232		pr_err("Unable to perform device reset without"
 233				" active I_T Nexus\n");
 234		return ret;
 235	}
 236
 237	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 238	if (!tl_cmd) {
 239		pr_err("Unable to allocate memory for tl_cmd\n");
 240		return ret;
 241	}
 242
 243	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 244	if (!tl_tmr) {
 245		pr_err("Unable to allocate memory for tl_tmr\n");
 246		goto release;
 247	}
 248	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
 249
 250	se_cmd = &tl_cmd->tl_se_cmd;
 251	se_tpg = &tl_tpg->tl_se_tpg;
 252	se_sess = tl_tpg->tl_nexus->se_sess;
 253	/*
 254	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
 255	 */
 256	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
 257				DMA_NONE, TCM_SIMPLE_TAG,
 258				&tl_cmd->tl_sense_buf[0]);
 259
 260	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
 
 
 261	if (rc < 0)
 262		goto release;
 
 
 
 263
 264	if (tmr == TMR_ABORT_TASK)
 265		se_cmd->se_tmr_req->ref_task_tag = task;
 266
 267	/*
 268	 * Locate the underlying TCM struct se_lun
 269	 */
 270	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
 271		ret = TMR_LUN_DOES_NOT_EXIST;
 272		goto release;
 273	}
 274	/*
 275	 * Queue the TMR to TCM Core and sleep waiting for
 276	 * tcm_loop_queue_tm_rsp() to wake us up.
 277	 */
 278	transport_generic_handle_tmr(se_cmd);
 279	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
 280	/*
 281	 * The TMR LUN_RESET has completed, check the response status and
 282	 * then release allocations.
 283	 */
 284	ret = se_cmd->se_tmr_req->response;
 285release:
 286	if (se_cmd)
 287		transport_generic_free_cmd(se_cmd, 1);
 288	else
 289		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 290	kfree(tl_tmr);
 291	return ret;
 292}
 293
 294static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 295{
 296	struct tcm_loop_hba *tl_hba;
 297	struct tcm_loop_tpg *tl_tpg;
 298	int ret = FAILED;
 299
 300	/*
 301	 * Locate the tcm_loop_hba_t pointer
 302	 */
 303	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 304	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 305	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 306				 sc->request->tag, TMR_ABORT_TASK);
 307	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 308}
 309
 310/*
 311 * Called from SCSI EH process context to issue a LUN_RESET TMR
 312 * to struct scsi_device
 313 */
 314static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 315{
 316	struct tcm_loop_hba *tl_hba;
 317	struct tcm_loop_tpg *tl_tpg;
 318	int ret = FAILED;
 319
 320	/*
 321	 * Locate the tcm_loop_hba_t pointer
 322	 */
 323	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 324	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 325
 326	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 327				 0, TMR_LUN_RESET);
 328	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 329}
 330
 331static int tcm_loop_target_reset(struct scsi_cmnd *sc)
 332{
 333	struct tcm_loop_hba *tl_hba;
 334	struct tcm_loop_tpg *tl_tpg;
 335
 336	/*
 337	 * Locate the tcm_loop_hba_t pointer
 338	 */
 339	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 340	if (!tl_hba) {
 341		pr_err("Unable to perform device reset without"
 342				" active I_T Nexus\n");
 343		return FAILED;
 344	}
 345	/*
 346	 * Locate the tl_tpg pointer from TargetID in sc->device->id
 347	 */
 348	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 349	if (tl_tpg) {
 350		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 351		return SUCCESS;
 352	}
 353	return FAILED;
 354}
 355
 356static int tcm_loop_slave_alloc(struct scsi_device *sd)
 357{
 358	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
 359	return 0;
 360}
 361
 362static struct scsi_host_template tcm_loop_driver_template = {
 363	.show_info		= tcm_loop_show_info,
 364	.proc_name		= "tcm_loopback",
 365	.name			= "TCM_Loopback",
 366	.queuecommand		= tcm_loop_queuecommand,
 367	.change_queue_depth	= scsi_change_queue_depth,
 368	.eh_abort_handler = tcm_loop_abort_task,
 369	.eh_device_reset_handler = tcm_loop_device_reset,
 370	.eh_target_reset_handler = tcm_loop_target_reset,
 371	.can_queue		= 1024,
 372	.this_id		= -1,
 373	.sg_tablesize		= 256,
 374	.cmd_per_lun		= 1024,
 375	.max_sectors		= 0xFFFF,
 376	.use_clustering		= DISABLE_CLUSTERING,
 377	.slave_alloc		= tcm_loop_slave_alloc,
 378	.module			= THIS_MODULE,
 379	.track_queue_depth	= 1,
 380};
 381
 382static int tcm_loop_driver_probe(struct device *dev)
 383{
 384	struct tcm_loop_hba *tl_hba;
 385	struct Scsi_Host *sh;
 386	int error, host_prot;
 387
 388	tl_hba = to_tcm_loop_hba(dev);
 389
 390	sh = scsi_host_alloc(&tcm_loop_driver_template,
 391			sizeof(struct tcm_loop_hba));
 392	if (!sh) {
 393		pr_err("Unable to allocate struct scsi_host\n");
 394		return -ENODEV;
 395	}
 396	tl_hba->sh = sh;
 397
 398	/*
 399	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
 400	 */
 401	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
 402	/*
 403	 * Setup single ID, Channel and LUN for now..
 404	 */
 405	sh->max_id = 2;
 406	sh->max_lun = 0;
 407	sh->max_channel = 0;
 408	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 409
 410	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
 411		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
 412		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
 413
 414	scsi_host_set_prot(sh, host_prot);
 415	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
 416
 417	error = scsi_add_host(sh, &tl_hba->dev);
 418	if (error) {
 419		pr_err("%s: scsi_add_host failed\n", __func__);
 420		scsi_host_put(sh);
 421		return -ENODEV;
 422	}
 423	return 0;
 424}
 425
 426static int tcm_loop_driver_remove(struct device *dev)
 427{
 428	struct tcm_loop_hba *tl_hba;
 429	struct Scsi_Host *sh;
 430
 431	tl_hba = to_tcm_loop_hba(dev);
 432	sh = tl_hba->sh;
 433
 434	scsi_remove_host(sh);
 435	scsi_host_put(sh);
 436	return 0;
 437}
 438
 439static void tcm_loop_release_adapter(struct device *dev)
 440{
 441	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
 442
 443	kfree(tl_hba);
 444}
 445
 446/*
 447 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
 448 */
 449static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
 450{
 451	int ret;
 452
 453	tl_hba->dev.bus = &tcm_loop_lld_bus;
 454	tl_hba->dev.parent = tcm_loop_primary;
 455	tl_hba->dev.release = &tcm_loop_release_adapter;
 456	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
 457
 458	ret = device_register(&tl_hba->dev);
 459	if (ret) {
 460		pr_err("device_register() failed for"
 461				" tl_hba->dev: %d\n", ret);
 462		return -ENODEV;
 463	}
 464
 465	return 0;
 466}
 467
 468/*
 469 * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated
 470 * tcm_loop SCSI bus.
 471 */
 472static int tcm_loop_alloc_core_bus(void)
 473{
 474	int ret;
 475
 476	tcm_loop_primary = root_device_register("tcm_loop_0");
 477	if (IS_ERR(tcm_loop_primary)) {
 478		pr_err("Unable to allocate tcm_loop_primary\n");
 479		return PTR_ERR(tcm_loop_primary);
 480	}
 481
 482	ret = bus_register(&tcm_loop_lld_bus);
 483	if (ret) {
 484		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 485		goto dev_unreg;
 486	}
 487
 488	ret = driver_register(&tcm_loop_driverfs);
 489	if (ret) {
 490		pr_err("driver_register() failed for"
 491				"tcm_loop_driverfs\n");
 492		goto bus_unreg;
 493	}
 494
 495	pr_debug("Initialized TCM Loop Core Bus\n");
 496	return ret;
 497
 498bus_unreg:
 499	bus_unregister(&tcm_loop_lld_bus);
 500dev_unreg:
 501	root_device_unregister(tcm_loop_primary);
 502	return ret;
 503}
 504
 505static void tcm_loop_release_core_bus(void)
 506{
 507	driver_unregister(&tcm_loop_driverfs);
 508	bus_unregister(&tcm_loop_lld_bus);
 509	root_device_unregister(tcm_loop_primary);
 510
 511	pr_debug("Releasing TCM Loop Core BUS\n");
 512}
 513
 514static char *tcm_loop_get_fabric_name(void)
 515{
 516	return "loopback";
 517}
 518
 519static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
 520{
 521	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 522}
 523
 524static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 525{
 526	/*
 527	 * Return the passed NAA identifier for the Target Port
 528	 */
 529	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
 530}
 531
 532static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 533{
 534	/*
 535	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
 536	 * to represent the SCSI Target Port.
 537	 */
 538	return tl_tpg(se_tpg)->tl_tpgt;
 539}
 540
 541/*
 542 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 543 * based upon the incoming fabric dependent SCSI Initiator Port
 544 */
 545static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
 546{
 547	return 1;
 548}
 549
 550static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
 551{
 552	return 0;
 553}
 554
 555/*
 556 * Allow I_T Nexus full READ-WRITE access without explict Initiator Node ACLs for
 557 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 558 */
 559static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
 560{
 561	return 0;
 562}
 563
 564/*
 565 * Because TCM_Loop does not use explict ACLs and MappedLUNs, this will
 566 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 567 * It has been added here as a nop for target_fabric_tf_ops_check()
 568 */
 569static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
 570{
 571	return 0;
 572}
 573
 574static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
 575{
 576	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 577						   tl_se_tpg);
 578	return tl_tpg->tl_fabric_prot_type;
 579}
 580
 581static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 582{
 583	return 1;
 584}
 585
 586static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
 587{
 588	return 1;
 589}
 590
 591static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
 592{
 593	return;
 594}
 595
 596static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
 597{
 598	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 599			struct tcm_loop_cmd, tl_se_cmd);
 600
 601	return tl_cmd->sc_cmd_state;
 602}
 603
 604static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 605{
 606	/*
 607	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
 608	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
 609	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
 610	 * format with transport_generic_map_mem_to_cmd().
 611	 *
 612	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 613	 * object execution queue.
 614	 */
 615	target_execute_cmd(se_cmd);
 616	return 0;
 617}
 618
 619static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
 620{
 621	return 0;
 622}
 623
 624static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 625{
 626	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 627				struct tcm_loop_cmd, tl_se_cmd);
 628	struct scsi_cmnd *sc = tl_cmd->sc;
 629
 630	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 631		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
 632
 633	sc->result = SAM_STAT_GOOD;
 634	set_host_byte(sc, DID_OK);
 635	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 636	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 637		scsi_set_resid(sc, se_cmd->residual_count);
 638	sc->scsi_done(sc);
 639	return 0;
 640}
 641
 642static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 643{
 644	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 645				struct tcm_loop_cmd, tl_se_cmd);
 646	struct scsi_cmnd *sc = tl_cmd->sc;
 647
 648	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 649			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 650
 651	if (se_cmd->sense_buffer &&
 652	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 653	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 654
 655		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 656				SCSI_SENSE_BUFFERSIZE);
 657		sc->result = SAM_STAT_CHECK_CONDITION;
 658		set_driver_byte(sc, DRIVER_SENSE);
 659	} else
 660		sc->result = se_cmd->scsi_status;
 661
 662	set_host_byte(sc, DID_OK);
 663	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 664	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 665		scsi_set_resid(sc, se_cmd->residual_count);
 666	sc->scsi_done(sc);
 667	return 0;
 668}
 669
 670static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 671{
 672	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 673	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
 674	/*
 675	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
 676	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
 677	 */
 678	atomic_set(&tl_tmr->tmr_complete, 1);
 679	wake_up(&tl_tmr->tl_tmr_wait);
 680}
 681
 682static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
 683{
 684	return;
 685}
 686
 687static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 688{
 689	switch (tl_hba->tl_proto_id) {
 690	case SCSI_PROTOCOL_SAS:
 691		return "SAS";
 692	case SCSI_PROTOCOL_FCP:
 693		return "FCP";
 694	case SCSI_PROTOCOL_ISCSI:
 695		return "iSCSI";
 696	default:
 697		break;
 698	}
 699
 700	return "Unknown";
 701}
 702
 703/* Start items for tcm_loop_port_cit */
 704
 705static int tcm_loop_port_link(
 706	struct se_portal_group *se_tpg,
 707	struct se_lun *lun)
 708{
 709	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 710				struct tcm_loop_tpg, tl_se_tpg);
 711	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 712
 713	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
 714	/*
 715	 * Add Linux/SCSI struct scsi_device by HCTL
 716	 */
 717	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 718
 719	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 720	return 0;
 721}
 722
 723static void tcm_loop_port_unlink(
 724	struct se_portal_group *se_tpg,
 725	struct se_lun *se_lun)
 726{
 727	struct scsi_device *sd;
 728	struct tcm_loop_hba *tl_hba;
 729	struct tcm_loop_tpg *tl_tpg;
 730
 731	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 732	tl_hba = tl_tpg->tl_hba;
 733
 734	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 735				se_lun->unpacked_lun);
 736	if (!sd) {
 737		pr_err("Unable to locate struct scsi_device for %d:%d:"
 738			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 739		return;
 740	}
 741	/*
 742	 * Remove Linux/SCSI struct scsi_device by HCTL
 743	 */
 744	scsi_remove_device(sd);
 745	scsi_device_put(sd);
 746
 747	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
 748
 749	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 750}
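/*
 * Illustrative configfs usage (a sketch, not part of the driver): linking a
 * backstore device into a loopback TPG LUN is what reaches
 * tcm_loop_port_link() and the scsi_add_device() call above; removing the
 * symlink reaches tcm_loop_port_unlink(). The WWN, tpgt_1, iblock_0/disk0
 * and link name below are placeholder examples:
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014055e2f0aa87/tpgt_1/lun/lun_0
 *   ln -s /sys/kernel/config/target/core/iblock_0/disk0 \
 *       /sys/kernel/config/target/loopback/naa.60014055e2f0aa87/tpgt_1/lun/lun_0/virtual_scsi_port
 */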
 751
 752/* End items for tcm_loop_port_cit */
 753
 754static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
 755		struct config_item *item, char *page)
 756{
 757	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 758	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 759						   tl_se_tpg);
 760
 761	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
 762}
 763
 764static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
 765		struct config_item *item, const char *page, size_t count)
 766{
 767	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 768	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 769						   tl_se_tpg);
 770	unsigned long val;
 771	int ret = kstrtoul(page, 0, &val);
 772
 773	if (ret) {
 774		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
 775		return ret;
 776	}
 777	if (val != 0 && val != 1 && val != 3) {
  778		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
 779		return -EINVAL;
 780	}
 781	tl_tpg->tl_fabric_prot_type = val;
 782
 783	return count;
 784}
 785
 786CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
 787
 788static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 789	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
 790	NULL,
 791};
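/*
 * Illustrative only: fabric_prot_type is exposed under the TPG's attrib/
 * group in configfs. Assuming configfs is mounted at /sys/kernel/config and
 * using a placeholder WWN:
 *
 *   echo 1 > /sys/kernel/config/target/loopback/naa.60014055e2f0aa87/tpgt_1/attrib/fabric_prot_type
 *
 * Only the values 0, 1 and 3 accepted by the store handler above are valid.
 */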
 792
 793/* Start items for tcm_loop_nexus_cit */
 794
 795static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
 796				  struct se_session *se_sess, void *p)
 797{
 798	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 799					struct tcm_loop_tpg, tl_se_tpg);
 800
 801	tl_tpg->tl_nexus = p;
 802	return 0;
 803}
 804
 805static int tcm_loop_make_nexus(
 806	struct tcm_loop_tpg *tl_tpg,
 807	const char *name)
 808{
 809	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 810	struct tcm_loop_nexus *tl_nexus;
 811	int ret;
 812
 813	if (tl_tpg->tl_nexus) {
 814		pr_debug("tl_tpg->tl_nexus already exists\n");
 815		return -EEXIST;
 816	}
 817
 818	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 819	if (!tl_nexus) {
 820		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 821		return -ENOMEM;
 822	}
 823
 824	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
 825					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
 826					name, tl_nexus, tcm_loop_alloc_sess_cb);
 827	if (IS_ERR(tl_nexus->se_sess)) {
 828		ret = PTR_ERR(tl_nexus->se_sess);
 829		kfree(tl_nexus);
 830		return ret;
 831	}
 832
 833	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 834		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 835		name);
 836	return 0;
 837}
 838
 839static int tcm_loop_drop_nexus(
 840	struct tcm_loop_tpg *tpg)
 841{
 842	struct se_session *se_sess;
 843	struct tcm_loop_nexus *tl_nexus;
 844
 845	tl_nexus = tpg->tl_nexus;
 846	if (!tl_nexus)
 847		return -ENODEV;
 848
 849	se_sess = tl_nexus->se_sess;
 850	if (!se_sess)
 851		return -ENODEV;
 852
 853	if (atomic_read(&tpg->tl_tpg_port_count)) {
 854		pr_err("Unable to remove TCM_Loop I_T Nexus with"
 855			" active TPG port count: %d\n",
 856			atomic_read(&tpg->tl_tpg_port_count));
 857		return -EPERM;
 858	}
 859
 860	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
 861		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
 862		tl_nexus->se_sess->se_node_acl->initiatorname);
 863	/*
 864	 * Release the SCSI I_T Nexus to the emulated Target Port
 865	 */
 866	transport_deregister_session(tl_nexus->se_sess);
 867	tpg->tl_nexus = NULL;
 868	kfree(tl_nexus);
 869	return 0;
 870}
 871
 872/* End items for tcm_loop_nexus_cit */
 873
 874static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
 875{
 876	struct se_portal_group *se_tpg = to_tpg(item);
 877	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 878			struct tcm_loop_tpg, tl_se_tpg);
 879	struct tcm_loop_nexus *tl_nexus;
 880	ssize_t ret;
 881
 882	tl_nexus = tl_tpg->tl_nexus;
 883	if (!tl_nexus)
 884		return -ENODEV;
 885
 886	ret = snprintf(page, PAGE_SIZE, "%s\n",
 887		tl_nexus->se_sess->se_node_acl->initiatorname);
 888
 889	return ret;
 890}
 891
 892static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
 893		const char *page, size_t count)
 894{
 895	struct se_portal_group *se_tpg = to_tpg(item);
 896	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 897			struct tcm_loop_tpg, tl_se_tpg);
 898	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 899	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
 900	int ret;
 901	/*
  902	 * Shut down the active I_T nexus if 'NULL' is passed.
 903	 */
 904	if (!strncmp(page, "NULL", 4)) {
 905		ret = tcm_loop_drop_nexus(tl_tpg);
 906		return (!ret) ? count : ret;
 907	}
 908	/*
 909	 * Otherwise make sure the passed virtual Initiator port WWN matches
 910	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
 911	 * tcm_loop_make_nexus()
 912	 */
 913	if (strlen(page) >= TL_WWN_ADDR_LEN) {
  914		pr_err("Emulated NAA SAS Address: %s, exceeds"
 915				" max: %d\n", page, TL_WWN_ADDR_LEN);
 916		return -EINVAL;
 917	}
 918	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
 919
 920	ptr = strstr(i_port, "naa.");
 921	if (ptr) {
 922		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
 923			pr_err("Passed SAS Initiator Port %s does not"
 924				" match target port protoid: %s\n", i_port,
 925				tcm_loop_dump_proto_id(tl_hba));
 926			return -EINVAL;
 927		}
 928		port_ptr = &i_port[0];
 929		goto check_newline;
 930	}
 931	ptr = strstr(i_port, "fc.");
 932	if (ptr) {
 933		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
 934			pr_err("Passed FCP Initiator Port %s does not"
 935				" match target port protoid: %s\n", i_port,
 936				tcm_loop_dump_proto_id(tl_hba));
 937			return -EINVAL;
 938		}
 939		port_ptr = &i_port[3]; /* Skip over "fc." */
 940		goto check_newline;
 941	}
 942	ptr = strstr(i_port, "iqn.");
 943	if (ptr) {
 944		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
 945			pr_err("Passed iSCSI Initiator Port %s does not"
 946				" match target port protoid: %s\n", i_port,
 947				tcm_loop_dump_proto_id(tl_hba));
 948			return -EINVAL;
 949		}
 950		port_ptr = &i_port[0];
 951		goto check_newline;
 952	}
 953	pr_err("Unable to locate prefix for emulated Initiator Port:"
 954			" %s\n", i_port);
 955	return -EINVAL;
 956	/*
 957	 * Clear any trailing newline for the NAA WWN
 958	 */
 959check_newline:
 960	if (i_port[strlen(i_port)-1] == '\n')
 961		i_port[strlen(i_port)-1] = '\0';
 962
 963	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
 964	if (ret < 0)
 965		return ret;
 966
 967	return count;
 968}
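/*
 * Illustrative only: the nexus attribute is written with an initiator-side
 * WWN whose prefix (naa., fc. or iqn.) must match the protocol chosen when
 * the HBA WWN was created; writing "NULL" drops the nexus again. All names
 * below are placeholder examples:
 *
 *   echo naa.60014051234567890 > \
 *       /sys/kernel/config/target/loopback/naa.60014055e2f0aa87/tpgt_1/nexus
 *   echo NULL > \
 *       /sys/kernel/config/target/loopback/naa.60014055e2f0aa87/tpgt_1/nexus
 */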
 969
 970static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
 971		char *page)
 972{
 973	struct se_portal_group *se_tpg = to_tpg(item);
 974	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 975			struct tcm_loop_tpg, tl_se_tpg);
 976	const char *status = NULL;
 977	ssize_t ret = -EINVAL;
 978
 979	switch (tl_tpg->tl_transport_status) {
 980	case TCM_TRANSPORT_ONLINE:
 981		status = "online";
 982		break;
 983	case TCM_TRANSPORT_OFFLINE:
 984		status = "offline";
 985		break;
 986	default:
 987		break;
 988	}
 989
 990	if (status)
 991		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
 992
 993	return ret;
 994}
 995
 996static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
 997		const char *page, size_t count)
 998{
 999	struct se_portal_group *se_tpg = to_tpg(item);
1000	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1001			struct tcm_loop_tpg, tl_se_tpg);
1002
1003	if (!strncmp(page, "online", 6)) {
1004		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1005		return count;
1006	}
1007	if (!strncmp(page, "offline", 7)) {
1008		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1009		if (tl_tpg->tl_nexus) {
1010			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
1011
1012			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
1013		}
1014		return count;
1015	}
1016	return -EINVAL;
1017}
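/*
 * Illustrative only (placeholder WWN): transport_status accepts exactly the
 * two keywords handled above; going offline with an active nexus also queues
 * a nexus-loss Unit Attention via core_allocate_nexus_loss_ua():
 *
 *   echo offline > /sys/kernel/config/target/loopback/naa.60014055e2f0aa87/tpgt_1/transport_status
 *   echo online  > /sys/kernel/config/target/loopback/naa.60014055e2f0aa87/tpgt_1/transport_status
 */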
1018
1019static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
1020					 char *page)
1021{
1022	struct se_portal_group *se_tpg = to_tpg(item);
1023	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1024			struct tcm_loop_tpg, tl_se_tpg);
1025	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1026
1027	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
1028			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
1029}
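/*
 * Illustrative only: reading the address attribute yields the
 * "<host_no>:0:<tpgt>" portion of the emulated H:C:T:L, which can be
 * matched against lsscsi or /proc/scsi/scsi output to locate the
 * struct scsi_device added by tcm_loop_port_link(). With a placeholder WWN:
 *
 *   cat /sys/kernel/config/target/loopback/naa.60014055e2f0aa87/tpgt_1/address
 *
 * might print something like "7:0:1".
 */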
1030
1031CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
1032CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
1033CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
1034
1035static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1036	&tcm_loop_tpg_attr_nexus,
1037	&tcm_loop_tpg_attr_transport_status,
1038	&tcm_loop_tpg_attr_address,
1039	NULL,
1040};
1041
1042/* Start items for tcm_loop_naa_cit */
1043
1044static struct se_portal_group *tcm_loop_make_naa_tpg(
1045	struct se_wwn *wwn,
1046	struct config_group *group,
1047	const char *name)
1048{
1049	struct tcm_loop_hba *tl_hba = container_of(wwn,
1050			struct tcm_loop_hba, tl_hba_wwn);
1051	struct tcm_loop_tpg *tl_tpg;
1052	int ret;
1053	unsigned long tpgt;
1054
1055	if (strstr(name, "tpgt_") != name) {
1056		pr_err("Unable to locate \"tpgt_#\" directory"
1057				" group\n");
1058		return ERR_PTR(-EINVAL);
1059	}
1060	if (kstrtoul(name+5, 10, &tpgt))
1061		return ERR_PTR(-EINVAL);
1062
1063	if (tpgt >= TL_TPGS_PER_HBA) {
1064		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
1065				" %u\n", tpgt, TL_TPGS_PER_HBA);
1066		return ERR_PTR(-EINVAL);
1067	}
1068	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1069	tl_tpg->tl_hba = tl_hba;
1070	tl_tpg->tl_tpgt = tpgt;
1071	/*
 1072	 * Register the tl_tpg as an emulated TCM Target Endpoint
1073	 */
1074	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
1075	if (ret < 0)
 1076		return ERR_PTR(ret);
1077
1078	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1079		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
1080		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1081
1082	return &tl_tpg->tl_se_tpg;
1083}
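/*
 * Illustrative only: this function runs when a tpgt_<n> directory is created
 * under an existing loopback WWN; rmdir of that directory takes the
 * tcm_loop_drop_naa_tpg() path below. Placeholder WWN:
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014055e2f0aa87/tpgt_1
 */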
1084
1085static void tcm_loop_drop_naa_tpg(
1086	struct se_portal_group *se_tpg)
1087{
1088	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1089	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1090				struct tcm_loop_tpg, tl_se_tpg);
1091	struct tcm_loop_hba *tl_hba;
1092	unsigned short tpgt;
1093
1094	tl_hba = tl_tpg->tl_hba;
1095	tpgt = tl_tpg->tl_tpgt;
1096	/*
1097	 * Release the I_T Nexus for the Virtual target link if present
1098	 */
1099	tcm_loop_drop_nexus(tl_tpg);
1100	/*
 1101	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
1102	 */
1103	core_tpg_deregister(se_tpg);
1104
1105	tl_tpg->tl_hba = NULL;
1106	tl_tpg->tl_tpgt = 0;
1107
1108	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1109		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1110		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1111}
1112
1113/* End items for tcm_loop_naa_cit */
1114
1115/* Start items for tcm_loop_cit */
1116
1117static struct se_wwn *tcm_loop_make_scsi_hba(
1118	struct target_fabric_configfs *tf,
1119	struct config_group *group,
1120	const char *name)
1121{
1122	struct tcm_loop_hba *tl_hba;
1123	struct Scsi_Host *sh;
1124	char *ptr;
1125	int ret, off = 0;
1126
1127	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1128	if (!tl_hba) {
1129		pr_err("Unable to allocate struct tcm_loop_hba\n");
1130		return ERR_PTR(-ENOMEM);
1131	}
1132	/*
1133	 * Determine the emulated Protocol Identifier and Target Port Name
1134	 * based on the incoming configfs directory name.
1135	 */
1136	ptr = strstr(name, "naa.");
1137	if (ptr) {
1138		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1139		goto check_len;
1140	}
1141	ptr = strstr(name, "fc.");
1142	if (ptr) {
1143		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1144		off = 3; /* Skip over "fc." */
1145		goto check_len;
1146	}
1147	ptr = strstr(name, "iqn.");
1148	if (!ptr) {
1149		pr_err("Unable to locate prefix for emulated Target "
1150				"Port: %s\n", name);
1151		ret = -EINVAL;
1152		goto out;
1153	}
1154	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1155
1156check_len:
1157	if (strlen(name) >= TL_WWN_ADDR_LEN) {
1158		pr_err("Emulated NAA %s Address: %s, exceeds"
 1159			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
1160			TL_WWN_ADDR_LEN);
1161		ret = -EINVAL;
1162		goto out;
1163	}
1164	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1165
1166	/*
 1167	 * tcm_loop_setup_hba_bus() calls device_register() on tl_hba->dev to
 1168	 * register the emulated Linux/SCSI LLD; the struct Scsi_Host at
 1169	 * tl_hba->sh is set up by the tcm_loop_driver_probe() bus callback.
1170	 */
1171	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1172	if (ret)
1173		goto out;
1174
1175	sh = tl_hba->sh;
1176	tcm_loop_hba_no_cnt++;
1177	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1178		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1179		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1180
1181	return &tl_hba->tl_hba_wwn;
1182out:
1183	kfree(tl_hba);
1184	return ERR_PTR(ret);
1185}
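/*
 * Illustrative only: the WWN directory name selects the emulated protocol
 * via its prefix, exactly as parsed above. The three names below are
 * placeholder examples:
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014055e2f0aa87            (SAS)
 *   mkdir /sys/kernel/config/target/loopback/fc.20000000c9a1b2c3             (FCP)
 *   mkdir /sys/kernel/config/target/loopback/iqn.2003-01.org.example:hba0    (iSCSI)
 */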
1186
1187static void tcm_loop_drop_scsi_hba(
1188	struct se_wwn *wwn)
1189{
1190	struct tcm_loop_hba *tl_hba = container_of(wwn,
1191				struct tcm_loop_hba, tl_hba_wwn);
1192
1193	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1194		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1195		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1196		tl_hba->sh->host_no);
1197	/*
1198	 * Call device_unregister() on the original tl_hba->dev.
 1199	 * The tcm_loop_release_adapter() device release callback will
 1200	 * then free *tl_hba.
1201	 */
1202	device_unregister(&tl_hba->dev);
1203}
1204
1205/* Start items for tcm_loop_cit */
1206static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1207{
1208	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1209}
1210
1211CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
1212
1213static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1214	&tcm_loop_wwn_attr_version,
1215	NULL,
1216};
1217
1218/* End items for tcm_loop_cit */
1219
1220static const struct target_core_fabric_ops loop_ops = {
1221	.module				= THIS_MODULE,
1222	.name				= "loopback",
1223	.get_fabric_name		= tcm_loop_get_fabric_name,
1224	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
1225	.tpg_get_tag			= tcm_loop_get_tag,
1226	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
1227	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
1228	.tpg_check_demo_mode_write_protect =
1229				tcm_loop_check_demo_mode_write_protect,
1230	.tpg_check_prod_mode_write_protect =
1231				tcm_loop_check_prod_mode_write_protect,
1232	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
1233	.tpg_get_inst_index		= tcm_loop_get_inst_index,
1234	.check_stop_free		= tcm_loop_check_stop_free,
1235	.release_cmd			= tcm_loop_release_cmd,
1236	.sess_get_index			= tcm_loop_sess_get_index,
1237	.write_pending			= tcm_loop_write_pending,
1238	.write_pending_status		= tcm_loop_write_pending_status,
1239	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
1240	.get_cmd_state			= tcm_loop_get_cmd_state,
1241	.queue_data_in			= tcm_loop_queue_data_in,
1242	.queue_status			= tcm_loop_queue_status,
1243	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
1244	.aborted_task			= tcm_loop_aborted_task,
1245	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
1246	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
1247	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
1248	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
1249	.fabric_post_link		= tcm_loop_port_link,
1250	.fabric_pre_unlink		= tcm_loop_port_unlink,
1251	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
1252	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
1253	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
1254};
1255
1256static int __init tcm_loop_fabric_init(void)
1257{
1258	int ret = -ENOMEM;
1259
1260	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1261	if (!tcm_loop_workqueue)
1262		goto out;
1263
1264	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1265				sizeof(struct tcm_loop_cmd),
1266				__alignof__(struct tcm_loop_cmd),
1267				0, NULL);
1268	if (!tcm_loop_cmd_cache) {
 1269		pr_err("kmem_cache_create() for"
1270			" tcm_loop_cmd_cache failed\n");
1271		goto out_destroy_workqueue;
1272	}
1273
1274	ret = tcm_loop_alloc_core_bus();
1275	if (ret)
1276		goto out_destroy_cache;
1277
1278	ret = target_register_template(&loop_ops);
1279	if (ret)
1280		goto out_release_core_bus;
1281
1282	return 0;
1283
1284out_release_core_bus:
1285	tcm_loop_release_core_bus();
1286out_destroy_cache:
1287	kmem_cache_destroy(tcm_loop_cmd_cache);
1288out_destroy_workqueue:
1289	destroy_workqueue(tcm_loop_workqueue);
1290out:
1291	return ret;
1292}
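/*
 * Illustrative only: after this init routine registers the loop_ops template
 * (typically via "modprobe tcm_loop"), userspace can instantiate the fabric
 * by creating /sys/kernel/config/target/loopback/ (assuming configfs is
 * mounted at /sys/kernel/config), at which point the WWN/TPG/nexus/LUN
 * operations sketched in the comments above become available.
 */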
1293
1294static void __exit tcm_loop_fabric_exit(void)
1295{
1296	target_unregister_template(&loop_ops);
1297	tcm_loop_release_core_bus();
1298	kmem_cache_destroy(tcm_loop_cmd_cache);
1299	destroy_workqueue(tcm_loop_workqueue);
1300}
1301
1302MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1303MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1304MODULE_LICENSE("GPL");
1305module_init(tcm_loop_fabric_init);
1306module_exit(tcm_loop_fabric_exit);