v6.13.7
   1/*******************************************************************************
   2 *
   3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
   4 * for emulated SAS initiator ports
   5 *
   6 * © Copyright 2011-2013 Datera, Inc.
   7 *
   8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   9 *
  10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 ****************************************************************************/
  22
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/init.h>
  26#include <linux/slab.h>
  27#include <linux/types.h>
  28#include <linux/configfs.h>
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_tcq.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_cmnd.h>
  34
  35#include <target/target_core_base.h>
  36#include <target/target_core_fabric.h>
  37
  38#include "tcm_loop.h"
  39
  40#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
  41
 
  42static struct kmem_cache *tcm_loop_cmd_cache;
  43
  44static int tcm_loop_hba_no_cnt;
  45
  46static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  47
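/*
 * Module parameters sizing the emulated Scsi_Host created for each
 * loopback HBA; tcm_loop_driver_probe() copies them into the host, so
 * changing them at runtime only affects subsequently created HBAs.
 */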
  48static unsigned int tcm_loop_nr_hw_queues = 1;
  49module_param_named(nr_hw_queues, tcm_loop_nr_hw_queues, uint, 0644);
  50
  51static unsigned int tcm_loop_can_queue = 1024;
  52module_param_named(can_queue, tcm_loop_can_queue, uint, 0644);
  53
  54static unsigned int tcm_loop_cmd_per_lun = 1024;
  55module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644);
  56
  57/*
  58 * Called from struct target_core_fabric_ops->check_stop_free()
  59 */
  60static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
  61{
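	/*
	 * Drop the fabric reference here; the return value tells the target
	 * core whether the command has already been released.
	 */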
  62	return transport_generic_free_cmd(se_cmd, 0);
  63}
  64
  65static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
  66{
  67	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
  68				struct tcm_loop_cmd, tl_se_cmd);
  69	struct scsi_cmnd *sc = tl_cmd->sc;
  70
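	/*
	 * TMR descriptors were allocated from tcm_loop_cmd_cache in
	 * tcm_loop_issue_tmr(), so free them here; regular I/O descriptors
	 * live in the scsi_cmnd's private area, so completing the command
	 * with scsi_done() is all that is needed.
	 */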
  71	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
  72		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  73	else
  74		scsi_done(sc);
  75}
  76
  77static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
  78{
  79	seq_puts(m, "tcm_loop_proc_info()\n");
  80	return 0;
  81}
  82
  83static int tcm_loop_driver_probe(struct device *);
  84static void tcm_loop_driver_remove(struct device *);
  85
  86static const struct bus_type tcm_loop_lld_bus = {
  87	.name			= "tcm_loop_bus",
 
  88	.probe			= tcm_loop_driver_probe,
  89	.remove			= tcm_loop_driver_remove,
  90};
  91
  92static struct device_driver tcm_loop_driverfs = {
  93	.name			= "tcm_loop",
  94	.bus			= &tcm_loop_lld_bus,
  95};
  96/*
  97 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
  98 */
  99static struct device *tcm_loop_primary;
 100
 101static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
 102{
 
 
 103	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
 104	struct scsi_cmnd *sc = tl_cmd->sc;
 105	struct tcm_loop_nexus *tl_nexus;
 106	struct tcm_loop_hba *tl_hba;
 107	struct tcm_loop_tpg *tl_tpg;
 108	struct scatterlist *sgl_bidi = NULL;
 109	u32 sgl_bidi_count = 0, transfer_length;
 
 110
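	/*
	 * sh->hostdata was set in tcm_loop_driver_probe() to point back at
	 * the owning tcm_loop_hba, hence the double dereference; the SCSI
	 * target id then selects the TPG on that HBA.
	 */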
 111	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 112	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 113
 114	/*
 115	 * Ensure that this tl_tpg reference from the incoming sc->device->id
 116	 * has already been configured via tcm_loop_make_naa_tpg().
 117	 */
 118	if (!tl_tpg->tl_hba) {
 119		set_host_byte(sc, DID_NO_CONNECT);
 120		goto out_done;
 121	}
 122	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
 123		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
 124		goto out_done;
 125	}
 126	tl_nexus = tl_tpg->tl_nexus;
 127	if (!tl_nexus) {
 128		scmd_printk(KERN_ERR, sc,
 129			    "TCM_Loop I_T Nexus does not exist\n");
 130		set_host_byte(sc, DID_ERROR);
 131		goto out_done;
 132	}
 133
 134	transfer_length = scsi_transfer_length(sc);
 135	if (!scsi_prot_sg_count(sc) &&
 136	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
 137		se_cmd->prot_pto = true;
 138		/*
 139		 * loopback transport doesn't support
 140		 * WRITE_GENERATE, READ_STRIP protection
 141		 * information operations, go ahead unprotected.
 142		 */
 143		transfer_length = scsi_bufflen(sc);
 144	}
 145
 146	se_cmd->tag = tl_cmd->sc_cmd_tag;
 147	target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0],
 148			tl_cmd->sc->device->lun, transfer_length,
 149			TCM_SIMPLE_TAG, sc->sc_data_direction, 0);
 150
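	/*
	 * target_submit_prep() attaches the CDB and maps the scatterlists
	 * into the se_cmd.  A non-zero return means setup failed; the
	 * target core is expected to generate the error completion in that
	 * case, so just return without calling target_submit().
	 */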
 151	if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
 152			       scsi_sg_count(sc), sgl_bidi, sgl_bidi_count,
 153			       scsi_prot_sglist(sc), scsi_prot_sg_count(sc),
 154			       GFP_ATOMIC))
 155		return;
 156
 157	target_submit(se_cmd);
 158	return;
 159
 160out_done:
 161	scsi_done(sc);
 
 
 162}
 163
 164/*
 165 * ->queuecommand can be and usually is called from interrupt context, so
 166 * defer the actual submission to a workqueue.
 167 */
 168static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 169{
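	/*
	 * The tcm_loop_cmd is carved out of the scsi_cmnd's private area
	 * (.cmd_size in the host template), so no allocation is needed in
	 * this path.
	 */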
 170	struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
 171
 172	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
 173		 __func__, sc->device->host->host_no, sc->device->id,
 174		 sc->device->channel, sc->device->lun, sc->cmnd[0],
 175		 scsi_bufflen(sc));
 176
 177	memset(tl_cmd, 0, sizeof(*tl_cmd));
 178	tl_cmd->sc = sc;
 179	tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;
 180
 181	tcm_loop_target_queue_cmd(tl_cmd);
 182	return 0;
 183}
 184
  185/*
  186 * Called from SCSI EH process context to issue an ABORT_TASK or
  187 * LUN_RESET TMR against a struct scsi_device
  188 */
 189static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 190			      u64 lun, int task, enum tcm_tmreq_table tmr)
 191{
 192	struct se_cmd *se_cmd;
 193	struct se_session *se_sess;
 
 194	struct tcm_loop_nexus *tl_nexus;
 195	struct tcm_loop_cmd *tl_cmd;
 
 196	int ret = TMR_FUNCTION_FAILED, rc;
 197
 198	/*
 199	 * Locate the tl_nexus and se_sess pointers
 200	 */
 201	tl_nexus = tl_tpg->tl_nexus;
 202	if (!tl_nexus) {
 203		pr_err("Unable to perform device reset without active I_T Nexus\n");
 
 204		return ret;
 205	}
 206
 207	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 208	if (!tl_cmd)
 
 209		return ret;
 
 210
 211	init_completion(&tl_cmd->tmr_done);
 212
 213	se_cmd = &tl_cmd->tl_se_cmd;
 
 214	se_sess = tl_tpg->tl_nexus->se_sess;
 215
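	/*
	 * Queue the TMR to the target core and wait for
	 * tcm_loop_queue_tm_rsp() to complete tmr_done.  TARGET_SCF_ACK_KREF
	 * keeps an extra reference on the se_cmd until it is dropped via
	 * target_put_sess_cmd() below.
	 */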
 216	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
 217			       NULL, tmr, GFP_KERNEL, task,
 218			       TARGET_SCF_ACK_KREF);
 219	if (rc < 0)
 220		goto release;
 221	wait_for_completion(&tl_cmd->tmr_done);
 222	ret = se_cmd->se_tmr_req->response;
 223	target_put_sess_cmd(se_cmd);
 224
 225out:
 226	return ret;
 227
 228release:
 229	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 230	goto out;
 231}
 232
 233static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 234{
 235	struct tcm_loop_hba *tl_hba;
 236	struct tcm_loop_tpg *tl_tpg;
 237	int ret;
 238
 239	/*
 240	 * Locate the tcm_loop_hba_t pointer
 241	 */
 242	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 243	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
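	/*
	 * The block layer tag for this request identifies the command to
	 * abort; target_submit_tmr() records it as the TMR's ref_task_tag.
	 */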
 244	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 245				 scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);
 246	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 247}
 248
 249/*
 250 * Called from SCSI EH process context to issue a LUN_RESET TMR
 251 * to struct scsi_device
 252 */
 253static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 254{
 255	struct tcm_loop_hba *tl_hba;
 256	struct tcm_loop_tpg *tl_tpg;
 257	int ret;
 258
 259	/*
 260	 * Locate the tcm_loop_hba_t pointer
 261	 */
 262	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 263	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 264
 265	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 266				 0, TMR_LUN_RESET);
 267	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 268}
 269
 270static int tcm_loop_target_reset(struct scsi_cmnd *sc)
 271{
 272	struct tcm_loop_hba *tl_hba;
 273	struct tcm_loop_tpg *tl_tpg;
 274
 275	/*
 276	 * Locate the tcm_loop_hba_t pointer
 277	 */
 278	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 279	if (!tl_hba) {
 280		pr_err("Unable to perform device reset without active I_T Nexus\n");
 
 281		return FAILED;
 282	}
 283	/*
 284	 * Locate the tl_tpg pointer from TargetID in sc->device->id
 285	 */
 286	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 287	if (tl_tpg) {
 288		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 289		return SUCCESS;
 290	}
 291	return FAILED;
 292}
 293
 294static const struct scsi_host_template tcm_loop_driver_template = {
 295	.show_info		= tcm_loop_show_info,
 296	.proc_name		= "tcm_loopback",
 297	.name			= "TCM_Loopback",
 298	.queuecommand		= tcm_loop_queuecommand,
 299	.change_queue_depth	= scsi_change_queue_depth,
 300	.eh_abort_handler = tcm_loop_abort_task,
 301	.eh_device_reset_handler = tcm_loop_device_reset,
 302	.eh_target_reset_handler = tcm_loop_target_reset,
 
 303	.this_id		= -1,
 304	.sg_tablesize		= 256,
 
 305	.max_sectors		= 0xFFFF,
 306	.dma_boundary		= PAGE_SIZE - 1,
 
 307	.module			= THIS_MODULE,
 308	.track_queue_depth	= 1,
 309	.cmd_size		= sizeof(struct tcm_loop_cmd),
 310};
 311
 312static int tcm_loop_driver_probe(struct device *dev)
 313{
 314	struct tcm_loop_hba *tl_hba;
 315	struct Scsi_Host *sh;
 316	int error, host_prot;
 317
 318	tl_hba = to_tcm_loop_hba(dev);
 319
 320	sh = scsi_host_alloc(&tcm_loop_driver_template,
 321			sizeof(struct tcm_loop_hba));
 322	if (!sh) {
 323		pr_err("Unable to allocate struct scsi_host\n");
 324		return -ENODEV;
 325	}
 326	tl_hba->sh = sh;
 327
 328	/*
 329	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
 330	 */
 331	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
 332	/*
 333	 * Setup single ID, Channel and LUN for now..
 334	 */
 335	sh->max_id = 2;
 336	sh->max_lun = 0;
 337	sh->max_channel = 0;
 338	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 339	sh->nr_hw_queues = tcm_loop_nr_hw_queues;
 340	sh->can_queue = tcm_loop_can_queue;
 341	sh->cmd_per_lun = tcm_loop_cmd_per_lun;
 342
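	/*
	 * Advertise every DIF/DIX protection type so T10-PI capable
	 * backstores can be exercised through the loopback host; the guard
	 * set below selects the T10 CRC checksum.
	 */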
 343	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
 344		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
 345		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
 346
 347	scsi_host_set_prot(sh, host_prot);
 348	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
 349
 350	error = scsi_add_host(sh, &tl_hba->dev);
 351	if (error) {
 352		pr_err("%s: scsi_add_host failed\n", __func__);
 353		scsi_host_put(sh);
 354		return -ENODEV;
 355	}
 356	return 0;
 357}
 358
 359static void tcm_loop_driver_remove(struct device *dev)
 360{
 361	struct tcm_loop_hba *tl_hba;
 362	struct Scsi_Host *sh;
 363
 364	tl_hba = to_tcm_loop_hba(dev);
 365	sh = tl_hba->sh;
 366
 367	scsi_remove_host(sh);
 368	scsi_host_put(sh);
 
 369}
 370
 371static void tcm_loop_release_adapter(struct device *dev)
 372{
 373	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
 374
 375	kfree(tl_hba);
 376}
 377
 378/*
 379 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
 380 */
 381static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
 382{
 383	int ret;
 384
 385	tl_hba->dev.bus = &tcm_loop_lld_bus;
 386	tl_hba->dev.parent = tcm_loop_primary;
 387	tl_hba->dev.release = &tcm_loop_release_adapter;
 388	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
 389
 390	ret = device_register(&tl_hba->dev);
 391	if (ret) {
 392		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
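		/*
		 * device_register() requires put_device() on failure rather
		 * than a direct kfree(); the release callback
		 * (tcm_loop_release_adapter) then frees tl_hba.
		 */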
 393		put_device(&tl_hba->dev);
 394		return -ENODEV;
 395	}
 396
 397	return 0;
 398}
 399
 400/*
  401 * Called from tcm_loop_fabric_init() below to load the emulated
 402 * tcm_loop SCSI bus.
 403 */
 404static int tcm_loop_alloc_core_bus(void)
 405{
 406	int ret;
 407
 408	tcm_loop_primary = root_device_register("tcm_loop_0");
 409	if (IS_ERR(tcm_loop_primary)) {
 410		pr_err("Unable to allocate tcm_loop_primary\n");
 411		return PTR_ERR(tcm_loop_primary);
 412	}
 413
 414	ret = bus_register(&tcm_loop_lld_bus);
 415	if (ret) {
 416		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 417		goto dev_unreg;
 418	}
 419
 420	ret = driver_register(&tcm_loop_driverfs);
 421	if (ret) {
 422		pr_err("driver_register() failed for tcm_loop_driverfs\n");
 
 423		goto bus_unreg;
 424	}
 425
 426	pr_debug("Initialized TCM Loop Core Bus\n");
 427	return ret;
 428
 429bus_unreg:
 430	bus_unregister(&tcm_loop_lld_bus);
 431dev_unreg:
 432	root_device_unregister(tcm_loop_primary);
 433	return ret;
 434}
 435
 436static void tcm_loop_release_core_bus(void)
 437{
 438	driver_unregister(&tcm_loop_driverfs);
 439	bus_unregister(&tcm_loop_lld_bus);
 440	root_device_unregister(tcm_loop_primary);
 441
 442	pr_debug("Releasing TCM Loop Core BUS\n");
 443}
 444
 445static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
 446{
 447	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 448}
 449
 450static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 451{
 452	/*
 453	 * Return the passed NAA identifier for the Target Port
 454	 */
 455	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
 456}
 457
 458static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 459{
 460	/*
 461	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
 462	 * to represent the SCSI Target Port.
 463	 */
 464	return tl_tpg(se_tpg)->tl_tpgt;
 465}
 466
 467/*
 468 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 469 * based upon the incoming fabric dependent SCSI Initiator Port
 470 */
 471static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
 472{
 473	return 1;
 474}
 475
 476static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
 477{
 478	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 479						   tl_se_tpg);
 480	return tl_tpg->tl_fabric_prot_type;
 481}
 482
 483static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
 484{
 485	return 1;
 486}
 487
 488static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
 489{
 490	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 491			struct tcm_loop_cmd, tl_se_cmd);
 492
 493	return tl_cmd->sc_cmd_state;
 494}
 495
 496static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 497{
 498	/*
 499	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
 500	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
 501	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
 502	 * format with transport_generic_map_mem_to_cmd().
 503	 *
 504	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 505	 * object execution queue.
 506	 */
 507	target_execute_cmd(se_cmd);
 508	return 0;
 509}
 510
 511static int tcm_loop_queue_data_or_status(const char *func,
 512		struct se_cmd *se_cmd, u8 scsi_status)
 513{
 514	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 515				struct tcm_loop_cmd, tl_se_cmd);
 516	struct scsi_cmnd *sc = tl_cmd->sc;
 517
 518	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
 519		 func, sc, sc->cmnd[0]);
 520
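	/*
	 * If the backstore produced sense data, copy it into the midlayer's
	 * sense buffer and report CHECK CONDITION; otherwise hand back the
	 * SAM status.  Residuals are set so over/underflow transfers report
	 * the actual amount of data moved.
	 */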
 521	if (se_cmd->sense_buffer &&
 522	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 523	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 524
 525		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 526				SCSI_SENSE_BUFFERSIZE);
 527		sc->result = SAM_STAT_CHECK_CONDITION;
 
 528	} else
 529		sc->result = scsi_status;
 530
 531	set_host_byte(sc, DID_OK);
 532	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 533	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 534		scsi_set_resid(sc, se_cmd->residual_count);
 
 535	return 0;
 536}
 537
 538static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 539{
 540	return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
 541}
 542
 543static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 544{
 545	return tcm_loop_queue_data_or_status(__func__,
 546					     se_cmd, se_cmd->scsi_status);
 547}
 548
 549static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 550{
 551	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 552				struct tcm_loop_cmd, tl_se_cmd);
 553
 554	/* Wake up tcm_loop_issue_tmr(). */
 555	complete(&tl_cmd->tmr_done);
 556}
 557
 558static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
 559{
 560	return;
 561}
 562
 563static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 564{
 565	switch (tl_hba->tl_proto_id) {
 566	case SCSI_PROTOCOL_SAS:
 567		return "SAS";
 568	case SCSI_PROTOCOL_FCP:
 569		return "FCP";
 570	case SCSI_PROTOCOL_ISCSI:
 571		return "iSCSI";
 572	default:
 573		break;
 574	}
 575
 576	return "Unknown";
 577}
 578
 579/* Start items for tcm_loop_port_cit */
 580
 581static int tcm_loop_port_link(
 582	struct se_portal_group *se_tpg,
 583	struct se_lun *lun)
 584{
 585	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 586				struct tcm_loop_tpg, tl_se_tpg);
 587	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 588
 589	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
 590	/*
 591	 * Add Linux/SCSI struct scsi_device by HCTL
 592	 */
 593	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 594
 595	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 596	return 0;
 597}
 598
 599static void tcm_loop_port_unlink(
 600	struct se_portal_group *se_tpg,
 601	struct se_lun *se_lun)
 602{
 603	struct scsi_device *sd;
 604	struct tcm_loop_hba *tl_hba;
 605	struct tcm_loop_tpg *tl_tpg;
 606
 607	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 608	tl_hba = tl_tpg->tl_hba;
 609
 610	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 611				se_lun->unpacked_lun);
 612	if (!sd) {
 613		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
 614		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 615		return;
 616	}
 617	/*
 618	 * Remove Linux/SCSI struct scsi_device by HCTL
 619	 */
 620	scsi_remove_device(sd);
 621	scsi_device_put(sd);
 622
 623	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
 624
 625	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 626}
 627
 628/* End items for tcm_loop_port_cit */
 629
 630static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
 631		struct config_item *item, char *page)
 632{
 633	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 634	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 635						   tl_se_tpg);
 636
 637	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
 638}
 639
 640static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
 641		struct config_item *item, const char *page, size_t count)
 642{
 643	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 644	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 645						   tl_se_tpg);
 646	unsigned long val;
 647	int ret = kstrtoul(page, 0, &val);
 648
 649	if (ret) {
 650		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
 651		return ret;
 652	}
 653	if (val != 0 && val != 1 && val != 3) {
 654		pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
 655		return -EINVAL;
 656	}
 657	tl_tpg->tl_fabric_prot_type = val;
 658
 659	return count;
 660}
 661
 662CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
 663
 664static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 665	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
 666	NULL,
 667};
 668
 669/* Start items for tcm_loop_nexus_cit */
 670
 671static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
 672				  struct se_session *se_sess, void *p)
 673{
 674	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 675					struct tcm_loop_tpg, tl_se_tpg);
 676
 677	tl_tpg->tl_nexus = p;
 678	return 0;
 679}
 680
 681static int tcm_loop_make_nexus(
 682	struct tcm_loop_tpg *tl_tpg,
 683	const char *name)
 684{
 685	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 686	struct tcm_loop_nexus *tl_nexus;
 687	int ret;
 688
 689	if (tl_tpg->tl_nexus) {
 690		pr_debug("tl_tpg->tl_nexus already exists\n");
 691		return -EEXIST;
 692	}
 693
 694	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
 695	if (!tl_nexus)
 
 696		return -ENOMEM;
 
 697
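	/*
	 * target_setup_session() allocates the se_session and invokes
	 * tcm_loop_alloc_sess_cb() to publish the nexus on the TPG as part
	 * of session setup; DIN/DOUT_PASS lets protection information pass
	 * through unmodified in both directions.
	 */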
 698	tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
 699					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
 700					name, tl_nexus, tcm_loop_alloc_sess_cb);
 701	if (IS_ERR(tl_nexus->se_sess)) {
 702		ret = PTR_ERR(tl_nexus->se_sess);
 703		kfree(tl_nexus);
 704		return ret;
 705	}
 706
 707	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
 708		 tcm_loop_dump_proto_id(tl_hba), name);
 
 709	return 0;
 710}
 711
 712static int tcm_loop_drop_nexus(
 713	struct tcm_loop_tpg *tpg)
 714{
 715	struct se_session *se_sess;
 716	struct tcm_loop_nexus *tl_nexus;
 717
 718	tl_nexus = tpg->tl_nexus;
 719	if (!tl_nexus)
 720		return -ENODEV;
 721
 722	se_sess = tl_nexus->se_sess;
 723	if (!se_sess)
 724		return -ENODEV;
 725
 726	if (atomic_read(&tpg->tl_tpg_port_count)) {
 727		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
 728		       atomic_read(&tpg->tl_tpg_port_count));
 
 729		return -EPERM;
 730	}
 731
 732	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
 733		 tcm_loop_dump_proto_id(tpg->tl_hba),
 734		 tl_nexus->se_sess->se_node_acl->initiatorname);
 735	/*
 736	 * Release the SCSI I_T Nexus to the emulated Target Port
 737	 */
 738	target_remove_session(se_sess);
 739	tpg->tl_nexus = NULL;
 740	kfree(tl_nexus);
 741	return 0;
 742}
 743
 744/* End items for tcm_loop_nexus_cit */
 745
 746static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
 747{
 748	struct se_portal_group *se_tpg = to_tpg(item);
 749	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 750			struct tcm_loop_tpg, tl_se_tpg);
 751	struct tcm_loop_nexus *tl_nexus;
 752	ssize_t ret;
 753
 754	tl_nexus = tl_tpg->tl_nexus;
 755	if (!tl_nexus)
 756		return -ENODEV;
 757
 758	ret = snprintf(page, PAGE_SIZE, "%s\n",
 759		tl_nexus->se_sess->se_node_acl->initiatorname);
 760
 761	return ret;
 762}
 763
 764static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
 765		const char *page, size_t count)
 766{
 767	struct se_portal_group *se_tpg = to_tpg(item);
 768	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 769			struct tcm_loop_tpg, tl_se_tpg);
 770	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 771	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
 772	int ret;
 773	/*
 774	 * Shutdown the active I_T nexus if 'NULL' is passed..
 775	 */
 776	if (!strncmp(page, "NULL", 4)) {
 777		ret = tcm_loop_drop_nexus(tl_tpg);
 778		return (!ret) ? count : ret;
 779	}
 780	/*
 781	 * Otherwise make sure the passed virtual Initiator port WWN matches
 782	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
 783	 * tcm_loop_make_nexus()
 784	 */
 785	if (strlen(page) >= TL_WWN_ADDR_LEN) {
 786		pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
 787		       page, TL_WWN_ADDR_LEN);
 788		return -EINVAL;
 789	}
 790	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
 791
 792	ptr = strstr(i_port, "naa.");
 793	if (ptr) {
 794		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
 795			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
 796			       i_port, tcm_loop_dump_proto_id(tl_hba));
 
 797			return -EINVAL;
 798		}
 799		port_ptr = &i_port[0];
 800		goto check_newline;
 801	}
 802	ptr = strstr(i_port, "fc.");
 803	if (ptr) {
 804		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
 805			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
 806			       i_port, tcm_loop_dump_proto_id(tl_hba));
 
 807			return -EINVAL;
 808		}
 809		port_ptr = &i_port[3]; /* Skip over "fc." */
 810		goto check_newline;
 811	}
 812	ptr = strstr(i_port, "iqn.");
 813	if (ptr) {
 814		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
 815			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
 816			       i_port, tcm_loop_dump_proto_id(tl_hba));
 
 817			return -EINVAL;
 818		}
 819		port_ptr = &i_port[0];
 820		goto check_newline;
 821	}
 822	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
 823	       i_port);
 824	return -EINVAL;
 825	/*
 826	 * Clear any trailing newline for the NAA WWN
 827	 */
 828check_newline:
 829	if (i_port[strlen(i_port)-1] == '\n')
 830		i_port[strlen(i_port)-1] = '\0';
 831
 832	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
 833	if (ret < 0)
 834		return ret;
 835
 836	return count;
 837}
 838
 839static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
 840		char *page)
 841{
 842	struct se_portal_group *se_tpg = to_tpg(item);
 843	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 844			struct tcm_loop_tpg, tl_se_tpg);
 845	const char *status = NULL;
 846	ssize_t ret = -EINVAL;
 847
 848	switch (tl_tpg->tl_transport_status) {
 849	case TCM_TRANSPORT_ONLINE:
 850		status = "online";
 851		break;
 852	case TCM_TRANSPORT_OFFLINE:
 853		status = "offline";
 854		break;
 855	default:
 856		break;
 857	}
 858
 859	if (status)
 860		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
 861
 862	return ret;
 863}
 864
 865static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
 866		const char *page, size_t count)
 867{
 868	struct se_portal_group *se_tpg = to_tpg(item);
 869	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 870			struct tcm_loop_tpg, tl_se_tpg);
 871
 872	if (!strncmp(page, "online", 6)) {
 873		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 874		return count;
 875	}
 876	if (!strncmp(page, "offline", 7)) {
 877		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
 878		if (tl_tpg->tl_nexus) {
 879			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
 880
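			/*
			 * Post an I_T nexus loss unit attention so the
			 * initiator side sees that the port went away.
			 */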
 881			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
 882		}
 883		return count;
 884	}
 885	return -EINVAL;
 886}
 887
 888static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
 889					 char *page)
 890{
 891	struct se_portal_group *se_tpg = to_tpg(item);
 892	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 893			struct tcm_loop_tpg, tl_se_tpg);
 894	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 895
 896	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
 897			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
 898}
 899
 900CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
 901CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
 902CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
 903
 904static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
 905	&tcm_loop_tpg_attr_nexus,
 906	&tcm_loop_tpg_attr_transport_status,
 907	&tcm_loop_tpg_attr_address,
 908	NULL,
 909};
 910
 911/* Start items for tcm_loop_naa_cit */
 912
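/*
 * Creating loopback/<wwn>/tpgt_<N> in configfs lands here: the tpgt_
 * suffix is parsed into an index into the HBA's fixed tl_hba_tpgs[]
 * array, bounded by TL_TPGS_PER_HBA.
 */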
 913static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
 914						     const char *name)
 
 
 915{
 916	struct tcm_loop_hba *tl_hba = container_of(wwn,
 917			struct tcm_loop_hba, tl_hba_wwn);
 918	struct tcm_loop_tpg *tl_tpg;
 919	int ret;
 920	unsigned long tpgt;
 921
 922	if (strstr(name, "tpgt_") != name) {
 923		pr_err("Unable to locate \"tpgt_#\" directory group\n");
 
 924		return ERR_PTR(-EINVAL);
 925	}
 926	if (kstrtoul(name+5, 10, &tpgt))
 927		return ERR_PTR(-EINVAL);
 928
 929	if (tpgt >= TL_TPGS_PER_HBA) {
 930		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
 931		       tpgt, TL_TPGS_PER_HBA);
 932		return ERR_PTR(-EINVAL);
 933	}
 934	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
 935	tl_tpg->tl_hba = tl_hba;
 936	tl_tpg->tl_tpgt = tpgt;
 937	/*
 938	 * Register the tl_tpg as a emulated TCM Target Endpoint
 939	 */
 940	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
 941	if (ret < 0)
 942		return ERR_PTR(-ENOMEM);
 943
 944	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
 945		 tcm_loop_dump_proto_id(tl_hba),
 946		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
 
 947	return &tl_tpg->tl_se_tpg;
 948}
 949
 950static void tcm_loop_drop_naa_tpg(
 951	struct se_portal_group *se_tpg)
 952{
 953	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
 954	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 955				struct tcm_loop_tpg, tl_se_tpg);
 956	struct tcm_loop_hba *tl_hba;
 957	unsigned short tpgt;
 958
 959	tl_hba = tl_tpg->tl_hba;
 960	tpgt = tl_tpg->tl_tpgt;
 961	/*
 962	 * Release the I_T Nexus for the Virtual target link if present
 963	 */
 964	tcm_loop_drop_nexus(tl_tpg);
 965	/*
  966	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
 967	 */
 968	core_tpg_deregister(se_tpg);
 969
 970	tl_tpg->tl_hba = NULL;
 971	tl_tpg->tl_tpgt = 0;
 972
 973	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
 974		 tcm_loop_dump_proto_id(tl_hba),
 975		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
 976}
 977
 978/* End items for tcm_loop_naa_cit */
 979
 980/* Start items for tcm_loop_cit */
 981
 982static struct se_wwn *tcm_loop_make_scsi_hba(
 983	struct target_fabric_configfs *tf,
 984	struct config_group *group,
 985	const char *name)
 986{
 987	struct tcm_loop_hba *tl_hba;
 988	struct Scsi_Host *sh;
 989	char *ptr;
 990	int ret, off = 0;
 991
 992	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
 993	if (!tl_hba)
 
 994		return ERR_PTR(-ENOMEM);
 995
 996	/*
 997	 * Determine the emulated Protocol Identifier and Target Port Name
 998	 * based on the incoming configfs directory name.
 999	 */
1000	ptr = strstr(name, "naa.");
1001	if (ptr) {
1002		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1003		goto check_len;
1004	}
1005	ptr = strstr(name, "fc.");
1006	if (ptr) {
1007		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1008		off = 3; /* Skip over "fc." */
1009		goto check_len;
1010	}
1011	ptr = strstr(name, "iqn.");
1012	if (!ptr) {
1013		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
1014		       name);
1015		ret = -EINVAL;
1016		goto out;
1017	}
1018	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1019
1020check_len:
1021	if (strlen(name) >= TL_WWN_ADDR_LEN) {
1022		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
1023		       name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN);
 
1024		ret = -EINVAL;
1025		goto out;
1026	}
1027	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1028
1029	/*
1030	 * Call device_register(tl_hba->dev) to register the emulated
1031	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
1032	 * device_register() callbacks in tcm_loop_driver_probe()
1033	 */
1034	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1035	if (ret)
1036		return ERR_PTR(ret);
1037
1038	sh = tl_hba->sh;
1039	tcm_loop_hba_no_cnt++;
1040	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
1041		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
 
 
1042	return &tl_hba->tl_hba_wwn;
1043out:
1044	kfree(tl_hba);
1045	return ERR_PTR(ret);
1046}
1047
1048static void tcm_loop_drop_scsi_hba(
1049	struct se_wwn *wwn)
1050{
1051	struct tcm_loop_hba *tl_hba = container_of(wwn,
1052				struct tcm_loop_hba, tl_hba_wwn);
1053
1054	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
1055		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1056		 tl_hba->sh->host_no);
 
1057	/*
1058	 * Call device_unregister() on the original tl_hba->dev.
 1059	 * tcm_loop_release_adapter() above will
1060	 * release *tl_hba;
1061	 */
1062	device_unregister(&tl_hba->dev);
1063}
1064
1065/* Start items for tcm_loop_cit */
1066static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1067{
1068	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1069}
1070
1071CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
1072
1073static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1074	&tcm_loop_wwn_attr_version,
1075	NULL,
1076};
1077
1078/* End items for tcm_loop_cit */
1079
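/*
 * Fabric callbacks registered with the target core.  default_submit_type
 * of TARGET_QUEUE_SUBMIT (with direct_submit_supp cleared) means
 * target_submit() hands commands off to the target core's submission
 * workqueue rather than executing them in the submitter's context, which
 * is what makes calling it from ->queuecommand() safe.
 */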
1080static const struct target_core_fabric_ops loop_ops = {
1081	.module				= THIS_MODULE,
1082	.fabric_name			= "loopback",
 
1083	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
1084	.tpg_get_tag			= tcm_loop_get_tag,
1085	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
1086	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
 
1087	.check_stop_free		= tcm_loop_check_stop_free,
1088	.release_cmd			= tcm_loop_release_cmd,
 
 
1089	.sess_get_index			= tcm_loop_sess_get_index,
1090	.write_pending			= tcm_loop_write_pending,
 
 
1091	.get_cmd_state			= tcm_loop_get_cmd_state,
1092	.queue_data_in			= tcm_loop_queue_data_in,
1093	.queue_status			= tcm_loop_queue_status,
1094	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
1095	.aborted_task			= tcm_loop_aborted_task,
1096	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
1097	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
1098	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
1099	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
1100	.fabric_post_link		= tcm_loop_port_link,
1101	.fabric_pre_unlink		= tcm_loop_port_unlink,
1102	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
1103	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
1104	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
1105	.default_submit_type		= TARGET_QUEUE_SUBMIT,
1106	.direct_submit_supp		= 0,
1107};
1108
1109static int __init tcm_loop_fabric_init(void)
1110{
1111	int ret = -ENOMEM;
1112
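	/*
	 * Regular I/O descriptors live in scsi_cmnd private data, so this
	 * cache only backs the tcm_loop_cmd structures used for TMRs in
	 * tcm_loop_issue_tmr().
	 */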
1113	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1114				sizeof(struct tcm_loop_cmd),
1115				__alignof__(struct tcm_loop_cmd),
1116				0, NULL);
1117	if (!tcm_loop_cmd_cache) {
1118		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
1119		goto out;
 
1120	}
1121
1122	ret = tcm_loop_alloc_core_bus();
1123	if (ret)
1124		goto out_destroy_cache;
1125
1126	ret = target_register_template(&loop_ops);
1127	if (ret)
1128		goto out_release_core_bus;
1129
1130	return 0;
1131
1132out_release_core_bus:
1133	tcm_loop_release_core_bus();
1134out_destroy_cache:
1135	kmem_cache_destroy(tcm_loop_cmd_cache);
 
 
1136out:
1137	return ret;
1138}
1139
1140static void __exit tcm_loop_fabric_exit(void)
1141{
1142	target_unregister_template(&loop_ops);
1143	tcm_loop_release_core_bus();
1144	kmem_cache_destroy(tcm_loop_cmd_cache);
 
1145}
1146
1147MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1148MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1149MODULE_LICENSE("GPL");
1150module_init(tcm_loop_fabric_init);
1151module_exit(tcm_loop_fabric_exit);
v4.6
   1/*******************************************************************************
   2 *
   3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
   4 * for emulated SAS initiator ports
   5 *
   6 * © Copyright 2011-2013 Datera, Inc.
   7 *
   8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   9 *
  10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 ****************************************************************************/
  22
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/init.h>
  26#include <linux/slab.h>
  27#include <linux/types.h>
  28#include <linux/configfs.h>
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_tcq.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_cmnd.h>
  34
  35#include <target/target_core_base.h>
  36#include <target/target_core_fabric.h>
  37
  38#include "tcm_loop.h"
  39
  40#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
  41
  42static struct workqueue_struct *tcm_loop_workqueue;
  43static struct kmem_cache *tcm_loop_cmd_cache;
  44
  45static int tcm_loop_hba_no_cnt;
  46
  47static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  48
 
 
 
 
 
 
 
 
 
  49/*
  50 * Called from struct target_core_fabric_ops->check_stop_free()
  51 */
  52static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
  53{
  54	/*
  55	 * Do not release struct se_cmd's containing a valid TMR
  56	 * pointer.  These will be released directly in tcm_loop_device_reset()
  57	 * with transport_generic_free_cmd().
  58	 */
  59	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
  60		return 0;
  61	/*
  62	 * Release the struct se_cmd, which will make a callback to release
  63	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
  64	 */
  65	transport_generic_free_cmd(se_cmd, 0);
  66	return 1;
  67}
  68
  69static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
  70{
  71	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
  72				struct tcm_loop_cmd, tl_se_cmd);
 
  73
  74	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 
 
 
  75}
  76
  77static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
  78{
  79	seq_printf(m, "tcm_loop_proc_info()\n");
  80	return 0;
  81}
  82
  83static int tcm_loop_driver_probe(struct device *);
  84static int tcm_loop_driver_remove(struct device *);
  85
  86static int pseudo_lld_bus_match(struct device *dev,
  87				struct device_driver *dev_driver)
  88{
  89	return 1;
  90}
  91
  92static struct bus_type tcm_loop_lld_bus = {
  93	.name			= "tcm_loop_bus",
  94	.match			= pseudo_lld_bus_match,
  95	.probe			= tcm_loop_driver_probe,
  96	.remove			= tcm_loop_driver_remove,
  97};
  98
  99static struct device_driver tcm_loop_driverfs = {
 100	.name			= "tcm_loop",
 101	.bus			= &tcm_loop_lld_bus,
 102};
 103/*
 104 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 105 */
 106static struct device *tcm_loop_primary;
 107
 108static void tcm_loop_submission_work(struct work_struct *work)
 109{
 110	struct tcm_loop_cmd *tl_cmd =
 111		container_of(work, struct tcm_loop_cmd, work);
 112	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
 113	struct scsi_cmnd *sc = tl_cmd->sc;
 114	struct tcm_loop_nexus *tl_nexus;
 115	struct tcm_loop_hba *tl_hba;
 116	struct tcm_loop_tpg *tl_tpg;
 117	struct scatterlist *sgl_bidi = NULL;
 118	u32 sgl_bidi_count = 0, transfer_length;
 119	int rc;
 120
 121	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 122	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 123
 124	/*
 125	 * Ensure that this tl_tpg reference from the incoming sc->device->id
 126	 * has already been configured via tcm_loop_make_naa_tpg().
 127	 */
 128	if (!tl_tpg->tl_hba) {
 129		set_host_byte(sc, DID_NO_CONNECT);
 130		goto out_done;
 131	}
 132	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
 133		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
 134		goto out_done;
 135	}
 136	tl_nexus = tl_tpg->tl_nexus;
 137	if (!tl_nexus) {
 138		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
 139				" does not exist\n");
 140		set_host_byte(sc, DID_ERROR);
 141		goto out_done;
 142	}
 143	if (scsi_bidi_cmnd(sc)) {
 144		struct scsi_data_buffer *sdb = scsi_in(sc);
 145
 146		sgl_bidi = sdb->table.sgl;
 147		sgl_bidi_count = sdb->table.nents;
 148		se_cmd->se_cmd_flags |= SCF_BIDI;
 149
 150	}
 151
 152	transfer_length = scsi_transfer_length(sc);
 153	if (!scsi_prot_sg_count(sc) &&
 154	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
 155		se_cmd->prot_pto = true;
 156		/*
 157		 * loopback transport doesn't support
 158		 * WRITE_GENERATE, READ_STRIP protection
 159		 * information operations, go ahead unprotected.
 160		 */
 161		transfer_length = scsi_bufflen(sc);
 162	}
 163
 164	se_cmd->tag = tl_cmd->sc_cmd_tag;
 165	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 166			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
 167			transfer_length, TCM_SIMPLE_TAG,
 168			sc->sc_data_direction, 0,
 169			scsi_sglist(sc), scsi_sg_count(sc),
 170			sgl_bidi, sgl_bidi_count,
 171			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
 172	if (rc < 0) {
 173		set_host_byte(sc, DID_NO_CONNECT);
 174		goto out_done;
 175	}
 176	return;
 177
 178out_done:
 179	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 180	sc->scsi_done(sc);
 181	return;
 182}
 183
 184/*
 185 * ->queuecommand can be and usually is called from interrupt context, so
 186 * defer the actual submission to a workqueue.
 187 */
 188static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 189{
 190	struct tcm_loop_cmd *tl_cmd;
 191
 192	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
 193		" scsi_buf_len: %u\n", sc->device->host->host_no,
 194		sc->device->id, sc->device->channel, sc->device->lun,
 195		sc->cmnd[0], scsi_bufflen(sc));
 196
 197	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 198	if (!tl_cmd) {
 199		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 200		set_host_byte(sc, DID_ERROR);
 201		sc->scsi_done(sc);
 202		return 0;
 203	}
 204
 
 205	tl_cmd->sc = sc;
 206	tl_cmd->sc_cmd_tag = sc->request->tag;
 207	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
 208	queue_work(tcm_loop_workqueue, &tl_cmd->work);
 209	return 0;
 210}
 211
 212/*
 213 * Called from SCSI EH process context to issue a LUN_RESET TMR
 214 * to struct scsi_device
 215 */
 216static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 217			      u64 lun, int task, enum tcm_tmreq_table tmr)
 218{
 219	struct se_cmd *se_cmd = NULL;
 220	struct se_session *se_sess;
 221	struct se_portal_group *se_tpg;
 222	struct tcm_loop_nexus *tl_nexus;
 223	struct tcm_loop_cmd *tl_cmd = NULL;
 224	struct tcm_loop_tmr *tl_tmr = NULL;
 225	int ret = TMR_FUNCTION_FAILED, rc;
 226
 227	/*
 228	 * Locate the tl_nexus and se_sess pointers
 229	 */
 230	tl_nexus = tl_tpg->tl_nexus;
 231	if (!tl_nexus) {
 232		pr_err("Unable to perform device reset without"
 233				" active I_T Nexus\n");
 234		return ret;
 235	}
 236
 237	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 238	if (!tl_cmd) {
 239		pr_err("Unable to allocate memory for tl_cmd\n");
 240		return ret;
 241	}
 242
 243	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 244	if (!tl_tmr) {
 245		pr_err("Unable to allocate memory for tl_tmr\n");
 246		goto release;
 247	}
 248	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
 249
 250	se_cmd = &tl_cmd->tl_se_cmd;
 251	se_tpg = &tl_tpg->tl_se_tpg;
 252	se_sess = tl_tpg->tl_nexus->se_sess;
 253	/*
 254	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
 255	 */
 256	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
 257				DMA_NONE, TCM_SIMPLE_TAG,
 258				&tl_cmd->tl_sense_buf[0]);
 259
 260	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
 
 
 261	if (rc < 0)
 262		goto release;
 
 
 
 263
 264	if (tmr == TMR_ABORT_TASK)
 265		se_cmd->se_tmr_req->ref_task_tag = task;
 266
 267	/*
 268	 * Locate the underlying TCM struct se_lun
 269	 */
 270	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
 271		ret = TMR_LUN_DOES_NOT_EXIST;
 272		goto release;
 273	}
 274	/*
 275	 * Queue the TMR to TCM Core and sleep waiting for
 276	 * tcm_loop_queue_tm_rsp() to wake us up.
 277	 */
 278	transport_generic_handle_tmr(se_cmd);
 279	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
 280	/*
 281	 * The TMR LUN_RESET has completed, check the response status and
 282	 * then release allocations.
 283	 */
 284	ret = se_cmd->se_tmr_req->response;
 285release:
 286	if (se_cmd)
 287		transport_generic_free_cmd(se_cmd, 1);
 288	else
 289		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 290	kfree(tl_tmr);
 291	return ret;
 292}
 293
 294static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 295{
 296	struct tcm_loop_hba *tl_hba;
 297	struct tcm_loop_tpg *tl_tpg;
 298	int ret = FAILED;
 299
 300	/*
 301	 * Locate the tcm_loop_hba_t pointer
 302	 */
 303	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 304	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 305	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 306				 sc->request->tag, TMR_ABORT_TASK);
 307	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 308}
 309
 310/*
 311 * Called from SCSI EH process context to issue a LUN_RESET TMR
 312 * to struct scsi_device
 313 */
 314static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 315{
 316	struct tcm_loop_hba *tl_hba;
 317	struct tcm_loop_tpg *tl_tpg;
 318	int ret = FAILED;
 319
 320	/*
 321	 * Locate the tcm_loop_hba_t pointer
 322	 */
 323	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 324	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 325
 326	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 327				 0, TMR_LUN_RESET);
 328	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 329}
 330
 331static int tcm_loop_target_reset(struct scsi_cmnd *sc)
 332{
 333	struct tcm_loop_hba *tl_hba;
 334	struct tcm_loop_tpg *tl_tpg;
 335
 336	/*
 337	 * Locate the tcm_loop_hba_t pointer
 338	 */
 339	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 340	if (!tl_hba) {
 341		pr_err("Unable to perform device reset without"
 342				" active I_T Nexus\n");
 343		return FAILED;
 344	}
 345	/*
 346	 * Locate the tl_tpg pointer from TargetID in sc->device->id
 347	 */
 348	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 349	if (tl_tpg) {
 350		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 351		return SUCCESS;
 352	}
 353	return FAILED;
 354}
 355
 356static int tcm_loop_slave_alloc(struct scsi_device *sd)
 357{
 358	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
 359	return 0;
 360}
 361
 362static struct scsi_host_template tcm_loop_driver_template = {
 363	.show_info		= tcm_loop_show_info,
 364	.proc_name		= "tcm_loopback",
 365	.name			= "TCM_Loopback",
 366	.queuecommand		= tcm_loop_queuecommand,
 367	.change_queue_depth	= scsi_change_queue_depth,
 368	.eh_abort_handler = tcm_loop_abort_task,
 369	.eh_device_reset_handler = tcm_loop_device_reset,
 370	.eh_target_reset_handler = tcm_loop_target_reset,
 371	.can_queue		= 1024,
 372	.this_id		= -1,
 373	.sg_tablesize		= 256,
 374	.cmd_per_lun		= 1024,
 375	.max_sectors		= 0xFFFF,
 376	.use_clustering		= DISABLE_CLUSTERING,
 377	.slave_alloc		= tcm_loop_slave_alloc,
 378	.module			= THIS_MODULE,
 379	.track_queue_depth	= 1,
 
 380};
 381
 382static int tcm_loop_driver_probe(struct device *dev)
 383{
 384	struct tcm_loop_hba *tl_hba;
 385	struct Scsi_Host *sh;
 386	int error, host_prot;
 387
 388	tl_hba = to_tcm_loop_hba(dev);
 389
 390	sh = scsi_host_alloc(&tcm_loop_driver_template,
 391			sizeof(struct tcm_loop_hba));
 392	if (!sh) {
 393		pr_err("Unable to allocate struct scsi_host\n");
 394		return -ENODEV;
 395	}
 396	tl_hba->sh = sh;
 397
 398	/*
 399	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
 400	 */
 401	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
 402	/*
 403	 * Setup single ID, Channel and LUN for now..
 404	 */
 405	sh->max_id = 2;
 406	sh->max_lun = 0;
 407	sh->max_channel = 0;
 408	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 
 
 
 409
 410	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
 411		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
 412		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
 413
 414	scsi_host_set_prot(sh, host_prot);
 415	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
 416
 417	error = scsi_add_host(sh, &tl_hba->dev);
 418	if (error) {
 419		pr_err("%s: scsi_add_host failed\n", __func__);
 420		scsi_host_put(sh);
 421		return -ENODEV;
 422	}
 423	return 0;
 424}
 425
 426static int tcm_loop_driver_remove(struct device *dev)
 427{
 428	struct tcm_loop_hba *tl_hba;
 429	struct Scsi_Host *sh;
 430
 431	tl_hba = to_tcm_loop_hba(dev);
 432	sh = tl_hba->sh;
 433
 434	scsi_remove_host(sh);
 435	scsi_host_put(sh);
 436	return 0;
 437}
 438
 439static void tcm_loop_release_adapter(struct device *dev)
 440{
 441	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
 442
 443	kfree(tl_hba);
 444}
 445
 446/*
 447 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
 448 */
 449static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
 450{
 451	int ret;
 452
 453	tl_hba->dev.bus = &tcm_loop_lld_bus;
 454	tl_hba->dev.parent = tcm_loop_primary;
 455	tl_hba->dev.release = &tcm_loop_release_adapter;
 456	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
 457
 458	ret = device_register(&tl_hba->dev);
 459	if (ret) {
 460		pr_err("device_register() failed for"
 461				" tl_hba->dev: %d\n", ret);
 462		return -ENODEV;
 463	}
 464
 465	return 0;
 466}
 467
 468/*
 469 * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated
 470 * tcm_loop SCSI bus.
 471 */
 472static int tcm_loop_alloc_core_bus(void)
 473{
 474	int ret;
 475
 476	tcm_loop_primary = root_device_register("tcm_loop_0");
 477	if (IS_ERR(tcm_loop_primary)) {
 478		pr_err("Unable to allocate tcm_loop_primary\n");
 479		return PTR_ERR(tcm_loop_primary);
 480	}
 481
 482	ret = bus_register(&tcm_loop_lld_bus);
 483	if (ret) {
 484		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 485		goto dev_unreg;
 486	}
 487
 488	ret = driver_register(&tcm_loop_driverfs);
 489	if (ret) {
 490		pr_err("driver_register() failed for"
 491				"tcm_loop_driverfs\n");
 492		goto bus_unreg;
 493	}
 494
 495	pr_debug("Initialized TCM Loop Core Bus\n");
 496	return ret;
 497
 498bus_unreg:
 499	bus_unregister(&tcm_loop_lld_bus);
 500dev_unreg:
 501	root_device_unregister(tcm_loop_primary);
 502	return ret;
 503}
 504
 505static void tcm_loop_release_core_bus(void)
 506{
 507	driver_unregister(&tcm_loop_driverfs);
 508	bus_unregister(&tcm_loop_lld_bus);
 509	root_device_unregister(tcm_loop_primary);
 510
 511	pr_debug("Releasing TCM Loop Core BUS\n");
 512}
 513
 514static char *tcm_loop_get_fabric_name(void)
 515{
 516	return "loopback";
 517}
 518
 519static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
 520{
 521	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 522}
 523
 524static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 525{
 526	/*
 527	 * Return the passed NAA identifier for the Target Port
 528	 */
 529	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
 530}
 531
 532static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 533{
 534	/*
 535	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
 536	 * to represent the SCSI Target Port.
 537	 */
 538	return tl_tpg(se_tpg)->tl_tpgt;
 539}
 540
 541/*
 542 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 543 * based upon the incoming fabric dependent SCSI Initiator Port
 544 */
 545static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
 546{
 547	return 1;
 548}
 549
 550static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
 551{
 552	return 0;
 553}
 554
 555/*
 556 * Allow I_T Nexus full READ-WRITE access without explict Initiator Node ACLs for
 557 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 558 */
 559static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
 560{
 561	return 0;
 562}
 563
 564/*
 565 * Because TCM_Loop does not use explict ACLs and MappedLUNs, this will
 566 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 567 * It has been added here as a nop for target_fabric_tf_ops_check()
 568 */
 569static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
 570{
 571	return 0;
 572}
 573
 574static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
 575{
 576	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 577						   tl_se_tpg);
 578	return tl_tpg->tl_fabric_prot_type;
 579}
 580
 581static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 582{
 583	return 1;
 584}
 585
 586static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
 587{
 588	return 1;
 589}
 590
 591static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
 592{
 593	return;
 594}
 595
 596static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
 597{
 598	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 599			struct tcm_loop_cmd, tl_se_cmd);
 600
 601	return tl_cmd->sc_cmd_state;
 602}
 603
 604static int tcm_loop_shutdown_session(struct se_session *se_sess)
 605{
 606	return 0;
 607}
 608
 609static void tcm_loop_close_session(struct se_session *se_sess)
 610{
 611	return;
 612};
 613
 614static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 615{
 616	/*
 617	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
 618	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
 619	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
 620	 * format with transport_generic_map_mem_to_cmd().
 621	 *
 622	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 623	 * object execution queue.
 624	 */
 625	target_execute_cmd(se_cmd);
 626	return 0;
 627}
 628
 629static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
 630{
 631	return 0;
 632}
 633
 634static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 635{
 636	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 637				struct tcm_loop_cmd, tl_se_cmd);
 638	struct scsi_cmnd *sc = tl_cmd->sc;
 639
 640	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 641		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
 642
 643	sc->result = SAM_STAT_GOOD;
 644	set_host_byte(sc, DID_OK);
 645	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 646	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 647		scsi_set_resid(sc, se_cmd->residual_count);
 648	sc->scsi_done(sc);
 649	return 0;
 650}
 651
 652static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 653{
 654	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 655				struct tcm_loop_cmd, tl_se_cmd);
 656	struct scsi_cmnd *sc = tl_cmd->sc;
 657
 658	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 659			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 660
 661	if (se_cmd->sense_buffer &&
 662	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 663	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 664
 665		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 666				SCSI_SENSE_BUFFERSIZE);
 667		sc->result = SAM_STAT_CHECK_CONDITION;
 668		set_driver_byte(sc, DRIVER_SENSE);
 669	} else
 670		sc->result = se_cmd->scsi_status;
 671
 672	set_host_byte(sc, DID_OK);
 673	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 674	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 675		scsi_set_resid(sc, se_cmd->residual_count);
 676	sc->scsi_done(sc);
 677	return 0;
 678}
 679
 
 
 
 
 
 
 
 
 
 
 
 680static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 681{
 682	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 683	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
 684	/*
 685	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
 686	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
 687	 */
 688	atomic_set(&tl_tmr->tmr_complete, 1);
 689	wake_up(&tl_tmr->tl_tmr_wait);
 690}
 691
 692static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
 693{
 694	return;
 695}
 696
 697static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 698{
 699	switch (tl_hba->tl_proto_id) {
 700	case SCSI_PROTOCOL_SAS:
 701		return "SAS";
 702	case SCSI_PROTOCOL_FCP:
 703		return "FCP";
 704	case SCSI_PROTOCOL_ISCSI:
 705		return "iSCSI";
 706	default:
 707		break;
 708	}
 709
 710	return "Unknown";
 711}
 712
 713/* Start items for tcm_loop_port_cit */
 714
 715static int tcm_loop_port_link(
 716	struct se_portal_group *se_tpg,
 717	struct se_lun *lun)
 718{
 719	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 720				struct tcm_loop_tpg, tl_se_tpg);
 721	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 722
 723	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
 724	/*
 725	 * Add Linux/SCSI struct scsi_device by HCTL
 726	 */
 727	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 728
 729	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 730	return 0;
 731}
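/*
 * Illustrative configfs flow only (hypothetical WWN and backstore names):
 * symlinking a backstore device into a TPG LUN triggers fabric_post_link ->
 * tcm_loop_port_link(), which hot-adds the matching struct scsi_device:
 *
 *	ln -s /sys/kernel/config/target/core/iblock_0/disk0 \
 *	      /sys/kernel/config/target/loopback/naa.60014051234567890/tpgt_0/lun/lun_0/virtual_port
 *
 * Removing the symlink takes the fabric_pre_unlink -> tcm_loop_port_unlink()
 * path below.
 */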
 732
 733static void tcm_loop_port_unlink(
 734	struct se_portal_group *se_tpg,
 735	struct se_lun *se_lun)
 736{
 737	struct scsi_device *sd;
 738	struct tcm_loop_hba *tl_hba;
 739	struct tcm_loop_tpg *tl_tpg;
 740
 741	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 742	tl_hba = tl_tpg->tl_hba;
 743
 744	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 745				se_lun->unpacked_lun);
 746	if (!sd) {
 747		pr_err("Unable to locate struct scsi_device for %d:%d:"
 748			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 749		return;
 750	}
 751	/*
 752	 * Remove Linux/SCSI struct scsi_device by HCTL
 753	 */
 754	scsi_remove_device(sd);
 755	scsi_device_put(sd);
 756
 757	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
 758
 759	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 760}
 761
 762/* End items for tcm_loop_port_cit */
 763
 764static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
 765		struct config_item *item, char *page)
 766{
 767	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 768	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 769						   tl_se_tpg);
 770
 771	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
 772}
 773
 774static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
 775		struct config_item *item, const char *page, size_t count)
 776{
 777	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 778	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 779						   tl_se_tpg);
 780	unsigned long val;
 781	int ret = kstrtoul(page, 0, &val);
 782
 783	if (ret) {
 784		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
 785		return ret;
 786	}
 787	if (val != 0 && val != 1 && val != 3) {
 788		pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
 789		return -EINVAL;
 790	}
 791	tl_tpg->tl_fabric_prot_type = val;
 792
 793	return count;
 794}
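/*
 * Illustrative configfs usage only (hypothetical WWN; path assumes the
 * standard target configfs layout). Accepted values mirror the check above:
 * 0, 1 and 3:
 *
 *	echo 1 > /sys/kernel/config/target/loopback/naa.60014051234567890/tpgt_0/attrib/fabric_prot_type
 */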
 795
 796CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
 797
 798static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 799	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
 800	NULL,
 801};
 802
 803/* Start items for tcm_loop_nexus_cit */
 804
 805static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
 806				  struct se_session *se_sess, void *p)
 807{
 808	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 809					struct tcm_loop_tpg, tl_se_tpg);
 810
 811	tl_tpg->tl_nexus = p;
 812	return 0;
 813}
 814
 815static int tcm_loop_make_nexus(
 816	struct tcm_loop_tpg *tl_tpg,
 817	const char *name)
 818{
 819	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 820	struct tcm_loop_nexus *tl_nexus;
 821	int ret;
 822
 823	if (tl_tpg->tl_nexus) {
 824		pr_debug("tl_tpg->tl_nexus already exists\n");
 825		return -EEXIST;
 826	}
 827
 828	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 829	if (!tl_nexus) {
 830		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 831		return -ENOMEM;
 832	}
 833
 834	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
 835					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
 836					name, tl_nexus, tcm_loop_alloc_sess_cb);
 837	if (IS_ERR(tl_nexus->se_sess)) {
 838		ret = PTR_ERR(tl_nexus->se_sess);
 839		kfree(tl_nexus);
 840		return ret;
 841	}
 842
 843	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 844		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 845		name);
 846	return 0;
 847}
 848
 849static int tcm_loop_drop_nexus(
 850	struct tcm_loop_tpg *tpg)
 851{
 852	struct se_session *se_sess;
 853	struct tcm_loop_nexus *tl_nexus;
 854
 855	tl_nexus = tpg->tl_nexus;
 856	if (!tl_nexus)
 857		return -ENODEV;
 858
 859	se_sess = tl_nexus->se_sess;
 860	if (!se_sess)
 861		return -ENODEV;
 862
 863	if (atomic_read(&tpg->tl_tpg_port_count)) {
 864		pr_err("Unable to remove TCM_Loop I_T Nexus with"
 865			" active TPG port count: %d\n",
 866			atomic_read(&tpg->tl_tpg_port_count));
 867		return -EPERM;
 868	}
 869
 870	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
 871		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
 872		tl_nexus->se_sess->se_node_acl->initiatorname);
 873	/*
 874	 * Release the SCSI I_T Nexus to the emulated Target Port
 875	 */
 876	transport_deregister_session(tl_nexus->se_sess);
 877	tpg->tl_nexus = NULL;
 878	kfree(tl_nexus);
 879	return 0;
 880}
 881
 882/* End items for tcm_loop_nexus_cit */
 883
 884static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
 885{
 886	struct se_portal_group *se_tpg = to_tpg(item);
 887	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 888			struct tcm_loop_tpg, tl_se_tpg);
 889	struct tcm_loop_nexus *tl_nexus;
 890	ssize_t ret;
 891
 892	tl_nexus = tl_tpg->tl_nexus;
 893	if (!tl_nexus)
 894		return -ENODEV;
 895
 896	ret = snprintf(page, PAGE_SIZE, "%s\n",
 897		tl_nexus->se_sess->se_node_acl->initiatorname);
 898
 899	return ret;
 900}
 901
 902static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
 903		const char *page, size_t count)
 904{
 905	struct se_portal_group *se_tpg = to_tpg(item);
 906	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 907			struct tcm_loop_tpg, tl_se_tpg);
 908	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 909	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
 910	int ret;
 911	/*
  912	 * Shut down the active I_T nexus if 'NULL' is passed.
 913	 */
 914	if (!strncmp(page, "NULL", 4)) {
 915		ret = tcm_loop_drop_nexus(tl_tpg);
 916		return (!ret) ? count : ret;
 917	}
 918	/*
 919	 * Otherwise make sure the passed virtual Initiator port WWN matches
 920	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
 921	 * tcm_loop_make_nexus()
 922	 */
 923	if (strlen(page) >= TL_WWN_ADDR_LEN) {
 924		pr_err("Emulated NAA Sas Address: %s, exceeds"
 925				" max: %d\n", page, TL_WWN_ADDR_LEN);
 926		return -EINVAL;
 927	}
 928	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
 929
 930	ptr = strstr(i_port, "naa.");
 931	if (ptr) {
 932		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
 933			pr_err("Passed SAS Initiator Port %s does not"
 934				" match target port protoid: %s\n", i_port,
 935				tcm_loop_dump_proto_id(tl_hba));
 936			return -EINVAL;
 937		}
 938		port_ptr = &i_port[0];
 939		goto check_newline;
 940	}
 941	ptr = strstr(i_port, "fc.");
 942	if (ptr) {
 943		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
 944			pr_err("Passed FCP Initiator Port %s does not"
 945				" match target port protoid: %s\n", i_port,
 946				tcm_loop_dump_proto_id(tl_hba));
 947			return -EINVAL;
 948		}
 949		port_ptr = &i_port[3]; /* Skip over "fc." */
 950		goto check_newline;
 951	}
 952	ptr = strstr(i_port, "iqn.");
 953	if (ptr) {
 954		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
 955			pr_err("Passed iSCSI Initiator Port %s does not"
 956				" match target port protoid: %s\n", i_port,
 957				tcm_loop_dump_proto_id(tl_hba));
 958			return -EINVAL;
 959		}
 960		port_ptr = &i_port[0];
 961		goto check_newline;
 962	}
 963	pr_err("Unable to locate prefix for emulated Initiator Port:"
 964			" %s\n", i_port);
 965	return -EINVAL;
 966	/*
  967	 * Clear any trailing newline for the initiator WWN
 968	 */
 969check_newline:
 970	if (i_port[strlen(i_port)-1] == '\n')
 971		i_port[strlen(i_port)-1] = '\0';
 972
 973	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
 974	if (ret < 0)
 975		return ret;
 976
 977	return count;
 978}
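/*
 * Illustrative usage only (hypothetical WWNs; the initiator prefix must match
 * the protocol of the parent endpoint WWN):
 *
 *	echo naa.60014052cc8e8d60 > .../loopback/naa.60014051234567890/tpgt_0/nexus
 *	echo NULL > .../loopback/naa.60014051234567890/tpgt_0/nexus
 *
 * The first write establishes the emulated I_T nexus via tcm_loop_make_nexus();
 * the second drops it again through tcm_loop_drop_nexus().
 */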
 979
 980static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
 981		char *page)
 982{
 983	struct se_portal_group *se_tpg = to_tpg(item);
 984	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 985			struct tcm_loop_tpg, tl_se_tpg);
 986	const char *status = NULL;
 987	ssize_t ret = -EINVAL;
 988
 989	switch (tl_tpg->tl_transport_status) {
 990	case TCM_TRANSPORT_ONLINE:
 991		status = "online";
 992		break;
 993	case TCM_TRANSPORT_OFFLINE:
 994		status = "offline";
 995		break;
 996	default:
 997		break;
 998	}
 999
1000	if (status)
1001		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
1002
1003	return ret;
1004}
1005
1006static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
1007		const char *page, size_t count)
1008{
1009	struct se_portal_group *se_tpg = to_tpg(item);
1010	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1011			struct tcm_loop_tpg, tl_se_tpg);
1012
1013	if (!strncmp(page, "online", 6)) {
1014		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1015		return count;
1016	}
1017	if (!strncmp(page, "offline", 7)) {
1018		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1019		if (tl_tpg->tl_nexus) {
1020			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
1021
1022			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
1023		}
1024		return count;
1025	}
1026	return -EINVAL;
1027}
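/*
 * Illustrative usage only; as checked above, the attribute accepts exactly
 * the strings "online" and "offline":
 *
 *	echo offline > .../loopback/naa.60014051234567890/tpgt_0/transport_status
 *	echo online  > .../loopback/naa.60014051234567890/tpgt_0/transport_status
 */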
1028
1029static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
1030					 char *page)
1031{
1032	struct se_portal_group *se_tpg = to_tpg(item);
1033	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1034			struct tcm_loop_tpg, tl_se_tpg);
1035	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1036
1037	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
1038			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
1039}
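/*
 * The value read back above is the Linux/SCSI <host>:<channel>:<target>
 * triple for this TPG, e.g. "7:0:1" (example value only); together with the
 * LUN it forms the HCTL address that userspace tools such as lsscsi report
 * for the emulated devices.
 */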
1040
1041CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
1042CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
1043CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
1044
1045static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1046	&tcm_loop_tpg_attr_nexus,
1047	&tcm_loop_tpg_attr_transport_status,
1048	&tcm_loop_tpg_attr_address,
1049	NULL,
1050};
1051
1052/* Start items for tcm_loop_naa_cit */
1053
1054static struct se_portal_group *tcm_loop_make_naa_tpg(
1055	struct se_wwn *wwn,
1056	struct config_group *group,
1057	const char *name)
1058{
1059	struct tcm_loop_hba *tl_hba = container_of(wwn,
1060			struct tcm_loop_hba, tl_hba_wwn);
1061	struct tcm_loop_tpg *tl_tpg;
1062	int ret;
1063	unsigned long tpgt;
1064
1065	if (strstr(name, "tpgt_") != name) {
1066		pr_err("Unable to locate \"tpgt_#\" directory"
1067				" group\n");
1068		return ERR_PTR(-EINVAL);
1069	}
1070	if (kstrtoul(name+5, 10, &tpgt))
1071		return ERR_PTR(-EINVAL);
1072
1073	if (tpgt >= TL_TPGS_PER_HBA) {
1074		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
1075				" %u\n", tpgt, TL_TPGS_PER_HBA);
1076		return ERR_PTR(-EINVAL);
1077	}
1078	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1079	tl_tpg->tl_hba = tl_hba;
1080	tl_tpg->tl_tpgt = tpgt;
1081	/*
 1082	 * Register the tl_tpg as an emulated TCM Target Endpoint
1083	 */
1084	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
1085	if (ret < 0)
1086		return ERR_PTR(-ENOMEM);
1087
1088	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1089		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
1090		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1091
1092	return &tl_tpg->tl_se_tpg;
1093}
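/*
 * Illustrative configfs flow only (hypothetical WWN): creating the TPG
 * directory invokes this callback, removing it invokes tcm_loop_drop_naa_tpg()
 * below:
 *
 *	mkdir /sys/kernel/config/target/loopback/naa.60014051234567890/tpgt_0
 *	rmdir /sys/kernel/config/target/loopback/naa.60014051234567890/tpgt_0
 */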
1094
1095static void tcm_loop_drop_naa_tpg(
1096	struct se_portal_group *se_tpg)
1097{
1098	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1099	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1100				struct tcm_loop_tpg, tl_se_tpg);
1101	struct tcm_loop_hba *tl_hba;
1102	unsigned short tpgt;
1103
1104	tl_hba = tl_tpg->tl_hba;
1105	tpgt = tl_tpg->tl_tpgt;
1106	/*
1107	 * Release the I_T Nexus for the Virtual target link if present
1108	 */
1109	tcm_loop_drop_nexus(tl_tpg);
1110	/*
 1111	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
1112	 */
1113	core_tpg_deregister(se_tpg);
1114
1115	tl_tpg->tl_hba = NULL;
1116	tl_tpg->tl_tpgt = 0;
1117
1118	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1119		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1120		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1121}
1122
1123/* End items for tcm_loop_naa_cit */
1124
1125/* Start items for tcm_loop_cit */
1126
1127static struct se_wwn *tcm_loop_make_scsi_hba(
1128	struct target_fabric_configfs *tf,
1129	struct config_group *group,
1130	const char *name)
1131{
1132	struct tcm_loop_hba *tl_hba;
1133	struct Scsi_Host *sh;
1134	char *ptr;
1135	int ret, off = 0;
1136
1137	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1138	if (!tl_hba) {
1139		pr_err("Unable to allocate struct tcm_loop_hba\n");
1140		return ERR_PTR(-ENOMEM);
1141	}
1142	/*
1143	 * Determine the emulated Protocol Identifier and Target Port Name
1144	 * based on the incoming configfs directory name.
1145	 */
1146	ptr = strstr(name, "naa.");
1147	if (ptr) {
1148		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1149		goto check_len;
1150	}
1151	ptr = strstr(name, "fc.");
1152	if (ptr) {
1153		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1154		off = 3; /* Skip over "fc." */
1155		goto check_len;
1156	}
1157	ptr = strstr(name, "iqn.");
1158	if (!ptr) {
1159		pr_err("Unable to locate prefix for emulated Target "
1160				"Port: %s\n", name);
1161		ret = -EINVAL;
1162		goto out;
1163	}
1164	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1165
1166check_len:
1167	if (strlen(name) >= TL_WWN_ADDR_LEN) {
1168		pr_err("Emulated NAA %s Address: %s, exceeds"
1169			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
1170			TL_WWN_ADDR_LEN);
1171		ret = -EINVAL;
1172		goto out;
1173	}
1174	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1175
 1176	/*
 1177	 * tcm_loop_setup_hba_bus() calls device_register(&tl_hba->dev); the
 1178	 * emulated Linux/SCSI LLD's struct Scsi_Host is then set up at
 1179	 * tl_hba->sh via the bus probe callback tcm_loop_driver_probe().
 1180	 */
1181	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1182	if (ret)
1183		goto out;
1184
1185	sh = tl_hba->sh;
1186	tcm_loop_hba_no_cnt++;
1187	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1188		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1189		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1190
1191	return &tl_hba->tl_hba_wwn;
1192out:
1193	kfree(tl_hba);
1194	return ERR_PTR(ret);
1195}
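/*
 * Illustrative naming only (hypothetical WWNs): the directory name created
 * under the loopback fabric selects the emulated protocol by its prefix, as
 * parsed above:
 *
 *	mkdir .../target/loopback/naa.60014051234567890     (SCSI_PROTOCOL_SAS)
 *	mkdir .../target/loopback/fc.20000000c9f51f2e       (SCSI_PROTOCOL_FCP)
 *	mkdir .../target/loopback/iqn.2003-01.org.example:t1  (SCSI_PROTOCOL_ISCSI)
 */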
1196
1197static void tcm_loop_drop_scsi_hba(
1198	struct se_wwn *wwn)
1199{
1200	struct tcm_loop_hba *tl_hba = container_of(wwn,
1201				struct tcm_loop_hba, tl_hba_wwn);
1202
1203	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1204		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1205		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1206		tl_hba->sh->host_no);
1207	/*
1208	 * Call device_unregister() on the original tl_hba->dev.
1209	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
1210	 * release *tl_hba;
1211	 */
1212	device_unregister(&tl_hba->dev);
1213}
1214
1215/* Start items for tcm_loop_cit */
1216static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1217{
1218	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1219}
1220
1221CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
1222
1223static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1224	&tcm_loop_wwn_attr_version,
1225	NULL,
1226};
1227
1228/* End items for tcm_loop_cit */
1229
1230static const struct target_core_fabric_ops loop_ops = {
1231	.module				= THIS_MODULE,
1232	.name				= "loopback",
1233	.get_fabric_name		= tcm_loop_get_fabric_name,
1234	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
1235	.tpg_get_tag			= tcm_loop_get_tag,
1236	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
1237	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
1238	.tpg_check_demo_mode_write_protect =
1239				tcm_loop_check_demo_mode_write_protect,
1240	.tpg_check_prod_mode_write_protect =
1241				tcm_loop_check_prod_mode_write_protect,
1242	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
1243	.tpg_get_inst_index		= tcm_loop_get_inst_index,
1244	.check_stop_free		= tcm_loop_check_stop_free,
1245	.release_cmd			= tcm_loop_release_cmd,
1246	.shutdown_session		= tcm_loop_shutdown_session,
1247	.close_session			= tcm_loop_close_session,
1248	.sess_get_index			= tcm_loop_sess_get_index,
1249	.write_pending			= tcm_loop_write_pending,
1250	.write_pending_status		= tcm_loop_write_pending_status,
1251	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
1252	.get_cmd_state			= tcm_loop_get_cmd_state,
1253	.queue_data_in			= tcm_loop_queue_data_in,
1254	.queue_status			= tcm_loop_queue_status,
1255	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
1256	.aborted_task			= tcm_loop_aborted_task,
1257	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
1258	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
1259	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
1260	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
1261	.fabric_post_link		= tcm_loop_port_link,
1262	.fabric_pre_unlink		= tcm_loop_port_unlink,
1263	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
1264	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
1265	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
1266};
1267
1268static int __init tcm_loop_fabric_init(void)
1269{
1270	int ret = -ENOMEM;
1271
1272	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1273	if (!tcm_loop_workqueue)
1274		goto out;
1275
1276	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1277				sizeof(struct tcm_loop_cmd),
1278				__alignof__(struct tcm_loop_cmd),
1279				0, NULL);
1280	if (!tcm_loop_cmd_cache) {
1281		pr_debug("kmem_cache_create() for"
1282			" tcm_loop_cmd_cache failed\n");
1283		goto out_destroy_workqueue;
1284	}
1285
1286	ret = tcm_loop_alloc_core_bus();
1287	if (ret)
1288		goto out_destroy_cache;
1289
1290	ret = target_register_template(&loop_ops);
1291	if (ret)
1292		goto out_release_core_bus;
1293
1294	return 0;
1295
1296out_release_core_bus:
1297	tcm_loop_release_core_bus();
1298out_destroy_cache:
1299	kmem_cache_destroy(tcm_loop_cmd_cache);
1300out_destroy_workqueue:
1301	destroy_workqueue(tcm_loop_workqueue);
1302out:
1303	return ret;
1304}
1305
1306static void __exit tcm_loop_fabric_exit(void)
1307{
1308	target_unregister_template(&loop_ops);
1309	tcm_loop_release_core_bus();
1310	kmem_cache_destroy(tcm_loop_cmd_cache);
1311	destroy_workqueue(tcm_loop_workqueue);
1312}
1313
1314MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1315MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1316MODULE_LICENSE("GPL");
1317module_init(tcm_loop_fabric_init);
1318module_exit(tcm_loop_fabric_exit);