drivers/target/loopback/tcm_loop.c (Linux v4.6)
   1/*******************************************************************************
   2 *
   3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
   4 * for emulated SAS initiator ports
   5 *
   6 * © Copyright 2011-2013 Datera, Inc.
   7 *
   8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   9 *
  10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 ****************************************************************************/
  22
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/init.h>
  26#include <linux/slab.h>
  27#include <linux/types.h>
  28#include <linux/configfs.h>
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_tcq.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_cmnd.h>
  34
  35#include <target/target_core_base.h>
  36#include <target/target_core_fabric.h>
  37
  38#include "tcm_loop.h"
  39
  40#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
  41
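The to_tcm_loop_hba() macro above is ordinary container_of() arithmetic: given a pointer to the struct device embedded inside a struct tcm_loop_hba, it recovers the containing structure. A minimal userspace sketch of the same idea (the fake_* names and layout are illustrative only, not part of this driver):

#include <stddef.h>
#include <stdio.h>

/* Userspace re-creation of the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_device { int id; };

struct fake_hba {
	int host_no;
	struct fake_device dev;		/* embedded, like tcm_loop_hba->dev */
};

int main(void)
{
	struct fake_hba hba = { .host_no = 7 };
	struct fake_device *dp = &hba.dev;
	struct fake_hba *back = container_of(dp, struct fake_hba, dev);

	printf("host_no = %d\n", back->host_no);	/* prints 7 */
	return 0;
}
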
  42static struct workqueue_struct *tcm_loop_workqueue;
  43static struct kmem_cache *tcm_loop_cmd_cache;
  44
  45static int tcm_loop_hba_no_cnt;
  46
  47static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  48
  49/*
  50 * Called from struct target_core_fabric_ops->check_stop_free()
  51 */
  52static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
  53{
  54	/*
  55	 * Do not release struct se_cmd's containing a valid TMR
  56	 * pointer.  These will be released directly in tcm_loop_device_reset()
  57	 * with transport_generic_free_cmd().
  58	 */
  59	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
  60		return 0;
  61	/*
  62	 * Release the struct se_cmd, which will make a callback to release
  63	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
  64	 */
  65	transport_generic_free_cmd(se_cmd, 0);
  66	return 1;
  67}
  68
  69static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
  70{
  71	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
  72				struct tcm_loop_cmd, tl_se_cmd);
  73
  74	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  75}
  76
  77static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
  78{
  79	seq_printf(m, "tcm_loop_proc_info()\n");
  80	return 0;
  81}
  82
  83static int tcm_loop_driver_probe(struct device *);
  84static int tcm_loop_driver_remove(struct device *);
  85
  86static int pseudo_lld_bus_match(struct device *dev,
  87				struct device_driver *dev_driver)
  88{
  89	return 1;
  90}
  91
  92static struct bus_type tcm_loop_lld_bus = {
  93	.name			= "tcm_loop_bus",
  94	.match			= pseudo_lld_bus_match,
  95	.probe			= tcm_loop_driver_probe,
  96	.remove			= tcm_loop_driver_remove,
  97};
  98
  99static struct device_driver tcm_loop_driverfs = {
 100	.name			= "tcm_loop",
 101	.bus			= &tcm_loop_lld_bus,
 102};
 103/*
 104 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 105 */
 106static struct device *tcm_loop_primary;
 107
 108static void tcm_loop_submission_work(struct work_struct *work)
 109{
 110	struct tcm_loop_cmd *tl_cmd =
 111		container_of(work, struct tcm_loop_cmd, work);
 112	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
 113	struct scsi_cmnd *sc = tl_cmd->sc;
 114	struct tcm_loop_nexus *tl_nexus;
 115	struct tcm_loop_hba *tl_hba;
 116	struct tcm_loop_tpg *tl_tpg;
 117	struct scatterlist *sgl_bidi = NULL;
 118	u32 sgl_bidi_count = 0, transfer_length;
 119	int rc;
 120
 121	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 122	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 123
 124	/*
 125	 * Ensure that this tl_tpg reference from the incoming sc->device->id
 126	 * has already been configured via tcm_loop_make_naa_tpg().
 127	 */
 128	if (!tl_tpg->tl_hba) {
 129		set_host_byte(sc, DID_NO_CONNECT);
 130		goto out_done;
 131	}
 132	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
 133		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
 134		goto out_done;
 135	}
 136	tl_nexus = tl_tpg->tl_nexus;
 137	if (!tl_nexus) {
 138		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
 139				" does not exist\n");
 140		set_host_byte(sc, DID_ERROR);
 141		goto out_done;
 142	}
 143	if (scsi_bidi_cmnd(sc)) {
 144		struct scsi_data_buffer *sdb = scsi_in(sc);
 145
 146		sgl_bidi = sdb->table.sgl;
 147		sgl_bidi_count = sdb->table.nents;
 148		se_cmd->se_cmd_flags |= SCF_BIDI;
 149
 150	}
 151
 152	transfer_length = scsi_transfer_length(sc);
 153	if (!scsi_prot_sg_count(sc) &&
 154	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
 155		se_cmd->prot_pto = true;
 156		/*
 157		 * loopback transport doesn't support
 158		 * WRITE_GENERATE, READ_STRIP protection
 159		 * information operations, go ahead unprotected.
 160		 */
 161		transfer_length = scsi_bufflen(sc);
 162	}
 163
 164	se_cmd->tag = tl_cmd->sc_cmd_tag;
 165	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 166			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
 167			transfer_length, TCM_SIMPLE_TAG,
 168			sc->sc_data_direction, 0,
 169			scsi_sglist(sc), scsi_sg_count(sc),
 170			sgl_bidi, sgl_bidi_count,
 171			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
 172	if (rc < 0) {
 173		set_host_byte(sc, DID_NO_CONNECT);
 174		goto out_done;
 175	}
 176	return;
 177
 178out_done:
 179	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 180	sc->scsi_done(sc);
 181	return;
 182}
 183
 184/*
 185 * ->queuecommand can be and usually is called from interrupt context, so
 186 * defer the actual submission to a workqueue.
 187 */
 188static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 189{
 190	struct tcm_loop_cmd *tl_cmd;
 191
 192	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
 193		" scsi_buf_len: %u\n", sc->device->host->host_no,
 194		sc->device->id, sc->device->channel, sc->device->lun,
 195		sc->cmnd[0], scsi_bufflen(sc));
 196
 197	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 198	if (!tl_cmd) {
 199		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 200		set_host_byte(sc, DID_ERROR);
 201		sc->scsi_done(sc);
 202		return 0;
 203	}
 204
 205	tl_cmd->sc = sc;
 206	tl_cmd->sc_cmd_tag = sc->request->tag;
 207	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
 208	queue_work(tcm_loop_workqueue, &tl_cmd->work);
 209	return 0;
 210}
 211
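tcm_loop_queuecommand() above shows a common LLD pattern: allocate a per-command context with GFP_ATOMIC (since ->queuecommand() may be called in interrupt context) and hand the real submission to a workqueue running in process context. A stripped-down kernel-side sketch of that pattern, using only the standard workqueue API; the demo_* names are illustrative, not part of this driver:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_cmd {
	struct work_struct work;
	int payload;
};

static struct workqueue_struct *demo_wq;	/* alloc_workqueue("demo", 0, 0) at init */

static void demo_work_fn(struct work_struct *work)
{
	struct demo_cmd *cmd = container_of(work, struct demo_cmd, work);

	/* Process context: may sleep, take mutexes, do I/O. */
	pr_debug("handling payload %d\n", cmd->payload);
	kfree(cmd);
}

/* Safe to call from atomic/interrupt context, like ->queuecommand(). */
static int demo_queue(int payload)
{
	struct demo_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

	if (!cmd)
		return -ENOMEM;
	cmd->payload = payload;
	INIT_WORK(&cmd->work, demo_work_fn);
	queue_work(demo_wq, &cmd->work);
	return 0;
}
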
 212/*
  213 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
  214 * LUN_RESET) to the underlying struct se_lun
 215 */
 216static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 217			      u64 lun, int task, enum tcm_tmreq_table tmr)
 218{
 219	struct se_cmd *se_cmd = NULL;
 220	struct se_session *se_sess;
 221	struct se_portal_group *se_tpg;
 222	struct tcm_loop_nexus *tl_nexus;
 223	struct tcm_loop_cmd *tl_cmd = NULL;
 224	struct tcm_loop_tmr *tl_tmr = NULL;
 225	int ret = TMR_FUNCTION_FAILED, rc;
 226
 227	/*
 228	 * Locate the tl_nexus and se_sess pointers
 229	 */
 230	tl_nexus = tl_tpg->tl_nexus;
 231	if (!tl_nexus) {
 232		pr_err("Unable to perform device reset without"
 233				" active I_T Nexus\n");
 234		return ret;
 235	}
 236
 237	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 238	if (!tl_cmd) {
 239		pr_err("Unable to allocate memory for tl_cmd\n");
 240		return ret;
 241	}
 242
 243	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 244	if (!tl_tmr) {
 245		pr_err("Unable to allocate memory for tl_tmr\n");
 246		goto release;
 247	}
 248	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
 249
 250	se_cmd = &tl_cmd->tl_se_cmd;
 251	se_tpg = &tl_tpg->tl_se_tpg;
 252	se_sess = tl_tpg->tl_nexus->se_sess;
 253	/*
 254	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
 255	 */
 256	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
 257				DMA_NONE, TCM_SIMPLE_TAG,
 258				&tl_cmd->tl_sense_buf[0]);
 259
 260	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
 261	if (rc < 0)
 262		goto release;
 263
 264	if (tmr == TMR_ABORT_TASK)
 265		se_cmd->se_tmr_req->ref_task_tag = task;
 266
 267	/*
 268	 * Locate the underlying TCM struct se_lun
 269	 */
 270	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
 271		ret = TMR_LUN_DOES_NOT_EXIST;
 272		goto release;
 273	}
 274	/*
 275	 * Queue the TMR to TCM Core and sleep waiting for
 276	 * tcm_loop_queue_tm_rsp() to wake us up.
 277	 */
 278	transport_generic_handle_tmr(se_cmd);
 279	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
 280	/*
 281	 * The TMR LUN_RESET has completed, check the response status and
 282	 * then release allocations.
 283	 */
 284	ret = se_cmd->se_tmr_req->response;
 285release:
 286	if (se_cmd)
 287		transport_generic_free_cmd(se_cmd, 1);
 288	else
 289		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 290	kfree(tl_tmr);
 291	return ret;
 292}
 293
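The synchronous TMR path above is a plain waitqueue handshake: tcm_loop_issue_tmr() sleeps in wait_event() until tcm_loop_queue_tm_rsp() sets the completion flag and calls wake_up(). A minimal kernel-side sketch of that handshake, assuming only <linux/wait.h> and <linux/atomic.h>; demo_tmr is an illustrative name:

#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_tmr {
	wait_queue_head_t wait;
	atomic_t done;
};

static void demo_tmr_init(struct demo_tmr *t)
{
	init_waitqueue_head(&t->wait);
	atomic_set(&t->done, 0);
}

/* Submitter side: blocks until the response side fires. */
static void demo_tmr_wait(struct demo_tmr *t)
{
	wait_event(t->wait, atomic_read(&t->done));
}

/* Response side, e.g. from a ->queue_tm_rsp() style callback. */
static void demo_tmr_complete(struct demo_tmr *t)
{
	atomic_set(&t->done, 1);
	wake_up(&t->wait);
}
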
 294static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 295{
 296	struct tcm_loop_hba *tl_hba;
 297	struct tcm_loop_tpg *tl_tpg;
 298	int ret = FAILED;
 299
 300	/*
  301	 * Locate the struct tcm_loop_hba pointer
 302	 */
 303	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 304	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 305	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 306				 sc->request->tag, TMR_ABORT_TASK);
 307	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 308}
 309
 310/*
 311 * Called from SCSI EH process context to issue a LUN_RESET TMR
 312 * to struct scsi_device
 313 */
 314static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 315{
 316	struct tcm_loop_hba *tl_hba;
 317	struct tcm_loop_tpg *tl_tpg;
 318	int ret = FAILED;
 319
 320	/*
  321	 * Locate the struct tcm_loop_hba pointer
 322	 */
 323	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 324	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 325
 326	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
 327				 0, TMR_LUN_RESET);
 328	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 329}
 330
 331static int tcm_loop_target_reset(struct scsi_cmnd *sc)
 332{
 333	struct tcm_loop_hba *tl_hba;
 334	struct tcm_loop_tpg *tl_tpg;
 335
 336	/*
  337	 * Locate the struct tcm_loop_hba pointer
 338	 */
 339	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 340	if (!tl_hba) {
  341		pr_err("Unable to perform target reset without"
  342				" an active HBA\n");
 343		return FAILED;
 344	}
 345	/*
 346	 * Locate the tl_tpg pointer from TargetID in sc->device->id
 347	 */
 348	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 349	if (tl_tpg) {
 350		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
 351		return SUCCESS;
 352	}
 353	return FAILED;
 354}
 355
 356static int tcm_loop_slave_alloc(struct scsi_device *sd)
 357{
 358	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
 359	return 0;
 360}
 361
 362static struct scsi_host_template tcm_loop_driver_template = {
 363	.show_info		= tcm_loop_show_info,
 364	.proc_name		= "tcm_loopback",
 365	.name			= "TCM_Loopback",
 366	.queuecommand		= tcm_loop_queuecommand,
 367	.change_queue_depth	= scsi_change_queue_depth,
 368	.eh_abort_handler = tcm_loop_abort_task,
 369	.eh_device_reset_handler = tcm_loop_device_reset,
 370	.eh_target_reset_handler = tcm_loop_target_reset,
 371	.can_queue		= 1024,
 372	.this_id		= -1,
 373	.sg_tablesize		= 256,
 374	.cmd_per_lun		= 1024,
 375	.max_sectors		= 0xFFFF,
 376	.use_clustering		= DISABLE_CLUSTERING,
 377	.slave_alloc		= tcm_loop_slave_alloc,
 378	.module			= THIS_MODULE,
 379	.track_queue_depth	= 1,
 380};
 381
 382static int tcm_loop_driver_probe(struct device *dev)
 383{
 384	struct tcm_loop_hba *tl_hba;
 385	struct Scsi_Host *sh;
 386	int error, host_prot;
 387
 388	tl_hba = to_tcm_loop_hba(dev);
 389
 390	sh = scsi_host_alloc(&tcm_loop_driver_template,
 391			sizeof(struct tcm_loop_hba));
 392	if (!sh) {
 393		pr_err("Unable to allocate struct scsi_host\n");
 394		return -ENODEV;
 395	}
 396	tl_hba->sh = sh;
 397
 398	/*
 399	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
 400	 */
 401	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
 402	/*
 403	 * Setup single ID, Channel and LUN for now..
 404	 */
 405	sh->max_id = 2;
 406	sh->max_lun = 0;
 407	sh->max_channel = 0;
 408	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 409
 410	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
 411		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
 412		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
 413
 414	scsi_host_set_prot(sh, host_prot);
 415	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
 416
 417	error = scsi_add_host(sh, &tl_hba->dev);
 418	if (error) {
 419		pr_err("%s: scsi_add_host failed\n", __func__);
 420		scsi_host_put(sh);
 421		return -ENODEV;
 422	}
 423	return 0;
 424}
 425
 426static int tcm_loop_driver_remove(struct device *dev)
 427{
 428	struct tcm_loop_hba *tl_hba;
 429	struct Scsi_Host *sh;
 430
 431	tl_hba = to_tcm_loop_hba(dev);
 432	sh = tl_hba->sh;
 433
 434	scsi_remove_host(sh);
 435	scsi_host_put(sh);
 436	return 0;
 437}
 438
 439static void tcm_loop_release_adapter(struct device *dev)
 440{
 441	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
 442
 443	kfree(tl_hba);
 444}
 445
 446/*
 447 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
 448 */
 449static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
 450{
 451	int ret;
 452
 453	tl_hba->dev.bus = &tcm_loop_lld_bus;
 454	tl_hba->dev.parent = tcm_loop_primary;
 455	tl_hba->dev.release = &tcm_loop_release_adapter;
 456	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
 457
 458	ret = device_register(&tl_hba->dev);
 459	if (ret) {
 460		pr_err("device_register() failed for"
 461				" tl_hba->dev: %d\n", ret);
 462		return -ENODEV;
 463	}
 464
 465	return 0;
 466}
 467
 468/*
  469 * Called from tcm_loop_fabric_init() in this file to load the emulated
 470 * tcm_loop SCSI bus.
 471 */
 472static int tcm_loop_alloc_core_bus(void)
 473{
 474	int ret;
 475
 476	tcm_loop_primary = root_device_register("tcm_loop_0");
 477	if (IS_ERR(tcm_loop_primary)) {
 478		pr_err("Unable to allocate tcm_loop_primary\n");
 479		return PTR_ERR(tcm_loop_primary);
 480	}
 481
 482	ret = bus_register(&tcm_loop_lld_bus);
 483	if (ret) {
 484		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 485		goto dev_unreg;
 486	}
 487
 488	ret = driver_register(&tcm_loop_driverfs);
 489	if (ret) {
  490		pr_err("driver_register() failed for "
 491				"tcm_loop_driverfs\n");
 492		goto bus_unreg;
 493	}
 494
 495	pr_debug("Initialized TCM Loop Core Bus\n");
 496	return ret;
 497
 498bus_unreg:
 499	bus_unregister(&tcm_loop_lld_bus);
 500dev_unreg:
 501	root_device_unregister(tcm_loop_primary);
 502	return ret;
 503}
 504
 505static void tcm_loop_release_core_bus(void)
 506{
 507	driver_unregister(&tcm_loop_driverfs);
 508	bus_unregister(&tcm_loop_lld_bus);
 509	root_device_unregister(tcm_loop_primary);
 510
 511	pr_debug("Releasing TCM Loop Core BUS\n");
 512}
 513
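tcm_loop_alloc_core_bus(), tcm_loop_setup_hba_bus() and tcm_loop_release_core_bus() above follow the stock driver-core lifecycle: register a root device and a bus once, then hang per-HBA devices off that bus with a ->release() callback that frees the allocation when the last reference is dropped. A hedged kernel-side sketch of that pattern (demo_* names are illustrative; it assumes demo_bus was registered with bus_register() at module init):

#include <linux/device.h>
#include <linux/slab.h>

static int demo_match(struct device *dev, struct device_driver *drv)
{
	return 1;	/* bind everything on this bus, like pseudo_lld_bus_match() */
}

static struct bus_type demo_bus = {
	.name	= "demo_bus",
	.match	= demo_match,
};

struct demo_adapter {
	struct device dev;
};

static void demo_release(struct device *dev)
{
	/* Invoked once device_unregister() drops the final reference. */
	kfree(container_of(dev, struct demo_adapter, dev));
}

static int demo_add_adapter(struct device *parent, int id)
{
	struct demo_adapter *a = kzalloc(sizeof(*a), GFP_KERNEL);
	int ret;

	if (!a)
		return -ENOMEM;
	a->dev.bus = &demo_bus;
	a->dev.parent = parent;
	a->dev.release = demo_release;
	dev_set_name(&a->dev, "demo_adapter_%d", id);

	ret = device_register(&a->dev);
	if (ret)
		put_device(&a->dev);	/* ->release() frees the allocation */
	return ret;
}
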
 514static char *tcm_loop_get_fabric_name(void)
 515{
 516	return "loopback";
 517}
 518
 519static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
 520{
 521	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 522}
 523
 524static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 525{
 526	/*
 527	 * Return the passed NAA identifier for the Target Port
 528	 */
 529	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
 530}
 531
 532static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 533{
 534	/*
 535	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
 536	 * to represent the SCSI Target Port.
 537	 */
 538	return tl_tpg(se_tpg)->tl_tpgt;
 539}
 540
 541/*
 542 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 543 * based upon the incoming fabric dependent SCSI Initiator Port
 544 */
 545static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
 546{
 547	return 1;
 548}
 549
 550static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
 551{
 552	return 0;
 553}
 554
 555/*
  556 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 557 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 558 */
 559static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
 560{
 561	return 0;
 562}
 563
 564/*
  565 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 566 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 567 * It has been added here as a nop for target_fabric_tf_ops_check()
 568 */
 569static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
 570{
 571	return 0;
 572}
 573
 574static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
 575{
 576	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 577						   tl_se_tpg);
 578	return tl_tpg->tl_fabric_prot_type;
 579}
 580
 581static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 582{
 583	return 1;
 584}
 585
 586static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
 587{
 588	return 1;
 589}
 590
 591static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
 592{
 593	return;
 594}
 595
 596static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
 597{
 598	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 599			struct tcm_loop_cmd, tl_se_cmd);
 600
 601	return tl_cmd->sc_cmd_state;
 602}
 603
 604static int tcm_loop_shutdown_session(struct se_session *se_sess)
 605{
 606	return 0;
 607}
 608
 609static void tcm_loop_close_session(struct se_session *se_sess)
 610{
 611	return;
 612};
 613
 614static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 615{
 616	/*
 617	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
 618	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
 619	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
 620	 * format with transport_generic_map_mem_to_cmd().
 621	 *
 622	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 623	 * object execution queue.
 624	 */
 625	target_execute_cmd(se_cmd);
 626	return 0;
 627}
 628
 629static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
 630{
 631	return 0;
 632}
 633
 634static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 635{
 636	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 637				struct tcm_loop_cmd, tl_se_cmd);
 638	struct scsi_cmnd *sc = tl_cmd->sc;
 639
 640	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 641		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
 642
 643	sc->result = SAM_STAT_GOOD;
 644	set_host_byte(sc, DID_OK);
 645	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 646	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 647		scsi_set_resid(sc, se_cmd->residual_count);
 648	sc->scsi_done(sc);
 649	return 0;
 650}
 651
 652static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 653{
 654	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 655				struct tcm_loop_cmd, tl_se_cmd);
 656	struct scsi_cmnd *sc = tl_cmd->sc;
 657
 658	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 659			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 660
 661	if (se_cmd->sense_buffer &&
 662	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 663	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 664
 665		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 666				SCSI_SENSE_BUFFERSIZE);
 667		sc->result = SAM_STAT_CHECK_CONDITION;
 668		set_driver_byte(sc, DRIVER_SENSE);
 669	} else
 670		sc->result = se_cmd->scsi_status;
 671
 672	set_host_byte(sc, DID_OK);
 673	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 674	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 675		scsi_set_resid(sc, se_cmd->residual_count);
 676	sc->scsi_done(sc);
 677	return 0;
 678}
 679
 680static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 681{
 682	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 683	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
 684	/*
 685	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
 686	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
 687	 */
 688	atomic_set(&tl_tmr->tmr_complete, 1);
 689	wake_up(&tl_tmr->tl_tmr_wait);
 690}
 691
 692static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
 693{
 694	return;
 695}
 696
 697static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 698{
 699	switch (tl_hba->tl_proto_id) {
 700	case SCSI_PROTOCOL_SAS:
 701		return "SAS";
 702	case SCSI_PROTOCOL_FCP:
 703		return "FCP";
 704	case SCSI_PROTOCOL_ISCSI:
 705		return "iSCSI";
 706	default:
 707		break;
 708	}
 709
 710	return "Unknown";
 711}
 712
 713/* Start items for tcm_loop_port_cit */
 714
 715static int tcm_loop_port_link(
 716	struct se_portal_group *se_tpg,
 717	struct se_lun *lun)
 718{
 719	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 720				struct tcm_loop_tpg, tl_se_tpg);
 721	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 722
 723	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
 724	/*
 725	 * Add Linux/SCSI struct scsi_device by HCTL
 726	 */
 727	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 728
 729	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 730	return 0;
 731}
 732
 733static void tcm_loop_port_unlink(
 734	struct se_portal_group *se_tpg,
 735	struct se_lun *se_lun)
 736{
 737	struct scsi_device *sd;
 738	struct tcm_loop_hba *tl_hba;
 739	struct tcm_loop_tpg *tl_tpg;
 740
 741	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 742	tl_hba = tl_tpg->tl_hba;
 743
 744	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 745				se_lun->unpacked_lun);
 746	if (!sd) {
 747		pr_err("Unable to locate struct scsi_device for %d:%d:"
 748			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 749		return;
 750	}
 751	/*
 752	 * Remove Linux/SCSI struct scsi_device by HCTL
 753	 */
 754	scsi_remove_device(sd);
 755	scsi_device_put(sd);
 756
 757	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
 758
 759	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 760}
 761
 762/* End items for tcm_loop_port_cit */
 763
 764static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
 765		struct config_item *item, char *page)
 766{
 767	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 768	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 769						   tl_se_tpg);
 770
 771	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
 772}
 773
 774static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
 775		struct config_item *item, const char *page, size_t count)
 776{
 777	struct se_portal_group *se_tpg = attrib_to_tpg(item);
 778	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
 779						   tl_se_tpg);
 780	unsigned long val;
 781	int ret = kstrtoul(page, 0, &val);
 782
 783	if (ret) {
 784		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
 785		return ret;
 786	}
 787	if (val != 0 && val != 1 && val != 3) {
  788		pr_err("Invalid tcm_loop fabric_prot_type: %lu\n", val);
 789		return -EINVAL;
 790	}
 791	tl_tpg->tl_fabric_prot_type = val;
 792
 793	return count;
 794}
 795
 796CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
 797
 798static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 799	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
 800	NULL,
 801};
 802
 803/* Start items for tcm_loop_nexus_cit */
 804
 805static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
 806				  struct se_session *se_sess, void *p)
 807{
 808	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 809					struct tcm_loop_tpg, tl_se_tpg);
 810
 811	tl_tpg->tl_nexus = p;
 812	return 0;
 813}
 814
 815static int tcm_loop_make_nexus(
 816	struct tcm_loop_tpg *tl_tpg,
 817	const char *name)
 818{
 819	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 820	struct tcm_loop_nexus *tl_nexus;
 821	int ret;
 822
 823	if (tl_tpg->tl_nexus) {
 824		pr_debug("tl_tpg->tl_nexus already exists\n");
 825		return -EEXIST;
 826	}
 827
 828	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 829	if (!tl_nexus) {
 830		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 831		return -ENOMEM;
 832	}
 833
 834	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
 835					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
 836					name, tl_nexus, tcm_loop_alloc_sess_cb);
 837	if (IS_ERR(tl_nexus->se_sess)) {
 838		ret = PTR_ERR(tl_nexus->se_sess);
 839		kfree(tl_nexus);
 840		return ret;
 841	}
 842
 843	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 844		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 845		name);
 846	return 0;
 847}
 848
 849static int tcm_loop_drop_nexus(
 850	struct tcm_loop_tpg *tpg)
 851{
 852	struct se_session *se_sess;
 853	struct tcm_loop_nexus *tl_nexus;
 854
 855	tl_nexus = tpg->tl_nexus;
 856	if (!tl_nexus)
 857		return -ENODEV;
 858
 859	se_sess = tl_nexus->se_sess;
 860	if (!se_sess)
 861		return -ENODEV;
 862
 863	if (atomic_read(&tpg->tl_tpg_port_count)) {
 864		pr_err("Unable to remove TCM_Loop I_T Nexus with"
 865			" active TPG port count: %d\n",
 866			atomic_read(&tpg->tl_tpg_port_count));
 867		return -EPERM;
 868	}
 869
 870	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
 871		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
 872		tl_nexus->se_sess->se_node_acl->initiatorname);
 873	/*
 874	 * Release the SCSI I_T Nexus to the emulated Target Port
 875	 */
 876	transport_deregister_session(tl_nexus->se_sess);
 877	tpg->tl_nexus = NULL;
 878	kfree(tl_nexus);
 879	return 0;
 880}
 881
 882/* End items for tcm_loop_nexus_cit */
 883
 884static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
 885{
 886	struct se_portal_group *se_tpg = to_tpg(item);
 887	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 888			struct tcm_loop_tpg, tl_se_tpg);
 889	struct tcm_loop_nexus *tl_nexus;
 890	ssize_t ret;
 891
 892	tl_nexus = tl_tpg->tl_nexus;
 893	if (!tl_nexus)
 894		return -ENODEV;
 895
 896	ret = snprintf(page, PAGE_SIZE, "%s\n",
 897		tl_nexus->se_sess->se_node_acl->initiatorname);
 898
 899	return ret;
 900}
 901
 902static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
 903		const char *page, size_t count)
 904{
 905	struct se_portal_group *se_tpg = to_tpg(item);
 906	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 907			struct tcm_loop_tpg, tl_se_tpg);
 908	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 909	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
 910	int ret;
 911	/*
 912	 * Shutdown the active I_T nexus if 'NULL' is passed..
 913	 */
 914	if (!strncmp(page, "NULL", 4)) {
 915		ret = tcm_loop_drop_nexus(tl_tpg);
 916		return (!ret) ? count : ret;
 917	}
 918	/*
 919	 * Otherwise make sure the passed virtual Initiator port WWN matches
 920	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
 921	 * tcm_loop_make_nexus()
 922	 */
 923	if (strlen(page) >= TL_WWN_ADDR_LEN) {
  924		pr_err("Emulated NAA SAS Address: %s, exceeds"
 925				" max: %d\n", page, TL_WWN_ADDR_LEN);
 926		return -EINVAL;
 927	}
 928	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
 929
 930	ptr = strstr(i_port, "naa.");
 931	if (ptr) {
 932		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
 933			pr_err("Passed SAS Initiator Port %s does not"
 934				" match target port protoid: %s\n", i_port,
 935				tcm_loop_dump_proto_id(tl_hba));
 936			return -EINVAL;
 937		}
 938		port_ptr = &i_port[0];
 939		goto check_newline;
 940	}
 941	ptr = strstr(i_port, "fc.");
 942	if (ptr) {
 943		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
 944			pr_err("Passed FCP Initiator Port %s does not"
 945				" match target port protoid: %s\n", i_port,
 946				tcm_loop_dump_proto_id(tl_hba));
 947			return -EINVAL;
 948		}
 949		port_ptr = &i_port[3]; /* Skip over "fc." */
 950		goto check_newline;
 951	}
 952	ptr = strstr(i_port, "iqn.");
 953	if (ptr) {
 954		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
 955			pr_err("Passed iSCSI Initiator Port %s does not"
 956				" match target port protoid: %s\n", i_port,
 957				tcm_loop_dump_proto_id(tl_hba));
 958			return -EINVAL;
 959		}
 960		port_ptr = &i_port[0];
 961		goto check_newline;
 962	}
 963	pr_err("Unable to locate prefix for emulated Initiator Port:"
 964			" %s\n", i_port);
 965	return -EINVAL;
 966	/*
 967	 * Clear any trailing newline for the NAA WWN
 968	 */
 969check_newline:
 970	if (i_port[strlen(i_port)-1] == '\n')
 971		i_port[strlen(i_port)-1] = '\0';
 972
 973	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
 974	if (ret < 0)
 975		return ret;
 976
 977	return count;
 978}
 979
 980static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
 981		char *page)
 982{
 983	struct se_portal_group *se_tpg = to_tpg(item);
 984	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 985			struct tcm_loop_tpg, tl_se_tpg);
 986	const char *status = NULL;
 987	ssize_t ret = -EINVAL;
 988
 989	switch (tl_tpg->tl_transport_status) {
 990	case TCM_TRANSPORT_ONLINE:
 991		status = "online";
 992		break;
 993	case TCM_TRANSPORT_OFFLINE:
 994		status = "offline";
 995		break;
 996	default:
 997		break;
 998	}
 999
1000	if (status)
1001		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
1002
1003	return ret;
1004}
1005
1006static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
1007		const char *page, size_t count)
1008{
1009	struct se_portal_group *se_tpg = to_tpg(item);
1010	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1011			struct tcm_loop_tpg, tl_se_tpg);
1012
1013	if (!strncmp(page, "online", 6)) {
1014		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1015		return count;
1016	}
1017	if (!strncmp(page, "offline", 7)) {
1018		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1019		if (tl_tpg->tl_nexus) {
1020			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
1021
1022			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
1023		}
1024		return count;
1025	}
1026	return -EINVAL;
1027}
1028
1029static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
1030					 char *page)
1031{
1032	struct se_portal_group *se_tpg = to_tpg(item);
1033	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1034			struct tcm_loop_tpg, tl_se_tpg);
1035	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1036
1037	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
1038			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
1039}
1040
1041CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
1042CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
1043CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
1044
1045static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1046	&tcm_loop_tpg_attr_nexus,
1047	&tcm_loop_tpg_attr_transport_status,
1048	&tcm_loop_tpg_attr_address,
1049	NULL,
1050};
1051
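The nexus, transport_status and address attributes defined above are driven entirely from userspace through configfs. A hedged sketch of exercising them in C (the WWNs, the /sys/kernel/config mount point and the minimal error handling are assumptions for illustration; in practice a tool such as targetcli manages this tree):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#define TPG "/sys/kernel/config/target/loopback/naa.6001405c3e9a8a00/tpgt_0"

static void write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* mkdir on the configfs tree ends up in tcm_loop_make_scsi_hba()
	 * and tcm_loop_make_naa_tpg() above. */
	mkdir("/sys/kernel/config/target/loopback/naa.6001405c3e9a8a00", 0755);
	mkdir(TPG, 0755);

	/* Writing 'nexus' runs tcm_loop_tpg_nexus_store(); writing the
	 * literal string NULL later would drop the nexus again. */
	write_attr(TPG "/nexus", "naa.6001405c3e9a8a01");
	write_attr(TPG "/transport_status", "online");

	/* 'address' reports the emulated SCSI host as <host_no>:0:<tpgt>. */
	fd = open(TPG "/address", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("%s", buf);
		}
		close(fd);
	}
	return 0;
}
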
1052/* Start items for tcm_loop_naa_cit */
1053
1054static struct se_portal_group *tcm_loop_make_naa_tpg(
1055	struct se_wwn *wwn,
1056	struct config_group *group,
1057	const char *name)
1058{
1059	struct tcm_loop_hba *tl_hba = container_of(wwn,
1060			struct tcm_loop_hba, tl_hba_wwn);
1061	struct tcm_loop_tpg *tl_tpg;
1062	int ret;
1063	unsigned long tpgt;
1064
1065	if (strstr(name, "tpgt_") != name) {
1066		pr_err("Unable to locate \"tpgt_#\" directory"
1067				" group\n");
1068		return ERR_PTR(-EINVAL);
1069	}
1070	if (kstrtoul(name+5, 10, &tpgt))
1071		return ERR_PTR(-EINVAL);
1072
1073	if (tpgt >= TL_TPGS_PER_HBA) {
1074		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
1075				" %u\n", tpgt, TL_TPGS_PER_HBA);
1076		return ERR_PTR(-EINVAL);
1077	}
1078	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1079	tl_tpg->tl_hba = tl_hba;
1080	tl_tpg->tl_tpgt = tpgt;
1081	/*
 1082	 * Register the tl_tpg as an emulated TCM Target Endpoint
1083	 */
1084	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
1085	if (ret < 0)
1086		return ERR_PTR(-ENOMEM);
1087
1088	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1089		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
1090		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1091
1092	return &tl_tpg->tl_se_tpg;
1093}
1094
1095static void tcm_loop_drop_naa_tpg(
1096	struct se_portal_group *se_tpg)
1097{
1098	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1099	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1100				struct tcm_loop_tpg, tl_se_tpg);
1101	struct tcm_loop_hba *tl_hba;
1102	unsigned short tpgt;
1103
1104	tl_hba = tl_tpg->tl_hba;
1105	tpgt = tl_tpg->tl_tpgt;
1106	/*
1107	 * Release the I_T Nexus for the Virtual target link if present
1108	 */
1109	tcm_loop_drop_nexus(tl_tpg);
1110	/*
 1111	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
1112	 */
1113	core_tpg_deregister(se_tpg);
1114
1115	tl_tpg->tl_hba = NULL;
1116	tl_tpg->tl_tpgt = 0;
1117
1118	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1119		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1120		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1121}
1122
1123/* End items for tcm_loop_naa_cit */
1124
1125/* Start items for tcm_loop_cit */
1126
1127static struct se_wwn *tcm_loop_make_scsi_hba(
1128	struct target_fabric_configfs *tf,
1129	struct config_group *group,
1130	const char *name)
1131{
1132	struct tcm_loop_hba *tl_hba;
1133	struct Scsi_Host *sh;
1134	char *ptr;
1135	int ret, off = 0;
1136
1137	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1138	if (!tl_hba) {
1139		pr_err("Unable to allocate struct tcm_loop_hba\n");
1140		return ERR_PTR(-ENOMEM);
1141	}
1142	/*
1143	 * Determine the emulated Protocol Identifier and Target Port Name
1144	 * based on the incoming configfs directory name.
1145	 */
1146	ptr = strstr(name, "naa.");
1147	if (ptr) {
1148		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1149		goto check_len;
1150	}
1151	ptr = strstr(name, "fc.");
1152	if (ptr) {
1153		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1154		off = 3; /* Skip over "fc." */
1155		goto check_len;
1156	}
1157	ptr = strstr(name, "iqn.");
1158	if (!ptr) {
1159		pr_err("Unable to locate prefix for emulated Target "
1160				"Port: %s\n", name);
1161		ret = -EINVAL;
1162		goto out;
1163	}
1164	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1165
1166check_len:
1167	if (strlen(name) >= TL_WWN_ADDR_LEN) {
1168		pr_err("Emulated NAA %s Address: %s, exceeds"
 1169			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
1170			TL_WWN_ADDR_LEN);
1171		ret = -EINVAL;
1172		goto out;
1173	}
1174	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1175
1176	/*
1177	 * Call device_register(tl_hba->dev) to register the emulated
1178	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
1179	 * device_register() callbacks in tcm_loop_driver_probe()
1180	 */
1181	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1182	if (ret)
1183		goto out;
1184
1185	sh = tl_hba->sh;
1186	tcm_loop_hba_no_cnt++;
1187	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1188		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1189		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1190
1191	return &tl_hba->tl_hba_wwn;
1192out:
1193	kfree(tl_hba);
1194	return ERR_PTR(ret);
1195}
1196
1197static void tcm_loop_drop_scsi_hba(
1198	struct se_wwn *wwn)
1199{
1200	struct tcm_loop_hba *tl_hba = container_of(wwn,
1201				struct tcm_loop_hba, tl_hba_wwn);
1202
1203	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1204		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1205		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1206		tl_hba->sh->host_no);
1207	/*
1208	 * Call device_unregister() on the original tl_hba->dev.
1209	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
1210	 * release *tl_hba;
1211	 */
1212	device_unregister(&tl_hba->dev);
1213}
1214
1215/* Start items for tcm_loop_cit */
1216static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1217{
1218	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1219}
1220
1221CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
1222
1223static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1224	&tcm_loop_wwn_attr_version,
1225	NULL,
1226};
1227
1228/* End items for tcm_loop_cit */
1229
1230static const struct target_core_fabric_ops loop_ops = {
1231	.module				= THIS_MODULE,
1232	.name				= "loopback",
1233	.get_fabric_name		= tcm_loop_get_fabric_name,
1234	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
1235	.tpg_get_tag			= tcm_loop_get_tag,
1236	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
1237	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
1238	.tpg_check_demo_mode_write_protect =
1239				tcm_loop_check_demo_mode_write_protect,
1240	.tpg_check_prod_mode_write_protect =
1241				tcm_loop_check_prod_mode_write_protect,
1242	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
1243	.tpg_get_inst_index		= tcm_loop_get_inst_index,
1244	.check_stop_free		= tcm_loop_check_stop_free,
1245	.release_cmd			= tcm_loop_release_cmd,
1246	.shutdown_session		= tcm_loop_shutdown_session,
1247	.close_session			= tcm_loop_close_session,
1248	.sess_get_index			= tcm_loop_sess_get_index,
1249	.write_pending			= tcm_loop_write_pending,
1250	.write_pending_status		= tcm_loop_write_pending_status,
1251	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
1252	.get_cmd_state			= tcm_loop_get_cmd_state,
1253	.queue_data_in			= tcm_loop_queue_data_in,
1254	.queue_status			= tcm_loop_queue_status,
1255	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
1256	.aborted_task			= tcm_loop_aborted_task,
1257	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
1258	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
1259	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
1260	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
1261	.fabric_post_link		= tcm_loop_port_link,
1262	.fabric_pre_unlink		= tcm_loop_port_unlink,
1263	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
1264	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
1265	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
1266};
1267
1268static int __init tcm_loop_fabric_init(void)
1269{
1270	int ret = -ENOMEM;
1271
1272	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1273	if (!tcm_loop_workqueue)
1274		goto out;
1275
1276	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1277				sizeof(struct tcm_loop_cmd),
1278				__alignof__(struct tcm_loop_cmd),
1279				0, NULL);
1280	if (!tcm_loop_cmd_cache) {
1281		pr_debug("kmem_cache_create() for"
1282			" tcm_loop_cmd_cache failed\n");
1283		goto out_destroy_workqueue;
1284	}
1285
1286	ret = tcm_loop_alloc_core_bus();
1287	if (ret)
1288		goto out_destroy_cache;
1289
1290	ret = target_register_template(&loop_ops);
1291	if (ret)
1292		goto out_release_core_bus;
1293
1294	return 0;
1295
1296out_release_core_bus:
1297	tcm_loop_release_core_bus();
1298out_destroy_cache:
1299	kmem_cache_destroy(tcm_loop_cmd_cache);
1300out_destroy_workqueue:
1301	destroy_workqueue(tcm_loop_workqueue);
1302out:
1303	return ret;
1304}
1305
1306static void __exit tcm_loop_fabric_exit(void)
1307{
1308	target_unregister_template(&loop_ops);
1309	tcm_loop_release_core_bus();
1310	kmem_cache_destroy(tcm_loop_cmd_cache);
1311	destroy_workqueue(tcm_loop_workqueue);
1312}
1313
1314MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1315MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1316MODULE_LICENSE("GPL");
1317module_init(tcm_loop_fabric_init);
1318module_exit(tcm_loop_fabric_exit);
drivers/target/loopback/tcm_loop.c (Linux v3.5.6)
   1/*******************************************************************************
   2 *
   3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
   4 * for emulated SAS initiator ports
   5 *
   6 * © Copyright 2011 RisingTide Systems LLC.
   7 *
   8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   9 *
  10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 ****************************************************************************/
  22
  23#include <linux/module.h>
  24#include <linux/moduleparam.h>
  25#include <linux/init.h>
  26#include <linux/slab.h>
  27#include <linux/types.h>
  28#include <linux/configfs.h>
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_tcq.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <scsi/scsi_cmnd.h>
  34
  35#include <target/target_core_base.h>
  36#include <target/target_core_fabric.h>
  37#include <target/target_core_fabric_configfs.h>
  38#include <target/target_core_configfs.h>
  39
  40#include "tcm_loop.h"
  41
  42#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
  43
  44/* Local pointer to allocated TCM configfs fabric module */
  45static struct target_fabric_configfs *tcm_loop_fabric_configfs;
  46
  47static struct workqueue_struct *tcm_loop_workqueue;
  48static struct kmem_cache *tcm_loop_cmd_cache;
  49
  50static int tcm_loop_hba_no_cnt;
  51
  52static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  53
  54/*
  55 * Called from struct target_core_fabric_ops->check_stop_free()
  56 */
  57static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
  58{
  59	/*
  60	 * Do not release struct se_cmd's containing a valid TMR
  61	 * pointer.  These will be released directly in tcm_loop_device_reset()
  62	 * with transport_generic_free_cmd().
  63	 */
  64	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
  65		return 0;
  66	/*
  67	 * Release the struct se_cmd, which will make a callback to release
  68	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
  69	 */
  70	transport_generic_free_cmd(se_cmd, 0);
  71	return 1;
  72}
  73
  74static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
  75{
  76	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
  77				struct tcm_loop_cmd, tl_se_cmd);
  78
  79	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  80}
  81
  82static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer,
  83				char **start, off_t offset,
  84				int length, int inout)
  85{
  86	return sprintf(buffer, "tcm_loop_proc_info()\n");
  87}
  88
  89static int tcm_loop_driver_probe(struct device *);
  90static int tcm_loop_driver_remove(struct device *);
  91
  92static int pseudo_lld_bus_match(struct device *dev,
  93				struct device_driver *dev_driver)
  94{
  95	return 1;
  96}
  97
  98static struct bus_type tcm_loop_lld_bus = {
  99	.name			= "tcm_loop_bus",
 100	.match			= pseudo_lld_bus_match,
 101	.probe			= tcm_loop_driver_probe,
 102	.remove			= tcm_loop_driver_remove,
 103};
 104
 105static struct device_driver tcm_loop_driverfs = {
 106	.name			= "tcm_loop",
 107	.bus			= &tcm_loop_lld_bus,
 108};
 109/*
 110 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 111 */
 112struct device *tcm_loop_primary;
 113
 114/*
 115 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
 116 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
 117 */
 118static int tcm_loop_change_queue_depth(
 119	struct scsi_device *sdev,
 120	int depth,
 121	int reason)
 122{
 123	switch (reason) {
 124	case SCSI_QDEPTH_DEFAULT:
 125		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
 126		break;
 127	case SCSI_QDEPTH_QFULL:
 128		scsi_track_queue_full(sdev, depth);
 129		break;
 130	case SCSI_QDEPTH_RAMP_UP:
 131		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
 132		break;
 133	default:
 134		return -EOPNOTSUPP;
 135	}
 136	return sdev->queue_depth;
 137}
 138
 139/*
 140 * Locate the SAM Task Attr from struct scsi_cmnd *
 141 */
 142static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
 143{
 144	if (sc->device->tagged_supported) {
 145		switch (sc->tag) {
 146		case HEAD_OF_QUEUE_TAG:
 147			return MSG_HEAD_TAG;
 148		case ORDERED_QUEUE_TAG:
 149			return MSG_ORDERED_TAG;
 150		default:
 151			break;
 152		}
 153	}
 154
 155	return MSG_SIMPLE_TAG;
 156}
 157
 158static void tcm_loop_submission_work(struct work_struct *work)
 159{
 160	struct tcm_loop_cmd *tl_cmd =
 161		container_of(work, struct tcm_loop_cmd, work);
 162	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
 163	struct scsi_cmnd *sc = tl_cmd->sc;
 164	struct tcm_loop_nexus *tl_nexus;
 165	struct tcm_loop_hba *tl_hba;
 166	struct tcm_loop_tpg *tl_tpg;
 167	struct scatterlist *sgl_bidi = NULL;
 168	u32 sgl_bidi_count = 0;
 169	int ret;
 170
 171	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 172	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 173
 174	/*
 175	 * Ensure that this tl_tpg reference from the incoming sc->device->id
 176	 * has already been configured via tcm_loop_make_naa_tpg().
 177	 */
 178	if (!tl_tpg->tl_hba) {
 179		set_host_byte(sc, DID_NO_CONNECT);
 180		goto out_done;
 181	}
 182
 183	tl_nexus = tl_hba->tl_nexus;
 184	if (!tl_nexus) {
 185		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
 186				" does not exist\n");
 187		set_host_byte(sc, DID_ERROR);
 188		goto out_done;
 189	}
 190
 191	transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo,
 192			tl_nexus->se_sess,
 193			scsi_bufflen(sc), sc->sc_data_direction,
 194			tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]);
 195
 196	if (scsi_bidi_cmnd(sc)) {
 197		struct scsi_data_buffer *sdb = scsi_in(sc);
 198
 199		sgl_bidi = sdb->table.sgl;
 200		sgl_bidi_count = sdb->table.nents;
 201		se_cmd->se_cmd_flags |= SCF_BIDI;
 202
 203	}
 204
 205	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
 206		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 207		set_host_byte(sc, DID_NO_CONNECT);
 208		goto out_done;
 209	}
 210
 211	/*
 212	 * Because some userspace code via scsi-generic do not memset their
 213	 * associated read buffers, go ahead and do that here for type
 214	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
 215	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
 216	 * by target core in target_setup_cmd_from_cdb() ->
 217	 * transport_generic_cmd_sequencer().
 218	 */
 219	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
 220	    se_cmd->data_direction == DMA_FROM_DEVICE) {
 221		struct scatterlist *sg = scsi_sglist(sc);
 222		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
 223
 224		if (buf != NULL) {
 225			memset(buf, 0, sg->length);
 226			kunmap(sg_page(sg));
 227		}
 228	}
 229
 230	ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
 231	if (ret == -ENOMEM) {
 232		transport_send_check_condition_and_sense(se_cmd,
 233				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 234		transport_generic_free_cmd(se_cmd, 0);
 235		return;
 236	} else if (ret < 0) {
 237		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
 238			tcm_loop_queue_status(se_cmd);
 239		else
 240			transport_send_check_condition_and_sense(se_cmd,
 241					se_cmd->scsi_sense_reason, 0);
 242		transport_generic_free_cmd(se_cmd, 0);
 243		return;
 244	}
 245
 246	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
 247			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
 248	if (ret) {
 249		transport_send_check_condition_and_sense(se_cmd,
 250					se_cmd->scsi_sense_reason, 0);
 251		transport_generic_free_cmd(se_cmd, 0);
 252		return;
 253	}
 254	transport_handle_cdb_direct(se_cmd);
 255	return;
 256
 257out_done:
 258	sc->scsi_done(sc);
 259	return;
 260}
 261
 262/*
 263 * ->queuecommand can be and usually is called from interrupt context, so
 264 * defer the actual submission to a workqueue.
 265 */
 266static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 267{
 268	struct tcm_loop_cmd *tl_cmd;
 269
 270	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
 271		" scsi_buf_len: %u\n", sc->device->host->host_no,
 272		sc->device->id, sc->device->channel, sc->device->lun,
 273		sc->cmnd[0], scsi_bufflen(sc));
 274
 275	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 276	if (!tl_cmd) {
 277		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 278		set_host_byte(sc, DID_ERROR);
 279		sc->scsi_done(sc);
 280		return 0;
 281	}
 282
 283	tl_cmd->sc = sc;
 284	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
 285	queue_work(tcm_loop_workqueue, &tl_cmd->work);
 286	return 0;
 287}
 288
 289/*
 290 * Called from SCSI EH process context to issue a LUN_RESET TMR
 291 * to struct scsi_device
 292 */
 293static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 294{
 295	struct se_cmd *se_cmd = NULL;
 296	struct se_portal_group *se_tpg;
 297	struct se_session *se_sess;
 298	struct tcm_loop_cmd *tl_cmd = NULL;
 299	struct tcm_loop_hba *tl_hba;
 300	struct tcm_loop_nexus *tl_nexus;
 301	struct tcm_loop_tmr *tl_tmr = NULL;
 302	struct tcm_loop_tpg *tl_tpg;
 303	int ret = FAILED, rc;
 304	/*
  305	 * Locate the struct tcm_loop_hba pointer
 306	 */
 307	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 308	/*
 309	 * Locate the tl_nexus and se_sess pointers
 310	 */
 311	tl_nexus = tl_hba->tl_nexus;
 312	if (!tl_nexus) {
 313		pr_err("Unable to perform device reset without"
 314				" active I_T Nexus\n");
 315		return FAILED;
 316	}
 317	se_sess = tl_nexus->se_sess;
 318	/*
 319	 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
 320	 */
 321	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 322	se_tpg = &tl_tpg->tl_se_tpg;
 323
 324	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 325	if (!tl_cmd) {
 326		pr_err("Unable to allocate memory for tl_cmd\n");
 327		return FAILED;
 328	}
 329
 330	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 331	if (!tl_tmr) {
 332		pr_err("Unable to allocate memory for tl_tmr\n");
 333		goto release;
 334	}
 335	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
 336
 337	se_cmd = &tl_cmd->tl_se_cmd;
 338	/*
 339	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
 340	 */
 341	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
 342				DMA_NONE, MSG_SIMPLE_TAG,
 343				&tl_cmd->tl_sense_buf[0]);
 344
 345	rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL);
 346	if (rc < 0)
 347		goto release;
 348	/*
 349	 * Locate the underlying TCM struct se_lun from sc->device->lun
 350	 */
 351	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
 352		goto release;
 353	/*
 354	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
 355	 * to wake us up.
 356	 */
 357	transport_generic_handle_tmr(se_cmd);
 358	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
 359	/*
 360	 * The TMR LUN_RESET has completed, check the response status and
 361	 * then release allocations.
 362	 */
 363	ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
 364		SUCCESS : FAILED;
 365release:
 366	if (se_cmd)
 367		transport_generic_free_cmd(se_cmd, 1);
 368	else
 369		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 370	kfree(tl_tmr);
 371	return ret;
 372}
 373
 374static int tcm_loop_slave_alloc(struct scsi_device *sd)
 375{
 376	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
 377	return 0;
 378}
 379
 380static int tcm_loop_slave_configure(struct scsi_device *sd)
 381{
 382	return 0;
 383}
 384
 385static struct scsi_host_template tcm_loop_driver_template = {
 386	.proc_info		= tcm_loop_proc_info,
 387	.proc_name		= "tcm_loopback",
 388	.name			= "TCM_Loopback",
 389	.queuecommand		= tcm_loop_queuecommand,
 390	.change_queue_depth	= tcm_loop_change_queue_depth,
 391	.eh_device_reset_handler = tcm_loop_device_reset,
 392	.can_queue		= 1024,
 393	.this_id		= -1,
 394	.sg_tablesize		= 256,
 395	.cmd_per_lun		= 1024,
 396	.max_sectors		= 0xFFFF,
 397	.use_clustering		= DISABLE_CLUSTERING,
 398	.slave_alloc		= tcm_loop_slave_alloc,
 399	.slave_configure	= tcm_loop_slave_configure,
 400	.module			= THIS_MODULE,
 401};
 402
 403static int tcm_loop_driver_probe(struct device *dev)
 404{
 405	struct tcm_loop_hba *tl_hba;
 406	struct Scsi_Host *sh;
 407	int error;
 408
 409	tl_hba = to_tcm_loop_hba(dev);
 410
 411	sh = scsi_host_alloc(&tcm_loop_driver_template,
 412			sizeof(struct tcm_loop_hba));
 413	if (!sh) {
 414		pr_err("Unable to allocate struct scsi_host\n");
 415		return -ENODEV;
 416	}
 417	tl_hba->sh = sh;
 418
 419	/*
 420	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
 421	 */
 422	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
 423	/*
 424	 * Setup single ID, Channel and LUN for now..
 425	 */
 426	sh->max_id = 2;
 427	sh->max_lun = 0;
 428	sh->max_channel = 0;
 429	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
 430
 431	error = scsi_add_host(sh, &tl_hba->dev);
 432	if (error) {
 433		pr_err("%s: scsi_add_host failed\n", __func__);
 434		scsi_host_put(sh);
 435		return -ENODEV;
 436	}
 437	return 0;
 438}
 439
 440static int tcm_loop_driver_remove(struct device *dev)
 441{
 442	struct tcm_loop_hba *tl_hba;
 443	struct Scsi_Host *sh;
 444
 445	tl_hba = to_tcm_loop_hba(dev);
 446	sh = tl_hba->sh;
 447
 448	scsi_remove_host(sh);
 449	scsi_host_put(sh);
 450	return 0;
 451}
 452
 453static void tcm_loop_release_adapter(struct device *dev)
 454{
 455	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
 456
 457	kfree(tl_hba);
 458}
 459
 460/*
 461 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
 462 */
 463static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
 464{
 465	int ret;
 466
 467	tl_hba->dev.bus = &tcm_loop_lld_bus;
 468	tl_hba->dev.parent = tcm_loop_primary;
 469	tl_hba->dev.release = &tcm_loop_release_adapter;
 470	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
 471
 472	ret = device_register(&tl_hba->dev);
 473	if (ret) {
 474		pr_err("device_register() failed for"
 475				" tl_hba->dev: %d\n", ret);
 476		return -ENODEV;
 477	}
 478
 479	return 0;
 480}
 481
 482/*
  483 * Called from tcm_loop_fabric_init() in this file to load the emulated
 484 * tcm_loop SCSI bus.
 485 */
 486static int tcm_loop_alloc_core_bus(void)
 487{
 488	int ret;
 489
 490	tcm_loop_primary = root_device_register("tcm_loop_0");
 491	if (IS_ERR(tcm_loop_primary)) {
 492		pr_err("Unable to allocate tcm_loop_primary\n");
 493		return PTR_ERR(tcm_loop_primary);
 494	}
 495
 496	ret = bus_register(&tcm_loop_lld_bus);
 497	if (ret) {
 498		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 499		goto dev_unreg;
 500	}
 501
 502	ret = driver_register(&tcm_loop_driverfs);
 503	if (ret) {
  504		pr_err("driver_register() failed for "
 505				"tcm_loop_driverfs\n");
 506		goto bus_unreg;
 507	}
 508
 509	pr_debug("Initialized TCM Loop Core Bus\n");
 510	return ret;
 511
 512bus_unreg:
 513	bus_unregister(&tcm_loop_lld_bus);
 514dev_unreg:
 515	root_device_unregister(tcm_loop_primary);
 516	return ret;
 517}
 518
 519static void tcm_loop_release_core_bus(void)
 520{
 521	driver_unregister(&tcm_loop_driverfs);
 522	bus_unregister(&tcm_loop_lld_bus);
 523	root_device_unregister(tcm_loop_primary);
 524
 525	pr_debug("Releasing TCM Loop Core BUS\n");
 526}
 527
 528static char *tcm_loop_get_fabric_name(void)
 529{
 530	return "loopback";
 531}
 532
 533static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 534{
 535	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 536	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 537	/*
 538	 * tl_proto_id is set at tcm_loop_make_scsi_hba() time, based on the
 539	 * protocol dependent prefix of the passed configfs group name.
 540	 *
 541	 * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
 542	 * ProtocolID using target_core_fabric_lib.c symbols.
 543	 */
 544	switch (tl_hba->tl_proto_id) {
 545	case SCSI_PROTOCOL_SAS:
 546		return sas_get_fabric_proto_ident(se_tpg);
 547	case SCSI_PROTOCOL_FCP:
 548		return fc_get_fabric_proto_ident(se_tpg);
 549	case SCSI_PROTOCOL_ISCSI:
 550		return iscsi_get_fabric_proto_ident(se_tpg);
 551	default:
 552		pr_err("Unknown tl_proto_id: 0x%02x, using"
 553			" SAS emulation\n", tl_hba->tl_proto_id);
 554		break;
 555	}
 556
 557	return sas_get_fabric_proto_ident(se_tpg);
 558}
 559
 560static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 561{
 562	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 563	/*
 564	 * Return the passed NAA identifier for the SAS Target Port
 565	 */
 566	return &tl_tpg->tl_hba->tl_wwn_address[0];
 567}
 568
 569static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 570{
 571	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 572	/*
 573	 * This tag is used when forming the SCSI Name identifier in the
 574	 * INQUIRY EVPD=1 VPD page 0x83 to represent the SCSI Target Port.
 575	 */
 576	return tl_tpg->tl_tpgt;
 577}
 578
 579static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
 580{
 581	return 1;
 582}
 583
 584static u32 tcm_loop_get_pr_transport_id(
 585	struct se_portal_group *se_tpg,
 586	struct se_node_acl *se_nacl,
 587	struct t10_pr_registration *pr_reg,
 588	int *format_code,
 589	unsigned char *buf)
 590{
 591	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 592	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 593
 594	switch (tl_hba->tl_proto_id) {
 595	case SCSI_PROTOCOL_SAS:
 596		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
 597					format_code, buf);
 598	case SCSI_PROTOCOL_FCP:
 599		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
 600					format_code, buf);
 601	case SCSI_PROTOCOL_ISCSI:
 602		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
 603					format_code, buf);
 604	default:
 605		pr_err("Unknown tl_proto_id: 0x%02x, using"
 606			" SAS emulation\n", tl_hba->tl_proto_id);
 607		break;
 608	}
 609
 610	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
 611			format_code, buf);
 612}
 613
 614static u32 tcm_loop_get_pr_transport_id_len(
 615	struct se_portal_group *se_tpg,
 616	struct se_node_acl *se_nacl,
 617	struct t10_pr_registration *pr_reg,
 618	int *format_code)
 619{
 620	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 621	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 622
 623	switch (tl_hba->tl_proto_id) {
 624	case SCSI_PROTOCOL_SAS:
 625		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
 626					format_code);
 627	case SCSI_PROTOCOL_FCP:
 628		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
 629					format_code);
 630	case SCSI_PROTOCOL_ISCSI:
 631		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
 632					format_code);
 633	default:
 634		pr_err("Unknown tl_proto_id: 0x%02x, using"
 635			" SAS emulation\n", tl_hba->tl_proto_id);
 636		break;
 637	}
 638
 639	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
 640			format_code);
 641}
 642
 643/*
 644 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
 645 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
 646 */
 647static char *tcm_loop_parse_pr_out_transport_id(
 648	struct se_portal_group *se_tpg,
 649	const char *buf,
 650	u32 *out_tid_len,
 651	char **port_nexus_ptr)
 652{
 653	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
 654	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 655
 656	switch (tl_hba->tl_proto_id) {
 657	case SCSI_PROTOCOL_SAS:
 658		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
 659					port_nexus_ptr);
 660	case SCSI_PROTOCOL_FCP:
 661		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
 662					port_nexus_ptr);
 663	case SCSI_PROTOCOL_ISCSI:
 664		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
 665					port_nexus_ptr);
 666	default:
 667		pr_err("Unknown tl_proto_id: 0x%02x, using"
 668			" SAS emulation\n", tl_hba->tl_proto_id);
 669		break;
 670	}
 671
 672	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
 673			port_nexus_ptr);
 674}
 675
 676/*
 677 * Returning 1 here allows target_core_mod to generate a struct se_node_acl
 678 * based upon the incoming fabric dependent SCSI Initiator Port name
 679 */
 680static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
 681{
 682	return 1;
 683}
 684
 685static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
 686{
 687	return 0;
 688}
 689
 690/*
 691 * Allow the I_T Nexus full READ-WRITE access without explicit Initiator Node
 692 * ACLs for the local virtual Linux/SCSI LLD passthrough into a VM hypervisor guest
 693 */
 694static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
 695{
 696	return 0;
 697}
 698
 699/*
 700 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 701 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 702 * It has been added here as a no-op for target_fabric_tf_ops_check()
 703 */
 704static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
 705{
 706	return 0;
 707}
 708
 709static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
 710	struct se_portal_group *se_tpg)
 711{
 712	struct tcm_loop_nacl *tl_nacl;
 713
 714	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
 715	if (!tl_nacl) {
 716		pr_err("Unable to allocate struct tcm_loop_nacl\n");
 717		return NULL;
 718	}
 719
 720	return &tl_nacl->se_node_acl;
 721}
 722
 723static void tcm_loop_tpg_release_fabric_acl(
 724	struct se_portal_group *se_tpg,
 725	struct se_node_acl *se_nacl)
 726{
 727	struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
 728				struct tcm_loop_nacl, se_node_acl);
 729
 730	kfree(tl_nacl);
 731}
 732
 733static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 734{
 735	return 1;
 736}
 737
 738static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
 739{
 740	return 1;
 741}
 742
 743static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
 744{
 745	return;
 746}
 747
 748static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
 749{
 750	return 1;
 751}
 752
 753static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
 754{
 755	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 756			struct tcm_loop_cmd, tl_se_cmd);
 757
 758	return tl_cmd->sc_cmd_state;
 759}
 760
 761static int tcm_loop_shutdown_session(struct se_session *se_sess)
 762{
 763	return 0;
 764}
 765
 766static void tcm_loop_close_session(struct se_session *se_sess)
 767{
 768	return;
 769	}
 770
 771static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 772{
 773	/*
 774	 * Linux/SCSI has already sent down a struct scsi_cmnd with
 775	 * sc->sc_data_direction of DMA_TO_DEVICE and struct scatterlist array
 776	 * memory, which has already been mapped to struct se_cmd->t_mem_list
 777	 * format with transport_generic_map_mem_to_cmd().
 778	 *
 779	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 780	 * object execution queue.
 781	 */
 782	transport_generic_process_write(se_cmd);
 783	return 0;
 784}
 785
 786static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
 787{
 788	return 0;
 789}
 790
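/*
 * Complete a READ back to the initiating struct scsi_cmnd.  The payload
 * already resides in the scatterlist memory mapped from the scsi_cmnd at
 * submission time, so completion only needs to report GOOD status,
 * propagate any residual count and invoke ->scsi_done().
 */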
 791static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 792{
 793	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 794				struct tcm_loop_cmd, tl_se_cmd);
 795	struct scsi_cmnd *sc = tl_cmd->sc;
 796
 797	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 798		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
 799
 800	sc->result = SAM_STAT_GOOD;
 801	set_host_byte(sc, DID_OK);
 802	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 803	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 804		scsi_set_resid(sc, se_cmd->residual_count);
 805	sc->scsi_done(sc);
 806	return 0;
 807}
 808
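/*
 * Translate the completed se_cmd status back to the initiating struct
 * scsi_cmnd: copy sense data for CHECK CONDITION, propagate any residual
 * count for over/underflow, and complete the command via ->scsi_done().
 */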
 809static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 810{
 811	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 812				struct tcm_loop_cmd, tl_se_cmd);
 813	struct scsi_cmnd *sc = tl_cmd->sc;
 814
 815	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 816			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 817
 818	if (se_cmd->sense_buffer &&
 819	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 820	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 821
 822		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 823				SCSI_SENSE_BUFFERSIZE);
 824		sc->result = SAM_STAT_CHECK_CONDITION;
 825		set_driver_byte(sc, DRIVER_SENSE);
 826	} else
 827		sc->result = se_cmd->scsi_status;
 828
 829	set_host_byte(sc, DID_OK);
 830	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
 831	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
 832		scsi_set_resid(sc, se_cmd->residual_count);
 833	sc->scsi_done(sc);
 834	return 0;
 835}
 836
 837static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 838{
 839	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 840	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
 841	/*
 842	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
 843	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
 844	 */
 845	atomic_set(&tl_tmr->tmr_complete, 1);
 846	wake_up(&tl_tmr->tl_tmr_wait);
 847	return 0;
 848}
 849
 850static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
 851{
 852	return 0;
 853}
 854
 855static u16 tcm_loop_get_fabric_sense_len(void)
 856{
 857	return 0;
 858}
 859
 860static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 861{
 862	switch (tl_hba->tl_proto_id) {
 863	case SCSI_PROTOCOL_SAS:
 864		return "SAS";
 865	case SCSI_PROTOCOL_FCP:
 866		return "FCP";
 867	case SCSI_PROTOCOL_ISCSI:
 868		return "iSCSI";
 869	default:
 870		break;
 871	}
 872
 873	return "Unknown";
 874}
 875
 876/* Start items for tcm_loop_port_cit */
 877
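/*
 * Each TCM LUN linked into a TPG is surfaced to the SCSI midlayer at
 * H:C:T:L = <host_no>:0:<tpgt>:<unpacked_lun>, e.g. 2:0:0:0 for LUN 0 of
 * tpgt_0 on an HBA that was assigned SCSI host number 2 (host number here
 * purely illustrative).
 */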
 878static int tcm_loop_port_link(
 879	struct se_portal_group *se_tpg,
 880	struct se_lun *lun)
 881{
 882	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
 883				struct tcm_loop_tpg, tl_se_tpg);
 884	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 885
 886	atomic_inc(&tl_tpg->tl_tpg_port_count);
 887	smp_mb__after_atomic_inc();
 888	/*
 889	 * Add Linux/SCSI struct scsi_device by HCTL
 890	 */
 891	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 892
 893	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 894	return 0;
 895}
 896
 897static void tcm_loop_port_unlink(
 898	struct se_portal_group *se_tpg,
 899	struct se_lun *se_lun)
 900{
 901	struct scsi_device *sd;
 902	struct tcm_loop_hba *tl_hba;
 903	struct tcm_loop_tpg *tl_tpg;
 904
 905	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 906	tl_hba = tl_tpg->tl_hba;
 907
 908	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 909				se_lun->unpacked_lun);
 910	if (!sd) {
 911		pr_err("Unable to locate struct scsi_device for %d:%d:"
 912			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 913		return;
 914	}
 915	/*
 916	 * Remove Linux/SCSI struct scsi_device by HCTL
 917	 */
 918	scsi_remove_device(sd);
 919	scsi_device_put(sd);
 920
 921	atomic_dec(&tl_tpg->tl_tpg_port_count);
 922	smp_mb__after_atomic_dec();
 923
 924	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 925}
 926
 927/* End items for tcm_loop_port_cit */
 928
 929/* Start items for tcm_loop_nexus_cit */
 930
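/*
 * Establish the single emulated I_T nexus for this HBA: create a TCM
 * session, let demo-mode generate (or look up) the initiator node ACL for
 * 'name' via core_tpg_check_initiator_node_acl(), and register the session
 * as active with __transport_register_session().
 */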
 931static int tcm_loop_make_nexus(
 932	struct tcm_loop_tpg *tl_tpg,
 933	const char *name)
 934{
 935	struct se_portal_group *se_tpg;
 936	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 937	struct tcm_loop_nexus *tl_nexus;
 938	int ret = -ENOMEM;
 939
 940	if (tl_tpg->tl_hba->tl_nexus) {
 941		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
 942		return -EEXIST;
 943	}
 944	se_tpg = &tl_tpg->tl_se_tpg;
 945
 946	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 947	if (!tl_nexus) {
 948		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 949		return -ENOMEM;
 950	}
 951	/*
 952	 * Initialize the struct se_session pointer
 953	 */
 954	tl_nexus->se_sess = transport_init_session();
 955	if (IS_ERR(tl_nexus->se_sess)) {
 956		ret = PTR_ERR(tl_nexus->se_sess);
 957		goto out;
 958	}
 959	/*
 960	 * Since we are running in 'demo mode' this call will generate a
 961	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
 962	 * Initiator port name of the passed configfs group 'name'.
 963	 */
 964	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
 965				se_tpg, (unsigned char *)name);
 966	if (!tl_nexus->se_sess->se_node_acl) {
 967		transport_free_session(tl_nexus->se_sess);
 968		goto out;
 969	}
 970	/*
 971	 * Now, register the SAS I_T Nexus as active with the call to
 972	 * transport_register_session()
 973	 */
 974	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
 975			tl_nexus->se_sess, tl_nexus);
 976	tl_tpg->tl_hba->tl_nexus = tl_nexus;
 977	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 978		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 979		name);
 980	return 0;
 981
 982out:
 983	kfree(tl_nexus);
 984	return ret;
 985}
 986
 987static int tcm_loop_drop_nexus(
 988	struct tcm_loop_tpg *tpg)
 989{
 990	struct se_session *se_sess;
 991	struct tcm_loop_nexus *tl_nexus;
 992	struct tcm_loop_hba *tl_hba = tpg->tl_hba;
 993
 994	tl_nexus = tpg->tl_hba->tl_nexus;
 995	if (!tl_nexus)
 996		return -ENODEV;
 997
 998	se_sess = tl_nexus->se_sess;
 999	if (!se_sess)
1000		return -ENODEV;
1001
1002	if (atomic_read(&tpg->tl_tpg_port_count)) {
1003		pr_err("Unable to remove TCM_Loop I_T Nexus with"
1004			" active TPG port count: %d\n",
1005			atomic_read(&tpg->tl_tpg_port_count));
1006		return -EPERM;
1007	}
1008
1009	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
1010		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
1011		tl_nexus->se_sess->se_node_acl->initiatorname);
1012	/*
1013	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
1014	 */
1015	transport_deregister_session(tl_nexus->se_sess);
1016	tpg->tl_hba->tl_nexus = NULL;
1017	kfree(tl_nexus);
1018	return 0;
1019}
1020
1021/* End items for tcm_loop_nexus_cit */
1022
1023static ssize_t tcm_loop_tpg_show_nexus(
1024	struct se_portal_group *se_tpg,
1025	char *page)
1026{
1027	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1028			struct tcm_loop_tpg, tl_se_tpg);
1029	struct tcm_loop_nexus *tl_nexus;
1030	ssize_t ret;
1031
1032	tl_nexus = tl_tpg->tl_hba->tl_nexus;
1033	if (!tl_nexus)
1034		return -ENODEV;
1035
1036	ret = snprintf(page, PAGE_SIZE, "%s\n",
1037		tl_nexus->se_sess->se_node_acl->initiatorname);
1038
1039	return ret;
1040}
1041
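/*
 * Establish or tear down the emulated I_T nexus from configfs, e.g.
 * (WWN and path illustrative):
 *
 *   echo naa.60014051234abcd0 > \
 *     /sys/kernel/config/target/loopback/naa.<wwn>/tpgt_0/nexus
 *
 * Writing the literal string "NULL" drops the active nexus instead.
 */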
1042static ssize_t tcm_loop_tpg_store_nexus(
1043	struct se_portal_group *se_tpg,
1044	const char *page,
1045	size_t count)
1046{
1047	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1048			struct tcm_loop_tpg, tl_se_tpg);
1049	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1050	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
1051	int ret;
1052	/*
1053	 * Shutdown the active I_T nexus if 'NULL' is passed..
1054	 */
1055	if (!strncmp(page, "NULL", 4)) {
1056		ret = tcm_loop_drop_nexus(tl_tpg);
1057		return (!ret) ? count : ret;
1058	}
1059	/*
1060	 * Otherwise make sure the passed virtual Initiator port WWN matches
1061	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
1062	 * tcm_loop_make_nexus()
1063	 */
1064	if (strlen(page) >= TL_WWN_ADDR_LEN) {
1065		pr_err("Emulated NAA SAS Address: %s, exceeds"
1066				" max: %d\n", page, TL_WWN_ADDR_LEN);
1067		return -EINVAL;
1068	}
1069	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
1070
1071	ptr = strstr(i_port, "naa.");
1072	if (ptr) {
1073		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
1074			pr_err("Passed SAS Initiator Port %s does not"
1075				" match target port protoid: %s\n", i_port,
1076				tcm_loop_dump_proto_id(tl_hba));
1077			return -EINVAL;
1078		}
1079		port_ptr = &i_port[0];
1080		goto check_newline;
1081	}
1082	ptr = strstr(i_port, "fc.");
1083	if (ptr) {
1084		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
1085			pr_err("Passed FCP Initiator Port %s does not"
1086				" match target port protoid: %s\n", i_port,
1087				tcm_loop_dump_proto_id(tl_hba));
1088			return -EINVAL;
1089		}
1090		port_ptr = &i_port[3]; /* Skip over "fc." */
1091		goto check_newline;
1092	}
1093	ptr = strstr(i_port, "iqn.");
1094	if (ptr) {
1095		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
1096			pr_err("Passed iSCSI Initiator Port %s does not"
1097				" match target port protoid: %s\n", i_port,
1098				tcm_loop_dump_proto_id(tl_hba));
1099			return -EINVAL;
1100		}
1101		port_ptr = &i_port[0];
1102		goto check_newline;
1103	}
1104	pr_err("Unable to locate prefix for emulated Initiator Port:"
1105			" %s\n", i_port);
1106	return -EINVAL;
1107	/*
1108	 * Clear any trailing newline for the NAA WWN
1109	 */
1110check_newline:
1111	if (i_port[strlen(i_port)-1] == '\n')
1112		i_port[strlen(i_port)-1] = '\0';
1113
1114	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
1115	if (ret < 0)
1116		return ret;
1117
1118	return count;
1119}
1120
1121TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
1122
1123static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1124	&tcm_loop_tpg_nexus.attr,
1125	NULL,
1126};
1127
1128/* Start items for tcm_loop_naa_cit */
1129
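/*
 * Create a TPG under an emulated HBA endpoint.  The configfs directory
 * name must be of the form "tpgt_<N>" with N below TL_TPGS_PER_HBA, e.g.
 * mkdir of tpgt_0 inside the endpoint directory created by
 * tcm_loop_make_scsi_hba() below.
 */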
1130struct se_portal_group *tcm_loop_make_naa_tpg(
1131	struct se_wwn *wwn,
1132	struct config_group *group,
1133	const char *name)
1134{
1135	struct tcm_loop_hba *tl_hba = container_of(wwn,
1136			struct tcm_loop_hba, tl_hba_wwn);
1137	struct tcm_loop_tpg *tl_tpg;
1138	char *tpgt_str, *end_ptr;
1139	int ret;
1140	unsigned short int tpgt;
1141
1142	tpgt_str = strstr(name, "tpgt_");
1143	if (!tpgt_str) {
1144		pr_err("Unable to locate \"tpgt_#\" directory"
1145				" group\n");
1146		return ERR_PTR(-EINVAL);
1147	}
1148	tpgt_str += 5; /* Skip past "tpgt_" */
1149	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
1150
1151	if (tpgt >= TL_TPGS_PER_HBA) {
1152		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
1153				" %u\n", tpgt, TL_TPGS_PER_HBA);
1154		return ERR_PTR(-EINVAL);
1155	}
1156	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1157	tl_tpg->tl_hba = tl_hba;
1158	tl_tpg->tl_tpgt = tpgt;
1159	/*
1160	 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
1161	 */
1162	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
1163			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
1164			TRANSPORT_TPG_TYPE_NORMAL);
1165	if (ret < 0)
1166		return ERR_PTR(-ENOMEM);
1167
1168	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1169		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1170		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1171
1172	return &tl_tpg->tl_se_tpg;
1173}
1174
1175void tcm_loop_drop_naa_tpg(
1176	struct se_portal_group *se_tpg)
1177{
1178	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1179	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1180				struct tcm_loop_tpg, tl_se_tpg);
1181	struct tcm_loop_hba *tl_hba;
1182	unsigned short tpgt;
1183
1184	tl_hba = tl_tpg->tl_hba;
1185	tpgt = tl_tpg->tl_tpgt;
1186	/*
1187	 * Release the I_T Nexus for the Virtual SAS link if present
1188	 */
1189	tcm_loop_drop_nexus(tl_tpg);
1190	/*
1191	 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
1192	 */
1193	core_tpg_deregister(se_tpg);
1194
1195	tl_tpg->tl_hba = NULL;
1196	tl_tpg->tl_tpgt = 0;
1197
1198	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1199		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1200		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1201}
1202
1203/* End items for tcm_loop_naa_cit */
1204
1205/* Start items for tcm_loop_cit */
1206
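/*
 * Create an emulated HBA endpoint.  The configfs directory name prefix
 * selects the emulated protocol: "naa." for SAS, "fc." for FCP and "iqn."
 * for iSCSI, e.g. (WWN illustrative):
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014051234abcd0
 */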
1207struct se_wwn *tcm_loop_make_scsi_hba(
1208	struct target_fabric_configfs *tf,
1209	struct config_group *group,
1210	const char *name)
1211{
1212	struct tcm_loop_hba *tl_hba;
1213	struct Scsi_Host *sh;
1214	char *ptr;
1215	int ret, off = 0;
1216
1217	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1218	if (!tl_hba) {
1219		pr_err("Unable to allocate struct tcm_loop_hba\n");
1220		return ERR_PTR(-ENOMEM);
1221	}
1222	/*
1223	 * Determine the emulated Protocol Identifier and Target Port Name
1224	 * based on the incoming configfs directory name.
1225	 */
1226	ptr = strstr(name, "naa.");
1227	if (ptr) {
1228		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1229		goto check_len;
1230	}
1231	ptr = strstr(name, "fc.");
1232	if (ptr) {
1233		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1234		off = 3; /* Skip over "fc." */
1235		goto check_len;
1236	}
1237	ptr = strstr(name, "iqn.");
1238	if (!ptr) {
1239		pr_err("Unable to locate prefix for emulated Target "
1240				"Port: %s\n", name);
1241		ret = -EINVAL;
1242		goto out;
1243	}
1244	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1245
1246check_len:
1247	if (strlen(name) >= TL_WWN_ADDR_LEN) {
1248		pr_err("Emulated NAA %s Address: %s, exceeds"
1249			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
1250			TL_WWN_ADDR_LEN);
1251		ret = -EINVAL;
1252		goto out;
1253	}
1254	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1255
1256	/*
1257	 * Register the emulated HBA device on tcm_loop_lld_bus via
1258	 * tcm_loop_setup_hba_bus(); the bus probe callback tcm_loop_driver_probe()
1259	 * allocates and adds the struct Scsi_Host at tl_hba->sh.
1260	 */
1261	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1262	if (ret)
1263		goto out;
1264
1265	sh = tl_hba->sh;
1266	tcm_loop_hba_no_cnt++;
1267	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1268		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1269		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1270
1271	return &tl_hba->tl_hba_wwn;
1272out:
1273	kfree(tl_hba);
1274	return ERR_PTR(ret);
1275}
1276
1277void tcm_loop_drop_scsi_hba(
1278	struct se_wwn *wwn)
1279{
1280	struct tcm_loop_hba *tl_hba = container_of(wwn,
1281				struct tcm_loop_hba, tl_hba_wwn);
1282
1283	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1284		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
1285		tl_hba->tl_wwn_address, tl_hba->sh->host_no);
1286	/*
1287	 * Call device_unregister() on the original tl_hba->dev.
1288	 * tcm_loop_release_adapter() above will
1289	 * release *tl_hba;
1290	 */
1291	device_unregister(&tl_hba->dev);
1292}
1293
1294/* Start items for tcm_loop_cit */
1295static ssize_t tcm_loop_wwn_show_attr_version(
1296	struct target_fabric_configfs *tf,
1297	char *page)
1298{
1299	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1300}
1301
1302TF_WWN_ATTR_RO(tcm_loop, version);
1303
1304static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1305	&tcm_loop_wwn_version.attr,
1306	NULL,
1307};
1308
1309/* End items for tcm_loop_cit */
1310
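/*
 * Register the "loopback" fabric with TCM: allocate the fabric configfs
 * context, wire up the tf_ops callbacks defined above, attach the default
 * configfs attribute tables, and register the fabric so the
 * /sys/kernel/config/target/loopback/ tree becomes available.
 */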
1311static int tcm_loop_register_configfs(void)
1312{
1313	struct target_fabric_configfs *fabric;
1314	int ret;
1315	/*
1316	 * Set the TCM Loop HBA counter to zero
1317	 */
1318	tcm_loop_hba_no_cnt = 0;
1319	/*
1320	 * Register the top level struct config_item_type with TCM core
1321	 */
1322	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
1323	if (IS_ERR(fabric)) {
1324		pr_err("target_fabric_configfs_init() failed!\n");
1325		return PTR_ERR(fabric);
1326	}
1327	/*
1328	 * Setup the fabric API of function pointers used by target_core_mod
1329	 */
1330	fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
1331	fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
1332	fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
1333	fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
1334	fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
1335	fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
1336	fabric->tf_ops.tpg_get_pr_transport_id_len =
1337					&tcm_loop_get_pr_transport_id_len;
1338	fabric->tf_ops.tpg_parse_pr_out_transport_id =
1339					&tcm_loop_parse_pr_out_transport_id;
1340	fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
1341	fabric->tf_ops.tpg_check_demo_mode_cache =
1342					&tcm_loop_check_demo_mode_cache;
1343	fabric->tf_ops.tpg_check_demo_mode_write_protect =
1344					&tcm_loop_check_demo_mode_write_protect;
1345	fabric->tf_ops.tpg_check_prod_mode_write_protect =
1346					&tcm_loop_check_prod_mode_write_protect;
1347	/*
1348	 * The TCM loopback fabric module runs in demo-mode to a local
1349	 * virtual SCSI device, so fabric dependent initiator ACLs are
1350	 * not required.
1351	 */
1352	fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
1353	fabric->tf_ops.tpg_release_fabric_acl =
1354					&tcm_loop_tpg_release_fabric_acl;
1355	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
1356	/*
1357	 * Used for setting up remaining TCM resources in process context
1358	 */
1359	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
1360	fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
1361	fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
1362	fabric->tf_ops.close_session = &tcm_loop_close_session;
1363	fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
1364	fabric->tf_ops.sess_get_initiator_sid = NULL;
1365	fabric->tf_ops.write_pending = &tcm_loop_write_pending;
1366	fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
1367	/*
1368	 * Not used for TCM loopback
1369	 */
1370	fabric->tf_ops.set_default_node_attributes =
1371					&tcm_loop_set_default_node_attributes;
1372	fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
1373	fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
1374	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
1375	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
1376	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
1377	fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
1378	fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
1379
1380	/*
1381	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
1382	 */
1383	fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
1384	fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
1385	fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
1386	fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
1387	/*
1388	 * fabric_post_link() and fabric_pre_unlink() are used for
1389	 * registration and release of TCM Loop Virtual SCSI LUNs.
1390	 */
1391	fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
1392	fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
1393	fabric->tf_ops.fabric_make_np = NULL;
1394	fabric->tf_ops.fabric_drop_np = NULL;
1395	/*
1396	 * Setup default attribute lists for various fabric->tf_cit_tmpl
1397	 */
1398	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
1399	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
1400	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1401	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1402	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1403	/*
1404	 * Once fabric->tf_ops has been setup, now register the fabric for
1405	 * use within TCM
1406	 */
1407	ret = target_fabric_configfs_register(fabric);
1408	if (ret < 0) {
1409		pr_err("target_fabric_configfs_register() for"
1410				" TCM_Loop failed!\n");
1411		target_fabric_configfs_free(fabric);
1412		return -1;
1413	}
1414	/*
1415	 * Setup our local pointer to *fabric.
1416	 */
1417	tcm_loop_fabric_configfs = fabric;
1418	pr_debug("TCM_LOOP[0] - Set fabric ->"
1419			" tcm_loop_fabric_configfs\n");
1420	return 0;
1421}
1422
1423static void tcm_loop_deregister_configfs(void)
1424{
1425	if (!tcm_loop_fabric_configfs)
1426		return;
1427
1428	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
1429	tcm_loop_fabric_configfs = NULL;
1430	pr_debug("TCM_LOOP[0] - Cleared"
1431				" tcm_loop_fabric_configfs\n");
1432}
1433
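/*
 * Module init: allocate the workqueue and command cache first, then set up
 * the emulated core bus, and finally register the configfs fabric; errors
 * unwind in reverse order, mirrored by tcm_loop_fabric_exit() below.
 */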
1434static int __init tcm_loop_fabric_init(void)
1435{
1436	int ret = -ENOMEM;
1437
1438	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1439	if (!tcm_loop_workqueue)
1440		goto out;
1441
1442	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1443				sizeof(struct tcm_loop_cmd),
1444				__alignof__(struct tcm_loop_cmd),
1445				0, NULL);
1446	if (!tcm_loop_cmd_cache) {
1447		pr_debug("kmem_cache_create() for"
1448			" tcm_loop_cmd_cache failed\n");
1449		goto out_destroy_workqueue;
1450	}
1451
1452	ret = tcm_loop_alloc_core_bus();
1453	if (ret)
1454		goto out_destroy_cache;
1455
1456	ret = tcm_loop_register_configfs();
1457	if (ret)
1458		goto out_release_core_bus;
1459
1460	return 0;
1461
1462out_release_core_bus:
1463	tcm_loop_release_core_bus();
1464out_destroy_cache:
1465	kmem_cache_destroy(tcm_loop_cmd_cache);
1466out_destroy_workqueue:
1467	destroy_workqueue(tcm_loop_workqueue);
1468out:
1469	return ret;
1470}
1471
1472static void __exit tcm_loop_fabric_exit(void)
1473{
1474	tcm_loop_deregister_configfs();
1475	tcm_loop_release_core_bus();
1476	kmem_cache_destroy(tcm_loop_cmd_cache);
1477	destroy_workqueue(tcm_loop_workqueue);
1478}
1479
1480MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1481MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1482MODULE_LICENSE("GPL");
1483module_init(tcm_loop_fabric_init);
1484module_exit(tcm_loop_fabric_exit);