// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
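/*
 * Illustrative sketch (editorial, not part of the original file): fabric
 * drivers do not normally call this directly; it runs from the generic
 * submission path (target_submit_prep() in target_core_transport.c in this
 * kernel version), roughly:
 *
 *	rc = transport_lookup_cmd_lun(se_cmd);
 *	if (rc)
 *		... rc (e.g. TCM_NON_EXISTENT_LUN) is turned into sense data ...
 */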
 142
 143int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
 144{
 145	struct se_dev_entry *deve;
 146	struct se_lun *se_lun = NULL;
 147	struct se_session *se_sess = se_cmd->se_sess;
 148	struct se_node_acl *nacl = se_sess->se_node_acl;
 149	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 150	unsigned long flags;
 151
 152	rcu_read_lock();
 153	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
 154	if (deve) {
 155		se_lun = deve->se_lun;
 156
 157		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
 158			se_lun = NULL;
 159			goto out_unlock;
 160		}
 161
 162		se_cmd->se_lun = se_lun;
 
 
 
 
 
 
 
 163		se_cmd->pr_res_key = deve->pr_res_key;
 164		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 165		se_cmd->lun_ref_active = true;
 166	}
 167out_unlock:
 168	rcu_read_unlock();
 169
 170	if (!se_lun) {
 171		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 172			" Access for 0x%08llx for %s\n",
 173			se_cmd->se_tfo->fabric_name,
 174			se_cmd->orig_fe_lun,
 175			nacl->initiatorname);
 176		return -ENODEV;
 177	}
 178	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 179	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 
 
 180
 181	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
 182	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
 183	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
 184
 185	return 0;
 186}
 187EXPORT_SYMBOL(transport_lookup_tmr_lun);
 188
 189bool target_lun_is_rdonly(struct se_cmd *cmd)
 190{
 191	struct se_session *se_sess = cmd->se_sess;
 192	struct se_dev_entry *deve;
 193	bool ret;
 194
 195	rcu_read_lock();
 196	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
 197	ret = deve && deve->lun_access_ro;
 198	rcu_read_unlock();
 199
 200	return ret;
 201}
 202EXPORT_SYMBOL(target_lun_is_rdonly);
 203
 204/*
 205 * This function is called from core_scsi3_emulate_pro_register_and_move()
 206 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 207 * when a matching rtpi is found.
 208 */
 209struct se_dev_entry *core_get_se_deve_from_rtpi(
 210	struct se_node_acl *nacl,
 211	u16 rtpi)
 212{
 213	struct se_dev_entry *deve;
 214	struct se_lun *lun;
 
 215	struct se_portal_group *tpg = nacl->se_tpg;
 
 
 
 
 
 
 
 
 216
 217	rcu_read_lock();
 218	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 219		lun = deve->se_lun;
 220		if (!lun) {
 221			pr_err("%s device entries device pointer is"
 222				" NULL, but Initiator has access.\n",
 223				tpg->se_tpg_tfo->fabric_name);
 
 
 
 
 
 
 
 224			continue;
 225		}
 226		if (lun->lun_rtpi != rtpi)
 227			continue;
 228
 229		kref_get(&deve->pr_kref);
 230		rcu_read_unlock();
 
 231
 232		return deve;
 233	}
 234	rcu_read_unlock();
 235
 236	return NULL;
 237}
 238
 239void core_free_device_list_for_node(
 240	struct se_node_acl *nacl,
 241	struct se_portal_group *tpg)
 242{
 243	struct se_dev_entry *deve;
 
 
 244
 245	mutex_lock(&nacl->lun_entry_mutex);
 246	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
 247		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
 248	mutex_unlock(&nacl->lun_entry_mutex);
 249}
 250
 251void core_update_device_list_access(
 252	u64 mapped_lun,
 253	bool lun_access_ro,
 254	struct se_node_acl *nacl)
 255{
 256	struct se_dev_entry *deve;
 257
 258	mutex_lock(&nacl->lun_entry_mutex);
 259	deve = target_nacl_find_deve(nacl, mapped_lun);
 260	if (deve)
 261		deve->lun_access_ro = lun_access_ro;
 262	mutex_unlock(&nacl->lun_entry_mutex);
 263}
 264
 265/*
 266 * Called with rcu_read_lock or nacl->device_list_lock held.
 267 */
 268struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
 269{
 270	struct se_dev_entry *deve;
 
 271
 272	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
 273		if (deve->mapped_lun == mapped_lun)
 274			return deve;
 
 
 
 275
 276	return NULL;
 277}
 278EXPORT_SYMBOL(target_nacl_find_deve);
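/*
 * Illustrative caller pattern (editorial, not part of the original file):
 * entries returned by target_nacl_find_deve() are only stable inside the
 * RCU read-side critical section, e.g.:
 *
 *	rcu_read_lock();
 *	deve = target_nacl_find_deve(nacl, mapped_lun);
 *	if (deve)
 *		... read deve fields ...
 *	rcu_read_unlock();
 *
 * Holders of nacl->lun_entry_mutex (the update side) may call it without
 * rcu_read_lock(), as several functions above do.
 */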

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
			core_scsi3_ua_allocate(se_deve, asc, ascq);
		spin_unlock(&lun->lun_deve_lock);
	}
	spin_unlock(&dev->se_port_lock);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				 nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}
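/*
 * Editorial note (not part of the original file): the dynamic -> explicit
 * conversion above is a standard RCU replace sequence: unlink the old entry
 * with hlist_del_rcu(), publish the replacement with hlist_add_head_rcu(),
 * wait out in-flight PR references via pr_kref/pr_comp, then defer the free
 * with kfree_rcu() so concurrent rcu_read_lock() readers of orig never see
 * freed memory.
 */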

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}
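/*
 * Worked example (editorial, not part of the original file; assumes the
 * counter is 16 bits wide, as the "16-bit wrap" comment suggests): when
 * dev_rpti_counter holds 0xffff, this allocation returns RTPI 0xffff and
 * the counter wraps to 0; the next allocation assigns 0, which the
 * "if (!lun->lun_rtpi)" check rejects because RTPI 0h is reserved by SPC-4,
 * so the loop retries and hands out 1 instead. The list scan then ensures
 * the post-wrap value is not already in use by another exported LUN.
 */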

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
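/*
 * Worked example (editorial, not part of the original file): with a 4 KiB
 * PAGE_SIZE and a 512-byte block_size, alignment is 8 sectors, so a
 * max_sectors of 1023 is rounded down to 1016 and every maximum-sized I/O
 * stays a whole number of pages. For block_size >= PAGE_SIZE the
 * max(1ul, ...) clamp keeps alignment at 1 and the value passes through
 * unchanged.
 */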

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*      core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
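/*
 * Worked example (editorial, not part of the original file): for a bdev
 * with 4096-byte logical blocks, ilog2(4096) - 9 = 3, so a discard limit of
 * 2048 512-byte sectors becomes max_unmap_lba_count = 2048 >> 3 = 256
 * device blocks. The granularity and alignment values are likewise
 * converted from bytes to device-block units.
 */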

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
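/*
 * Worked example (editorial, not part of the original file): with a
 * 4096-byte block_size, logical block 10 maps to 512-byte sector
 * 10 << 3 = 80. The default case covers 512-byte blocks, where the two
 * units already coincide.
 */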

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}
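/*
 * Illustrative caller (hypothetical, not part of the original file):
 *
 *	static int count_devices(struct se_device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	/- zero means keep iterating -/
 *	}
 *
 *	int count = 0;
 *	target_for_each_device(count_devices, &count);
 *
 * The iterator pins each device via a configfs reference and drops
 * device_mutex around fn(), so fn may sleep but must tolerate devices
 * appearing or disappearing between calls.
 */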

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}
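/*
 * Editorial note (not part of the original file): virt_lun0 is backed by an
 * internal rd_mcp ramdisk configured with "rd_pages=8,rd_nullio=1,rd_dummy=1",
 * i.e. a tiny null-I/O placeholder device. It exists so that REPORT LUNS and
 * similar CDBs addressed to LUN 0 can still be answered for initiators that
 * have no MappedLUN=0, as handled in transport_lookup_cmd_lun() above.
 */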


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
v3.15
 
   1/*******************************************************************************
   2 * Filename:  target_core_device.c (based on iscsi_target_device.c)
   3 *
   4 * This file contains the TCM Virtual Device and Disk Transport
   5 * agnostic related functions.
   6 *
   7 * (c) Copyright 2003-2013 Datera, Inc.
   8 *
   9 * Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2 of the License, or
  14 * (at your option) any later version.
  15 *
  16 * This program is distributed in the hope that it will be useful,
  17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  19 * GNU General Public License for more details.
  20 *
  21 * You should have received a copy of the GNU General Public License
  22 * along with this program; if not, write to the Free Software
  23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  24 *
  25 ******************************************************************************/
  26
  27#include <linux/net.h>
  28#include <linux/string.h>
  29#include <linux/delay.h>
  30#include <linux/timer.h>
  31#include <linux/slab.h>
  32#include <linux/spinlock.h>
  33#include <linux/kthread.h>
  34#include <linux/in.h>
  35#include <linux/export.h>
 
 
  36#include <net/sock.h>
  37#include <net/tcp.h>
  38#include <scsi/scsi.h>
  39#include <scsi/scsi_device.h>
  40
  41#include <target/target_core_base.h>
  42#include <target/target_core_backend.h>
  43#include <target/target_core_fabric.h>
  44
  45#include "target_core_internal.h"
  46#include "target_core_alua.h"
  47#include "target_core_pr.h"
  48#include "target_core_ua.h"
  49
  50DEFINE_MUTEX(g_device_mutex);
  51LIST_HEAD(g_device_list);
 
  52
  53static struct se_hba *lun0_hba;
  54/* not static, needed by tpg.c */
  55struct se_device *g_lun0_dev;
  56
  57sense_reason_t
  58transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
  59{
  60	struct se_lun *se_lun = NULL;
  61	struct se_session *se_sess = se_cmd->se_sess;
  62	struct se_device *dev;
  63	unsigned long flags;
 
  64
  65	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
  66		return TCM_NON_EXISTENT_LUN;
 
 
  67
  68	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
  69	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
  70	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
  71		struct se_dev_entry *deve = se_cmd->se_deve;
  72
  73		deve->total_cmds++;
  74
  75		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
  76		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
  77			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
  78				" Access for 0x%08x\n",
  79				se_cmd->se_tfo->get_fabric_name(),
  80				unpacked_lun);
  81			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
  82			return TCM_WRITE_PROTECTED;
  83		}
  84
  85		if (se_cmd->data_direction == DMA_TO_DEVICE)
  86			deve->write_bytes += se_cmd->data_length;
  87		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
  88			deve->read_bytes += se_cmd->data_length;
 
 
  89
  90		se_lun = deve->se_lun;
  91		se_cmd->se_lun = deve->se_lun;
  92		se_cmd->pr_res_key = deve->pr_res_key;
  93		se_cmd->orig_fe_lun = unpacked_lun;
  94		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
  95
  96		percpu_ref_get(&se_lun->lun_ref);
  97		se_cmd->lun_ref_active = true;
  98	}
  99	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
 100
 101	if (!se_lun) {
 102		/*
 103		 * Use the se_portal_group->tpg_virt_lun0 to allow for
 104		 * REPORT_LUNS, et al to be returned when no active
 105		 * MappedLUN=0 exists for this Initiator Port.
 106		 */
 107		if (unpacked_lun != 0) {
 108			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 109				" Access for 0x%08x\n",
 110				se_cmd->se_tfo->get_fabric_name(),
 111				unpacked_lun);
 
 112			return TCM_NON_EXISTENT_LUN;
 113		}
 
 114		/*
 115		 * Force WRITE PROTECT for virtual LUN 0
 116		 */
 117		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
 118		    (se_cmd->data_direction != DMA_NONE))
 119			return TCM_WRITE_PROTECTED;
 120
 121		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
 122		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
 123		se_cmd->orig_fe_lun = 0;
 
 
 124		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 125
 126		percpu_ref_get(&se_lun->lun_ref);
 127		se_cmd->lun_ref_active = true;
 128	}
 
 
 
 
 
 
 
 
 129
 130	/* Directly associate cmd with se_dev */
 131	se_cmd->se_dev = se_lun->lun_se_dev;
 132
 133	dev = se_lun->lun_se_dev;
 134	atomic_long_inc(&dev->num_cmds);
 135	if (se_cmd->data_direction == DMA_TO_DEVICE)
 136		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
 
 137	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
 138		atomic_long_add(se_cmd->data_length, &dev->read_bytes);
 
 139
 140	return 0;
 141}
 142EXPORT_SYMBOL(transport_lookup_cmd_lun);
 143
 144int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 145{
 146	struct se_dev_entry *deve;
 147	struct se_lun *se_lun = NULL;
 148	struct se_session *se_sess = se_cmd->se_sess;
 
 149	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 150	unsigned long flags;
 151
 152	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
 153		return -ENODEV;
 
 
 
 
 
 
 
 154
 155	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
 156	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
 157	deve = se_cmd->se_deve;
 158
 159	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
 160		se_tmr->tmr_lun = deve->se_lun;
 161		se_cmd->se_lun = deve->se_lun;
 162		se_lun = deve->se_lun;
 163		se_cmd->pr_res_key = deve->pr_res_key;
 164		se_cmd->orig_fe_lun = unpacked_lun;
 
 165	}
 166	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
 167
 168	if (!se_lun) {
 169		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 170			" Access for 0x%08x\n",
 171			se_cmd->se_tfo->get_fabric_name(),
 172			unpacked_lun);
 
 173		return -ENODEV;
 174	}
 175
 176	/* Directly associate cmd with se_dev */
 177	se_cmd->se_dev = se_lun->lun_se_dev;
 178	se_tmr->tmr_dev = se_lun->lun_se_dev;
 179
 180	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
 181	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
 182	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
 183
 184	return 0;
 185}
 186EXPORT_SYMBOL(transport_lookup_tmr_lun);
 187
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 188/*
 189 * This function is called from core_scsi3_emulate_pro_register_and_move()
 190 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 191 * when a matching rtpi is found.
 192 */
 193struct se_dev_entry *core_get_se_deve_from_rtpi(
 194	struct se_node_acl *nacl,
 195	u16 rtpi)
 196{
 197	struct se_dev_entry *deve;
 198	struct se_lun *lun;
 199	struct se_port *port;
 200	struct se_portal_group *tpg = nacl->se_tpg;
 201	u32 i;
 202
 203	spin_lock_irq(&nacl->device_list_lock);
 204	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 205		deve = nacl->device_list[i];
 206
 207		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 208			continue;
 209
 
 
 210		lun = deve->se_lun;
 211		if (!lun) {
 212			pr_err("%s device entries device pointer is"
 213				" NULL, but Initiator has access.\n",
 214				tpg->se_tpg_tfo->get_fabric_name());
 215			continue;
 216		}
 217		port = lun->lun_sep;
 218		if (!port) {
 219			pr_err("%s device entries device pointer is"
 220				" NULL, but Initiator has access.\n",
 221				tpg->se_tpg_tfo->get_fabric_name());
 222			continue;
 223		}
 224		if (port->sep_rtpi != rtpi)
 225			continue;
 226
 227		atomic_inc(&deve->pr_ref_count);
 228		smp_mb__after_atomic_inc();
 229		spin_unlock_irq(&nacl->device_list_lock);
 230
 231		return deve;
 232	}
 233	spin_unlock_irq(&nacl->device_list_lock);
 234
 235	return NULL;
 236}
 237
 238int core_free_device_list_for_node(
 239	struct se_node_acl *nacl,
 240	struct se_portal_group *tpg)
 241{
 242	struct se_dev_entry *deve;
 243	struct se_lun *lun;
 244	u32 i;
 245
 246	if (!nacl->device_list)
 247		return 0;
 
 
 
 248
 249	spin_lock_irq(&nacl->device_list_lock);
 250	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 251		deve = nacl->device_list[i];
 
 
 
 252
 253		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 254			continue;
 
 
 
 
 255
 256		if (!deve->se_lun) {
 257			pr_err("%s device entries device pointer is"
 258				" NULL, but Initiator has access.\n",
 259				tpg->se_tpg_tfo->get_fabric_name());
 260			continue;
 261		}
 262		lun = deve->se_lun;
 263
 264		spin_unlock_irq(&nacl->device_list_lock);
 265		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
 266			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
 267		spin_lock_irq(&nacl->device_list_lock);
 268	}
 269	spin_unlock_irq(&nacl->device_list_lock);
 270
 271	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
 272	nacl->device_list = NULL;
 
 273
 274	return 0;
 
 
 
 
 275}
 276
 277void core_update_device_list_access(
 278	u32 mapped_lun,
 279	u32 lun_access,
 280	struct se_node_acl *nacl)
 281{
 282	struct se_dev_entry *deve;
 
 
 
 
 
 
 
 
 
 
 
 
 283
 284	spin_lock_irq(&nacl->device_list_lock);
 285	deve = nacl->device_list[mapped_lun];
 286	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
 287		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
 288		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
 289	} else {
 290		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
 291		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
 
 
 
 
 292	}
 293	spin_unlock_irq(&nacl->device_list_lock);
 294}
 295
 296/*      core_enable_device_list_for_node():
 297 *
 298 *
 299 */
 300int core_enable_device_list_for_node(
 301	struct se_lun *lun,
 302	struct se_lun_acl *lun_acl,
 303	u32 mapped_lun,
 304	u32 lun_access,
 305	struct se_node_acl *nacl,
 306	struct se_portal_group *tpg)
 307{
 308	struct se_port *port = lun->lun_sep;
 309	struct se_dev_entry *deve;
 310
 311	spin_lock_irq(&nacl->device_list_lock);
 312
 313	deve = nacl->device_list[mapped_lun];
 314
 315	/*
 316	 * Check if the call is handling demo mode -> explicit LUN ACL
 317	 * transition.  This transition must be for the same struct se_lun
 318	 * + mapped_lun that was setup in demo mode..
 319	 */
 320	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
 321		if (deve->se_lun_acl != NULL) {
 322			pr_err("struct se_dev_entry->se_lun_acl"
 323			       " already set for demo mode -> explicit"
 324			       " LUN ACL transition\n");
 325			spin_unlock_irq(&nacl->device_list_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 326			return -EINVAL;
 327		}
 328		if (deve->se_lun != lun) {
 329			pr_err("struct se_dev_entry->se_lun does"
 330			       " match passed struct se_lun for demo mode"
 331			       " -> explicit LUN ACL transition\n");
 332			spin_unlock_irq(&nacl->device_list_lock);
 
 
 333			return -EINVAL;
 334		}
 335		deve->se_lun_acl = lun_acl;
 336
 337		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
 338			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
 339			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
 340		} else {
 341			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
 342			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
 343		}
 
 
 
 
 
 
 344
 345		spin_unlock_irq(&nacl->device_list_lock);
 
 346		return 0;
 347	}
 348
 349	deve->se_lun = lun;
 350	deve->se_lun_acl = lun_acl;
 351	deve->mapped_lun = mapped_lun;
 352	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
 353
 354	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
 355		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
 356		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
 357	} else {
 358		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
 359		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
 360	}
 361
 362	deve->creation_time = get_jiffies_64();
 363	deve->attach_count++;
 364	spin_unlock_irq(&nacl->device_list_lock);
 365
 366	spin_lock_bh(&port->sep_alua_lock);
 367	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
 368	spin_unlock_bh(&port->sep_alua_lock);
 369
 
 370	return 0;
 371}
 372
 373/*      core_disable_device_list_for_node():
 374 *
 375 *
 376 */
 377int core_disable_device_list_for_node(
 378	struct se_lun *lun,
 379	struct se_lun_acl *lun_acl,
 380	u32 mapped_lun,
 381	u32 lun_access,
 382	struct se_node_acl *nacl,
 383	struct se_portal_group *tpg)
 384{
 385	struct se_port *port = lun->lun_sep;
 386	struct se_dev_entry *deve = nacl->device_list[mapped_lun];
 
 
 
 
 
 387
 388	/*
 389	 * If the MappedLUN entry is being disabled, the entry in
 390	 * port->sep_alua_list must be removed now before clearing the
 391	 * struct se_dev_entry pointers below as logic in
 392	 * core_alua_do_transition_tg_pt() depends on these being present.
 393	 *
 394	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
 395	 * that have not been explicitly converted to MappedLUNs ->
 396	 * struct se_lun_acl, but we remove deve->alua_port_list from
 397	 * port->sep_alua_list. This also means that active UAs and
 398	 * NodeACL context specific PR metadata for demo-mode
 399	 * MappedLUN *deve will be released below..
 400	 */
 401	spin_lock_bh(&port->sep_alua_lock);
 402	list_del(&deve->alua_port_list);
 403	spin_unlock_bh(&port->sep_alua_lock);
 404	/*
 405	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
 406	 * PR operation to complete.
 407	 */
 408	while (atomic_read(&deve->pr_ref_count) != 0)
 409		cpu_relax();
 410
 411	spin_lock_irq(&nacl->device_list_lock);
 
 
 
 
 412	/*
 413	 * Disable struct se_dev_entry LUN ACL mapping
 
 414	 */
 415	core_scsi3_ua_release_all(deve);
 416	deve->se_lun = NULL;
 417	deve->se_lun_acl = NULL;
 418	deve->lun_flags = 0;
 419	deve->creation_time = 0;
 420	deve->attach_count--;
 421	spin_unlock_irq(&nacl->device_list_lock);
 422
 423	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
 424	return 0;
 
 
 425}
 426
 427/*      core_clear_lun_from_tpg():
 428 *
 429 *
 430 */
 431void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 432{
 433	struct se_node_acl *nacl;
 434	struct se_dev_entry *deve;
 435	u32 i;
 436
 437	spin_lock_irq(&tpg->acl_node_lock);
 438	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
 439		spin_unlock_irq(&tpg->acl_node_lock);
 440
 441		spin_lock_irq(&nacl->device_list_lock);
 442		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 443			deve = nacl->device_list[i];
 444			if (lun != deve->se_lun)
 445				continue;
 446			spin_unlock_irq(&nacl->device_list_lock);
 447
 448			core_disable_device_list_for_node(lun, NULL,
 449				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
 450				nacl, tpg);
 451
 452			spin_lock_irq(&nacl->device_list_lock);
 453		}
 454		spin_unlock_irq(&nacl->device_list_lock);
 455
 456		spin_lock_irq(&tpg->acl_node_lock);
 457	}
 458	spin_unlock_irq(&tpg->acl_node_lock);
 459}
 460
 461static struct se_port *core_alloc_port(struct se_device *dev)
 462{
 463	struct se_port *port, *port_tmp;
 464
 465	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
 466	if (!port) {
 467		pr_err("Unable to allocate struct se_port\n");
 468		return ERR_PTR(-ENOMEM);
 469	}
 470	INIT_LIST_HEAD(&port->sep_alua_list);
 471	INIT_LIST_HEAD(&port->sep_list);
 472	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
 473	spin_lock_init(&port->sep_alua_lock);
 474	mutex_init(&port->sep_tg_pt_md_mutex);
 475
 476	spin_lock(&dev->se_port_lock);
 477	if (dev->dev_port_count == 0x0000ffff) {
 478		pr_warn("Reached dev->dev_port_count =="
 479				" 0x0000ffff\n");
 480		spin_unlock(&dev->se_port_lock);
 481		return ERR_PTR(-ENOSPC);
 482	}
 483again:
 484	/*
 485	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
 486	 * Here is the table from spc4r17 section 7.7.3.8.
 487	 *
 488	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
 489	 *
 490	 * Code      Description
 491	 * 0h        Reserved
 492	 * 1h        Relative port 1, historically known as port A
 493	 * 2h        Relative port 2, historically known as port B
 494	 * 3h to FFFFh    Relative port 3 through 65 535
 495	 */
 496	port->sep_rtpi = dev->dev_rpti_counter++;
 497	if (!port->sep_rtpi)
 498		goto again;
 499
 500	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
 501		/*
 502		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
 503		 * for 16-bit wrap..
 504		 */
 505		if (port->sep_rtpi == port_tmp->sep_rtpi)
 506			goto again;
 507	}
 508	spin_unlock(&dev->se_port_lock);
 509
 510	return port;
 511}
 512
 513static void core_export_port(
 514	struct se_device *dev,
 515	struct se_portal_group *tpg,
 516	struct se_port *port,
 517	struct se_lun *lun)
 518{
 519	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
 520
 521	spin_lock(&dev->se_port_lock);
 522	spin_lock(&lun->lun_sep_lock);
 523	port->sep_tpg = tpg;
 524	port->sep_lun = lun;
 525	lun->lun_sep = port;
 526	spin_unlock(&lun->lun_sep_lock);
 527
 528	list_add_tail(&port->sep_list, &dev->dev_sep_list);
 529	spin_unlock(&dev->se_port_lock);
 530
 531	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
 532	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
 533		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
 534		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
 535			pr_err("Unable to allocate t10_alua_tg_pt"
 536					"_gp_member_t\n");
 537			return;
 538		}
 539		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 540		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
 541			dev->t10_alua.default_tg_pt_gp);
 542		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 543		pr_debug("%s/%s: Adding to default ALUA Target Port"
 544			" Group: alua/default_tg_pt_gp\n",
 545			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
 546	}
 547
 548	dev->dev_port_count++;
 549	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
 550}
 551
 552/*
 553 *	Called with struct se_device->se_port_lock spinlock held.
 554 */
 555static void core_release_port(struct se_device *dev, struct se_port *port)
 556	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
 557{
 558	/*
 559	 * Wait for any port reference for PR ALL_TG_PT=1 operation
 560	 * to complete in __core_scsi3_alloc_registration()
 561	 */
 562	spin_unlock(&dev->se_port_lock);
 563	while (atomic_read(&port->sep_tg_pt_ref_cnt))
 564		cpu_relax();
 565	spin_lock(&dev->se_port_lock);
 566
 567	core_alua_free_tg_pt_gp_mem(port);
 568
 569	list_del(&port->sep_list);
 570	dev->dev_port_count--;
 571	kfree(port);
 572}
 573
 574int core_dev_export(
 575	struct se_device *dev,
 576	struct se_portal_group *tpg,
 577	struct se_lun *lun)
 578{
 579	struct se_hba *hba = dev->se_hba;
 580	struct se_port *port;
 581
 582	port = core_alloc_port(dev);
 583	if (IS_ERR(port))
 584		return PTR_ERR(port);
 585
 586	lun->lun_se_dev = dev;
 587
 588	spin_lock(&hba->device_lock);
 589	dev->export_count++;
 590	spin_unlock(&hba->device_lock);
 591
 592	core_export_port(dev, tpg, port, lun);
 593	return 0;
 594}
 595
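/*
 * Orientation note (hedged; exact paths depend on the fabric module):
 * core_dev_export() is normally reached when userspace links a backstore
 * into a fabric LUN via configfs, e.g.
 *
 *	ln -s /sys/kernel/config/target/core/iblock_0/disk0 \
 *	      /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0/port
 *
 * and core_dev_unexport() below runs when that symlink is removed.
 */
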
 596void core_dev_unexport(
 597	struct se_device *dev,
 598	struct se_portal_group *tpg,
 599	struct se_lun *lun)
 600{
 601	struct se_hba *hba = dev->se_hba;
 602	struct se_port *port = lun->lun_sep;
 603
 604	spin_lock(&lun->lun_sep_lock);
 605	if (lun->lun_se_dev == NULL) {
 606		spin_unlock(&lun->lun_sep_lock);
 607		return;
 608	}
 609	spin_unlock(&lun->lun_sep_lock);
 610
 611	spin_lock(&dev->se_port_lock);
 612	core_release_port(dev, port);
 613	spin_unlock(&dev->se_port_lock);
 614
 615	spin_lock(&hba->device_lock);
 616	dev->export_count--;
 617	spin_unlock(&hba->device_lock);
 618
 619	lun->lun_se_dev = NULL;
 620}
 621
 622static void se_release_vpd_for_dev(struct se_device *dev)
 623{
 624	struct t10_vpd *vpd, *vpd_tmp;
 625
 626	spin_lock(&dev->t10_wwn.t10_vpd_lock);
 627	list_for_each_entry_safe(vpd, vpd_tmp,
 628			&dev->t10_wwn.t10_vpd_list, vpd_list) {
 629		list_del(&vpd->vpd_list);
 630		kfree(vpd);
 631	}
 632	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
 633}
 634
 635static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
 636{
 637	u32 aligned_max_sectors;
 638	u32 alignment;
 639	/*
 640	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
 641	 * transport_allocate_data_tasks() operation.
 642	 */
 643	alignment = max(1ul, PAGE_SIZE / block_size);
 644	aligned_max_sectors = rounddown(max_sectors, alignment);
 645
 646	if (max_sectors != aligned_max_sectors)
 647		pr_info("Rounding down aligned max_sectors from %u to %u\n",
 648			max_sectors, aligned_max_sectors);
 649
 650	return aligned_max_sectors;
 651}
 652
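/*
 * Worked example for se_dev_align_max_sectors(), assuming PAGE_SIZE is
 * 4096: with block_size 512, alignment = max(1, 4096 / 512) = 8, so
 * max_sectors 1023 rounds down to 1016; with block_size 4096 the
 * alignment is 1 and the value passes through unchanged.
 */
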
 653int se_dev_set_max_unmap_lba_count(
 654	struct se_device *dev,
 655	u32 max_unmap_lba_count)
 656{
 657	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
 658	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
 659			dev, dev->dev_attrib.max_unmap_lba_count);
 660	return 0;
 661}
 662
 663int se_dev_set_max_unmap_block_desc_count(
 664	struct se_device *dev,
 665	u32 max_unmap_block_desc_count)
 666{
 667	dev->dev_attrib.max_unmap_block_desc_count =
 668		max_unmap_block_desc_count;
 669	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
 670			dev, dev->dev_attrib.max_unmap_block_desc_count);
 671	return 0;
 672}
 673
 674int se_dev_set_unmap_granularity(
 675	struct se_device *dev,
 676	u32 unmap_granularity)
 677{
 678	dev->dev_attrib.unmap_granularity = unmap_granularity;
 679	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
 680			dev, dev->dev_attrib.unmap_granularity);
 681	return 0;
 682}
 683
 684int se_dev_set_unmap_granularity_alignment(
 685	struct se_device *dev,
 686	u32 unmap_granularity_alignment)
 687{
 688	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
 689	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
 690			dev, dev->dev_attrib.unmap_granularity_alignment);
 691	return 0;
 692}
 693
 694int se_dev_set_max_write_same_len(
 695	struct se_device *dev,
 696	u32 max_write_same_len)
 697{
 698	dev->dev_attrib.max_write_same_len = max_write_same_len;
 699	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
 700			dev, dev->dev_attrib.max_write_same_len);
 701	return 0;
 702}
 703
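/*
 * The se_dev_set_*() attribute setters in this file are driven from
 * configfs attribute stores; a hedged usage sketch (backstore names are
 * admin-chosen):
 *
 *	echo 1 > /sys/kernel/config/target/core/iblock_0/disk0/attrib/emulate_tpu
 *
 * The store method parses the value and calls the matching setter,
 * here se_dev_set_emulate_tpu(dev, 1).
 */
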
 704static void dev_set_t10_wwn_model_alias(struct se_device *dev)
 705{
 706	const char *configname;
 707
 708	configname = config_item_name(&dev->dev_group.cg_item);
 709	if (strlen(configname) >= 16) {
 710		pr_warn("dev[%p]: Backstore name '%s' is too long for "
 711			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
 712			configname);
 713	}
 714	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
 715}
 716
 717int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
 718{
 719	if (dev->export_count) {
 720		pr_err("dev[%p]: Unable to change model alias"
 721			" while export_count is %d\n",
 722			dev, dev->export_count);
 723		return -EINVAL;
 724	}
 725
 726	if (flag != 0 && flag != 1) {
 727		pr_err("Illegal value %d\n", flag);
 728		return -EINVAL;
 729	}
 730
 731	if (flag) {
 732		dev_set_t10_wwn_model_alias(dev);
 733	} else {
 734		strncpy(&dev->t10_wwn.model[0],
 735			dev->transport->inquiry_prod, 16);
 736	}
 737	dev->dev_attrib.emulate_model_alias = flag;
 738
 739	return 0;
 740}
 741
 742int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 743{
 744	if (flag != 0 && flag != 1) {
 745		pr_err("Illegal value %d\n", flag);
 746		return -EINVAL;
 747	}
 748
 749	if (flag) {
 750		pr_err("dpo emulation not supported\n");
 751		return -EINVAL;
 752	}
 753
 754	return 0;
 755}
 756
 757int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 758{
 759	if (flag != 0 && flag != 1) {
 760		pr_err("Illegal value %d\n", flag);
 761		return -EINVAL;
 762	}
 763
 764	if (flag &&
 765	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
 766		pr_err("emulate_fua_write not supported for pSCSI\n");
 767		return -EINVAL;
 768	}
 769	dev->dev_attrib.emulate_fua_write = flag;
 770	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
 771			dev, dev->dev_attrib.emulate_fua_write);
 772	return 0;
 773}
 774
 775int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 776{
 777	if (flag != 0 && flag != 1) {
 778		pr_err("Illegal value %d\n", flag);
 779		return -EINVAL;
 780	}
 781
 782	if (flag) {
 783		pr_err("emulate_fua_read not supported\n");
 784		return -EINVAL;
 785	}
 786
 787	return 0;
 788}
 789
 790int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 791{
 792	if (flag != 0 && flag != 1) {
 793		pr_err("Illegal value %d\n", flag);
 794		return -EINVAL;
 795	}
 796	if (flag &&
 797	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
 798		pr_err("emulate_write_cache not supported for pSCSI\n");
 799		return -EINVAL;
 800	}
 801	if (flag &&
 802	    dev->transport->get_write_cache) {
 803		pr_err("emulate_write_cache not supported for this device\n");
 804		return -EINVAL;
 805	}
 806
 807	dev->dev_attrib.emulate_write_cache = flag;
 808	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
 809			dev, dev->dev_attrib.emulate_write_cache);
 810	return 0;
 811}
 812
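/*
 * Note: emulate_write_cache above feeds the WCE bit of the Caching mode
 * page (0x08) in the MODE SENSE emulation path, so toggling it can
 * change initiator-side cache flushing behaviour.
 */
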
 813int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
 814{
 815	if ((flag != 0) && (flag != 1) && (flag != 2)) {
 816		pr_err("Illegal value %d\n", flag);
 817		return -EINVAL;
 818	}
 819
 820	if (dev->export_count) {
 821		pr_err("dev[%p]: Unable to change SE Device"
 822			" UA_INTRLCK_CTRL while export_count is %d\n",
 823			dev, dev->export_count);
 824		return -EINVAL;
 825	}
 826	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
 827	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
 828		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
 829
 830	return 0;
 831}
 832
 833int se_dev_set_emulate_tas(struct se_device *dev, int flag)
 834{
 835	if ((flag != 0) && (flag != 1)) {
 836		pr_err("Illegal value %d\n", flag);
 837		return -EINVAL;
 838	}
 839
 840	if (dev->export_count) {
 841		pr_err("dev[%p]: Unable to change SE Device TAS while"
 842			" export_count is %d\n",
 843			dev, dev->export_count);
 844		return -EINVAL;
 845	}
 846	dev->dev_attrib.emulate_tas = flag;
 847	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
 848		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
 849
 850	return 0;
 851}
 852
 853int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
 854{
 855	if ((flag != 0) && (flag != 1)) {
 856		pr_err("Illegal value %d\n", flag);
 857		return -EINVAL;
 858	}
 859	/*
 860	 * We expect this value to be non-zero when generic Block Layer
 861	 * Discard support is detected in iblock_create_virtdevice().
 862	 */
 863	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
 864		pr_err("Generic Block Discard not supported\n");
 865		return -ENOSYS;
 866	}
 867
 868	dev->dev_attrib.emulate_tpu = flag;
 869	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
 870				dev, flag);
 871	return 0;
 872}
 873
 874int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
 875{
 876	if ((flag != 0) && (flag != 1)) {
 877		pr_err("Illegal value %d\n", flag);
 878		return -EINVAL;
 879	}
 880	/*
 881	 * We expect this value to be non-zero when generic Block Layer
 882	 * Discard support is detected in iblock_create_virtdevice().
 883	 */
 884	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
 885		pr_err("Generic Block Discard not supported\n");
 886		return -ENOSYS;
 887	}
 888
 889	dev->dev_attrib.emulate_tpws = flag;
 890	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
 891				dev, flag);
 892	return 0;
 893}
 894
 895int se_dev_set_emulate_caw(struct se_device *dev, int flag)
 896{
 897	if (flag != 0 && flag != 1) {
 898		pr_err("Illegal value %d\n", flag);
 899		return -EINVAL;
 900	}
 901	dev->dev_attrib.emulate_caw = flag;
 902	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
 903		 dev, flag);
 904
 905	return 0;
 906}
 907
 908int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
 909{
 910	if (flag != 0 && flag != 1) {
 911		pr_err("Illegal value %d\n", flag);
 912		return -EINVAL;
 913	}
 914	dev->dev_attrib.emulate_3pc = flag;
 915	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
 916		dev, flag);
 917
 918	return 0;
 919}
 920
 921int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
 922{
 923	int rc, old_prot = dev->dev_attrib.pi_prot_type;
 924
 925	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
 926		pr_err("Illegal value %d for pi_prot_type\n", flag);
 927		return -EINVAL;
 928	}
 929	if (flag == 2) {
 930		pr_err("DIF TYPE2 protection currently not supported\n");
 931		return -ENOSYS;
 932	}
 933	if (dev->dev_attrib.hw_pi_prot_type) {
 934		pr_warn("DIF protection enabled on underlying hardware,"
 935			" ignoring\n");
 936		return 0;
 937	}
 938	if (!dev->transport->init_prot || !dev->transport->free_prot) {
 939		/* 0 is only allowed value for non-supporting backends */
 940		if (flag == 0)
 941			return 0;
 942
 943		pr_err("DIF protection not supported by backend: %s\n",
 944		       dev->transport->name);
 945		return -ENOSYS;
 946	}
 947	if (!(dev->dev_flags & DF_CONFIGURED)) {
 948		pr_err("DIF protection requires device to be configured\n");
 949		return -ENODEV;
 950	}
 951	if (dev->export_count) {
 952		pr_err("dev[%p]: Unable to change SE Device PROT type while"
 953		       " export_count is %d\n", dev, dev->export_count);
 954		return -EINVAL;
 955	}
 956
 957	dev->dev_attrib.pi_prot_type = flag;
 958
 959	if (flag && !old_prot) {
 960		rc = dev->transport->init_prot(dev);
 961		if (rc) {
 962			dev->dev_attrib.pi_prot_type = old_prot;
 963			return rc;
 964		}
 965
 966	} else if (!flag && old_prot) {
 967		dev->transport->free_prot(dev);
 968	}
 969	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
 970
 971	return 0;
 972}
 973
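/*
 * pi_prot_type selects the T10 DIF flavour: 0 disables protection,
 * 1 and 3 select Type 1/Type 3, and Type 2 is rejected above. Hedged
 * configfs sketch:
 *
 *	echo 1 > /sys/kernel/config/target/core/fileio_0/file0/attrib/pi_prot_type
 */
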
 974int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
 975{
 976	int rc;
 977
 978	if (!flag)
 979		return 0;
 980
 981	if (flag != 1) {
 982		pr_err("Illegal value %d for pi_prot_format\n", flag);
 983		return -EINVAL;
 984	}
 985	if (!dev->transport->format_prot) {
 986		pr_err("DIF protection format not supported by backend %s\n",
 987		       dev->transport->name);
 988		return -ENOSYS;
 989	}
 990	if (!(dev->dev_flags & DF_CONFIGURED)) {
 991		pr_err("DIF protection format requires device to be configured\n");
 992		return -ENODEV;
 993	}
 994	if (dev->export_count) {
 995		pr_err("dev[%p]: Unable to format SE Device PROT type while"
 996		       " export_count is %d\n", dev, dev->export_count);
 997		return -EINVAL;
 998	}
 999
1000	rc = dev->transport->format_prot(dev);
1001	if (rc)
1002		return rc;
1003
1004	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
1005
1006	return 0;
1007}
1008
1009int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1010{
1011	if ((flag != 0) && (flag != 1)) {
1012		pr_err("Illegal value %d\n", flag);
1013		return -EINVAL;
1014	}
1015	dev->dev_attrib.enforce_pr_isids = flag;
1016	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1017		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1018	return 0;
1019}
1020
1021int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1022{
1023	if ((flag != 0) && (flag != 1)) {
1024		pr_err("Illegal value %d\n", flag);
1025		return -EINVAL;
1026	}
1027	dev->dev_attrib.is_nonrot = flag;
1028	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
1029	       dev, flag);
1030	return 0;
1031}
1032
1033int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1034{
1035	if (flag != 0) {
1036		pr_err("dev[%p]: SE Device emulation of restricted"
1037			" reordering not implemented\n", dev);
1038		return -ENOSYS;
1039	}
1040	dev->dev_attrib.emulate_rest_reord = flag;
1041	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1042	return 0;
1043}
1044
1045/*
1046 * Note, this can only be called on unexported SE Device Object.
1047 */
1048int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1049{
1050	if (dev->export_count) {
1051		pr_err("dev[%p]: Unable to change SE Device TCQ while"
1052			" export_count is %d\n",
1053			dev, dev->export_count);
1054		return -EINVAL;
1055	}
1056	if (!queue_depth) {
1057		pr_err("dev[%p]: Illegal ZERO value for queue"
1058			"_depth\n", dev);
1059		return -EINVAL;
1060	}
1061
1062	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1063		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1064			pr_err("dev[%p]: Passed queue_depth: %u"
1065				" exceeds TCM/SE_Device TCQ: %u\n",
1066				dev, queue_depth,
1067				dev->dev_attrib.hw_queue_depth);
1068			return -EINVAL;
1069		}
1070	} else {
1071		if (queue_depth > dev->dev_attrib.queue_depth) {
1072			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1073				pr_err("dev[%p]: Passed queue_depth:"
1074					" %u exceeds TCM/SE_Device MAX"
1075					" TCQ: %u\n", dev, queue_depth,
1076					dev->dev_attrib.hw_queue_depth);
1077				return -EINVAL;
1078			}
1079		}
1080	}
1081
1082	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1083	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1084			dev, queue_depth);
1085	return 0;
1086}
1087
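/*
 * In both branches above hw_queue_depth is the hard ceiling: pSCSI
 * passthrough checks it directly, and the virtual backends (IBLOCK,
 * FILEIO, RAMDISK) only allow raising queue_depth up to the same limit.
 */
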
1088int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1089{
1090	int block_size = dev->dev_attrib.block_size;
1091
1092	if (dev->export_count) {
1093		pr_err("dev[%p]: Unable to change SE Device"
1094			" fabric_max_sectors while export_count is %d\n",
1095			dev, dev->export_count);
1096		return -EINVAL;
1097	}
1098	if (!fabric_max_sectors) {
1099		pr_err("dev[%p]: Illegal ZERO value for"
1100			" fabric_max_sectors\n", dev);
1101		return -EINVAL;
1102	}
1103	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1104		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
1105			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
1106				DA_STATUS_MAX_SECTORS_MIN);
1107		return -EINVAL;
1108	}
1109	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1110		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
1111			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1112				" greater than TCM/SE_Device max_sectors:"
1113				" %u\n", dev, fabric_max_sectors,
1114				dev->dev_attrib.hw_max_sectors);
1115			return -EINVAL;
1116		}
1117	} else {
1118		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1119			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1120				" greater than DA_STATUS_MAX_SECTORS_MAX:"
1121				" %u\n", dev, fabric_max_sectors,
1122				DA_STATUS_MAX_SECTORS_MAX);
1123			return -EINVAL;
1124		}
1125	}
1126	/*
1127	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1128	 */
1129	if (!block_size) {
1130		block_size = 512;
1131		pr_warn("Defaulting to 512 for zero block_size\n");
1132	}
1133	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
1134						      block_size);
1135
1136	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
1137	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1138			dev, fabric_max_sectors);
1139	return 0;
1140}
1141
1142int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1143{
1144	if (dev->export_count) {
1145		pr_err("dev[%p]: Unable to change SE Device"
1146			" optimal_sectors while export_count is %d\n",
1147			dev, dev->export_count);
1148		return -EINVAL;
1149	}
1150	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1151		pr_err("dev[%p]: Passed optimal_sectors cannot be"
1152				" changed for TCM/pSCSI\n", dev);
1153		return -EINVAL;
1154	}
1155	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
1156		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1157			" greater than fabric_max_sectors: %u\n", dev,
1158			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
1159		return -EINVAL;
1160	}
1161
1162	dev->dev_attrib.optimal_sectors = optimal_sectors;
1163	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1164			dev, optimal_sectors);
1165	return 0;
1166}
1167
1168int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1169{
1170	if (dev->export_count) {
1171		pr_err("dev[%p]: Unable to change SE Device block_size"
1172			" while export_count is %d\n",
1173			dev, dev->export_count);
1174		return -EINVAL;
1175	}
1176
1177	if ((block_size != 512) &&
1178	    (block_size != 1024) &&
1179	    (block_size != 2048) &&
1180	    (block_size != 4096)) {
1181		pr_err("dev[%p]: Illegal value for block_size: %u"
1182			" for SE device, must be 512, 1024, 2048 or 4096\n",
1183			dev, block_size);
1184		return -EINVAL;
1185	}
1186
1187	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1188		pr_err("dev[%p]: Not allowed to change block_size for"
1189			" Physical Device; use the Linux/SCSI layer to change"
1190			" block_size for underlying hardware\n", dev);
1191		return -EINVAL;
1192	}
1193
1194	dev->dev_attrib.block_size = block_size;
1195	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1196			dev, block_size);
1197
1198	if (dev->dev_attrib.max_bytes_per_io)
1199		dev->dev_attrib.hw_max_sectors =
1200			dev->dev_attrib.max_bytes_per_io / block_size;
1201
1202	return 0;
1203}
1204
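/*
 * Worked example for the hw_max_sectors recalculation above, assuming a
 * backend that caps I/O via max_bytes_per_io at 8 MiB (as FILEIO does
 * at the time of writing): moving block_size from 512 to 4096 drops
 * hw_max_sectors from 16384 to 2048.
 */
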
1205struct se_lun *core_dev_add_lun(
1206	struct se_portal_group *tpg,
1207	struct se_device *dev,
1208	u32 unpacked_lun)
1209{
1210	struct se_lun *lun;
1211	int rc;
1212
1213	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
1214	if (IS_ERR(lun))
1215		return lun;
1216
1217	rc = core_tpg_add_lun(tpg, lun,
1218				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
1219	if (rc < 0)
1220		return ERR_PTR(rc);
1221
1222	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1223		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1224		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1225		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
1226	/*
1227	 * Update LUN maps for dynamically added initiators when
1228	 * generate_node_acl is enabled.
1229	 */
1230	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1231		struct se_node_acl *acl;
1232		spin_lock_irq(&tpg->acl_node_lock);
1233		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1234			if (acl->dynamic_node_acl &&
1235			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1236			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1237				spin_unlock_irq(&tpg->acl_node_lock);
1238				core_tpg_add_node_to_devs(acl, tpg);
1239				spin_lock_irq(&tpg->acl_node_lock);
1240			}
1241		}
1242		spin_unlock_irq(&tpg->acl_node_lock);
1243	}
1244
1245	return lun;
1246}
1247
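/*
 * The demo-mode loop in core_dev_add_lun() above means that, on fabrics
 * where tpg_check_demo_mode() returns true, every dynamically generated
 * node ACL is remapped via core_tpg_add_node_to_devs(), so a newly added
 * LUN becomes visible without explicit per-initiator MappedLUN setup.
 */
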
1248/*      core_dev_del_lun():
1249 *
1250	 *	Deactivate the LUN matching unpacked_lun in tpg and release it.
1251 */
1252int core_dev_del_lun(
1253	struct se_portal_group *tpg,
1254	u32 unpacked_lun)
1255{
1256	struct se_lun *lun;
1257
1258	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
1259	if (IS_ERR(lun))
1260		return PTR_ERR(lun);
1261
1262	core_tpg_post_dellun(tpg, lun);
1263
1264	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1265		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1266		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
1267		tpg->se_tpg_tfo->get_fabric_name());
1268
1269	return 0;
1270}
1271
1272struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1273{
1274	struct se_lun *lun;
1275
1276	spin_lock(&tpg->tpg_lun_lock);
1277	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1278		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1279			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
1280			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1281			TRANSPORT_MAX_LUNS_PER_TPG-1,
1282			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1283		spin_unlock(&tpg->tpg_lun_lock);
1284		return NULL;
1285	}
1286	lun = tpg->tpg_lun_list[unpacked_lun];
1287
1288	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1289		pr_err("%s Logical Unit Number: %u is not free on"
1290			" Target Portal Group: %hu, ignoring request.\n",
1291			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1292			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1293		spin_unlock(&tpg->tpg_lun_lock);
1294		return NULL;
1295	}
1296	spin_unlock(&tpg->tpg_lun_lock);
1297
1298	return lun;
1299}
1300
1301/*      core_dev_get_lun():
1302 *
1303	 *	Return tpg's LUN at unpacked_lun if it is ACTIVE, or NULL.
1304 */
1305static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1306{
1307	struct se_lun *lun;
1308
1309	spin_lock(&tpg->tpg_lun_lock);
1310	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1311		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1312			"_TPG-1: %u for Target Portal Group: %hu\n",
1313			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1314			TRANSPORT_MAX_LUNS_PER_TPG-1,
1315			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1316		spin_unlock(&tpg->tpg_lun_lock);
1317		return NULL;
1318	}
1319	lun = tpg->tpg_lun_list[unpacked_lun];
1320
1321	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1322		pr_err("%s Logical Unit Number: %u is not active on"
1323			" Target Portal Group: %hu, ignoring request.\n",
1324			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1325			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1326		spin_unlock(&tpg->tpg_lun_lock);
1327		return NULL;
1328	}
1329	spin_unlock(&tpg->tpg_lun_lock);
1330
1331	return lun;
1332}
1333
1334struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1335	struct se_portal_group *tpg,
1336	struct se_node_acl *nacl,
1337	u32 mapped_lun,
1338	int *ret)
1339{
1340	struct se_lun_acl *lacl;
1341
1342	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
1343		pr_err("%s InitiatorName exceeds maximum size.\n",
1344			tpg->se_tpg_tfo->get_fabric_name());
1345		*ret = -EOVERFLOW;
1346		return NULL;
1347	}
1348	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1349	if (!lacl) {
1350		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1351		*ret = -ENOMEM;
1352		return NULL;
1353	}
1354
1355	INIT_LIST_HEAD(&lacl->lacl_list);
1356	lacl->mapped_lun = mapped_lun;
1357	lacl->se_lun_nacl = nacl;
1358	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
1359		 nacl->initiatorname);
1360
1361	return lacl;
1362}
1363
1364int core_dev_add_initiator_node_lun_acl(
1365	struct se_portal_group *tpg,
1366	struct se_lun_acl *lacl,
1367	u32 unpacked_lun,
1368	u32 lun_access)
1369{
1370	struct se_lun *lun;
1371	struct se_node_acl *nacl;
1372
1373	lun = core_dev_get_lun(tpg, unpacked_lun);
1374	if (!lun) {
1375		pr_err("%s Logical Unit Number: %u is not active on"
1376			" Target Portal Group: %hu, ignoring request.\n",
1377			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1378			tpg->se_tpg_tfo->tpg_get_tag(tpg));
1379		return -EINVAL;
1380	}
1381
1382	nacl = lacl->se_lun_nacl;
1383	if (!nacl)
1384		return -EINVAL;
1385
1386	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1387	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1388		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1389
1390	lacl->se_lun = lun;
1391
1392	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
1393			lun_access, nacl, tpg) < 0)
1394		return -EINVAL;
1395
1396	spin_lock(&lun->lun_acl_lock);
1397	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1398	atomic_inc(&lun->lun_acl_count);
1399	smp_mb__after_atomic_inc();
1400	spin_unlock(&lun->lun_acl_lock);
1401
1402	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
1403		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1404		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1405		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1406		lacl->initiatorname);
1407	/*
1408	 * Check to see if there are any existing persistent reservation APTPL
1409	 * pre-registrations that need to be enabled for this LUN ACL..
1410	 */
1411	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
1412	return 0;
1413}
1414
1415/*      core_dev_del_initiator_node_lun_acl():
1416 *
1417	 *	Disable and unlink the MappedLUN ACL lacl from lun.
1418 */
1419int core_dev_del_initiator_node_lun_acl(
1420	struct se_portal_group *tpg,
1421	struct se_lun *lun,
1422	struct se_lun_acl *lacl)
1423{
1424	struct se_node_acl *nacl;
1425
1426	nacl = lacl->se_lun_nacl;
1427	if (!nacl)
1428		return -EINVAL;
1429
1430	spin_lock(&lun->lun_acl_lock);
1431	list_del(&lacl->lacl_list);
1432	atomic_dec(&lun->lun_acl_count);
1433	smp_mb__after_atomic_dec();
1434	spin_unlock(&lun->lun_acl_lock);
1435
1436	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
1437		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
1438
1439	lacl->se_lun = NULL;
1440
1441	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1442		" InitiatorNode: %s Mapped LUN: %u\n",
1443		tpg->se_tpg_tfo->get_fabric_name(),
1444		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1445		lacl->initiatorname, lacl->mapped_lun);
1446
1447	return 0;
1448}
1449
1450void core_dev_free_initiator_node_lun_acl(
1451	struct se_portal_group *tpg,
1452	struct se_lun_acl *lacl)
1453{
1454	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1455		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1456		tpg->se_tpg_tfo->tpg_get_tag(tpg),
1457		tpg->se_tpg_tfo->get_fabric_name(),
1458		lacl->initiatorname, lacl->mapped_lun);
1459
1460	kfree(lacl);
1461}
1462
1463static void scsi_dump_inquiry(struct se_device *dev)
1464{
1465	struct t10_wwn *wwn = &dev->t10_wwn;
1466	char buf[17];
1467	int i, device_type;
1468	/*
1469	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1470	 */
1471	for (i = 0; i < 8; i++)
1472		if (wwn->vendor[i] >= 0x20)
1473			buf[i] = wwn->vendor[i];
1474		else
1475			buf[i] = ' ';
1476	buf[i] = '\0';
1477	pr_debug("  Vendor: %s\n", buf);
1478
1479	for (i = 0; i < 16; i++)
1480		if (wwn->model[i] >= 0x20)
1481			buf[i] = wwn->model[i];
1482		else
1483			buf[i] = ' ';
1484	buf[i] = '\0';
1485	pr_debug("  Model: %s\n", buf);
1486
1487	for (i = 0; i < 4; i++)
1488		if (wwn->revision[i] >= 0x20)
1489			buf[i] = wwn->revision[i];
1490		else
1491			buf[i] = ' ';
1492	buf[i] = '\0';
1493	pr_debug("  Revision: %s\n", buf);
1494
1495	device_type = dev->transport->get_device_type(dev);
1496	pr_debug("  Type:   %s ", scsi_device_type(device_type));
1497}
1498
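/*
 * buf in scsi_dump_inquiry() is 17 bytes so the widest field (the
 * 16-byte model) keeps a terminating NUL; bytes below 0x20 are rendered
 * as spaces, mirroring the Linux/SCSI INQUIRY dump format.
 */
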
1499struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1500{
1501	struct se_device *dev;
1502	struct se_lun *xcopy_lun;
1503
1504	dev = hba->transport->alloc_device(hba, name);
1505	if (!dev)
1506		return NULL;
1507
1508	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
1509	dev->se_hba = hba;
1510	dev->transport = hba->transport;
1511	dev->prot_length = sizeof(struct se_dif_v1_tuple);
1512
1513	INIT_LIST_HEAD(&dev->dev_list);
1514	INIT_LIST_HEAD(&dev->dev_sep_list);
1515	INIT_LIST_HEAD(&dev->dev_tmr_list);
1516	INIT_LIST_HEAD(&dev->delayed_cmd_list);
1517	INIT_LIST_HEAD(&dev->state_list);
1518	INIT_LIST_HEAD(&dev->qf_cmd_list);
1519	INIT_LIST_HEAD(&dev->g_dev_node);
1520	spin_lock_init(&dev->execute_task_lock);
1521	spin_lock_init(&dev->delayed_cmd_lock);
1522	spin_lock_init(&dev->dev_reservation_lock);
1523	spin_lock_init(&dev->se_port_lock);
1524	spin_lock_init(&dev->se_tmr_lock);
1525	spin_lock_init(&dev->qf_cmd_lock);
1526	sema_init(&dev->caw_sem, 1);
1527	atomic_set(&dev->dev_ordered_id, 0);
1528	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
1529	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
1530	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
1531	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
1532	spin_lock_init(&dev->t10_pr.registration_lock);
1533	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
1534	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
1535	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
1536	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
1537	spin_lock_init(&dev->t10_alua.lba_map_lock);
1538
1539	dev->t10_wwn.t10_dev = dev;
1540	dev->t10_alua.t10_dev = dev;
1541
1542	dev->dev_attrib.da_dev = dev;
1543	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
1544	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
1545	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
1546	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
1547	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
1548	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
1549	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
1550	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
1551	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
1552	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
1553	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
1554	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
1555	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
1556	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
1557	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
1558	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
1559	dev->dev_attrib.max_unmap_block_desc_count =
1560		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
1561	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
1562	dev->dev_attrib.unmap_granularity_alignment =
1563				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
1564	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
1565	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
1566	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
1567
1568	xcopy_lun = &dev->xcopy_lun;
1569	xcopy_lun->lun_se_dev = dev;
1570	init_completion(&xcopy_lun->lun_shutdown_comp);
1571	INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
1572	spin_lock_init(&xcopy_lun->lun_acl_lock);
1573	spin_lock_init(&xcopy_lun->lun_sep_lock);
1574	init_completion(&xcopy_lun->lun_ref_comp);
1575
1576	return dev;
1577}
1578
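/*
 * Backend device lifecycle as used within this file (sketch):
 *
 *	dev = target_alloc_device(hba, name);
 *	hba->transport->set_configfs_dev_params(dev, params, len);
 *	ret = target_configure_device(dev);
 *	...
 *	target_free_device(dev);
 *
 * core_dev_setup_virtual_lun0() at the bottom of this file follows
 * exactly this sequence.
 */
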
1579int target_configure_device(struct se_device *dev)
1580{
1581	struct se_hba *hba = dev->se_hba;
1582	int ret;
1583
1584	if (dev->dev_flags & DF_CONFIGURED) {
1585		pr_err("se_dev->se_dev_ptr already set for storage"
1586				" object\n");
1587		return -EEXIST;
1588	}
1589
1590	ret = dev->transport->configure_device(dev);
1591	if (ret)
1592		goto out;
1593	dev->dev_flags |= DF_CONFIGURED;
1594
1595	/*
1596	 * XXX: there is not much point to have two different values here..
1597	 */
1598	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
1599	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
1600
1601	/*
1602	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
1603	 */
1604	dev->dev_attrib.hw_max_sectors =
1605		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
1606					 dev->dev_attrib.hw_block_size);
1607
1608	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1609	dev->creation_time = get_jiffies_64();
1610
1611	ret = core_setup_alua(dev);
1612	if (ret)
1613		goto out;
1614
1615	/*
1616	 * Startup the struct se_device processing thread
1617	 */
1618	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
1619				      dev->transport->name);
1620	if (!dev->tmr_wq) {
1621		pr_err("Unable to create tmr workqueue for %s\n",
1622			dev->transport->name);
1623		ret = -ENOMEM;
1624		goto out_free_alua;
1625	}
1626
1627	/*
1628	 * Setup work_queue for QUEUE_FULL
1629	 */
1630	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1631
1632	/*
1633	 * Preload the initial INQUIRY const values if we are doing
1634	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1635	 * passthrough because this is being provided by the backend LLD.
1636	 */
1637	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1638		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1639		strncpy(&dev->t10_wwn.model[0],
1640			dev->transport->inquiry_prod, 16);
1641		strncpy(&dev->t10_wwn.revision[0],
1642			dev->transport->inquiry_rev, 4);
1643	}
1644
1645	scsi_dump_inquiry(dev);
1646
1647	spin_lock(&hba->device_lock);
1648	hba->dev_count++;
1649	spin_unlock(&hba->device_lock);
1650
1651	mutex_lock(&g_device_mutex);
1652	list_add_tail(&dev->g_dev_node, &g_device_list);
1653	mutex_unlock(&g_device_mutex);
1654
1655	return 0;
1656
1657out_free_alua:
1658	core_alua_free_lu_gp_mem(dev);
1659out:
1660	se_release_vpd_for_dev(dev);
1661	return ret;
1662}
1663
1664void target_free_device(struct se_device *dev)
1665{
1666	struct se_hba *hba = dev->se_hba;
1667
1668	WARN_ON(!list_empty(&dev->dev_sep_list));
1669
1670	if (dev->dev_flags & DF_CONFIGURED) {
1671		destroy_workqueue(dev->tmr_wq);
1672
1673		mutex_lock(&g_device_mutex);
1674		list_del(&dev->g_dev_node);
1675		mutex_unlock(&g_device_mutex);
1676
1677		spin_lock(&hba->device_lock);
1678		hba->dev_count--;
1679		spin_unlock(&hba->device_lock);
1680	}
1681
1682	core_alua_free_lu_gp_mem(dev);
1683	core_alua_set_lba_map(dev, NULL, 0, 0);
1684	core_scsi3_free_all_registrations(dev);
1685	se_release_vpd_for_dev(dev);
1686
1687	if (dev->transport->free_prot)
1688		dev->transport->free_prot(dev);
1689
1690	dev->transport->free_device(dev);
1691}
1692
1693int core_dev_setup_virtual_lun0(void)
1694{
1695	struct se_hba *hba;
1696	struct se_device *dev;
1697	char buf[] = "rd_pages=8,rd_nullio=1";
1698	int ret;
1699
1700	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1701	if (IS_ERR(hba))
1702		return PTR_ERR(hba);
1703
1704	dev = target_alloc_device(hba, "virt_lun0");
1705	if (!dev) {
1706		ret = -ENOMEM;
1707		goto out_free_hba;
1708	}
1709
1710	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
1711
1712	ret = target_configure_device(dev);
1713	if (ret)
1714		goto out_free_se_dev;
1715
1716	lun0_hba = hba;
1717	g_lun0_dev = dev;
1718	return 0;
1719
1720out_free_se_dev:
1721	target_free_device(dev);
1722out_free_hba:
1723	core_delete_hba(hba);
1724	return ret;
1725}
1726
1727
1728void core_dev_release_virtual_lun0(void)
1729{
1730	struct se_hba *hba = lun0_hba;
1731
1732	if (!hba)
1733		return;
1734
1735	if (g_lun0_dev)
1736		target_free_device(g_lun0_dev);
1737	core_delete_hba(hba);
1738}