// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
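
/*
 * Resolve se_cmd->orig_fe_lun to an active se_lun mapping for this
 * initiator's NodeACL under RCU, account per-deve statistics, reject
 * writes to read-only mappings, and pin the LUN via its percpu
 * lun_ref. When no MappedLUN=0 exists, fall back to the TPG's
 * virtual LUN 0 so REPORT LUNS et al can still be serviced.
 */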
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
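
/*
 * Same LUN resolution as transport_lookup_cmd_lun(), but for task
 * management requests: on success the matching se_device is also
 * recorded in se_tmr->tmr_dev, and a missing mapping returns -ENODEV
 * rather than a sense_reason_t.
 */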
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_tpg->tpg_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}
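
/*
 * Tear down every se_dev_entry LUN mapping for a NodeACL, e.g. when
 * the ACL itself is being removed from a portal group.
 */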
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
			core_scsi3_ua_allocate(se_deve, asc, ascq);
		spin_unlock(&lun->lun_deve_lock);
	}
	spin_unlock(&dev->se_port_lock);
}
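
/*
 * Queue a REPORTED LUNS DATA HAS CHANGED (ASC 0x3F) Unit Attention on
 * every se_dev_entry mapped by this NodeACL, optionally skipping the
 * entry that was just added.
 */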
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}
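
/*
 * Create (or replace) the se_dev_entry mapping a NodeACL to a LUN. An
 * existing entry for the same mapped_lun is RCU-swapped out to handle
 * the dynamic -> explicit NodeACL conversion case.
 */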
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				 nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
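
/*
 * Example: with 4 KiB pages and a 512-byte logical block size the
 * alignment below is max(1, 4096 / 512) = 8 blocks, so a max_sectors
 * of 1027 would be rounded down to 1024 (values for illustration only).
 */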
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*      core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
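
/*
 * Allocate a backend se_device via the HBA's alloc_device() callback,
 * then initialize generic state: per-CPU submission queues, T10
 * WWN/PR/ALUA bookkeeping, default attributes, the internal xcopy_lun,
 * and the default INQUIRY strings.
 */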
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
	mutex_init(&dev->lun_reset_mutex);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	struct config_item *item;
	int ret;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);
	config_item_put(item);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	return ret;
}
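
/*
 * Usage sketch (hypothetical caller, not part of this file): count
 * configured devices with a callback that returns 0 to keep iterating:
 *
 *	static int count_dev(struct se_device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *	...
 *	int count = 0;
 *	target_for_each_device(count_dev, &count);
 */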
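
/*
 * Second half of device setup: reserve an idr slot early so backends
 * can reference the device while configuring, call the backend's
 * configure_device(), derive block size/queue depth and PAGE_SIZE
 * aligned hw_max_sectors from the hardware values, set up ALUA and the
 * QUEUE_FULL worker, and finally mark the device DF_CONFIGURED.
 */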
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}
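
/*
 * Create the global fallback LUN 0 device: an internal rd_mcp HBA
 * backed by a dummy nullio ramdisk, stored in g_lun0_dev so tpg.c
 * can wire it up as each TPG's tpg_virt_lun0.
 */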
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
v4.17
 
   1/*******************************************************************************
   2 * Filename:  target_core_device.c (based on iscsi_target_device.c)
   3 *
   4 * This file contains the TCM Virtual Device and Disk Transport
   5 * agnostic related functions.
   6 *
   7 * (c) Copyright 2003-2013 Datera, Inc.
   8 *
   9 * Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2 of the License, or
  14 * (at your option) any later version.
  15 *
  16 * This program is distributed in the hope that it will be useful,
  17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  19 * GNU General Public License for more details.
  20 *
  21 * You should have received a copy of the GNU General Public License
  22 * along with this program; if not, write to the Free Software
  23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  24 *
  25 ******************************************************************************/
  26
  27#include <linux/net.h>
  28#include <linux/string.h>
  29#include <linux/delay.h>
  30#include <linux/timer.h>
  31#include <linux/slab.h>
  32#include <linux/spinlock.h>
  33#include <linux/kthread.h>
  34#include <linux/in.h>
  35#include <linux/export.h>
  36#include <linux/t10-pi.h>
  37#include <asm/unaligned.h>
  38#include <net/sock.h>
  39#include <net/tcp.h>
  40#include <scsi/scsi_common.h>
  41#include <scsi/scsi_proto.h>
  42
  43#include <target/target_core_base.h>
  44#include <target/target_core_backend.h>
  45#include <target/target_core_fabric.h>
  46
  47#include "target_core_internal.h"
  48#include "target_core_alua.h"
  49#include "target_core_pr.h"
  50#include "target_core_ua.h"
  51
  52static DEFINE_MUTEX(device_mutex);
  53static LIST_HEAD(device_list);
  54static DEFINE_IDR(devices_idr);
  55
  56static struct se_hba *lun0_hba;
  57/* not static, needed by tpg.c */
  58struct se_device *g_lun0_dev;
  59
  60sense_reason_t
  61transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
  62{
  63	struct se_lun *se_lun = NULL;
  64	struct se_session *se_sess = se_cmd->se_sess;
  65	struct se_node_acl *nacl = se_sess->se_node_acl;
  66	struct se_dev_entry *deve;
  67	sense_reason_t ret = TCM_NO_SENSE;
  68
  69	rcu_read_lock();
  70	deve = target_nacl_find_deve(nacl, unpacked_lun);
  71	if (deve) {
  72		atomic_long_inc(&deve->total_cmds);
  73
  74		if (se_cmd->data_direction == DMA_TO_DEVICE)
  75			atomic_long_add(se_cmd->data_length,
  76					&deve->write_bytes);
  77		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
  78			atomic_long_add(se_cmd->data_length,
  79					&deve->read_bytes);
  80
  81		se_lun = rcu_dereference(deve->se_lun);
 
 
 
 
 
 
 
 
 
 
  82
  83		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
  84			se_lun = NULL;
  85			goto out_unlock;
  86		}
  87
  88		se_cmd->se_lun = rcu_dereference(deve->se_lun);
  89		se_cmd->pr_res_key = deve->pr_res_key;
  90		se_cmd->orig_fe_lun = unpacked_lun;
  91		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
  92		se_cmd->lun_ref_active = true;
  93
  94		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
  95		    deve->lun_access_ro) {
  96			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
  97				" Access for 0x%08llx\n",
  98				se_cmd->se_tfo->get_fabric_name(),
  99				unpacked_lun);
 100			rcu_read_unlock();
 101			ret = TCM_WRITE_PROTECTED;
 102			goto ref_dev;
 103		}
 104	}
 105out_unlock:
 106	rcu_read_unlock();
 107
 108	if (!se_lun) {
 109		/*
 110		 * Use the se_portal_group->tpg_virt_lun0 to allow for
 111		 * REPORT_LUNS, et al to be returned when no active
 112		 * MappedLUN=0 exists for this Initiator Port.
 113		 */
 114		if (unpacked_lun != 0) {
 115			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 116				" Access for 0x%08llx\n",
 117				se_cmd->se_tfo->get_fabric_name(),
 118				unpacked_lun);
 
 119			return TCM_NON_EXISTENT_LUN;
 120		}
 121
 
 
 
 
 
 
 
 122		se_lun = se_sess->se_tpg->tpg_virt_lun0;
 
 
 
 123		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
 124		se_cmd->orig_fe_lun = 0;
 125		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 126
 127		percpu_ref_get(&se_lun->lun_ref);
 128		se_cmd->lun_ref_active = true;
 129
 130		/*
 131		 * Force WRITE PROTECT for virtual LUN 0
 132		 */
 133		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
 134		    (se_cmd->data_direction != DMA_NONE)) {
 135			ret = TCM_WRITE_PROTECTED;
 136			goto ref_dev;
 137		}
 138	}
 139	/*
 140	 * RCU reference protected by percpu se_lun->lun_ref taken above that
 141	 * must drop to zero (including initial reference) before this se_lun
 142	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
 143	 * target_core_fabric_configfs.c:target_fabric_port_release
 144	 */
 145ref_dev:
 146	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 147	atomic_long_inc(&se_cmd->se_dev->num_cmds);
 148
 149	if (se_cmd->data_direction == DMA_TO_DEVICE)
 150		atomic_long_add(se_cmd->data_length,
 151				&se_cmd->se_dev->write_bytes);
 152	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
 153		atomic_long_add(se_cmd->data_length,
 154				&se_cmd->se_dev->read_bytes);
 155
 156	return ret;
 157}
 158EXPORT_SYMBOL(transport_lookup_cmd_lun);
 159
 160int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 161{
 162	struct se_dev_entry *deve;
 163	struct se_lun *se_lun = NULL;
 164	struct se_session *se_sess = se_cmd->se_sess;
 165	struct se_node_acl *nacl = se_sess->se_node_acl;
 166	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 167	unsigned long flags;
 168
 169	rcu_read_lock();
 170	deve = target_nacl_find_deve(nacl, unpacked_lun);
 171	if (deve) {
 172		se_lun = rcu_dereference(deve->se_lun);
 173
 174		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
 175			se_lun = NULL;
 176			goto out_unlock;
 177		}
 178
 179		se_cmd->se_lun = rcu_dereference(deve->se_lun);
 180		se_cmd->pr_res_key = deve->pr_res_key;
 181		se_cmd->orig_fe_lun = unpacked_lun;
 182		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 183		se_cmd->lun_ref_active = true;
 184	}
 185out_unlock:
 186	rcu_read_unlock();
 187
 188	if (!se_lun) {
 189		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 190			" Access for 0x%08llx\n",
 191			se_cmd->se_tfo->get_fabric_name(),
 192			unpacked_lun);
 
 193		return -ENODEV;
 194	}
 195	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 196	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 197
 198	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
 199	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
 200	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
 201
 202	return 0;
 203}
 204EXPORT_SYMBOL(transport_lookup_tmr_lun);
 205
 206bool target_lun_is_rdonly(struct se_cmd *cmd)
 207{
 208	struct se_session *se_sess = cmd->se_sess;
 209	struct se_dev_entry *deve;
 210	bool ret;
 211
 212	rcu_read_lock();
 213	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
 214	ret = deve && deve->lun_access_ro;
 215	rcu_read_unlock();
 216
 217	return ret;
 218}
 219EXPORT_SYMBOL(target_lun_is_rdonly);
 220
 221/*
 222 * This function is called from core_scsi3_emulate_pro_register_and_move()
 223 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 224 * when a matching rtpi is found.
 225 */
 226struct se_dev_entry *core_get_se_deve_from_rtpi(
 227	struct se_node_acl *nacl,
 228	u16 rtpi)
 229{
 230	struct se_dev_entry *deve;
 231	struct se_lun *lun;
 232	struct se_portal_group *tpg = nacl->se_tpg;
 233
 234	rcu_read_lock();
 235	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 236		lun = rcu_dereference(deve->se_lun);
 237		if (!lun) {
 238			pr_err("%s device entries device pointer is"
 239				" NULL, but Initiator has access.\n",
 240				tpg->se_tpg_tfo->get_fabric_name());
 241			continue;
 242		}
 243		if (lun->lun_rtpi != rtpi)
 244			continue;
 245
 246		kref_get(&deve->pr_kref);
 247		rcu_read_unlock();
 248
 249		return deve;
 250	}
 251	rcu_read_unlock();
 252
 253	return NULL;
 254}
 255
 256void core_free_device_list_for_node(
 257	struct se_node_acl *nacl,
 258	struct se_portal_group *tpg)
 259{
 260	struct se_dev_entry *deve;
 261
 262	mutex_lock(&nacl->lun_entry_mutex);
 263	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 264		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
 265					lockdep_is_held(&nacl->lun_entry_mutex));
 266		core_disable_device_list_for_node(lun, deve, nacl, tpg);
 267	}
 268	mutex_unlock(&nacl->lun_entry_mutex);
 269}
 270
 271void core_update_device_list_access(
 272	u64 mapped_lun,
 273	bool lun_access_ro,
 274	struct se_node_acl *nacl)
 275{
 276	struct se_dev_entry *deve;
 277
 278	mutex_lock(&nacl->lun_entry_mutex);
 279	deve = target_nacl_find_deve(nacl, mapped_lun);
 280	if (deve)
 281		deve->lun_access_ro = lun_access_ro;
 282	mutex_unlock(&nacl->lun_entry_mutex);
 283}
 284
 285/*
 286 * Called with rcu_read_lock or nacl->device_list_lock held.
 287 */
 288struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
 289{
 290	struct se_dev_entry *deve;
 291
 292	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
 293		if (deve->mapped_lun == mapped_lun)
 294			return deve;
 295
 296	return NULL;
 297}
 298EXPORT_SYMBOL(target_nacl_find_deve);
 299
 300void target_pr_kref_release(struct kref *kref)
 301{
 302	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
 303						 pr_kref);
 304	complete(&deve->pr_comp);
 305}
 306
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 307static void
 308target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
 309			     bool skip_new)
 310{
 311	struct se_dev_entry *tmp;
 312
 313	rcu_read_lock();
 314	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
 315		if (skip_new && tmp == new)
 316			continue;
 317		core_scsi3_ua_allocate(tmp, 0x3F,
 318				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
 319	}
 320	rcu_read_unlock();
 321}
 322
 323int core_enable_device_list_for_node(
 324	struct se_lun *lun,
 325	struct se_lun_acl *lun_acl,
 326	u64 mapped_lun,
 327	bool lun_access_ro,
 328	struct se_node_acl *nacl,
 329	struct se_portal_group *tpg)
 330{
 331	struct se_dev_entry *orig, *new;
 332
 333	new = kzalloc(sizeof(*new), GFP_KERNEL);
 334	if (!new) {
 335		pr_err("Unable to allocate se_dev_entry memory\n");
 336		return -ENOMEM;
 337	}
 338
 339	atomic_set(&new->ua_count, 0);
 340	spin_lock_init(&new->ua_lock);
 341	INIT_LIST_HEAD(&new->ua_list);
 342	INIT_LIST_HEAD(&new->lun_link);
 343
 344	new->mapped_lun = mapped_lun;
 345	kref_init(&new->pr_kref);
 346	init_completion(&new->pr_comp);
 347
 348	new->lun_access_ro = lun_access_ro;
 349	new->creation_time = get_jiffies_64();
 350	new->attach_count++;
 351
 352	mutex_lock(&nacl->lun_entry_mutex);
 353	orig = target_nacl_find_deve(nacl, mapped_lun);
 354	if (orig && orig->se_lun) {
 355		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
 356					lockdep_is_held(&nacl->lun_entry_mutex));
 357
 358		if (orig_lun != lun) {
 359			pr_err("Existing orig->se_lun doesn't match new lun"
 360			       " for dynamic -> explicit NodeACL conversion:"
 361				" %s\n", nacl->initiatorname);
 362			mutex_unlock(&nacl->lun_entry_mutex);
 363			kfree(new);
 364			return -EINVAL;
 365		}
 366		if (orig->se_lun_acl != NULL) {
 367			pr_warn_ratelimited("Detected existing explicit"
 368				" se_lun_acl->se_lun_group reference for %s"
 369				" mapped_lun: %llu, failing\n",
 370				 nacl->initiatorname, mapped_lun);
 371			mutex_unlock(&nacl->lun_entry_mutex);
 372			kfree(new);
 373			return -EINVAL;
 374		}
 375
 376		rcu_assign_pointer(new->se_lun, lun);
 377		rcu_assign_pointer(new->se_lun_acl, lun_acl);
 378		hlist_del_rcu(&orig->link);
 379		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
 380		mutex_unlock(&nacl->lun_entry_mutex);
 381
 382		spin_lock(&lun->lun_deve_lock);
 383		list_del(&orig->lun_link);
 384		list_add_tail(&new->lun_link, &lun->lun_deve_list);
 385		spin_unlock(&lun->lun_deve_lock);
 386
 387		kref_put(&orig->pr_kref, target_pr_kref_release);
 388		wait_for_completion(&orig->pr_comp);
 389
 390		target_luns_data_has_changed(nacl, new, true);
 391		kfree_rcu(orig, rcu_head);
 392		return 0;
 393	}
 394
 395	rcu_assign_pointer(new->se_lun, lun);
 396	rcu_assign_pointer(new->se_lun_acl, lun_acl);
 397	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
 398	mutex_unlock(&nacl->lun_entry_mutex);
 399
 400	spin_lock(&lun->lun_deve_lock);
 401	list_add_tail(&new->lun_link, &lun->lun_deve_list);
 402	spin_unlock(&lun->lun_deve_lock);
 403
 404	target_luns_data_has_changed(nacl, new, true);
 405	return 0;
 406}
 407
 408/*
 409 *	Called with se_node_acl->lun_entry_mutex held.
 410 */
 411void core_disable_device_list_for_node(
 412	struct se_lun *lun,
 413	struct se_dev_entry *orig,
 414	struct se_node_acl *nacl,
 415	struct se_portal_group *tpg)
 416{
 417	/*
 418	 * rcu_dereference_raw protected by se_lun->lun_group symlink
 419	 * reference to se_device->dev_group.
 420	 */
 421	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
 
 
 422	/*
 423	 * If the MappedLUN entry is being disabled, the entry in
 424	 * lun->lun_deve_list must be removed now before clearing the
 425	 * struct se_dev_entry pointers below as logic in
 426	 * core_alua_do_transition_tg_pt() depends on these being present.
 427	 *
 428	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
 429	 * that have not been explicitly converted to MappedLUNs ->
 430	 * struct se_lun_acl, but we remove deve->lun_link from
 431	 * lun->lun_deve_list. This also means that active UAs and
 432	 * NodeACL context specific PR metadata for demo-mode
 433	 * MappedLUN *deve will be released below..
 434	 */
 435	spin_lock(&lun->lun_deve_lock);
 436	list_del(&orig->lun_link);
 437	spin_unlock(&lun->lun_deve_lock);
 438	/*
 439	 * Disable struct se_dev_entry LUN ACL mapping
 440	 */
 441	core_scsi3_ua_release_all(orig);
 442
 443	hlist_del_rcu(&orig->link);
 444	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
 445	orig->lun_access_ro = false;
 446	orig->creation_time = 0;
 447	orig->attach_count--;
 448	/*
 449	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
 450	 * or REGISTER_AND_MOVE PR operation to complete.
 451	 */
 452	kref_put(&orig->pr_kref, target_pr_kref_release);
 453	wait_for_completion(&orig->pr_comp);
 454
 455	rcu_assign_pointer(orig->se_lun, NULL);
 456	rcu_assign_pointer(orig->se_lun_acl, NULL);
 457
 458	kfree_rcu(orig, rcu_head);
 459
 460	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
 461	target_luns_data_has_changed(nacl, NULL, false);
 462}
 463
 464/*      core_clear_lun_from_tpg():
 465 *
 466 *
 467 */
 468void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 469{
 470	struct se_node_acl *nacl;
 471	struct se_dev_entry *deve;
 472
 473	mutex_lock(&tpg->acl_node_mutex);
 474	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
 475
 476		mutex_lock(&nacl->lun_entry_mutex);
 477		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 478			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
 479					lockdep_is_held(&nacl->lun_entry_mutex));
 480
 481			if (lun != tmp_lun)
 482				continue;
 483
 484			core_disable_device_list_for_node(lun, deve, nacl, tpg);
 485		}
 486		mutex_unlock(&nacl->lun_entry_mutex);
 487	}
 488	mutex_unlock(&tpg->acl_node_mutex);
 489}
 490
 491int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
 492{
 493	struct se_lun *tmp;
 494
 495	spin_lock(&dev->se_port_lock);
 496	if (dev->export_count == 0x0000ffff) {
 497		pr_warn("Reached dev->dev_port_count =="
 498				" 0x0000ffff\n");
 499		spin_unlock(&dev->se_port_lock);
 500		return -ENOSPC;
 501	}
 502again:
 503	/*
 504	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
 505	 * Here is the table from spc4r17 section 7.7.3.8.
 506	 *
 507	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
 508	 *
 509	 * Code      Description
 510	 * 0h        Reserved
 511	 * 1h        Relative port 1, historically known as port A
 512	 * 2h        Relative port 2, historically known as port B
 513	 * 3h to FFFFh    Relative port 3 through 65 535
 514	 */
 515	lun->lun_rtpi = dev->dev_rpti_counter++;
 516	if (!lun->lun_rtpi)
 517		goto again;
 518
 519	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
 520		/*
 521		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
 522		 * for 16-bit wrap..
 523		 */
 524		if (lun->lun_rtpi == tmp->lun_rtpi)
 525			goto again;
 526	}
 527	spin_unlock(&dev->se_port_lock);
 528
 529	return 0;
 530}
 531
 532static void se_release_vpd_for_dev(struct se_device *dev)
 533{
 534	struct t10_vpd *vpd, *vpd_tmp;
 535
 536	spin_lock(&dev->t10_wwn.t10_vpd_lock);
 537	list_for_each_entry_safe(vpd, vpd_tmp,
 538			&dev->t10_wwn.t10_vpd_list, vpd_list) {
 539		list_del(&vpd->vpd_list);
 540		kfree(vpd);
 541	}
 542	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
 543}
 544
 545static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
 546{
 547	u32 aligned_max_sectors;
 548	u32 alignment;
 549	/*
 550	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
 551	 * transport_allocate_data_tasks() operation.
 552	 */
 553	alignment = max(1ul, PAGE_SIZE / block_size);
 554	aligned_max_sectors = rounddown(max_sectors, alignment);
 555
 556	if (max_sectors != aligned_max_sectors)
 557		pr_info("Rounding down aligned max_sectors from %u to %u\n",
 558			max_sectors, aligned_max_sectors);
 559
 560	return aligned_max_sectors;
 561}
 562
 563int core_dev_add_lun(
 564	struct se_portal_group *tpg,
 565	struct se_device *dev,
 566	struct se_lun *lun)
 567{
 568	int rc;
 569
 570	rc = core_tpg_add_lun(tpg, lun, false, dev);
 571	if (rc < 0)
 572		return rc;
 573
 574	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
 575		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
 576		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 577		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
 578	/*
 579	 * Update LUN maps for dynamically added initiators when
 580	 * generate_node_acl is enabled.
 581	 */
 582	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
 583		struct se_node_acl *acl;
 584
 585		mutex_lock(&tpg->acl_node_mutex);
 586		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
 587			if (acl->dynamic_node_acl &&
 588			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
 589			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
 590				core_tpg_add_node_to_devs(acl, tpg, lun);
 591			}
 592		}
 593		mutex_unlock(&tpg->acl_node_mutex);
 594	}
 595
 596	return 0;
 597}
 598
 599/*      core_dev_del_lun():
 600 *
 601 *
 602 */
 603void core_dev_del_lun(
 604	struct se_portal_group *tpg,
 605	struct se_lun *lun)
 606{
 607	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
 608		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
 609		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 610		tpg->se_tpg_tfo->get_fabric_name());
 611
 612	core_tpg_remove_lun(tpg, lun);
 613}
 614
 615struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 616	struct se_portal_group *tpg,
 617	struct se_node_acl *nacl,
 618	u64 mapped_lun,
 619	int *ret)
 620{
 621	struct se_lun_acl *lacl;
 622
 623	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
 624		pr_err("%s InitiatorName exceeds maximum size.\n",
 625			tpg->se_tpg_tfo->get_fabric_name());
 626		*ret = -EOVERFLOW;
 627		return NULL;
 628	}
 629	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
 630	if (!lacl) {
 631		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
 632		*ret = -ENOMEM;
 633		return NULL;
 634	}
 635
 636	lacl->mapped_lun = mapped_lun;
 637	lacl->se_lun_nacl = nacl;
 638
 639	return lacl;
 640}
 641
 642int core_dev_add_initiator_node_lun_acl(
 643	struct se_portal_group *tpg,
 644	struct se_lun_acl *lacl,
 645	struct se_lun *lun,
 646	bool lun_access_ro)
 647{
 648	struct se_node_acl *nacl = lacl->se_lun_nacl;
 649	/*
 650	 * rcu_dereference_raw protected by se_lun->lun_group symlink
 651	 * reference to se_device->dev_group.
 652	 */
 653	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 654
 655	if (!nacl)
 656		return -EINVAL;
 657
 658	if (lun->lun_access_ro)
 659		lun_access_ro = true;
 660
 661	lacl->se_lun = lun;
 662
 663	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
 664			lun_access_ro, nacl, tpg) < 0)
 665		return -EINVAL;
 666
 667	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
 668		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 669		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
 670		lun_access_ro ? "RO" : "RW",
 671		nacl->initiatorname);
 672	/*
 673	 * Check to see if there are any existing persistent reservation APTPL
  674	 * pre-registrations that need to be enabled for this LUN ACL.
 675	 */
 676	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
 677					    lacl->mapped_lun);
 678	return 0;
 679}
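/*
 * Editorial note: in the function above the effective access mode is
 * the more restrictive of the per-LUN setting and the requested
 * per-ACL setting:
 *
 *	lun->lun_access_ro	requested lun_access_ro		result
 *	false			false				RW
 *	false			true				RO
 *	true			don't care			RO
 */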
 680
 681int core_dev_del_initiator_node_lun_acl(
 682	struct se_lun *lun,
 683	struct se_lun_acl *lacl)
 684{
 685	struct se_portal_group *tpg = lun->lun_tpg;
 686	struct se_node_acl *nacl;
 687	struct se_dev_entry *deve;
 688
 689	nacl = lacl->se_lun_nacl;
 690	if (!nacl)
 691		return -EINVAL;
 692
 693	mutex_lock(&nacl->lun_entry_mutex);
 694	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
 695	if (deve)
 696		core_disable_device_list_for_node(lun, deve, nacl, tpg);
 697	mutex_unlock(&nacl->lun_entry_mutex);
 698
 699	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
 700		" InitiatorNode: %s Mapped LUN: %llu\n",
  701		tpg->se_tpg_tfo->fabric_name,
 702		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 703		nacl->initiatorname, lacl->mapped_lun);
 704
 705	return 0;
 706}
 707
 708void core_dev_free_initiator_node_lun_acl(
 709	struct se_portal_group *tpg,
 710	struct se_lun_acl *lacl)
 711{
 712	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
  713		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
  714		tpg->se_tpg_tfo->tpg_get_tag(tpg),
  715		tpg->se_tpg_tfo->fabric_name,
 716		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
 717
 718	kfree(lacl);
 719}
 720
 721static void scsi_dump_inquiry(struct se_device *dev)
 722{
 723	struct t10_wwn *wwn = &dev->t10_wwn;
 724	char buf[17];
 725	int i, device_type;
 726	/*
 727	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
 728	 */
 729	for (i = 0; i < 8; i++)
 730		if (wwn->vendor[i] >= 0x20)
 731			buf[i] = wwn->vendor[i];
 732		else
 733			buf[i] = ' ';
 734	buf[i] = '\0';
 735	pr_debug("  Vendor: %s\n", buf);
 736
 737	for (i = 0; i < 16; i++)
 738		if (wwn->model[i] >= 0x20)
 739			buf[i] = wwn->model[i];
 740		else
 741			buf[i] = ' ';
 742	buf[i] = '\0';
 743	pr_debug("  Model: %s\n", buf);
 744
 745	for (i = 0; i < 4; i++)
 746		if (wwn->revision[i] >= 0x20)
 747			buf[i] = wwn->revision[i];
 748		else
 749			buf[i] = ' ';
 750	buf[i] = '\0';
 751	pr_debug("  Revision: %s\n", buf);
 752
 753	device_type = dev->transport->get_device_type(dev);
 754	pr_debug("  Type:   %s ", scsi_device_type(device_type));
 755}
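/*
 * Editorial note: with the virtual-backend defaults preloaded in
 * target_configure_device() below, the dump above would read roughly
 * as follows (illustrative output, assuming the IBLOCK backend and
 * dynamic debug enabled):
 *
 *	  Vendor: LIO-ORG
 *	  Model: IBLOCK
 *	  Revision: 4.0
 *	  Type:   Direct-Access
 */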
 756
 757struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 758{
 759	struct se_device *dev;
 760	struct se_lun *xcopy_lun;
 761
 762	dev = hba->backend->ops->alloc_device(hba, name);
 763	if (!dev)
 764		return NULL;
 765
 766	dev->se_hba = hba;
 767	dev->transport = hba->backend->ops;
 768	dev->prot_length = sizeof(struct t10_pi_tuple);
 769	dev->hba_index = hba->hba_index;
 770
 771	INIT_LIST_HEAD(&dev->dev_sep_list);
 772	INIT_LIST_HEAD(&dev->dev_tmr_list);
 773	INIT_LIST_HEAD(&dev->delayed_cmd_list);
 774	INIT_LIST_HEAD(&dev->state_list);
 775	INIT_LIST_HEAD(&dev->qf_cmd_list);
 776	spin_lock_init(&dev->execute_task_lock);
 777	spin_lock_init(&dev->delayed_cmd_lock);
 778	spin_lock_init(&dev->dev_reservation_lock);
 779	spin_lock_init(&dev->se_port_lock);
 780	spin_lock_init(&dev->se_tmr_lock);
 781	spin_lock_init(&dev->qf_cmd_lock);
 782	sema_init(&dev->caw_sem, 1);
 783	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
 784	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
 785	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
 786	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
 787	spin_lock_init(&dev->t10_pr.registration_lock);
 788	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
 789	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
 790	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
 791	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
 792	spin_lock_init(&dev->t10_alua.lba_map_lock);
 793
 794	dev->t10_wwn.t10_dev = dev;
 795	dev->t10_alua.t10_dev = dev;
 796
 797	dev->dev_attrib.da_dev = dev;
 798	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
 799	dev->dev_attrib.emulate_dpo = 1;
 800	dev->dev_attrib.emulate_fua_write = 1;
 801	dev->dev_attrib.emulate_fua_read = 1;
 802	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
 803	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
 804	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
 805	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
 806	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
 807	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
 808	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
 809	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
 810	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
 811	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
 812	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
 813	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
 814	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
 815	dev->dev_attrib.max_unmap_block_desc_count =
 816		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
 817	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
 818	dev->dev_attrib.unmap_granularity_alignment =
 819				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
 820	dev->dev_attrib.unmap_zeroes_data =
 821				DA_UNMAP_ZEROES_DATA_DEFAULT;
 822	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
 823
 824	xcopy_lun = &dev->xcopy_lun;
 825	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
 826	init_completion(&xcopy_lun->lun_ref_comp);
 827	init_completion(&xcopy_lun->lun_shutdown_comp);
 828	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
 829	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
 830	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
 831	xcopy_lun->lun_tpg = &xcopy_pt_tpg;
 832
 833	return dev;
 834}
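/*
 * Lifecycle sketch (editorial): a backend device created through
 * configfs typically goes through
 *
 *	dev = target_alloc_device(hba, name);
 *	hba->backend->ops->set_configfs_dev_params(dev, params, len);
 *	target_configure_device(dev);	(sets DF_CONFIGURED)
 *	...
 *	target_free_device(dev);
 *
 * core_dev_setup_virtual_lun0() below follows exactly this sequence.
 */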
 835
  836	/*
  837	 * Check if the underlying struct block_device request_queue supports
  838	 * discard (QUEUE_FLAG_DISCARD), i.e. UNMAP/WRITE SAME w/ UNMAP in SCSI
  839	 * or TRIM in ATA, in which case we need to report TPE=1 to initiators.
  840	 */
 841bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
 842				       struct request_queue *q)
 843{
 844	int block_size = queue_logical_block_size(q);
 845
 846	if (!blk_queue_discard(q))
 847		return false;
 848
 849	attrib->max_unmap_lba_count =
 850		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
 851	/*
  852	 * Currently hardcoded to 1 in Linux/SCSI code.
 853	 */
 854	attrib->max_unmap_block_desc_count = 1;
 855	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
 856	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
 857								block_size;
  858	attrib->unmap_zeroes_data = q->limits.max_write_zeroes_sectors;
 859	return true;
 860}
 861EXPORT_SYMBOL(target_configure_unmap_from_queue);
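/*
 * Worked example (editorial): for a queue with 512-byte logical blocks,
 * max_discard_sectors = 8192, discard_granularity = 4096 and
 * discard_alignment = 0, the code above yields
 *
 *	max_unmap_lba_count         = 8192 >> (ilog2(512) - 9) = 8192
 *	unmap_granularity           = 4096 / 512               = 8
 *	unmap_granularity_alignment = 0 / 512                  = 0
 *
 * i.e. the queue limits are converted from 512-byte sectors and bytes
 * into logical blocks.
 */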
 862
 863/*
  864	 * Convert from the block size advertised to the initiator to the
  865	 * 512-byte units unconditionally used by the Linux block layer.
 866 */
 867sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
 868{
 869	switch (dev->dev_attrib.block_size) {
 870	case 4096:
 871		return lb << 3;
 872	case 2048:
 873		return lb << 2;
 874	case 1024:
 875		return lb << 1;
 876	default:
 877		return lb;
 878	}
 879}
 880EXPORT_SYMBOL(target_to_linux_sector);
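/*
 * Example (editorial): a device advertising 4096-byte blocks maps
 * LBA 16 to Linux sector 16 << 3 = 128, since the block layer always
 * counts in 512-byte sectors.
 */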
 881
 882/**
 883 * target_find_device - find a se_device by its dev_index
 884 * @id: dev_index
 885 * @do_depend: true if caller needs target_depend_item to be done
 886 *
 887 * If do_depend is true, the caller must do a target_undepend_item
 888 * when finished using the device.
 889 *
  890	 * If do_depend is false, this function must be called from a configfs
  891	 * callback or during device removal.
 892 */
 893struct se_device *target_find_device(int id, bool do_depend)
 894{
 895	struct se_device *dev;
 896
 897	mutex_lock(&device_mutex);
 898	dev = idr_find(&devices_idr, id);
 899	if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
 900		dev = NULL;
 901	mutex_unlock(&device_mutex);
 902	return dev;
 903}
 904EXPORT_SYMBOL(target_find_device);
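/*
 * Usage sketch (editorial, error handling elided): callers outside a
 * configfs context pass do_depend = true and drop the dependency when
 * they are done with the device:
 *
 *	struct se_device *dev = target_find_device(id, true);
 *
 *	if (dev) {
 *		... use dev ...
 *		target_undepend_item(&dev->dev_group.cg_item);
 *	}
 */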
 905
 906struct devices_idr_iter {
 907	int (*fn)(struct se_device *dev, void *data);
 908	void *data;
 909};
 910
 911static int target_devices_idr_iter(int id, void *p, void *data)
 912{
 913	struct devices_idr_iter *iter = data;
 914	struct se_device *dev = p;
 915
 916	/*
 917	 * We add the device early to the idr, so it can be used
 918	 * by backend modules during configuration. We do not want
 919	 * to allow other callers to access partially setup devices,
 920	 * so we skip them here.
 921	 */
 922	if (!(dev->dev_flags & DF_CONFIGURED))
 923		return 0;
 924
 925	return iter->fn(dev, iter->data);
 926}
 927
 928/**
 929 * target_for_each_device - iterate over configured devices
 930 * @fn: iterator function
 931 * @data: pointer to data that will be passed to fn
 932 *
 933 * fn must return 0 to continue looping over devices. non-zero will break
 934 * from the loop and return that value to the caller.
 935 */
 936int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
 937			   void *data)
 938{
 939	struct devices_idr_iter iter;
 940	int ret;
 941
 942	iter.fn = fn;
 943	iter.data = data;
 944
 945	mutex_lock(&device_mutex);
 946	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
 947	mutex_unlock(&device_mutex);
 948	return ret;
 949}
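/*
 * Example callback (editorial sketch): count all configured devices.
 * Returning 0 keeps the iteration going; non-zero stops it.
 *
 *	static int count_dev(struct se_device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	target_for_each_device(count_dev, &count);
 */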
 950
 951int target_configure_device(struct se_device *dev)
 952{
 953	struct se_hba *hba = dev->se_hba;
 954	int ret, id;
 955
 956	if (dev->dev_flags & DF_CONFIGURED) {
 957		pr_err("se_dev->se_dev_ptr already set for storage"
 958				" object\n");
 959		return -EEXIST;
 960	}
 961
 962	/*
  963	 * Add the device to the idr early so that backend modules like
  964	 * tcmu can look it up during their configuration.
 965	 */
 966	mutex_lock(&device_mutex);
 967	/*
 968	 * Use cyclic to try and avoid collisions with devices
 969	 * that were recently removed.
 970	 */
 971	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
 972	mutex_unlock(&device_mutex);
 973	if (id < 0) {
 974		ret = -ENOMEM;
 975		goto out;
 976	}
 977	dev->dev_index = id;
 978
 979	ret = dev->transport->configure_device(dev);
 980	if (ret)
 981		goto out_free_index;
 982	/*
  983	 * XXX: there is not much point in having two different values here.
 984	 */
 985	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
 986	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
 987
 988	/*
 989	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
 990	 */
 991	dev->dev_attrib.hw_max_sectors =
 992		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
 993					 dev->dev_attrib.hw_block_size);
 994	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 995
 996	dev->creation_time = get_jiffies_64();
 997
 998	ret = core_setup_alua(dev);
 999	if (ret)
1000		goto out_destroy_device;
1001
1002	/*
1003	 * Startup the struct se_device processing thread
1004	 */
1005	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
1006				      dev->transport->name);
1007	if (!dev->tmr_wq) {
1008		pr_err("Unable to create tmr workqueue for %s\n",
1009			dev->transport->name);
1010		ret = -ENOMEM;
1011		goto out_free_alua;
1012	}
1013
1014	/*
1015	 * Setup work_queue for QUEUE_FULL
1016	 */
1017	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1018
1019	/*
1020	 * Preload the initial INQUIRY const values if we are doing
1021	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1022	 * passthrough because this is being provided by the backend LLD.
1023	 */
1024	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
1025		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1026		strncpy(&dev->t10_wwn.model[0],
1027			dev->transport->inquiry_prod, 16);
1028		strncpy(&dev->t10_wwn.revision[0],
1029			dev->transport->inquiry_rev, 4);
1030	}
1031
1032	scsi_dump_inquiry(dev);
1033
1034	spin_lock(&hba->device_lock);
1035	hba->dev_count++;
1036	spin_unlock(&hba->device_lock);
1037
1038	dev->dev_flags |= DF_CONFIGURED;
1039
1040	return 0;
1041
1042out_free_alua:
1043	core_alua_free_lu_gp_mem(dev);
1044out_destroy_device:
1045	dev->transport->destroy_device(dev);
1046out_free_index:
1047	mutex_lock(&device_mutex);
1048	idr_remove(&devices_idr, dev->dev_index);
1049	mutex_unlock(&device_mutex);
1050out:
1051	se_release_vpd_for_dev(dev);
1052	return ret;
1053}
1054
1055void target_free_device(struct se_device *dev)
1056{
1057	struct se_hba *hba = dev->se_hba;
1058
1059	WARN_ON(!list_empty(&dev->dev_sep_list));
1060
1061	if (dev->dev_flags & DF_CONFIGURED) {
1062		destroy_workqueue(dev->tmr_wq);
1063
1064		dev->transport->destroy_device(dev);
1065
1066		mutex_lock(&device_mutex);
1067		idr_remove(&devices_idr, dev->dev_index);
1068		mutex_unlock(&device_mutex);
1069
1070		spin_lock(&hba->device_lock);
1071		hba->dev_count--;
1072		spin_unlock(&hba->device_lock);
1073	}
1074
1075	core_alua_free_lu_gp_mem(dev);
1076	core_alua_set_lba_map(dev, NULL, 0, 0);
1077	core_scsi3_free_all_registrations(dev);
1078	se_release_vpd_for_dev(dev);
1079
1080	if (dev->transport->free_prot)
1081		dev->transport->free_prot(dev);
1082
1083	dev->transport->free_device(dev);
1084}
1085
1086int core_dev_setup_virtual_lun0(void)
1087{
1088	struct se_hba *hba;
1089	struct se_device *dev;
1090	char buf[] = "rd_pages=8,rd_nullio=1";
1091	int ret;
1092
1093	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1094	if (IS_ERR(hba))
1095		return PTR_ERR(hba);
1096
1097	dev = target_alloc_device(hba, "virt_lun0");
1098	if (!dev) {
1099		ret = -ENOMEM;
1100		goto out_free_hba;
1101	}
1102
1103	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
1104
1105	ret = target_configure_device(dev);
1106	if (ret)
1107		goto out_free_se_dev;
1108
1109	lun0_hba = hba;
1110	g_lun0_dev = dev;
1111	return 0;
1112
1113out_free_se_dev:
1114	target_free_device(dev);
1115out_free_hba:
1116	core_delete_hba(hba);
1117	return ret;
1118}
1119
1120
1121void core_dev_release_virtual_lun0(void)
1122{
1123	struct se_hba *hba = lun0_hba;
1124
1125	if (!hba)
1126		return;
1127
1128	if (g_lun0_dev)
1129		target_free_device(g_lun0_dev);
1130	core_delete_hba(hba);
1131}
1132
1133/*
1134 * Common CDB parsing for kernel and user passthrough.
1135 */
1136sense_reason_t
1137passthrough_parse_cdb(struct se_cmd *cmd,
1138	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
1139{
1140	unsigned char *cdb = cmd->t_task_cdb;
1141	struct se_device *dev = cmd->se_dev;
1142	unsigned int size;
1143
1144	/*
 1145	 * Clear a lun set in the cdb if the initiator talking to us spoke
 1146	 * an old standards version, as we can't assume the underlying device
 1147	 * won't choke on it.
1148	 */
1149	switch (cdb[0]) {
1150	case READ_10: /* SBC - RDProtect */
1151	case READ_12: /* SBC - RDProtect */
1152	case READ_16: /* SBC - RDProtect */
1153	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1154	case VERIFY: /* SBC - VRProtect */
1155	case VERIFY_16: /* SBC - VRProtect */
1156	case WRITE_VERIFY: /* SBC - VRProtect */
1157	case WRITE_VERIFY_12: /* SBC - VRProtect */
1158	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
1159		break;
1160	default:
1161		cdb[1] &= 0x1f; /* clear logical unit number */
1162		break;
1163	}
1164
1165	/*
 1166	 * For REPORT LUNS we always need to emulate the response; everything
 1167	 * else is passed through to the backend.
1168	 */
1169	if (cdb[0] == REPORT_LUNS) {
1170		cmd->execute_cmd = spc_emulate_report_luns;
1171		return TCM_NO_SENSE;
1172	}
1173
1174	/*
1175	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
1176	 * emulate the response, since tcmu does not have the information
1177	 * required to process these commands.
1178	 */
1179	if (!(dev->transport->transport_flags &
1180	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
1181		if (cdb[0] == PERSISTENT_RESERVE_IN) {
1182			cmd->execute_cmd = target_scsi3_emulate_pr_in;
1183			size = get_unaligned_be16(&cdb[7]);
1184			return target_cmd_size_check(cmd, size);
1185		}
1186		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
1187			cmd->execute_cmd = target_scsi3_emulate_pr_out;
1188			size = get_unaligned_be32(&cdb[5]);
1189			return target_cmd_size_check(cmd, size);
1190		}
1191
1192		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
1193			cmd->execute_cmd = target_scsi2_reservation_release;
1194			if (cdb[0] == RELEASE_10)
1195				size = get_unaligned_be16(&cdb[7]);
1196			else
1197				size = cmd->data_length;
1198			return target_cmd_size_check(cmd, size);
1199		}
1200		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
1201			cmd->execute_cmd = target_scsi2_reservation_reserve;
1202			if (cdb[0] == RESERVE_10)
1203				size = get_unaligned_be16(&cdb[7]);
1204			else
1205				size = cmd->data_length;
1206			return target_cmd_size_check(cmd, size);
1207		}
1208	}
1209
1210	/* Set DATA_CDB flag for ops that should have it */
1211	switch (cdb[0]) {
1212	case READ_6:
1213	case READ_10:
1214	case READ_12:
1215	case READ_16:
1216	case WRITE_6:
1217	case WRITE_10:
1218	case WRITE_12:
1219	case WRITE_16:
1220	case WRITE_VERIFY:
1221	case WRITE_VERIFY_12:
1222	case WRITE_VERIFY_16:
1223	case COMPARE_AND_WRITE:
1224	case XDWRITEREAD_10:
1225		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1226		break;
1227	case VARIABLE_LENGTH_CMD:
1228		switch (get_unaligned_be16(&cdb[8])) {
1229		case READ_32:
1230		case WRITE_32:
1231		case WRITE_VERIFY_32:
1232		case XDWRITEREAD_32:
1233			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1234			break;
1235		}
1236	}
1237
1238	cmd->execute_cmd = exec_cmd;
1239
1240	return TCM_NO_SENSE;
1241}
1242EXPORT_SYMBOL(passthrough_parse_cdb);
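/*
 * Usage sketch (editorial): a passthrough backend wires its parse_cdb
 * method to this helper and supplies only its execution callback, e.g.
 *
 *	static sense_reason_t my_exec_cmd(struct se_cmd *cmd)
 *	{
 *		... hand the unmodified CDB to the underlying device ...
 *	}
 *
 *	static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, my_exec_cmd);
 *	}
 *
 * my_exec_cmd and my_parse_cdb are hypothetical names; the pscsi and
 * tcmu backends follow this pattern.
 */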