v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * Filename:  target_core_device.c (based on iscsi_target_device.c)
   4 *
   5 * This file contains the TCM Virtual Device and Disk Transport
   6 * agnostic related functions.
   7 *
   8 * (c) Copyright 2003-2013 Datera, Inc.
   9 *
  10 * Nicholas A. Bellinger <nab@kernel.org>
  11 *
  12 ******************************************************************************/
  13
  14#include <linux/net.h>
  15#include <linux/string.h>
  16#include <linux/delay.h>
  17#include <linux/timer.h>
  18#include <linux/slab.h>
  19#include <linux/spinlock.h>
  20#include <linux/kthread.h>
  21#include <linux/in.h>
  22#include <linux/export.h>
  23#include <linux/t10-pi.h>
  24#include <asm/unaligned.h>
  25#include <net/sock.h>
  26#include <net/tcp.h>
  27#include <scsi/scsi_common.h>
  28#include <scsi/scsi_proto.h>
  29
  30#include <target/target_core_base.h>
  31#include <target/target_core_backend.h>
  32#include <target/target_core_fabric.h>
  33
  34#include "target_core_internal.h"
  35#include "target_core_alua.h"
  36#include "target_core_pr.h"
  37#include "target_core_ua.h"
  38
  39static DEFINE_MUTEX(device_mutex);
  40static LIST_HEAD(device_list);
  41static DEFINE_IDR(devices_idr);
  42
  43static struct se_hba *lun0_hba;
  44/* not static, needed by tpg.c */
  45struct se_device *g_lun0_dev;
  46
  47sense_reason_t
  48transport_lookup_cmd_lun(struct se_cmd *se_cmd)
  49{
  50	struct se_lun *se_lun = NULL;
  51	struct se_session *se_sess = se_cmd->se_sess;
  52	struct se_node_acl *nacl = se_sess->se_node_acl;
  53	struct se_dev_entry *deve;
  54	sense_reason_t ret = TCM_NO_SENSE;
  55
  56	rcu_read_lock();
  57	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
  58	if (deve) {
  59		atomic_long_inc(&deve->total_cmds);
  60
  61		if (se_cmd->data_direction == DMA_TO_DEVICE)
  62			atomic_long_add(se_cmd->data_length,
  63					&deve->write_bytes);
  64		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
  65			atomic_long_add(se_cmd->data_length,
  66					&deve->read_bytes);
  67
  68		se_lun = rcu_dereference(deve->se_lun);
  69
  70		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
  71			se_lun = NULL;
  72			goto out_unlock;
  73		}
  74
  75		se_cmd->se_lun = se_lun;
  76		se_cmd->pr_res_key = deve->pr_res_key;
  77		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
  78		se_cmd->lun_ref_active = true;
  79
  80		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
  81		    deve->lun_access_ro) {
  82			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
  83				" Access for 0x%08llx\n",
  84				se_cmd->se_tfo->fabric_name,
  85				se_cmd->orig_fe_lun);
  86			rcu_read_unlock();
  87			ret = TCM_WRITE_PROTECTED;
  88			goto ref_dev;
  89		}
  90	}
  91out_unlock:
  92	rcu_read_unlock();
  93
  94	if (!se_lun) {
  95		/*
  96		 * Use the se_portal_group->tpg_virt_lun0 to allow for
  97		 * REPORT_LUNS, et al to be returned when no active
  98		 * MappedLUN=0 exists for this Initiator Port.
  99		 */
 100		if (se_cmd->orig_fe_lun != 0) {
 101			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 102				" Access for 0x%08llx from %s\n",
 103				se_cmd->se_tfo->fabric_name,
 104				se_cmd->orig_fe_lun,
 105				nacl->initiatorname);
 106			return TCM_NON_EXISTENT_LUN;
 107		}
 108
 109		se_lun = se_sess->se_tpg->tpg_virt_lun0;
 110		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
 111		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 112
 113		percpu_ref_get(&se_lun->lun_ref);
 114		se_cmd->lun_ref_active = true;
 115
 116		/*
 117		 * Force WRITE PROTECT for virtual LUN 0
 118		 */
 119		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
 120		    (se_cmd->data_direction != DMA_NONE)) {
 121			ret = TCM_WRITE_PROTECTED;
 122			goto ref_dev;
 123		}
 124	}
 125	/*
 126	 * RCU reference protected by percpu se_lun->lun_ref taken above that
 127	 * must drop to zero (including initial reference) before this se_lun
 128	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
 129	 * target_core_fabric_configfs.c:target_fabric_port_release
 130	 */
 131ref_dev:
 132	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 133	atomic_long_inc(&se_cmd->se_dev->num_cmds);
 134
 135	if (se_cmd->data_direction == DMA_TO_DEVICE)
 136		atomic_long_add(se_cmd->data_length,
 137				&se_cmd->se_dev->write_bytes);
 138	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
 139		atomic_long_add(se_cmd->data_length,
 140				&se_cmd->se_dev->read_bytes);
 141
 142	return ret;
 143}
 144EXPORT_SYMBOL(transport_lookup_cmd_lun);
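/*
 * Typical flow (illustrative): the fabric driver fills in
 * se_cmd->orig_fe_lun and the core performs this lookup before CDB
 * parsing.  On TCM_NO_SENSE, se_cmd->se_lun and se_cmd->se_dev are
 * valid and a percpu se_lun->lun_ref is held (lun_ref_active), to be
 * dropped once the command completes.
 */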
 145
 146int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
 147{
 148	struct se_dev_entry *deve;
 149	struct se_lun *se_lun = NULL;
 150	struct se_session *se_sess = se_cmd->se_sess;
 151	struct se_node_acl *nacl = se_sess->se_node_acl;
 152	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 153	unsigned long flags;
 154
 155	rcu_read_lock();
 156	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
 157	if (deve) {
 158		se_lun = rcu_dereference(deve->se_lun);
 159
 160		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
 161			se_lun = NULL;
 162			goto out_unlock;
 163		}
 164
 165		se_cmd->se_lun = se_lun;
 166		se_cmd->pr_res_key = deve->pr_res_key;
 167		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 168		se_cmd->lun_ref_active = true;
 169	}
 170out_unlock:
 171	rcu_read_unlock();
 172
 173	if (!se_lun) {
 174		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 175			" Access for 0x%08llx for %s\n",
 176			se_cmd->se_tfo->fabric_name,
 177			se_cmd->orig_fe_lun,
 178			nacl->initiatorname);
 179		return -ENODEV;
 180	}
 181	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 182	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 183
 184	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
 185	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
 186	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
 187
 188	return 0;
 189}
 190EXPORT_SYMBOL(transport_lookup_tmr_lun);
 191
 192bool target_lun_is_rdonly(struct se_cmd *cmd)
 193{
 194	struct se_session *se_sess = cmd->se_sess;
 195	struct se_dev_entry *deve;
 196	bool ret;
 197
 198	rcu_read_lock();
 199	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
 200	ret = deve && deve->lun_access_ro;
 201	rcu_read_unlock();
 202
 203	return ret;
 204}
 205EXPORT_SYMBOL(target_lun_is_rdonly);
 206
 207/*
 208 * This function is called from core_scsi3_emulate_pro_register_and_move()
 209 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 210 * when a matching rtpi is found.
 211 */
 212struct se_dev_entry *core_get_se_deve_from_rtpi(
 213	struct se_node_acl *nacl,
 214	u16 rtpi)
 215{
 216	struct se_dev_entry *deve;
 217	struct se_lun *lun;
 218	struct se_portal_group *tpg = nacl->se_tpg;
 219
 220	rcu_read_lock();
 221	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 222		lun = rcu_dereference(deve->se_lun);
 223		if (!lun) {
 224			pr_err("%s device entries device pointer is"
 225				" NULL, but Initiator has access.\n",
 226				tpg->se_tpg_tfo->fabric_name);
 227			continue;
 228		}
 229		if (lun->lun_rtpi != rtpi)
 230			continue;
 231
 232		kref_get(&deve->pr_kref);
 233		rcu_read_unlock();
 234
 235		return deve;
 236	}
 237	rcu_read_unlock();
 238
 239	return NULL;
 240}
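/*
 * Note: on success this returns with deve->pr_kref elevated; the
 * caller is expected to drop it via
 * kref_put(&deve->pr_kref, target_pr_kref_release) when finished.
 */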
 241
 242void core_free_device_list_for_node(
 243	struct se_node_acl *nacl,
 244	struct se_portal_group *tpg)
 245{
 246	struct se_dev_entry *deve;
 247
 248	mutex_lock(&nacl->lun_entry_mutex);
 249	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 250		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
 251					lockdep_is_held(&nacl->lun_entry_mutex));
 252		core_disable_device_list_for_node(lun, deve, nacl, tpg);
 253	}
 254	mutex_unlock(&nacl->lun_entry_mutex);
 255}
 256
 257void core_update_device_list_access(
 258	u64 mapped_lun,
 259	bool lun_access_ro,
 260	struct se_node_acl *nacl)
 261{
 262	struct se_dev_entry *deve;
 263
 264	mutex_lock(&nacl->lun_entry_mutex);
 265	deve = target_nacl_find_deve(nacl, mapped_lun);
 266	if (deve)
 267		deve->lun_access_ro = lun_access_ro;
 268	mutex_unlock(&nacl->lun_entry_mutex);
 269}
 270
 271/*
 272 * Called with rcu_read_lock or nacl->device_list_lock held.
 273 */
 274struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
 275{
 276	struct se_dev_entry *deve;
 277
 278	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
 279		if (deve->mapped_lun == mapped_lun)
 280			return deve;
 281
 282	return NULL;
 283}
 284EXPORT_SYMBOL(target_nacl_find_deve);
 285
 286void target_pr_kref_release(struct kref *kref)
 287{
 288	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
 289						 pr_kref);
 290	complete(&deve->pr_comp);
 291}
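/*
 * Completing pr_comp here pairs with the kref_put() +
 * wait_for_completion() sequences in core_enable_device_list_for_node()
 * and core_disable_device_list_for_node(), which block until the final
 * pr_kref reference on the old entry has been dropped.
 */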
 292
 293static void
 294target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
 295			     bool skip_new)
 296{
 297	struct se_dev_entry *tmp;
 298
 299	rcu_read_lock();
 300	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
 301		if (skip_new && tmp == new)
 302			continue;
 303		core_scsi3_ua_allocate(tmp, 0x3F,
 304				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
 305	}
 306	rcu_read_unlock();
 307}
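/*
 * 3Fh/0Eh is the SPC "REPORTED LUNS DATA HAS CHANGED" additional sense
 * code; it is queued as a Unit Attention on every other mapped LUN of
 * this NodeACL so the initiator knows to re-issue REPORT LUNS.
 */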
 308
 309int core_enable_device_list_for_node(
 310	struct se_lun *lun,
 311	struct se_lun_acl *lun_acl,
 312	u64 mapped_lun,
 313	bool lun_access_ro,
 314	struct se_node_acl *nacl,
 315	struct se_portal_group *tpg)
 316{
 317	struct se_dev_entry *orig, *new;
 318
 319	new = kzalloc(sizeof(*new), GFP_KERNEL);
 320	if (!new) {
 321		pr_err("Unable to allocate se_dev_entry memory\n");
 322		return -ENOMEM;
 323	}
 324
 325	spin_lock_init(&new->ua_lock);
 326	INIT_LIST_HEAD(&new->ua_list);
 327	INIT_LIST_HEAD(&new->lun_link);
 328
 329	new->mapped_lun = mapped_lun;
 330	kref_init(&new->pr_kref);
 331	init_completion(&new->pr_comp);
 332
 333	new->lun_access_ro = lun_access_ro;
 334	new->creation_time = get_jiffies_64();
 335	new->attach_count++;
 336
 337	mutex_lock(&nacl->lun_entry_mutex);
 338	orig = target_nacl_find_deve(nacl, mapped_lun);
 339	if (orig && orig->se_lun) {
 340		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
 341					lockdep_is_held(&nacl->lun_entry_mutex));
 342
 343		if (orig_lun != lun) {
 344			pr_err("Existing orig->se_lun doesn't match new lun"
 345			       " for dynamic -> explicit NodeACL conversion:"
 346				" %s\n", nacl->initiatorname);
 347			mutex_unlock(&nacl->lun_entry_mutex);
 348			kfree(new);
 349			return -EINVAL;
 350		}
 351		if (orig->se_lun_acl != NULL) {
 352			pr_warn_ratelimited("Detected existing explicit"
 353				" se_lun_acl->se_lun_group reference for %s"
 354				" mapped_lun: %llu, failing\n",
 355				 nacl->initiatorname, mapped_lun);
 356			mutex_unlock(&nacl->lun_entry_mutex);
 357			kfree(new);
 358			return -EINVAL;
 359		}
 360
 361		rcu_assign_pointer(new->se_lun, lun);
 362		rcu_assign_pointer(new->se_lun_acl, lun_acl);
 363		hlist_del_rcu(&orig->link);
 364		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
 365		mutex_unlock(&nacl->lun_entry_mutex);
 366
 367		spin_lock(&lun->lun_deve_lock);
 368		list_del(&orig->lun_link);
 369		list_add_tail(&new->lun_link, &lun->lun_deve_list);
 370		spin_unlock(&lun->lun_deve_lock);
 371
 372		kref_put(&orig->pr_kref, target_pr_kref_release);
 373		wait_for_completion(&orig->pr_comp);
 374
 375		target_luns_data_has_changed(nacl, new, true);
 376		kfree_rcu(orig, rcu_head);
 377		return 0;
 378	}
 379
 380	rcu_assign_pointer(new->se_lun, lun);
 381	rcu_assign_pointer(new->se_lun_acl, lun_acl);
 382	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
 383	mutex_unlock(&nacl->lun_entry_mutex);
 384
 385	spin_lock(&lun->lun_deve_lock);
 386	list_add_tail(&new->lun_link, &lun->lun_deve_list);
 387	spin_unlock(&lun->lun_deve_lock);
 388
 389	target_luns_data_has_changed(nacl, new, true);
 390	return 0;
 391}
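/*
 * Summary of the two paths above: when a dynamic (demo-mode) entry
 * already exists for mapped_lun, it is atomically replaced via
 * hlist_del_rcu()/hlist_add_head_rcu() and freed with kfree_rcu() once
 * in-flight PR references drain; otherwise the new entry is simply
 * linked into nacl->lun_entry_hlist and lun->lun_deve_list.
 */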
 392
 393void core_disable_device_list_for_node(
 394	struct se_lun *lun,
 395	struct se_dev_entry *orig,
 396	struct se_node_acl *nacl,
 397	struct se_portal_group *tpg)
 398{
 399	/*
 400	 * rcu_dereference_raw protected by se_lun->lun_group symlink
 401	 * reference to se_device->dev_group.
 402	 */
 403	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 404
 405	lockdep_assert_held(&nacl->lun_entry_mutex);
 406
 407	/*
 408	 * If the MappedLUN entry is being disabled, the entry in
 409	 * lun->lun_deve_list must be removed now before clearing the
 410	 * struct se_dev_entry pointers below as logic in
 411	 * core_alua_do_transition_tg_pt() depends on these being present.
 412	 *
 413	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
 414	 * that have not been explicitly converted to MappedLUNs ->
 415	 * struct se_lun_acl, but we remove deve->lun_link from
 416	 * lun->lun_deve_list. This also means that active UAs and
 417	 * NodeACL context specific PR metadata for demo-mode
  418	 * MappedLUN *deve will be released below.
 419	 */
 420	spin_lock(&lun->lun_deve_lock);
 421	list_del(&orig->lun_link);
 422	spin_unlock(&lun->lun_deve_lock);
 423	/*
 424	 * Disable struct se_dev_entry LUN ACL mapping
 425	 */
 426	core_scsi3_ua_release_all(orig);
 427
 428	hlist_del_rcu(&orig->link);
 429	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
 430	orig->lun_access_ro = false;
 431	orig->creation_time = 0;
 432	orig->attach_count--;
 433	/*
 434	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
 435	 * or REGISTER_AND_MOVE PR operation to complete.
 436	 */
 437	kref_put(&orig->pr_kref, target_pr_kref_release);
 438	wait_for_completion(&orig->pr_comp);
 439
 440	rcu_assign_pointer(orig->se_lun, NULL);
 441	rcu_assign_pointer(orig->se_lun_acl, NULL);
 442
 443	kfree_rcu(orig, rcu_head);
 444
 445	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
 446	target_luns_data_has_changed(nacl, NULL, false);
 447}
 448
 449/*      core_clear_lun_from_tpg():
 450 *
 451 *
 452 */
 453void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 454{
 455	struct se_node_acl *nacl;
 456	struct se_dev_entry *deve;
 457
 458	mutex_lock(&tpg->acl_node_mutex);
 459	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
 460
 461		mutex_lock(&nacl->lun_entry_mutex);
 462		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 463			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
 464					lockdep_is_held(&nacl->lun_entry_mutex));
 465
 466			if (lun != tmp_lun)
 467				continue;
 468
 469			core_disable_device_list_for_node(lun, deve, nacl, tpg);
 470		}
 471		mutex_unlock(&nacl->lun_entry_mutex);
 472	}
 473	mutex_unlock(&tpg->acl_node_mutex);
 474}
 475
 476int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
 477{
 478	struct se_lun *tmp;
 479
 480	spin_lock(&dev->se_port_lock);
 481	if (dev->export_count == 0x0000ffff) {
 482		pr_warn("Reached dev->dev_port_count =="
 483				" 0x0000ffff\n");
 484		spin_unlock(&dev->se_port_lock);
 485		return -ENOSPC;
 486	}
 487again:
 488	/*
 489	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
 490	 * Here is the table from spc4r17 section 7.7.3.8.
 491	 *
 492	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
 493	 *
 494	 * Code      Description
 495	 * 0h        Reserved
 496	 * 1h        Relative port 1, historically known as port A
 497	 * 2h        Relative port 2, historically known as port B
 498	 * 3h to FFFFh    Relative port 3 through 65 535
 499	 */
 500	lun->lun_rtpi = dev->dev_rpti_counter++;
 501	if (!lun->lun_rtpi)
 502		goto again;
 503
 504	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
 505		/*
 506		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
 507		 * for 16-bit wrap..
  508		 * for 16-bit wrap.
 509		if (lun->lun_rtpi == tmp->lun_rtpi)
 510			goto again;
 511	}
 512	spin_unlock(&dev->se_port_lock);
 513
 514	return 0;
 515}
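/*
 * The allocation above is cyclic: value 0h is reserved by SPC-4 and
 * skipped on 16-bit counter wrap, and the dev_sep_list walk restarts
 * the search whenever a wrapped value collides with an RTPI still in
 * use by another exported LUN.
 */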
 516
 517static void se_release_vpd_for_dev(struct se_device *dev)
 518{
 519	struct t10_vpd *vpd, *vpd_tmp;
 520
 521	spin_lock(&dev->t10_wwn.t10_vpd_lock);
 522	list_for_each_entry_safe(vpd, vpd_tmp,
 523			&dev->t10_wwn.t10_vpd_list, vpd_list) {
 524		list_del(&vpd->vpd_list);
 525		kfree(vpd);
 526	}
 527	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
 528}
 529
 530static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
 531{
 532	u32 aligned_max_sectors;
 533	u32 alignment;
 534	/*
 535	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
 536	 * transport_allocate_data_tasks() operation.
 537	 */
 538	alignment = max(1ul, PAGE_SIZE / block_size);
 539	aligned_max_sectors = rounddown(max_sectors, alignment);
 540
 541	if (max_sectors != aligned_max_sectors)
 542		pr_info("Rounding down aligned max_sectors from %u to %u\n",
 543			max_sectors, aligned_max_sectors);
 544
 545	return aligned_max_sectors;
 546}
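/*
 * Worked example: with block_size = 512 and PAGE_SIZE = 4096,
 * alignment = max(1, 4096 / 512) = 8, so max_sectors = 1023 is rounded
 * down to 1016 (127 * 8), keeping each transfer PAGE_SIZE aligned.
 */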
 547
 548int core_dev_add_lun(
 549	struct se_portal_group *tpg,
 550	struct se_device *dev,
 551	struct se_lun *lun)
 552{
 553	int rc;
 554
 555	rc = core_tpg_add_lun(tpg, lun, false, dev);
 556	if (rc < 0)
 557		return rc;
 558
 559	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
 560		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
 561		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 562		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
 563	/*
 564	 * Update LUN maps for dynamically added initiators when
 565	 * generate_node_acl is enabled.
 566	 */
 567	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
 568		struct se_node_acl *acl;
 569
 570		mutex_lock(&tpg->acl_node_mutex);
 571		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
 572			if (acl->dynamic_node_acl &&
 573			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
 574			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
 575				core_tpg_add_node_to_devs(acl, tpg, lun);
 576			}
 577		}
 578		mutex_unlock(&tpg->acl_node_mutex);
 579	}
 580
 581	return 0;
 582}
 583
 584/*      core_dev_del_lun():
 585 *
 586 *
 587 */
 588void core_dev_del_lun(
 589	struct se_portal_group *tpg,
 590	struct se_lun *lun)
 591{
 592	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
 593		" device object\n", tpg->se_tpg_tfo->fabric_name,
 594		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 595		tpg->se_tpg_tfo->fabric_name);
 596
 597	core_tpg_remove_lun(tpg, lun);
 598}
 599
 600struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 601	struct se_portal_group *tpg,
 602	struct se_node_acl *nacl,
 603	u64 mapped_lun,
 604	int *ret)
 605{
 606	struct se_lun_acl *lacl;
 607
 608	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
 609		pr_err("%s InitiatorName exceeds maximum size.\n",
 610			tpg->se_tpg_tfo->fabric_name);
 611		*ret = -EOVERFLOW;
 612		return NULL;
 613	}
 614	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
 615	if (!lacl) {
 616		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
 617		*ret = -ENOMEM;
 618		return NULL;
 619	}
 620
 621	lacl->mapped_lun = mapped_lun;
 622	lacl->se_lun_nacl = nacl;
 623
 624	return lacl;
 625}
 626
 627int core_dev_add_initiator_node_lun_acl(
 628	struct se_portal_group *tpg,
 629	struct se_lun_acl *lacl,
 630	struct se_lun *lun,
 631	bool lun_access_ro)
 632{
 633	struct se_node_acl *nacl = lacl->se_lun_nacl;
 634	/*
 635	 * rcu_dereference_raw protected by se_lun->lun_group symlink
 636	 * reference to se_device->dev_group.
 637	 */
 638	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 639
 640	if (!nacl)
 641		return -EINVAL;
 642
 643	if (lun->lun_access_ro)
 644		lun_access_ro = true;
 645
 646	lacl->se_lun = lun;
 647
 648	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
 649			lun_access_ro, nacl, tpg) < 0)
 650		return -EINVAL;
 651
 652	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
 653		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
 654		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
 655		lun_access_ro ? "RO" : "RW",
 656		nacl->initiatorname);
 657	/*
 658	 * Check to see if there are any existing persistent reservation APTPL
  659	 * pre-registrations that need to be enabled for this LUN ACL.
 660	 */
 661	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
 662					    lacl->mapped_lun);
 663	return 0;
 664}
 665
 666int core_dev_del_initiator_node_lun_acl(
 667	struct se_lun *lun,
 668	struct se_lun_acl *lacl)
 669{
 670	struct se_portal_group *tpg = lun->lun_tpg;
 671	struct se_node_acl *nacl;
 672	struct se_dev_entry *deve;
 673
 674	nacl = lacl->se_lun_nacl;
 675	if (!nacl)
 676		return -EINVAL;
 677
 678	mutex_lock(&nacl->lun_entry_mutex);
 679	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
 680	if (deve)
 681		core_disable_device_list_for_node(lun, deve, nacl, tpg);
 682	mutex_unlock(&nacl->lun_entry_mutex);
 683
 684	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
 685		" InitiatorNode: %s Mapped LUN: %llu\n",
 686		tpg->se_tpg_tfo->fabric_name,
 687		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 688		nacl->initiatorname, lacl->mapped_lun);
 689
 690	return 0;
 691}
 692
 693void core_dev_free_initiator_node_lun_acl(
 694	struct se_portal_group *tpg,
 695	struct se_lun_acl *lacl)
 696{
 697	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
 698		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
 699		tpg->se_tpg_tfo->tpg_get_tag(tpg),
 700		tpg->se_tpg_tfo->fabric_name,
 701		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
 702
 703	kfree(lacl);
 704}
 705
 706static void scsi_dump_inquiry(struct se_device *dev)
 707{
 708	struct t10_wwn *wwn = &dev->t10_wwn;
 709	int device_type = dev->transport->get_device_type(dev);
 710
 711	/*
 712	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
 713	 */
 714	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
 715		wwn->vendor);
 716	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
 717		wwn->model);
 718	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
 719		wwn->revision);
 720	pr_debug("  Type:   %s ", scsi_device_type(device_type));
 721}
 722
 723struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 724{
 725	struct se_device *dev;
 726	struct se_lun *xcopy_lun;
 727
 728	dev = hba->backend->ops->alloc_device(hba, name);
 729	if (!dev)
 730		return NULL;
 731
 732	dev->se_hba = hba;
 733	dev->transport = hba->backend->ops;
 734	dev->transport_flags = dev->transport->transport_flags_default;
 735	dev->prot_length = sizeof(struct t10_pi_tuple);
 736	dev->hba_index = hba->hba_index;
 737
 738	INIT_LIST_HEAD(&dev->dev_sep_list);
 739	INIT_LIST_HEAD(&dev->dev_tmr_list);
 740	INIT_LIST_HEAD(&dev->delayed_cmd_list);
 741	INIT_LIST_HEAD(&dev->state_list);
 742	INIT_LIST_HEAD(&dev->qf_cmd_list);
 743	spin_lock_init(&dev->execute_task_lock);
 744	spin_lock_init(&dev->delayed_cmd_lock);
 745	spin_lock_init(&dev->dev_reservation_lock);
 746	spin_lock_init(&dev->se_port_lock);
 747	spin_lock_init(&dev->se_tmr_lock);
 748	spin_lock_init(&dev->qf_cmd_lock);
 749	sema_init(&dev->caw_sem, 1);
 750	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
 751	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
 752	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
 753	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
 754	spin_lock_init(&dev->t10_pr.registration_lock);
 755	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
 756	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
 757	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
 758	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
 759	spin_lock_init(&dev->t10_alua.lba_map_lock);
 760
 761	dev->t10_wwn.t10_dev = dev;
 762	dev->t10_alua.t10_dev = dev;
 763
 764	dev->dev_attrib.da_dev = dev;
 765	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
 766	dev->dev_attrib.emulate_dpo = 1;
 767	dev->dev_attrib.emulate_fua_write = 1;
 768	dev->dev_attrib.emulate_fua_read = 1;
 769	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
 770	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
 771	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
 772	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
 773	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
 774	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
 775	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
 776	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
 777	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
 778	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
 779	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
 780	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
 781	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
 782	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
 783	dev->dev_attrib.max_unmap_block_desc_count =
 784		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
 785	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
 786	dev->dev_attrib.unmap_granularity_alignment =
 787				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
 788	dev->dev_attrib.unmap_zeroes_data =
 789				DA_UNMAP_ZEROES_DATA_DEFAULT;
 790	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
 791
 792	xcopy_lun = &dev->xcopy_lun;
 793	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
 794	init_completion(&xcopy_lun->lun_shutdown_comp);
 795	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
 796	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
 797	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
 798	xcopy_lun->lun_tpg = &xcopy_pt_tpg;
 799
 800	/* Preload the default INQUIRY const values */
 801	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
 802	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
 803		sizeof(dev->t10_wwn.model));
 804	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
 805		sizeof(dev->t10_wwn.revision));
 806
 807	return dev;
 808}
 809
 810/*
 811 * Check if the underlying struct block_device request_queue supports
 812 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
  813 * in ATA, in which case we need to set TPE=1.
 814 */
 815bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
 816				       struct request_queue *q)
 817{
 818	int block_size = queue_logical_block_size(q);
 819
 820	if (!blk_queue_discard(q))
 821		return false;
 822
 823	attrib->max_unmap_lba_count =
 824		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
 825	/*
  826	 * Currently hardcoded to 1 in Linux/SCSI code.
 827	 */
 828	attrib->max_unmap_block_desc_count = 1;
 829	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
 830	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
 831								block_size;
 832	attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
 833	return true;
 834}
 835EXPORT_SYMBOL(target_configure_unmap_from_queue);
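/*
 * The shift above converts the block layer's 512-byte discard units
 * into logical blocks: for block_size = 4096, ilog2(4096) - 9 = 3, so
 * e.g. max_discard_sectors = 8192 (512-byte units) yields
 * max_unmap_lba_count = 1024 four-kilobyte blocks.
 */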
 836
 837/*
 838 * Convert from blocksize advertised to the initiator to the 512 byte
 839 * units unconditionally used by the Linux block layer.
 840 */
 841sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
 842{
 843	switch (dev->dev_attrib.block_size) {
 844	case 4096:
 845		return lb << 3;
 846	case 2048:
 847		return lb << 2;
 848	case 1024:
 849		return lb << 1;
 850	default:
 851		return lb;
 852	}
 853}
 854EXPORT_SYMBOL(target_to_linux_sector);
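/*
 * The shifts implement lb * (block_size / 512): a 4096-byte block
 * covers eight 512-byte sectors (lb << 3), a 2048-byte block four
 * (lb << 2), and 512-byte blocks pass through unchanged.
 */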
 855
 856struct devices_idr_iter {
 857	struct config_item *prev_item;
 858	int (*fn)(struct se_device *dev, void *data);
 859	void *data;
 860};
 861
 862static int target_devices_idr_iter(int id, void *p, void *data)
 863	 __must_hold(&device_mutex)
 864{
 865	struct devices_idr_iter *iter = data;
 866	struct se_device *dev = p;
 867	int ret;
 868
 869	config_item_put(iter->prev_item);
 870	iter->prev_item = NULL;
 871
 872	/*
 873	 * We add the device early to the idr, so it can be used
 874	 * by backend modules during configuration. We do not want
 875	 * to allow other callers to access partially setup devices,
 876	 * so we skip them here.
 877	 */
 878	if (!target_dev_configured(dev))
 879		return 0;
 880
 881	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
 882	if (!iter->prev_item)
 883		return 0;
 884	mutex_unlock(&device_mutex);
 885
 886	ret = iter->fn(dev, iter->data);
 887
 888	mutex_lock(&device_mutex);
 889	return ret;
 890}
 891
 892/**
 893 * target_for_each_device - iterate over configured devices
 894 * @fn: iterator function
 895 * @data: pointer to data that will be passed to fn
 896 *
 897 * fn must return 0 to continue looping over devices. non-zero will break
 898 * from the loop and return that value to the caller.
 899 */
 900int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
 901			   void *data)
 902{
 903	struct devices_idr_iter iter = { .fn = fn, .data = data };
 904	int ret;
 905
 906	mutex_lock(&device_mutex);
 907	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
 908	mutex_unlock(&device_mutex);
 909	config_item_put(iter.prev_item);
 910	return ret;
 911}
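/*
 * iter.prev_item pins the most recently visited device with a configfs
 * reference so it cannot be freed while device_mutex is dropped around
 * the fn() callback; the config_item_put() after the idr walk releases
 * whatever reference is still held when iteration stops.
 */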
 912
 913int target_configure_device(struct se_device *dev)
 914{
 915	struct se_hba *hba = dev->se_hba;
 916	int ret, id;
 917
 918	if (target_dev_configured(dev)) {
 919		pr_err("se_dev->se_dev_ptr already set for storage"
 920				" object\n");
 921		return -EEXIST;
 922	}
 923
 924	/*
  925	 * Add early so modules like tcmu can use it during
  926	 * configuration.
 927	 */
 928	mutex_lock(&device_mutex);
 929	/*
 930	 * Use cyclic to try and avoid collisions with devices
 931	 * that were recently removed.
 932	 */
 933	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
 934	mutex_unlock(&device_mutex);
 935	if (id < 0) {
 936		ret = -ENOMEM;
 937		goto out;
 938	}
 939	dev->dev_index = id;
 940
 941	ret = dev->transport->configure_device(dev);
 942	if (ret)
 943		goto out_free_index;
 944	/*
  945	 * XXX: there is not much point in having two different values here.
 946	 */
 947	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
 948	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
 949
 950	/*
 951	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
 952	 */
 953	dev->dev_attrib.hw_max_sectors =
 954		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
 955					 dev->dev_attrib.hw_block_size);
 956	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 957
 958	dev->creation_time = get_jiffies_64();
 959
 960	ret = core_setup_alua(dev);
 961	if (ret)
 962		goto out_destroy_device;
 963
 964	/*
 965	 * Setup work_queue for QUEUE_FULL
 966	 */
 967	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
 968
 969	scsi_dump_inquiry(dev);
 970
 971	spin_lock(&hba->device_lock);
 972	hba->dev_count++;
 973	spin_unlock(&hba->device_lock);
 974
 975	dev->dev_flags |= DF_CONFIGURED;
 976
 977	return 0;
 978
 979out_destroy_device:
 980	dev->transport->destroy_device(dev);
 981out_free_index:
 982	mutex_lock(&device_mutex);
 983	idr_remove(&devices_idr, dev->dev_index);
 984	mutex_unlock(&device_mutex);
 985out:
 986	se_release_vpd_for_dev(dev);
 987	return ret;
 988}
 989
 990void target_free_device(struct se_device *dev)
 991{
 992	struct se_hba *hba = dev->se_hba;
 993
 994	WARN_ON(!list_empty(&dev->dev_sep_list));
 995
 996	if (target_dev_configured(dev)) {
 997		dev->transport->destroy_device(dev);
 998
 999		mutex_lock(&device_mutex);
1000		idr_remove(&devices_idr, dev->dev_index);
1001		mutex_unlock(&device_mutex);
1002
1003		spin_lock(&hba->device_lock);
1004		hba->dev_count--;
1005		spin_unlock(&hba->device_lock);
1006	}
1007
1008	core_alua_free_lu_gp_mem(dev);
1009	core_alua_set_lba_map(dev, NULL, 0, 0);
1010	core_scsi3_free_all_registrations(dev);
1011	se_release_vpd_for_dev(dev);
1012
1013	if (dev->transport->free_prot)
1014		dev->transport->free_prot(dev);
1015
1016	dev->transport->free_device(dev);
1017}
1018
1019int core_dev_setup_virtual_lun0(void)
1020{
1021	struct se_hba *hba;
1022	struct se_device *dev;
1023	char buf[] = "rd_pages=8,rd_nullio=1";
1024	int ret;
1025
1026	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1027	if (IS_ERR(hba))
1028		return PTR_ERR(hba);
1029
1030	dev = target_alloc_device(hba, "virt_lun0");
1031	if (!dev) {
1032		ret = -ENOMEM;
1033		goto out_free_hba;
1034	}
1035
1036	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
1037
1038	ret = target_configure_device(dev);
1039	if (ret)
1040		goto out_free_se_dev;
1041
1042	lun0_hba = hba;
1043	g_lun0_dev = dev;
1044	return 0;
1045
1046out_free_se_dev:
1047	target_free_device(dev);
1048out_free_hba:
1049	core_delete_hba(hba);
1050	return ret;
1051}
1052
1053
1054void core_dev_release_virtual_lun0(void)
1055{
1056	struct se_hba *hba = lun0_hba;
1057
1058	if (!hba)
1059		return;
1060
1061	if (g_lun0_dev)
1062		target_free_device(g_lun0_dev);
1063	core_delete_hba(hba);
1064}
1065
1066/*
1067 * Common CDB parsing for kernel and user passthrough.
1068 */
1069sense_reason_t
1070passthrough_parse_cdb(struct se_cmd *cmd,
1071	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
1072{
1073	unsigned char *cdb = cmd->t_task_cdb;
1074	struct se_device *dev = cmd->se_dev;
1075	unsigned int size;
1076
1077	/*
1078	 * For REPORT LUNS we always need to emulate the response, for everything
1079	 * else, pass it up.
1080	 */
1081	if (cdb[0] == REPORT_LUNS) {
1082		cmd->execute_cmd = spc_emulate_report_luns;
1083		return TCM_NO_SENSE;
1084	}
1085
1086	/*
1087	 * With emulate_pr disabled, all reservation requests should fail,
1088	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
1089	 */
1090	if (!dev->dev_attrib.emulate_pr &&
1091	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
1092	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
1093	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
1094	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
1095		return TCM_UNSUPPORTED_SCSI_OPCODE;
1096	}
1097
1098	/*
1099	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
1100	 * emulate the response, since tcmu does not have the information
1101	 * required to process these commands.
1102	 */
1103	if (!(dev->transport_flags &
1104	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
1105		if (cdb[0] == PERSISTENT_RESERVE_IN) {
1106			cmd->execute_cmd = target_scsi3_emulate_pr_in;
1107			size = get_unaligned_be16(&cdb[7]);
1108			return target_cmd_size_check(cmd, size);
1109		}
1110		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
1111			cmd->execute_cmd = target_scsi3_emulate_pr_out;
1112			size = get_unaligned_be32(&cdb[5]);
1113			return target_cmd_size_check(cmd, size);
1114		}
1115
1116		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
1117			cmd->execute_cmd = target_scsi2_reservation_release;
1118			if (cdb[0] == RELEASE_10)
1119				size = get_unaligned_be16(&cdb[7]);
1120			else
1121				size = cmd->data_length;
1122			return target_cmd_size_check(cmd, size);
1123		}
1124		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
1125			cmd->execute_cmd = target_scsi2_reservation_reserve;
1126			if (cdb[0] == RESERVE_10)
1127				size = get_unaligned_be16(&cdb[7]);
1128			else
1129				size = cmd->data_length;
1130			return target_cmd_size_check(cmd, size);
1131		}
1132	}
1133
1134	/* Set DATA_CDB flag for ops that should have it */
1135	switch (cdb[0]) {
1136	case READ_6:
1137	case READ_10:
1138	case READ_12:
1139	case READ_16:
1140	case WRITE_6:
1141	case WRITE_10:
1142	case WRITE_12:
1143	case WRITE_16:
1144	case WRITE_VERIFY:
1145	case WRITE_VERIFY_12:
1146	case WRITE_VERIFY_16:
1147	case COMPARE_AND_WRITE:
1148	case XDWRITEREAD_10:
1149		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1150		break;
1151	case VARIABLE_LENGTH_CMD:
1152		switch (get_unaligned_be16(&cdb[8])) {
1153		case READ_32:
1154		case WRITE_32:
1155		case WRITE_VERIFY_32:
1156		case XDWRITEREAD_32:
1157			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1158			break;
1159		}
1160	}
1161
1162	cmd->execute_cmd = exec_cmd;
1163
1164	return TCM_NO_SENSE;
1165}
1166EXPORT_SYMBOL(passthrough_parse_cdb);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * Filename:  target_core_device.c (based on iscsi_target_device.c)
   4 *
   5 * This file contains the TCM Virtual Device and Disk Transport
   6 * agnostic related functions.
   7 *
   8 * (c) Copyright 2003-2013 Datera, Inc.
   9 *
  10 * Nicholas A. Bellinger <nab@kernel.org>
  11 *
  12 ******************************************************************************/
  13
  14#include <linux/net.h>
  15#include <linux/string.h>
  16#include <linux/delay.h>
  17#include <linux/timer.h>
  18#include <linux/slab.h>
  19#include <linux/spinlock.h>
  20#include <linux/kthread.h>
  21#include <linux/in.h>
  22#include <linux/export.h>
  23#include <linux/t10-pi.h>
  24#include <asm/unaligned.h>
  25#include <net/sock.h>
  26#include <net/tcp.h>
  27#include <scsi/scsi_common.h>
  28#include <scsi/scsi_proto.h>
  29
  30#include <target/target_core_base.h>
  31#include <target/target_core_backend.h>
  32#include <target/target_core_fabric.h>
  33
  34#include "target_core_internal.h"
  35#include "target_core_alua.h"
  36#include "target_core_pr.h"
  37#include "target_core_ua.h"
  38
  39static DEFINE_MUTEX(device_mutex);
  40static LIST_HEAD(device_list);
  41static DEFINE_IDR(devices_idr);
  42
  43static struct se_hba *lun0_hba;
  44/* not static, needed by tpg.c */
  45struct se_device *g_lun0_dev;
  46
  47sense_reason_t
  48transport_lookup_cmd_lun(struct se_cmd *se_cmd)
  49{
  50	struct se_lun *se_lun = NULL;
  51	struct se_session *se_sess = se_cmd->se_sess;
  52	struct se_node_acl *nacl = se_sess->se_node_acl;
  53	struct se_dev_entry *deve;
  54	sense_reason_t ret = TCM_NO_SENSE;
  55
  56	rcu_read_lock();
  57	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
  58	if (deve) {
  59		atomic_long_inc(&deve->total_cmds);
  60
  61		if (se_cmd->data_direction == DMA_TO_DEVICE)
  62			atomic_long_add(se_cmd->data_length,
  63					&deve->write_bytes);
  64		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
  65			atomic_long_add(se_cmd->data_length,
  66					&deve->read_bytes);
  67
  68		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
  69		    deve->lun_access_ro) {
  70			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
  71				" Access for 0x%08llx\n",
  72				se_cmd->se_tfo->fabric_name,
  73				se_cmd->orig_fe_lun);
  74			rcu_read_unlock();
  75			return TCM_WRITE_PROTECTED;
  76		}
  77
  78		se_lun = deve->se_lun;
  79
  80		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
  81			se_lun = NULL;
  82			goto out_unlock;
  83		}
  84
  85		se_cmd->se_lun = se_lun;
  86		se_cmd->pr_res_key = deve->pr_res_key;
  87		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
  88		se_cmd->lun_ref_active = true;
  89	}
  90out_unlock:
  91	rcu_read_unlock();
  92
  93	if (!se_lun) {
  94		/*
  95		 * Use the se_portal_group->tpg_virt_lun0 to allow for
  96		 * REPORT_LUNS, et al to be returned when no active
  97		 * MappedLUN=0 exists for this Initiator Port.
  98		 */
  99		if (se_cmd->orig_fe_lun != 0) {
 100			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 101				" Access for 0x%08llx from %s\n",
 102				se_cmd->se_tfo->fabric_name,
 103				se_cmd->orig_fe_lun,
 104				nacl->initiatorname);
 105			return TCM_NON_EXISTENT_LUN;
 106		}
 107
 108		/*
 109		 * Force WRITE PROTECT for virtual LUN 0
 110		 */
 111		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
 112		    (se_cmd->data_direction != DMA_NONE))
 113			return TCM_WRITE_PROTECTED;
 114
 115		se_lun = se_sess->se_tpg->tpg_virt_lun0;
 116		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
 117			return TCM_NON_EXISTENT_LUN;
 118
 119		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
 120		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 121		se_cmd->lun_ref_active = true;
 122	}
 123	/*
 124	 * RCU reference protected by percpu se_lun->lun_ref taken above that
 125	 * must drop to zero (including initial reference) before this se_lun
 126	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
 127	 * target_core_fabric_configfs.c:target_fabric_port_release
 128	 */
 129	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 130	atomic_long_inc(&se_cmd->se_dev->num_cmds);
 131
 132	if (se_cmd->data_direction == DMA_TO_DEVICE)
 133		atomic_long_add(se_cmd->data_length,
 134				&se_cmd->se_dev->write_bytes);
 135	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
 136		atomic_long_add(se_cmd->data_length,
 137				&se_cmd->se_dev->read_bytes);
 138
 139	return ret;
 140}
 141EXPORT_SYMBOL(transport_lookup_cmd_lun);
 142
 143int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
 144{
 145	struct se_dev_entry *deve;
 146	struct se_lun *se_lun = NULL;
 147	struct se_session *se_sess = se_cmd->se_sess;
 148	struct se_node_acl *nacl = se_sess->se_node_acl;
 149	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 150	unsigned long flags;
 151
 152	rcu_read_lock();
 153	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
 154	if (deve) {
 155		se_lun = deve->se_lun;
 156
 157		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
 158			se_lun = NULL;
 159			goto out_unlock;
 160		}
 161
 162		se_cmd->se_lun = se_lun;
 163		se_cmd->pr_res_key = deve->pr_res_key;
 164		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 165		se_cmd->lun_ref_active = true;
 166	}
 167out_unlock:
 168	rcu_read_unlock();
 169
 170	if (!se_lun) {
 171		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 172			" Access for 0x%08llx for %s\n",
 173			se_cmd->se_tfo->fabric_name,
 174			se_cmd->orig_fe_lun,
 175			nacl->initiatorname);
 176		return -ENODEV;
 177	}
 178	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 179	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 180
 181	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
 182	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
 183	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
 184
 185	return 0;
 186}
 187EXPORT_SYMBOL(transport_lookup_tmr_lun);
 188
 189bool target_lun_is_rdonly(struct se_cmd *cmd)
 190{
 191	struct se_session *se_sess = cmd->se_sess;
 192	struct se_dev_entry *deve;
 193	bool ret;
 194
 195	rcu_read_lock();
 196	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
 197	ret = deve && deve->lun_access_ro;
 198	rcu_read_unlock();
 199
 200	return ret;
 201}
 202EXPORT_SYMBOL(target_lun_is_rdonly);
 203
 204/*
 205 * This function is called from core_scsi3_emulate_pro_register_and_move()
 206 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 207 * when a matching rtpi is found.
 208 */
 209struct se_dev_entry *core_get_se_deve_from_rtpi(
 210	struct se_node_acl *nacl,
 211	u16 rtpi)
 212{
 213	struct se_dev_entry *deve;
 214	struct se_lun *lun;
 215	struct se_portal_group *tpg = nacl->se_tpg;
 216
 217	rcu_read_lock();
 218	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 219		lun = deve->se_lun;
 220		if (!lun) {
 221			pr_err("%s device entries device pointer is"
 222				" NULL, but Initiator has access.\n",
 223				tpg->se_tpg_tfo->fabric_name);
 224			continue;
 225		}
 226		if (lun->lun_rtpi != rtpi)
 227			continue;
 228
 229		kref_get(&deve->pr_kref);
 230		rcu_read_unlock();
 231
 232		return deve;
 233	}
 234	rcu_read_unlock();
 235
 236	return NULL;
 237}
 238
 239void core_free_device_list_for_node(
 240	struct se_node_acl *nacl,
 241	struct se_portal_group *tpg)
 242{
 243	struct se_dev_entry *deve;
 244
 245	mutex_lock(&nacl->lun_entry_mutex);
 246	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
 247		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
 248	mutex_unlock(&nacl->lun_entry_mutex);
 249}
 250
 251void core_update_device_list_access(
 252	u64 mapped_lun,
 253	bool lun_access_ro,
 254	struct se_node_acl *nacl)
 255{
 256	struct se_dev_entry *deve;
 257
 258	mutex_lock(&nacl->lun_entry_mutex);
 259	deve = target_nacl_find_deve(nacl, mapped_lun);
 260	if (deve)
 261		deve->lun_access_ro = lun_access_ro;
 262	mutex_unlock(&nacl->lun_entry_mutex);
 263}
 264
 265/*
 266 * Called with rcu_read_lock or nacl->device_list_lock held.
 267 */
 268struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
 269{
 270	struct se_dev_entry *deve;
 271
 272	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
 273		if (deve->mapped_lun == mapped_lun)
 274			return deve;
 275
 276	return NULL;
 277}
 278EXPORT_SYMBOL(target_nacl_find_deve);
 279
 280void target_pr_kref_release(struct kref *kref)
 281{
 282	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
 283						 pr_kref);
 284	complete(&deve->pr_comp);
 285}
 286
 287/*
 288 * Establish UA condition on SCSI device - all LUNs
 289 */
 290void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
 291{
 292	struct se_dev_entry *se_deve;
 293	struct se_lun *lun;
 294
 295	spin_lock(&dev->se_port_lock);
 296	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {
 297
 298		spin_lock(&lun->lun_deve_lock);
 299		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
 300			core_scsi3_ua_allocate(se_deve, asc, ascq);
 301		spin_unlock(&lun->lun_deve_lock);
 302	}
 303	spin_unlock(&dev->se_port_lock);
 304}
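/*
 * Lock nesting above is dev->se_port_lock -> lun->lun_deve_lock,
 * walking every exported LUN on the device and queueing the Unit
 * Attention for each registered initiator mapping.
 */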
 305
 306static void
 307target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
 308			     bool skip_new)
 309{
 310	struct se_dev_entry *tmp;
 311
 312	rcu_read_lock();
 313	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
 314		if (skip_new && tmp == new)
 315			continue;
 316		core_scsi3_ua_allocate(tmp, 0x3F,
 317				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
 318	}
 319	rcu_read_unlock();
 320}
 321
 322int core_enable_device_list_for_node(
 323	struct se_lun *lun,
 324	struct se_lun_acl *lun_acl,
 325	u64 mapped_lun,
 326	bool lun_access_ro,
 327	struct se_node_acl *nacl,
 328	struct se_portal_group *tpg)
 329{
 330	struct se_dev_entry *orig, *new;
 331
 332	new = kzalloc(sizeof(*new), GFP_KERNEL);
 333	if (!new) {
 334		pr_err("Unable to allocate se_dev_entry memory\n");
 335		return -ENOMEM;
 336	}
 337
 338	spin_lock_init(&new->ua_lock);
 339	INIT_LIST_HEAD(&new->ua_list);
 340	INIT_LIST_HEAD(&new->lun_link);
 341
 342	new->mapped_lun = mapped_lun;
 343	kref_init(&new->pr_kref);
 344	init_completion(&new->pr_comp);
 345
 346	new->lun_access_ro = lun_access_ro;
 347	new->creation_time = get_jiffies_64();
 348	new->attach_count++;
 349
 350	mutex_lock(&nacl->lun_entry_mutex);
 351	orig = target_nacl_find_deve(nacl, mapped_lun);
 352	if (orig && orig->se_lun) {
 353		struct se_lun *orig_lun = orig->se_lun;
 354
 355		if (orig_lun != lun) {
 356			pr_err("Existing orig->se_lun doesn't match new lun"
 357			       " for dynamic -> explicit NodeACL conversion:"
 358				" %s\n", nacl->initiatorname);
 359			mutex_unlock(&nacl->lun_entry_mutex);
 360			kfree(new);
 361			return -EINVAL;
 362		}
 363		if (orig->se_lun_acl != NULL) {
 364			pr_warn_ratelimited("Detected existing explicit"
 365				" se_lun_acl->se_lun_group reference for %s"
 366				" mapped_lun: %llu, failing\n",
 367				 nacl->initiatorname, mapped_lun);
 368			mutex_unlock(&nacl->lun_entry_mutex);
 369			kfree(new);
 370			return -EINVAL;
 371		}
 372
 373		new->se_lun = lun;
 374		new->se_lun_acl = lun_acl;
 375		hlist_del_rcu(&orig->link);
 376		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
 377		mutex_unlock(&nacl->lun_entry_mutex);
 378
 379		spin_lock(&lun->lun_deve_lock);
 380		list_del(&orig->lun_link);
 381		list_add_tail(&new->lun_link, &lun->lun_deve_list);
 382		spin_unlock(&lun->lun_deve_lock);
 383
 384		kref_put(&orig->pr_kref, target_pr_kref_release);
 385		wait_for_completion(&orig->pr_comp);
 386
 387		target_luns_data_has_changed(nacl, new, true);
 388		kfree_rcu(orig, rcu_head);
 389		return 0;
 390	}
 391
 392	new->se_lun = lun;
 393	new->se_lun_acl = lun_acl;
 394	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
 395	mutex_unlock(&nacl->lun_entry_mutex);
 396
 397	spin_lock(&lun->lun_deve_lock);
 398	list_add_tail(&new->lun_link, &lun->lun_deve_list);
 399	spin_unlock(&lun->lun_deve_lock);
 400
 401	target_luns_data_has_changed(nacl, new, true);
 402	return 0;
 403}
 404
 405void core_disable_device_list_for_node(
 406	struct se_lun *lun,
 407	struct se_dev_entry *orig,
 408	struct se_node_acl *nacl,
 409	struct se_portal_group *tpg)
 410{
 411	/*
 412	 * rcu_dereference_raw protected by se_lun->lun_group symlink
 413	 * reference to se_device->dev_group.
 414	 */
 415	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 416
 417	lockdep_assert_held(&nacl->lun_entry_mutex);
 418
 419	/*
 420	 * If the MappedLUN entry is being disabled, the entry in
 421	 * lun->lun_deve_list must be removed now before clearing the
 422	 * struct se_dev_entry pointers below as logic in
 423	 * core_alua_do_transition_tg_pt() depends on these being present.
 424	 *
 425	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
 426	 * that have not been explicitly converted to MappedLUNs ->
 427	 * struct se_lun_acl, but we remove deve->lun_link from
 428	 * lun->lun_deve_list. This also means that active UAs and
 429	 * NodeACL context specific PR metadata for demo-mode
  430	 * MappedLUN *deve will be released below.
 431	 */
 432	spin_lock(&lun->lun_deve_lock);
 433	list_del(&orig->lun_link);
 434	spin_unlock(&lun->lun_deve_lock);
 435	/*
 436	 * Disable struct se_dev_entry LUN ACL mapping
 437	 */
 438	core_scsi3_ua_release_all(orig);
 439
 440	hlist_del_rcu(&orig->link);
 441	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
 442	orig->lun_access_ro = false;
 443	orig->creation_time = 0;
 444	orig->attach_count--;
 445	/*
 446	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
 447	 * or REGISTER_AND_MOVE PR operation to complete.
 448	 */
 449	kref_put(&orig->pr_kref, target_pr_kref_release);
 450	wait_for_completion(&orig->pr_comp);
 451
 452	kfree_rcu(orig, rcu_head);
 453
 454	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
 455	target_luns_data_has_changed(nacl, NULL, false);
 456}
 457
 458/*      core_clear_lun_from_tpg():
 459 *
 460 *
 461 */
 462void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 463{
 464	struct se_node_acl *nacl;
 465	struct se_dev_entry *deve;
 466
 467	mutex_lock(&tpg->acl_node_mutex);
 468	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
 469
 470		mutex_lock(&nacl->lun_entry_mutex);
 471		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 472			if (lun != deve->se_lun)
 473				continue;
 474
 475			core_disable_device_list_for_node(lun, deve, nacl, tpg);
 476		}
 477		mutex_unlock(&nacl->lun_entry_mutex);
 478	}
 479	mutex_unlock(&tpg->acl_node_mutex);
 480}
 481
 482int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
 483{
 484	struct se_lun *tmp;
 485
 486	spin_lock(&dev->se_port_lock);
 487	if (dev->export_count == 0x0000ffff) {
 488		pr_warn("Reached dev->dev_port_count =="
 489				" 0x0000ffff\n");
 490		spin_unlock(&dev->se_port_lock);
 491		return -ENOSPC;
 492	}
 493again:
 494	/*
 495	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
 496	 * Here is the table from spc4r17 section 7.7.3.8.
 497	 *
 498	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
 499	 *
 500	 * Code      Description
 501	 * 0h        Reserved
 502	 * 1h        Relative port 1, historically known as port A
 503	 * 2h        Relative port 2, historically known as port B
 504	 * 3h to FFFFh    Relative port 3 through 65 535
 505	 */
 506	lun->lun_rtpi = dev->dev_rpti_counter++;
 507	if (!lun->lun_rtpi)
 508		goto again;
 509
 510	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
 511		/*
 512		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
  513		 * for 16-bit wrap.
 514		 */
 515		if (lun->lun_rtpi == tmp->lun_rtpi)
 516			goto again;
 517	}
 518	spin_unlock(&dev->se_port_lock);
 519
 520	return 0;
 521}
 522
 523static void se_release_vpd_for_dev(struct se_device *dev)
 524{
 525	struct t10_vpd *vpd, *vpd_tmp;
 526
 527	spin_lock(&dev->t10_wwn.t10_vpd_lock);
 528	list_for_each_entry_safe(vpd, vpd_tmp,
 529			&dev->t10_wwn.t10_vpd_list, vpd_list) {
 530		list_del(&vpd->vpd_list);
 531		kfree(vpd);
 532	}
 533	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
 534}
 535
 536static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
 537{
 538	u32 aligned_max_sectors;
 539	u32 alignment;
 540	/*
 541	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
 542	 * transport_allocate_data_tasks() operation.
 543	 */
 544	alignment = max(1ul, PAGE_SIZE / block_size);
 545	aligned_max_sectors = rounddown(max_sectors, alignment);
 546
 547	if (max_sectors != aligned_max_sectors)
 548		pr_info("Rounding down aligned max_sectors from %u to %u\n",
 549			max_sectors, aligned_max_sectors);
 550
 551	return aligned_max_sectors;
 552}
 553
 554int core_dev_add_lun(
 555	struct se_portal_group *tpg,
 556	struct se_device *dev,
 557	struct se_lun *lun)
 558{
 559	int rc;
 560
 561	rc = core_tpg_add_lun(tpg, lun, false, dev);
 562	if (rc < 0)
 563		return rc;
 564
 565	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
 566		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
 567		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 568		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
 569	/*
 570	 * Update LUN maps for dynamically added initiators when
 571	 * generate_node_acl is enabled.
 572	 */
 573	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
 574		struct se_node_acl *acl;
 575
 576		mutex_lock(&tpg->acl_node_mutex);
 577		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
 578			if (acl->dynamic_node_acl &&
 579			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
 580			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
 581				core_tpg_add_node_to_devs(acl, tpg, lun);
 582			}
 583		}
 584		mutex_unlock(&tpg->acl_node_mutex);
 585	}
 586
 587	return 0;
 588}
 589
 590/*      core_dev_del_lun():
 591 *
 592 *
 593 */
 594void core_dev_del_lun(
 595	struct se_portal_group *tpg,
 596	struct se_lun *lun)
 597{
 598	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
 599		" device object\n", tpg->se_tpg_tfo->fabric_name,
 600		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 601		tpg->se_tpg_tfo->fabric_name);
 602
 603	core_tpg_remove_lun(tpg, lun);
 604}
 605
 606struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 607	struct se_portal_group *tpg,
 608	struct se_node_acl *nacl,
 609	u64 mapped_lun,
 610	int *ret)
 611{
 612	struct se_lun_acl *lacl;
 613
 614	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
 615		pr_err("%s InitiatorName exceeds maximum size.\n",
 616			tpg->se_tpg_tfo->fabric_name);
 617		*ret = -EOVERFLOW;
 618		return NULL;
 619	}
 620	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
 621	if (!lacl) {
 622		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
 623		*ret = -ENOMEM;
 624		return NULL;
 625	}
 626
 627	lacl->mapped_lun = mapped_lun;
 628	lacl->se_lun_nacl = nacl;
 629
 630	return lacl;
 631}
 632
 633int core_dev_add_initiator_node_lun_acl(
 634	struct se_portal_group *tpg,
 635	struct se_lun_acl *lacl,
 636	struct se_lun *lun,
 637	bool lun_access_ro)
 638{
 639	struct se_node_acl *nacl = lacl->se_lun_nacl;
 640	/*
 641	 * rcu_dereference_raw protected by se_lun->lun_group symlink
 642	 * reference to se_device->dev_group.
 643	 */
 644	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 645
 646	if (!nacl)
 647		return -EINVAL;
 648
 649	if (lun->lun_access_ro)
 650		lun_access_ro = true;
 651
 652	lacl->se_lun = lun;
 653
 654	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
 655			lun_access_ro, nacl, tpg) < 0)
 656		return -EINVAL;
 657
 658	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
 659		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
 660		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
 661		lun_access_ro ? "RO" : "RW",
 662		nacl->initiatorname);
 663	/*
 664	 * Check to see if there are any existing persistent reservation APTPL
  665	 * pre-registrations that need to be enabled for this LUN ACL.
 666	 */
 667	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
 668					    lacl->mapped_lun);
 669	return 0;
 670}
 671
 672int core_dev_del_initiator_node_lun_acl(
 673	struct se_lun *lun,
 674	struct se_lun_acl *lacl)
 675{
 676	struct se_portal_group *tpg = lun->lun_tpg;
 677	struct se_node_acl *nacl;
 678	struct se_dev_entry *deve;
 679
 680	nacl = lacl->se_lun_nacl;
 681	if (!nacl)
 682		return -EINVAL;
 683
 684	mutex_lock(&nacl->lun_entry_mutex);
 685	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
 686	if (deve)
 687		core_disable_device_list_for_node(lun, deve, nacl, tpg);
 688	mutex_unlock(&nacl->lun_entry_mutex);
 689
 690	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
 691		" InitiatorNode: %s Mapped LUN: %llu\n",
 692		tpg->se_tpg_tfo->fabric_name,
 693		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 694		nacl->initiatorname, lacl->mapped_lun);
 695
 696	return 0;
 697}
 698
 699void core_dev_free_initiator_node_lun_acl(
 700	struct se_portal_group *tpg,
 701	struct se_lun_acl *lacl)
 702{
 703	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
 704		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
 705		tpg->se_tpg_tfo->tpg_get_tag(tpg),
 706		tpg->se_tpg_tfo->fabric_name,
 707		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
 708
 709	kfree(lacl);
 710}
 711
 712static void scsi_dump_inquiry(struct se_device *dev)
 713{
 714	struct t10_wwn *wwn = &dev->t10_wwn;
 715	int device_type = dev->transport->get_device_type(dev);
 716
 717	/*
 718	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
 719	 */
 720	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
 721		wwn->vendor);
 722	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
 723		wwn->model);
 724	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
 725		wwn->revision);
  726	pr_debug("  Type:   %s\n", scsi_device_type(device_type));
 727}
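/*
 * With dynamic debug enabled the output resembles (values illustrative,
 * taken from a fileio-backed device):
 *
 *	  Vendor: LIO-ORG
 *	  Model: FILEIO
 *	  Revision: 4.0
 *	  Type:   Direct-Access
 */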
 728
 729struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 730{
 731	struct se_device *dev;
 732	struct se_lun *xcopy_lun;
 733	int i;
 734
 735	dev = hba->backend->ops->alloc_device(hba, name);
 736	if (!dev)
 737		return NULL;
 738
 739	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
 740	if (!dev->queues) {
 741		dev->transport->free_device(dev);
 742		return NULL;
 743	}
 744
 745	dev->queue_cnt = nr_cpu_ids;
 746	for (i = 0; i < dev->queue_cnt; i++) {
 747		struct se_device_queue *q;
 748
 749		q = &dev->queues[i];
 750		INIT_LIST_HEAD(&q->state_list);
 751		spin_lock_init(&q->lock);
 752
 753		init_llist_head(&q->sq.cmd_list);
 754		INIT_WORK(&q->sq.work, target_queued_submit_work);
 755	}
 756
 757	dev->se_hba = hba;
 758	dev->transport = hba->backend->ops;
 759	dev->transport_flags = dev->transport->transport_flags_default;
 760	dev->prot_length = sizeof(struct t10_pi_tuple);
 761	dev->hba_index = hba->hba_index;
 762
 763	INIT_LIST_HEAD(&dev->dev_sep_list);
 764	INIT_LIST_HEAD(&dev->dev_tmr_list);
  765	INIT_LIST_HEAD(&dev->delayed_cmd_list);
  766	INIT_LIST_HEAD(&dev->qf_cmd_list);
  767	spin_lock_init(&dev->delayed_cmd_lock);
 768	spin_lock_init(&dev->dev_reservation_lock);
 769	spin_lock_init(&dev->se_port_lock);
 770	spin_lock_init(&dev->se_tmr_lock);
 771	spin_lock_init(&dev->qf_cmd_lock);
 772	sema_init(&dev->caw_sem, 1);
 773	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
 774	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
 775	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
 776	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
 777	spin_lock_init(&dev->t10_pr.registration_lock);
 778	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
 779	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
 780	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
 781	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
 782	spin_lock_init(&dev->t10_alua.lba_map_lock);
 783
 784	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
 785
 786	dev->t10_wwn.t10_dev = dev;
 787	/*
 788	 * Use OpenFabrics IEEE Company ID: 00 14 05
 789	 */
 790	dev->t10_wwn.company_id = 0x001405;
 791
 792	dev->t10_alua.t10_dev = dev;
 793
 794	dev->dev_attrib.da_dev = dev;
 795	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
 796	dev->dev_attrib.emulate_dpo = 1;
 797	dev->dev_attrib.emulate_fua_write = 1;
 798	dev->dev_attrib.emulate_fua_read = 1;
 799	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
 800	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
 801	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
 802	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
 803	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
 804	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
 805	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
 806	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
 807	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
 808	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
 809	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
 810	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
 811	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
 812	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
 813	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
 814	dev->dev_attrib.max_unmap_block_desc_count =
 815		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
 816	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
 817	dev->dev_attrib.unmap_granularity_alignment =
 818				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
 819	dev->dev_attrib.unmap_zeroes_data =
 820				DA_UNMAP_ZEROES_DATA_DEFAULT;
 821	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
 822
 823	xcopy_lun = &dev->xcopy_lun;
 824	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
 825	init_completion(&xcopy_lun->lun_shutdown_comp);
 826	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
 827	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
 828	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
 829	xcopy_lun->lun_tpg = &xcopy_pt_tpg;
 830
 831	/* Preload the default INQUIRY const values */
 832	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
 833	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
 834		sizeof(dev->t10_wwn.model));
 835	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
 836		sizeof(dev->t10_wwn.revision));
 837
 838	return dev;
 839}
 840
  841/*
  842 * Check if the underlying struct block_device supports discard and,
  843 * if so, configure the UNMAP parameters.
  844 */
 845bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
 846				       struct block_device *bdev)
 847{
 848	int block_size = bdev_logical_block_size(bdev);
 849
 850	if (!bdev_max_discard_sectors(bdev))
 851		return false;
 852
 853	attrib->max_unmap_lba_count =
 854		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
 855	/*
  856	 * Currently hardcoded to 1 in Linux/SCSI code.
 857	 */
 858	attrib->max_unmap_block_desc_count = 1;
 859	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
 860	attrib->unmap_granularity_alignment =
  861		bdev_discard_alignment(bdev) / block_size;
  862	return true;
 863}
 864EXPORT_SYMBOL(target_configure_unmap_from_queue);
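/*
 * Worked example (illustrative, values assumed): for a bdev with 4096-byte
 * logical blocks and bdev_max_discard_sectors() == 8388608 512-byte
 * sectors, ilog2(4096) - 9 == 3, so max_unmap_lba_count becomes
 * 8388608 >> 3 == 1048576 LBAs. A discard granularity of 4096 bytes
 * likewise yields unmap_granularity == 4096 / 4096 == 1 LBA.
 */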
 865
 866/*
 867 * Convert from blocksize advertised to the initiator to the 512 byte
 868 * units unconditionally used by the Linux block layer.
 869 */
 870sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
 871{
 872	switch (dev->dev_attrib.block_size) {
 873	case 4096:
 874		return lb << 3;
 875	case 2048:
 876		return lb << 2;
 877	case 1024:
 878		return lb << 1;
 879	default:
 880		return lb;
 881	}
 882}
 883EXPORT_SYMBOL(target_to_linux_sector);
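/*
 * Worked example (illustrative): with dev->dev_attrib.block_size == 4096,
 * target_to_linux_sector(dev, 100) returns 100 << 3 == 800, i.e. LBA 100
 * of a 4096-byte-block device begins at 512-byte sector 800. With the
 * default 512-byte block size the LBA is returned unchanged.
 */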
 884
 885struct devices_idr_iter {
 886	struct config_item *prev_item;
 887	int (*fn)(struct se_device *dev, void *data);
 888	void *data;
 889};
 890
 891static int target_devices_idr_iter(int id, void *p, void *data)
 892	 __must_hold(&device_mutex)
 893{
 894	struct devices_idr_iter *iter = data;
 895	struct se_device *dev = p;
 896	int ret;
 897
 898	config_item_put(iter->prev_item);
 899	iter->prev_item = NULL;
 900
 901	/*
 902	 * We add the device early to the idr, so it can be used
 903	 * by backend modules during configuration. We do not want
 904	 * to allow other callers to access partially setup devices,
 905	 * so we skip them here.
 906	 */
 907	if (!target_dev_configured(dev))
 908		return 0;
 909
 910	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
 911	if (!iter->prev_item)
 912		return 0;
 913	mutex_unlock(&device_mutex);
 914
 915	ret = iter->fn(dev, iter->data);
 916
 917	mutex_lock(&device_mutex);
 918	return ret;
 919}
 920
 921/**
 922 * target_for_each_device - iterate over configured devices
 923 * @fn: iterator function
 924 * @data: pointer to data that will be passed to fn
 925 *
  926 * fn must return 0 to continue looping over devices. A non-zero return
  927 * breaks out of the loop and is passed back to the caller.
 928 */
 929int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
 930			   void *data)
 931{
 932	struct devices_idr_iter iter = { .fn = fn, .data = data };
 933	int ret;
 934
 935	mutex_lock(&device_mutex);
 936	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
 937	mutex_unlock(&device_mutex);
 938	config_item_put(iter.prev_item);
 939	return ret;
 940}
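/*
 * Usage sketch (illustrative only; the callback below is hypothetical and
 * not part of this file): counting every configured device.
 *
 *	static int target_count_one_dev(struct se_device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	target_for_each_device(target_count_one_dev, &count);
 *
 * Because target_devices_idr_iter() drops device_mutex around the callback,
 * fn may sleep, but it must tolerate devices being added or removed while
 * the walk is in progress.
 */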
 941
 942int target_configure_device(struct se_device *dev)
 943{
 944	struct se_hba *hba = dev->se_hba;
 945	int ret, id;
 946
 947	if (target_dev_configured(dev)) {
  948		pr_err("se_device has already been configured for this"
  949				" storage object\n");
 950		return -EEXIST;
 951	}
 952
 953	/*
  954	 * Add the device to the idr early so backend modules like tcmu
  955	 * can use it during their configuration.
 956	 */
 957	mutex_lock(&device_mutex);
 958	/*
  959	 * Use cyclic allocation to try to avoid reusing the ids of
  960	 * recently removed devices.
 961	 */
 962	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
 963	mutex_unlock(&device_mutex);
 964	if (id < 0) {
 965		ret = -ENOMEM;
 966		goto out;
 967	}
 968	dev->dev_index = id;
 969
 970	ret = dev->transport->configure_device(dev);
 971	if (ret)
 972		goto out_free_index;
 973
 974	if (dev->transport->configure_unmap &&
 975	    dev->transport->configure_unmap(dev)) {
 976		pr_debug("Discard support available, but disabled by default.\n");
 977	}
 978
 979	/*
  980	 * XXX: there is not much point in having two different values here.
 981	 */
 982	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
 983	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
 984
 985	/*
 986	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
 987	 */
 988	dev->dev_attrib.hw_max_sectors =
 989		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
 990					 dev->dev_attrib.hw_block_size);
 991	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 992
 993	dev->creation_time = get_jiffies_64();
 994
 995	ret = core_setup_alua(dev);
 996	if (ret)
 997		goto out_destroy_device;
 998
 999	/*
1000	 * Setup work_queue for QUEUE_FULL
1001	 */
1002	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1003
1004	scsi_dump_inquiry(dev);
1005
1006	spin_lock(&hba->device_lock);
1007	hba->dev_count++;
1008	spin_unlock(&hba->device_lock);
1009
1010	dev->dev_flags |= DF_CONFIGURED;
1011
1012	return 0;
1013
1014out_destroy_device:
1015	dev->transport->destroy_device(dev);
1016out_free_index:
1017	mutex_lock(&device_mutex);
1018	idr_remove(&devices_idr, dev->dev_index);
1019	mutex_unlock(&device_mutex);
1020out:
1021	se_release_vpd_for_dev(dev);
1022	return ret;
1023}
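/*
 * For an in-file example of the full alloc -> configure -> free lifecycle,
 * see core_dev_setup_virtual_lun0() below, which builds the internal
 * virtual LUN 0 device on top of the rd_mcp backend.
 */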
1024
1025void target_free_device(struct se_device *dev)
1026{
1027	struct se_hba *hba = dev->se_hba;
1028
1029	WARN_ON(!list_empty(&dev->dev_sep_list));
1030
1031	if (target_dev_configured(dev)) {
1032		dev->transport->destroy_device(dev);
1033
1034		mutex_lock(&device_mutex);
1035		idr_remove(&devices_idr, dev->dev_index);
1036		mutex_unlock(&device_mutex);
1037
1038		spin_lock(&hba->device_lock);
1039		hba->dev_count--;
1040		spin_unlock(&hba->device_lock);
1041	}
1042
1043	core_alua_free_lu_gp_mem(dev);
1044	core_alua_set_lba_map(dev, NULL, 0, 0);
1045	core_scsi3_free_all_registrations(dev);
1046	se_release_vpd_for_dev(dev);
1047
1048	if (dev->transport->free_prot)
1049		dev->transport->free_prot(dev);
1050
1051	kfree(dev->queues);
1052	dev->transport->free_device(dev);
1053}
1054
1055int core_dev_setup_virtual_lun0(void)
1056{
1057	struct se_hba *hba;
1058	struct se_device *dev;
1059	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
1060	int ret;
1061
1062	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1063	if (IS_ERR(hba))
1064		return PTR_ERR(hba);
1065
1066	dev = target_alloc_device(hba, "virt_lun0");
1067	if (!dev) {
1068		ret = -ENOMEM;
1069		goto out_free_hba;
1070	}
1071
1072	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
1073
1074	ret = target_configure_device(dev);
1075	if (ret)
1076		goto out_free_se_dev;
1077
1078	lun0_hba = hba;
1079	g_lun0_dev = dev;
1080	return 0;
1081
1082out_free_se_dev:
1083	target_free_device(dev);
1084out_free_hba:
1085	core_delete_hba(hba);
1086	return ret;
1087}
1088
1089
1090void core_dev_release_virtual_lun0(void)
1091{
1092	struct se_hba *hba = lun0_hba;
1093
1094	if (!hba)
1095		return;
1096
1097	if (g_lun0_dev)
1098		target_free_device(g_lun0_dev);
1099	core_delete_hba(hba);
1100}
1101
1102/*
1103 * Common CDB parsing for kernel and user passthrough.
1104 */
1105sense_reason_t
1106passthrough_parse_cdb(struct se_cmd *cmd,
1107	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
1108{
1109	unsigned char *cdb = cmd->t_task_cdb;
1110	struct se_device *dev = cmd->se_dev;
1111	unsigned int size;
1112
1113	/*
 1114	 * For REPORT LUNS we always need to emulate the response; everything
 1115	 * else is passed through to the backend.
1116	 */
1117	if (cdb[0] == REPORT_LUNS) {
1118		cmd->execute_cmd = spc_emulate_report_luns;
1119		return TCM_NO_SENSE;
1120	}
1121
1122	/*
1123	 * With emulate_pr disabled, all reservation requests should fail,
1124	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
1125	 */
1126	if (!dev->dev_attrib.emulate_pr &&
1127	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
1128	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
1129	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
1130	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
1131		return TCM_UNSUPPORTED_SCSI_OPCODE;
1132	}
1133
1134	/*
1135	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
1136	 * emulate the response, since tcmu does not have the information
1137	 * required to process these commands.
1138	 */
1139	if (!(dev->transport_flags &
1140	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
1141		if (cdb[0] == PERSISTENT_RESERVE_IN) {
1142			cmd->execute_cmd = target_scsi3_emulate_pr_in;
1143			size = get_unaligned_be16(&cdb[7]);
1144			return target_cmd_size_check(cmd, size);
1145		}
1146		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
1147			cmd->execute_cmd = target_scsi3_emulate_pr_out;
1148			size = get_unaligned_be32(&cdb[5]);
1149			return target_cmd_size_check(cmd, size);
1150		}
1151
1152		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
1153			cmd->execute_cmd = target_scsi2_reservation_release;
1154			if (cdb[0] == RELEASE_10)
1155				size = get_unaligned_be16(&cdb[7]);
1156			else
1157				size = cmd->data_length;
1158			return target_cmd_size_check(cmd, size);
1159		}
1160		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
1161			cmd->execute_cmd = target_scsi2_reservation_reserve;
1162			if (cdb[0] == RESERVE_10)
1163				size = get_unaligned_be16(&cdb[7]);
1164			else
1165				size = cmd->data_length;
1166			return target_cmd_size_check(cmd, size);
1167		}
1168	}
1169
1170	/* Set DATA_CDB flag for ops that should have it */
1171	switch (cdb[0]) {
1172	case READ_6:
1173	case READ_10:
1174	case READ_12:
1175	case READ_16:
1176	case WRITE_6:
1177	case WRITE_10:
1178	case WRITE_12:
1179	case WRITE_16:
1180	case WRITE_VERIFY:
1181	case WRITE_VERIFY_12:
1182	case WRITE_VERIFY_16:
1183	case COMPARE_AND_WRITE:
1184	case XDWRITEREAD_10:
1185		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1186		break;
1187	case VARIABLE_LENGTH_CMD:
1188		switch (get_unaligned_be16(&cdb[8])) {
1189		case READ_32:
1190		case WRITE_32:
1191		case WRITE_VERIFY_32:
1192		case XDWRITEREAD_32:
1193			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1194			break;
1195		}
1196	}
1197
1198	cmd->execute_cmd = exec_cmd;
1199
1200	return TCM_NO_SENSE;
1201}
1202EXPORT_SYMBOL(passthrough_parse_cdb);
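/*
 * Usage sketch (illustrative; the function names are hypothetical): a
 * passthrough backend typically calls this from its parse_cdb() backend op,
 * passing its own execution callback:
 *
 *	static sense_reason_t my_pt_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, my_pt_execute_cmd);
 *	}
 *
 * REPORT LUNS, and the reservation CDBs when TRANSPORT_FLAG_PASSTHROUGH_PGR
 * is not set, are then emulated by the core; every other CDB reaches
 * my_pt_execute_cmd() unmodified.
 */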