   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * Filename:  target_core_transport.c
   4 *
   5 * This file contains the Generic Target Engine Core.
   6 *
   7 * (c) Copyright 2002-2013 Datera, Inc.
   8 *
   9 * Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 ******************************************************************************/
  12
  13#include <linux/net.h>
  14#include <linux/delay.h>
  15#include <linux/string.h>
  16#include <linux/timer.h>
  17#include <linux/slab.h>
  18#include <linux/spinlock.h>
  19#include <linux/kthread.h>
  20#include <linux/in.h>
  21#include <linux/cdrom.h>
  22#include <linux/module.h>
  23#include <linux/ratelimit.h>
  24#include <linux/vmalloc.h>
  25#include <asm/unaligned.h>
  26#include <net/sock.h>
  27#include <net/tcp.h>
  28#include <scsi/scsi_proto.h>
  29#include <scsi/scsi_common.h>
  30
  31#include <target/target_core_base.h>
  32#include <target/target_core_backend.h>
  33#include <target/target_core_fabric.h>
  34
  35#include "target_core_internal.h"
  36#include "target_core_alua.h"
  37#include "target_core_pr.h"
  38#include "target_core_ua.h"
  39
  40#define CREATE_TRACE_POINTS
  41#include <trace/events/target.h>
  42
  43static struct workqueue_struct *target_completion_wq;
  44static struct workqueue_struct *target_submission_wq;
  45static struct kmem_cache *se_sess_cache;
  46struct kmem_cache *se_ua_cache;
  47struct kmem_cache *t10_pr_reg_cache;
  48struct kmem_cache *t10_alua_lu_gp_cache;
  49struct kmem_cache *t10_alua_lu_gp_mem_cache;
  50struct kmem_cache *t10_alua_tg_pt_gp_cache;
  51struct kmem_cache *t10_alua_lba_map_cache;
  52struct kmem_cache *t10_alua_lba_map_mem_cache;
  53
  54static void transport_complete_task_attr(struct se_cmd *cmd);
  55static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
  56static void transport_handle_queue_full(struct se_cmd *cmd,
  57		struct se_device *dev, int err, bool write_pending);
  58static void target_complete_ok_work(struct work_struct *work);
  59
  60int init_se_kmem_caches(void)
  61{
  62	se_sess_cache = kmem_cache_create("se_sess_cache",
  63			sizeof(struct se_session), __alignof__(struct se_session),
  64			0, NULL);
  65	if (!se_sess_cache) {
  66		pr_err("kmem_cache_create() for struct se_session"
  67				" failed\n");
  68		goto out;
  69	}
  70	se_ua_cache = kmem_cache_create("se_ua_cache",
  71			sizeof(struct se_ua), __alignof__(struct se_ua),
  72			0, NULL);
  73	if (!se_ua_cache) {
  74		pr_err("kmem_cache_create() for struct se_ua failed\n");
  75		goto out_free_sess_cache;
  76	}
  77	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
  78			sizeof(struct t10_pr_registration),
  79			__alignof__(struct t10_pr_registration), 0, NULL);
  80	if (!t10_pr_reg_cache) {
  81		pr_err("kmem_cache_create() for struct t10_pr_registration"
  82				" failed\n");
  83		goto out_free_ua_cache;
  84	}
  85	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
  86			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
  87			0, NULL);
  88	if (!t10_alua_lu_gp_cache) {
  89		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
  90				" failed\n");
  91		goto out_free_pr_reg_cache;
  92	}
  93	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
  94			sizeof(struct t10_alua_lu_gp_member),
  95			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
  96	if (!t10_alua_lu_gp_mem_cache) {
  97		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
  98				"cache failed\n");
  99		goto out_free_lu_gp_cache;
 100	}
 101	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
 102			sizeof(struct t10_alua_tg_pt_gp),
 103			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
 104	if (!t10_alua_tg_pt_gp_cache) {
 105		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
 106				"cache failed\n");
 107		goto out_free_lu_gp_mem_cache;
 108	}
 109	t10_alua_lba_map_cache = kmem_cache_create(
 110			"t10_alua_lba_map_cache",
 111			sizeof(struct t10_alua_lba_map),
 112			__alignof__(struct t10_alua_lba_map), 0, NULL);
 113	if (!t10_alua_lba_map_cache) {
 114		pr_err("kmem_cache_create() for t10_alua_lba_map_"
 115				"cache failed\n");
 116		goto out_free_tg_pt_gp_cache;
 117	}
 118	t10_alua_lba_map_mem_cache = kmem_cache_create(
 119			"t10_alua_lba_map_mem_cache",
 120			sizeof(struct t10_alua_lba_map_member),
 121			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
 122	if (!t10_alua_lba_map_mem_cache) {
 123		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
 124				"cache failed\n");
 125		goto out_free_lba_map_cache;
 126	}
 127
 128	target_completion_wq = alloc_workqueue("target_completion",
 129					       WQ_MEM_RECLAIM, 0);
 130	if (!target_completion_wq)
 131		goto out_free_lba_map_mem_cache;
 132
 133	target_submission_wq = alloc_workqueue("target_submission",
 134					       WQ_MEM_RECLAIM, 0);
 135	if (!target_submission_wq)
 136		goto out_free_completion_wq;
 137
 138	return 0;
 139
 140out_free_completion_wq:
 141	destroy_workqueue(target_completion_wq);
 142out_free_lba_map_mem_cache:
 143	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 144out_free_lba_map_cache:
 145	kmem_cache_destroy(t10_alua_lba_map_cache);
 146out_free_tg_pt_gp_cache:
 147	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
 148out_free_lu_gp_mem_cache:
 149	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
 150out_free_lu_gp_cache:
 151	kmem_cache_destroy(t10_alua_lu_gp_cache);
 152out_free_pr_reg_cache:
 153	kmem_cache_destroy(t10_pr_reg_cache);
 154out_free_ua_cache:
 155	kmem_cache_destroy(se_ua_cache);
 156out_free_sess_cache:
 157	kmem_cache_destroy(se_sess_cache);
 158out:
 159	return -ENOMEM;
 160}
 161
 162void release_se_kmem_caches(void)
 163{
 164	destroy_workqueue(target_submission_wq);
 165	destroy_workqueue(target_completion_wq);
 166	kmem_cache_destroy(se_sess_cache);
 167	kmem_cache_destroy(se_ua_cache);
 168	kmem_cache_destroy(t10_pr_reg_cache);
 169	kmem_cache_destroy(t10_alua_lu_gp_cache);
 170	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
 171	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
 172	kmem_cache_destroy(t10_alua_lba_map_cache);
 173	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 174}
 175
 176/* This code ensures unique mib indexes are handed out. */
 177static DEFINE_SPINLOCK(scsi_mib_index_lock);
 178static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
 179
 180/*
 181 * Allocate a new row index for the entry type specified
 182 */
 183u32 scsi_get_new_index(scsi_index_t type)
 184{
 185	u32 new_index;
 186
 187	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
 188
 189	spin_lock(&scsi_mib_index_lock);
 190	new_index = ++scsi_mib_index[type];
 191	spin_unlock(&scsi_mib_index_lock);
 192
 193	return new_index;
 194}
 195
 196void transport_subsystem_check_init(void)
 197{
 198	int ret;
 199	static int sub_api_initialized;
 200
 201	if (sub_api_initialized)
 202		return;
 203
 204	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
 205	if (ret != 0)
 206		pr_err("Unable to load target_core_iblock\n");
 207
 208	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
 209	if (ret != 0)
 210		pr_err("Unable to load target_core_file\n");
 211
 212	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
 213	if (ret != 0)
 214		pr_err("Unable to load target_core_pscsi\n");
 215
 216	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
 217	if (ret != 0)
 218		pr_err("Unable to load target_core_user\n");
 219
 220	sub_api_initialized = 1;
 221}
 222
 223static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
 224{
 225	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
 226
 227	wake_up(&sess->cmd_count_wq);
 228}
 229
 230/**
 231 * transport_init_session - initialize a session object
 232 * @se_sess: Session object pointer.
 233 *
 234 * The caller must have zero-initialized @se_sess before calling this function.
 235 */
 236int transport_init_session(struct se_session *se_sess)
 237{
 238	INIT_LIST_HEAD(&se_sess->sess_list);
 239	INIT_LIST_HEAD(&se_sess->sess_acl_list);
 240	spin_lock_init(&se_sess->sess_cmd_lock);
 241	init_waitqueue_head(&se_sess->cmd_count_wq);
 242	init_completion(&se_sess->stop_done);
 243	atomic_set(&se_sess->stopped, 0);
 244	return percpu_ref_init(&se_sess->cmd_count,
 245			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
 246}
 247EXPORT_SYMBOL(transport_init_session);
 248
 249void transport_uninit_session(struct se_session *se_sess)
 250{
 251	/*
 252	 * Drivers like iscsi and loop do not call target_stop_session
 253	 * during session shutdown so we have to drop the ref taken at init
 254	 * time here.
 255	 */
 256	if (!atomic_read(&se_sess->stopped))
 257		percpu_ref_put(&se_sess->cmd_count);
 258
 259	percpu_ref_exit(&se_sess->cmd_count);
 260}
 261
 262/**
 263 * transport_alloc_session - allocate a session object and initialize it
 264 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 265 */
 266struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
 267{
 268	struct se_session *se_sess;
 269	int ret;
 270
 271	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
 272	if (!se_sess) {
 273		pr_err("Unable to allocate struct se_session from"
 274				" se_sess_cache\n");
 275		return ERR_PTR(-ENOMEM);
 276	}
 277	ret = transport_init_session(se_sess);
 278	if (ret < 0) {
 279		kmem_cache_free(se_sess_cache, se_sess);
 280		return ERR_PTR(ret);
 281	}
 282	se_sess->sup_prot_ops = sup_prot_ops;
 283
 284	return se_sess;
 285}
 286EXPORT_SYMBOL(transport_alloc_session);
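/*
 * Example (illustrative sketch, not part of this file): a fabric driver that
 * does not pre-allocate per-command tags can allocate a session directly.
 * The function name my_fabric_make_session() is an assumption for the sake
 * of the sketch; TARGET_PROT_NORMAL advertises no T10-PI support.
 *
 *	static struct se_session *my_fabric_make_session(void)
 *	{
 *		struct se_session *se_sess;
 *
 *		se_sess = transport_alloc_session(TARGET_PROT_NORMAL);
 *		if (IS_ERR(se_sess))
 *			return NULL;	// -ENOMEM or percpu_ref_init() failure
 *		return se_sess;
 *	}
 */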
 287
 288/**
 289 * transport_alloc_session_tags - allocate target driver private data
 290 * @se_sess:  Session pointer.
 291 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 292 * @tag_size: Size in bytes of the private data a target driver associates with
 293 *	      each command.
 294 */
 295int transport_alloc_session_tags(struct se_session *se_sess,
 296			         unsigned int tag_num, unsigned int tag_size)
 297{
 298	int rc;
 299
 300	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
 301					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 302	if (!se_sess->sess_cmd_map) {
 303		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
 304		return -ENOMEM;
 305	}
 306
 307	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
 308			false, GFP_KERNEL, NUMA_NO_NODE);
 309	if (rc < 0) {
 310		pr_err("Unable to init se_sess->sess_tag_pool,"
 311			" tag_num: %u\n", tag_num);
 312		kvfree(se_sess->sess_cmd_map);
 313		se_sess->sess_cmd_map = NULL;
 314		return -ENOMEM;
 315	}
 316
 317	return 0;
 318}
 319EXPORT_SYMBOL(transport_alloc_session_tags);
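/*
 * Example (illustrative sketch, not from this file): struct my_fabric_cmd
 * below is a hypothetical driver-private per-command structure.  The call
 * reserves @tag_num slots of that size in se_sess->sess_cmd_map, addressed
 * by tags handed out from se_sess->sess_tag_pool:
 *
 *	struct my_fabric_cmd {
 *		struct se_cmd se_cmd;
 *		void *driver_private;
 *	};
 *
 *	rc = transport_alloc_session_tags(se_sess, 128,
 *					  sizeof(struct my_fabric_cmd));
 *	if (rc < 0)
 *		goto free_session;	// -ENOMEM on failure
 */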
 320
 321/**
 322 * transport_init_session_tags - allocate a session and target driver private data
 323 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 324 * @tag_size: Size in bytes of the private data a target driver associates with
 325 *	      each command.
 326 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 327 */
 328static struct se_session *
 329transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
 330			    enum target_prot_op sup_prot_ops)
 331{
 332	struct se_session *se_sess;
 333	int rc;
 334
 335	if (tag_num != 0 && !tag_size) {
 336		pr_err("init_session_tags called with percpu-ida tag_num:"
 337		       " %u, but zero tag_size\n", tag_num);
 338		return ERR_PTR(-EINVAL);
 339	}
 340	if (!tag_num && tag_size) {
 341		pr_err("init_session_tags called with percpu-ida tag_size:"
 342		       " %u, but zero tag_num\n", tag_size);
 343		return ERR_PTR(-EINVAL);
 344	}
 345
 346	se_sess = transport_alloc_session(sup_prot_ops);
 347	if (IS_ERR(se_sess))
 348		return se_sess;
 349
 350	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
 351	if (rc < 0) {
 352		transport_free_session(se_sess);
 353		return ERR_PTR(-ENOMEM);
 354	}
 355
 356	return se_sess;
 357}
 358
 359/*
  360 * Called with spin_lock_irqsave() held on &struct se_portal_group->session_lock.
 361 */
 362void __transport_register_session(
 363	struct se_portal_group *se_tpg,
 364	struct se_node_acl *se_nacl,
 365	struct se_session *se_sess,
 366	void *fabric_sess_ptr)
 367{
 368	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
 369	unsigned char buf[PR_REG_ISID_LEN];
 370	unsigned long flags;
 371
 372	se_sess->se_tpg = se_tpg;
 373	se_sess->fabric_sess_ptr = fabric_sess_ptr;
 374	/*
  375	 * Used by struct se_node_acl's under ConfigFS to locate the active struct se_session.
 376	 *
 377	 * Only set for struct se_session's that will actually be moving I/O.
 378	 * eg: *NOT* discovery sessions.
 379	 */
 380	if (se_nacl) {
 381		/*
 382		 *
 383		 * Determine if fabric allows for T10-PI feature bits exposed to
 384		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
 385		 *
 386		 * If so, then always save prot_type on a per se_node_acl node
 387		 * basis and re-instate the previous sess_prot_type to avoid
 388		 * disabling PI from below any previously initiator side
 389		 * registered LUNs.
 390		 */
 391		if (se_nacl->saved_prot_type)
 392			se_sess->sess_prot_type = se_nacl->saved_prot_type;
 393		else if (tfo->tpg_check_prot_fabric_only)
 394			se_sess->sess_prot_type = se_nacl->saved_prot_type =
 395					tfo->tpg_check_prot_fabric_only(se_tpg);
 396		/*
 397		 * If the fabric module supports an ISID based TransportID,
 398		 * save this value in binary from the fabric I_T Nexus now.
 399		 */
 400		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
 401			memset(&buf[0], 0, PR_REG_ISID_LEN);
 402			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
 403					&buf[0], PR_REG_ISID_LEN);
 404			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
 405		}
 406
 407		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 408		/*
 409		 * The se_nacl->nacl_sess pointer will be set to the
 410		 * last active I_T Nexus for each struct se_node_acl.
 411		 */
 412		se_nacl->nacl_sess = se_sess;
 413
 414		list_add_tail(&se_sess->sess_acl_list,
 415			      &se_nacl->acl_sess_list);
 416		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 417	}
 418	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
 419
 420	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
 421		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
 422}
 423EXPORT_SYMBOL(__transport_register_session);
 424
 425void transport_register_session(
 426	struct se_portal_group *se_tpg,
 427	struct se_node_acl *se_nacl,
 428	struct se_session *se_sess,
 429	void *fabric_sess_ptr)
 430{
 431	unsigned long flags;
 432
 433	spin_lock_irqsave(&se_tpg->session_lock, flags);
 434	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 435	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 436}
 437EXPORT_SYMBOL(transport_register_session);
 438
 439struct se_session *
 440target_setup_session(struct se_portal_group *tpg,
 441		     unsigned int tag_num, unsigned int tag_size,
 442		     enum target_prot_op prot_op,
 443		     const char *initiatorname, void *private,
 444		     int (*callback)(struct se_portal_group *,
 445				     struct se_session *, void *))
 446{
 447	struct se_session *sess;
 448
 449	/*
  450	 * If the fabric driver is using percpu-ida based pre-allocation
  451	 * of I/O descriptor tags, go ahead and perform that setup now.
 452	 */
 453	if (tag_num != 0)
 454		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
 455	else
 456		sess = transport_alloc_session(prot_op);
 457
 458	if (IS_ERR(sess))
 459		return sess;
 460
 461	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
 462					(unsigned char *)initiatorname);
 463	if (!sess->se_node_acl) {
 464		transport_free_session(sess);
 465		return ERR_PTR(-EACCES);
 466	}
 467	/*
 468	 * Go ahead and perform any remaining fabric setup that is
 469	 * required before transport_register_session().
 470	 */
 471	if (callback != NULL) {
 472		int rc = callback(tpg, sess, private);
 473		if (rc) {
 474			transport_free_session(sess);
 475			return ERR_PTR(rc);
 476		}
 477	}
 478
 479	transport_register_session(tpg, sess->se_node_acl, sess, private);
 480	return sess;
 481}
 482EXPORT_SYMBOL(target_setup_session);
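/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * fabric driver wrapping the sequence above in a single call.  The names
 * my_fabric_conn, my_fabric_cmd and my_fabric_session_cb are assumptions;
 * the callback runs after the node ACL lookup but before
 * transport_register_session():
 *
 *	static int my_fabric_session_cb(struct se_portal_group *se_tpg,
 *					struct se_session *se_sess, void *p)
 *	{
 *		struct my_fabric_conn *conn = p;
 *
 *		conn->se_sess = se_sess;
 *		return 0;	// non-zero frees the session and is passed back
 *	}
 *
 *	sess = target_setup_session(&tpg->se_tpg, 128,
 *				    sizeof(struct my_fabric_cmd),
 *				    TARGET_PROT_NORMAL, initiator_name,
 *				    conn, my_fabric_session_cb);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */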
 483
 484ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
 485{
 486	struct se_session *se_sess;
 487	ssize_t len = 0;
 488
 489	spin_lock_bh(&se_tpg->session_lock);
 490	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
 491		if (!se_sess->se_node_acl)
 492			continue;
 493		if (!se_sess->se_node_acl->dynamic_node_acl)
 494			continue;
 495		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
 496			break;
 497
 498		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
 499				se_sess->se_node_acl->initiatorname);
 500		len += 1; /* Include NULL terminator */
 501	}
 502	spin_unlock_bh(&se_tpg->session_lock);
 503
 504	return len;
 505}
 506EXPORT_SYMBOL(target_show_dynamic_sessions);
 507
 508static void target_complete_nacl(struct kref *kref)
 509{
 510	struct se_node_acl *nacl = container_of(kref,
 511				struct se_node_acl, acl_kref);
 512	struct se_portal_group *se_tpg = nacl->se_tpg;
 513
 514	if (!nacl->dynamic_stop) {
 515		complete(&nacl->acl_free_comp);
 516		return;
 517	}
 518
 519	mutex_lock(&se_tpg->acl_node_mutex);
 520	list_del_init(&nacl->acl_list);
 521	mutex_unlock(&se_tpg->acl_node_mutex);
 522
 523	core_tpg_wait_for_nacl_pr_ref(nacl);
 524	core_free_device_list_for_node(nacl, se_tpg);
 525	kfree(nacl);
 526}
 527
 528void target_put_nacl(struct se_node_acl *nacl)
 529{
 530	kref_put(&nacl->acl_kref, target_complete_nacl);
 531}
 532EXPORT_SYMBOL(target_put_nacl);
 533
 534void transport_deregister_session_configfs(struct se_session *se_sess)
 535{
 536	struct se_node_acl *se_nacl;
 537	unsigned long flags;
 538	/*
 539	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
 540	 */
 541	se_nacl = se_sess->se_node_acl;
 542	if (se_nacl) {
 543		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 544		if (!list_empty(&se_sess->sess_acl_list))
 545			list_del_init(&se_sess->sess_acl_list);
 546		/*
 547		 * If the session list is empty, then clear the pointer.
 548		 * Otherwise, set the struct se_session pointer from the tail
 549		 * element of the per struct se_node_acl active session list.
 550		 */
 551		if (list_empty(&se_nacl->acl_sess_list))
 552			se_nacl->nacl_sess = NULL;
 553		else {
 554			se_nacl->nacl_sess = container_of(
 555					se_nacl->acl_sess_list.prev,
 556					struct se_session, sess_acl_list);
 557		}
 558		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 559	}
 560}
 561EXPORT_SYMBOL(transport_deregister_session_configfs);
 562
 563void transport_free_session(struct se_session *se_sess)
 564{
 565	struct se_node_acl *se_nacl = se_sess->se_node_acl;
 566
 567	/*
 568	 * Drop the se_node_acl->nacl_kref obtained from within
 569	 * core_tpg_get_initiator_node_acl().
 570	 */
 571	if (se_nacl) {
 572		struct se_portal_group *se_tpg = se_nacl->se_tpg;
 573		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
 574		unsigned long flags;
 575
 576		se_sess->se_node_acl = NULL;
 577
 578		/*
 579		 * Also determine if we need to drop the extra ->cmd_kref if
 580		 * it had been previously dynamically generated, and
 581		 * the endpoint is not caching dynamic ACLs.
 582		 */
 583		mutex_lock(&se_tpg->acl_node_mutex);
 584		if (se_nacl->dynamic_node_acl &&
 585		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
 586			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 587			if (list_empty(&se_nacl->acl_sess_list))
 588				se_nacl->dynamic_stop = true;
 589			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 590
 591			if (se_nacl->dynamic_stop)
 592				list_del_init(&se_nacl->acl_list);
 593		}
 594		mutex_unlock(&se_tpg->acl_node_mutex);
 595
 596		if (se_nacl->dynamic_stop)
 597			target_put_nacl(se_nacl);
 598
 599		target_put_nacl(se_nacl);
 600	}
 601	if (se_sess->sess_cmd_map) {
 602		sbitmap_queue_free(&se_sess->sess_tag_pool);
 603		kvfree(se_sess->sess_cmd_map);
 604	}
 605	transport_uninit_session(se_sess);
 606	kmem_cache_free(se_sess_cache, se_sess);
 607}
 608EXPORT_SYMBOL(transport_free_session);
 609
 610static int target_release_res(struct se_device *dev, void *data)
 611{
 612	struct se_session *sess = data;
 613
 614	if (dev->reservation_holder == sess)
 615		target_release_reservation(dev);
 616	return 0;
 617}
 618
 619void transport_deregister_session(struct se_session *se_sess)
 620{
 621	struct se_portal_group *se_tpg = se_sess->se_tpg;
 622	unsigned long flags;
 623
 624	if (!se_tpg) {
 625		transport_free_session(se_sess);
 626		return;
 627	}
 628
 629	spin_lock_irqsave(&se_tpg->session_lock, flags);
 630	list_del(&se_sess->sess_list);
 631	se_sess->se_tpg = NULL;
 632	se_sess->fabric_sess_ptr = NULL;
 633	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 634
 635	/*
 636	 * Since the session is being removed, release SPC-2
 637	 * reservations held by the session that is disappearing.
 638	 */
 639	target_for_each_device(target_release_res, se_sess);
 640
 641	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
 642		se_tpg->se_tpg_tfo->fabric_name);
 643	/*
 644	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
 645	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
 646	 * removal context from within transport_free_session() code.
 647	 *
 648	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
 649	 * to release all remaining generate_node_acl=1 created ACL resources.
 650	 */
 651
 652	transport_free_session(se_sess);
 653}
 654EXPORT_SYMBOL(transport_deregister_session);
 655
 656void target_remove_session(struct se_session *se_sess)
 657{
 658	transport_deregister_session_configfs(se_sess);
 659	transport_deregister_session(se_sess);
 660}
 661EXPORT_SYMBOL(target_remove_session);
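/*
 * Example (illustrative sketch): teardown matching the target_setup_session()
 * sketch above.  my_fabric_close_session() and the conn structure are
 * hypothetical.  Drivers that rely on target core to drain outstanding
 * commands call target_stop_session() (not defined in this file) before
 * removing the session; drivers that track their own commands may skip it.
 *
 *	static void my_fabric_close_session(struct my_fabric_conn *conn)
 *	{
 *		target_stop_session(conn->se_sess);
 *		target_remove_session(conn->se_sess);
 *		conn->se_sess = NULL;
 *	}
 */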
 662
 663static void target_remove_from_state_list(struct se_cmd *cmd)
 664{
 665	struct se_device *dev = cmd->se_dev;
 666	unsigned long flags;
 667
 668	if (!dev)
 669		return;
 670
 671	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
 672	if (cmd->state_active) {
 673		list_del(&cmd->state_list);
 674		cmd->state_active = false;
 675	}
 676	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
 677}
 678
 679static void target_remove_from_tmr_list(struct se_cmd *cmd)
 680{
 681	struct se_device *dev = NULL;
 682	unsigned long flags;
 683
 684	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
 685		dev = cmd->se_tmr_req->tmr_dev;
 686
 687	if (dev) {
 688		spin_lock_irqsave(&dev->se_tmr_lock, flags);
 689		if (cmd->se_tmr_req->tmr_dev)
 690			list_del_init(&cmd->se_tmr_req->tmr_list);
 691		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
 692	}
 693}
 694/*
 695 * This function is called by the target core after the target core has
 696 * finished processing a SCSI command or SCSI TMF. Both the regular command
 697 * processing code and the code for aborting commands can call this
 698 * function. CMD_T_STOP is set if and only if another thread is waiting
 699 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 700 */
 701static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 702{
 703	unsigned long flags;
 704
 705	spin_lock_irqsave(&cmd->t_state_lock, flags);
 706	/*
 707	 * Determine if frontend context caller is requesting the stopping of
 708	 * this command for frontend exceptions.
 709	 */
 710	if (cmd->transport_state & CMD_T_STOP) {
 711		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
 712			__func__, __LINE__, cmd->tag);
 713
 714		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 715
 716		complete_all(&cmd->t_transport_stop_comp);
 717		return 1;
 718	}
 719	cmd->transport_state &= ~CMD_T_ACTIVE;
 720	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 721
 722	/*
 723	 * Some fabric modules like tcm_loop can release their internally
 724	 * allocated I/O reference and struct se_cmd now.
 725	 *
 726	 * Fabric modules are expected to return '1' here if the se_cmd being
 727	 * passed is released at this point, or zero if not being released.
 728	 */
 729	return cmd->se_tfo->check_stop_free(cmd);
 730}
 731
 732static void transport_lun_remove_cmd(struct se_cmd *cmd)
 733{
 734	struct se_lun *lun = cmd->se_lun;
 735
 736	if (!lun)
 737		return;
 738
 739	target_remove_from_state_list(cmd);
 740	target_remove_from_tmr_list(cmd);
 741
 742	if (cmpxchg(&cmd->lun_ref_active, true, false))
 743		percpu_ref_put(&lun->lun_ref);
 744
 745	/*
 746	 * Clear struct se_cmd->se_lun before the handoff to FE.
 747	 */
 748	cmd->se_lun = NULL;
 749}
 750
 751static void target_complete_failure_work(struct work_struct *work)
 752{
 753	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 754
 755	transport_generic_request_failure(cmd, cmd->sense_reason);
 756}
 757
 758/*
 759 * Used when asking transport to copy Sense Data from the underlying
 760 * Linux/SCSI struct scsi_cmnd
 761 */
 762static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
 763{
 764	struct se_device *dev = cmd->se_dev;
 765
 766	WARN_ON(!cmd->se_lun);
 767
 768	if (!dev)
 769		return NULL;
 770
 771	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
 772		return NULL;
 773
 774	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
 775
 776	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
 777		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
 778	return cmd->sense_buffer;
 779}
 780
 781void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
 782{
 783	unsigned char *cmd_sense_buf;
 784	unsigned long flags;
 785
 786	spin_lock_irqsave(&cmd->t_state_lock, flags);
 787	cmd_sense_buf = transport_get_sense_buffer(cmd);
 788	if (!cmd_sense_buf) {
 789		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 790		return;
 791	}
 792
 793	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
 794	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
 795	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 796}
 797EXPORT_SYMBOL(transport_copy_sense_to_cmd);
 798
 799static void target_handle_abort(struct se_cmd *cmd)
 800{
 801	bool tas = cmd->transport_state & CMD_T_TAS;
 802	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
 803	int ret;
 804
 805	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
 806
 807	if (tas) {
 808		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
 809			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 810			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
 811				 cmd->t_task_cdb[0], cmd->tag);
 812			trace_target_cmd_complete(cmd);
 813			ret = cmd->se_tfo->queue_status(cmd);
 814			if (ret) {
 815				transport_handle_queue_full(cmd, cmd->se_dev,
 816							    ret, false);
 817				return;
 818			}
 819		} else {
 820			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
 821			cmd->se_tfo->queue_tm_rsp(cmd);
 822		}
 823	} else {
 824		/*
 825		 * Allow the fabric driver to unmap any resources before
 826		 * releasing the descriptor via TFO->release_cmd().
 827		 */
 828		cmd->se_tfo->aborted_task(cmd);
 829		if (ack_kref)
 830			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
 831		/*
 832		 * To do: establish a unit attention condition on the I_T
 833		 * nexus associated with cmd. See also the paragraph "Aborting
 834		 * commands" in SAM.
 835		 */
 836	}
 837
 838	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
 839
 840	transport_lun_remove_cmd(cmd);
 841
 842	transport_cmd_check_stop_to_fabric(cmd);
 843}
 844
 845static void target_abort_work(struct work_struct *work)
 846{
 847	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 848
 849	target_handle_abort(cmd);
 850}
 851
 852static bool target_cmd_interrupted(struct se_cmd *cmd)
 853{
 854	int post_ret;
 855
 856	if (cmd->transport_state & CMD_T_ABORTED) {
 857		if (cmd->transport_complete_callback)
 858			cmd->transport_complete_callback(cmd, false, &post_ret);
 859		INIT_WORK(&cmd->work, target_abort_work);
 860		queue_work(target_completion_wq, &cmd->work);
 861		return true;
 862	} else if (cmd->transport_state & CMD_T_STOP) {
 863		if (cmd->transport_complete_callback)
 864			cmd->transport_complete_callback(cmd, false, &post_ret);
 865		complete_all(&cmd->t_transport_stop_comp);
 866		return true;
 867	}
 868
 869	return false;
 870}
 871
 872/* May be called from interrupt context so must not sleep. */
 873void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
 874				    sense_reason_t sense_reason)
 875{
 876	struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
 877	int success, cpu;
 878	unsigned long flags;
 879
 880	if (target_cmd_interrupted(cmd))
 881		return;
 882
 883	cmd->scsi_status = scsi_status;
 884	cmd->sense_reason = sense_reason;
 885
 886	spin_lock_irqsave(&cmd->t_state_lock, flags);
 887	switch (cmd->scsi_status) {
 888	case SAM_STAT_CHECK_CONDITION:
 889		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
 890			success = 1;
 891		else
 892			success = 0;
 893		break;
 894	default:
 895		success = 1;
 896		break;
 897	}
 898
 899	cmd->t_state = TRANSPORT_COMPLETE;
 900	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
 901	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 902
 903	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
 904		  target_complete_failure_work);
 905
 906	if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
 907		cpu = cmd->cpuid;
 908	else
 909		cpu = wwn->cmd_compl_affinity;
 910
 911	queue_work_on(cpu, target_completion_wq, &cmd->work);
 912}
 913EXPORT_SYMBOL(target_complete_cmd_with_sense);
 914
 915void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 916{
 917	target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ?
 918			      TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE :
 919			      TCM_NO_SENSE);
 920}
 921EXPORT_SYMBOL(target_complete_cmd);
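/*
 * Example (illustrative sketch): a hypothetical backend completing a command
 * from its interrupt handler.  my_backend_req and my_backend_end_io() are
 * assumptions; the call only queues completion work on target_completion_wq,
 * so it is safe from atomic context:
 *
 *	static void my_backend_end_io(struct my_backend_req *req)
 *	{
 *		struct se_cmd *cmd = req->se_cmd;
 *
 *		if (req->error)
 *			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 *		else
 *			target_complete_cmd(cmd, SAM_STAT_GOOD);
 *	}
 */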
 922
 923void target_set_cmd_data_length(struct se_cmd *cmd, int length)
 924{
 925	if (length < cmd->data_length) {
 926		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
 927			cmd->residual_count += cmd->data_length - length;
 928		} else {
 929			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
 930			cmd->residual_count = cmd->data_length - length;
 931		}
 932
 933		cmd->data_length = length;
 934	}
 935}
 936EXPORT_SYMBOL(target_set_cmd_data_length);
 937
 938void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
 939{
 940	if (scsi_status == SAM_STAT_GOOD ||
 941	    cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
 942		target_set_cmd_data_length(cmd, length);
 943	}
 944
 945	target_complete_cmd(cmd, scsi_status);
 946}
 947EXPORT_SYMBOL(target_complete_cmd_with_length);
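/*
 * Example (illustrative, derived from the helpers above): an emulated
 * command that built a 96-byte response while cmd->data_length (the
 * allocation length) is 252 completes with the shorter length so that the
 * residual is reported to the initiator:
 *
 *	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 96);
 *	// target_set_cmd_data_length() sets SCF_UNDERFLOW_BIT,
 *	// residual_count = 252 - 96 = 156, and data_length = 96.
 */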
 948
 949static void target_add_to_state_list(struct se_cmd *cmd)
 950{
 951	struct se_device *dev = cmd->se_dev;
 952	unsigned long flags;
 953
 954	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
 955	if (!cmd->state_active) {
 956		list_add_tail(&cmd->state_list,
 957			      &dev->queues[cmd->cpuid].state_list);
 958		cmd->state_active = true;
 959	}
 960	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
 961}
 962
 963/*
 964 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 965 */
 966static void transport_write_pending_qf(struct se_cmd *cmd);
 967static void transport_complete_qf(struct se_cmd *cmd);
 968
 969void target_qf_do_work(struct work_struct *work)
 970{
 971	struct se_device *dev = container_of(work, struct se_device,
 972					qf_work_queue);
 973	LIST_HEAD(qf_cmd_list);
 974	struct se_cmd *cmd, *cmd_tmp;
 975
 976	spin_lock_irq(&dev->qf_cmd_lock);
 977	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
 978	spin_unlock_irq(&dev->qf_cmd_lock);
 979
 980	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
 981		list_del(&cmd->se_qf_node);
 982		atomic_dec_mb(&dev->dev_qf_count);
 983
 984		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
 985			" context: %s\n", cmd->se_tfo->fabric_name, cmd,
 986			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
 987			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
 988			: "UNKNOWN");
 989
 990		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
 991			transport_write_pending_qf(cmd);
 992		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
 993			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
 994			transport_complete_qf(cmd);
 995	}
 996}
 997
 998unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
 999{
1000	switch (cmd->data_direction) {
1001	case DMA_NONE:
1002		return "NONE";
1003	case DMA_FROM_DEVICE:
1004		return "READ";
1005	case DMA_TO_DEVICE:
1006		return "WRITE";
1007	case DMA_BIDIRECTIONAL:
1008		return "BIDI";
1009	default:
1010		break;
1011	}
1012
1013	return "UNKNOWN";
1014}
1015
1016void transport_dump_dev_state(
1017	struct se_device *dev,
1018	char *b,
1019	int *bl)
1020{
1021	*bl += sprintf(b + *bl, "Status: ");
1022	if (dev->export_count)
1023		*bl += sprintf(b + *bl, "ACTIVATED");
1024	else
1025		*bl += sprintf(b + *bl, "DEACTIVATED");
1026
1027	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
1028	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
1029		dev->dev_attrib.block_size,
1030		dev->dev_attrib.hw_max_sectors);
1031	*bl += sprintf(b + *bl, "        ");
1032}
1033
1034void transport_dump_vpd_proto_id(
1035	struct t10_vpd *vpd,
1036	unsigned char *p_buf,
1037	int p_buf_len)
1038{
1039	unsigned char buf[VPD_TMP_BUF_SIZE];
1040	int len;
1041
1042	memset(buf, 0, VPD_TMP_BUF_SIZE);
1043	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1044
1045	switch (vpd->protocol_identifier) {
1046	case 0x00:
1047		sprintf(buf+len, "Fibre Channel\n");
1048		break;
1049	case 0x10:
1050		sprintf(buf+len, "Parallel SCSI\n");
1051		break;
1052	case 0x20:
1053		sprintf(buf+len, "SSA\n");
1054		break;
1055	case 0x30:
1056		sprintf(buf+len, "IEEE 1394\n");
1057		break;
1058	case 0x40:
1059		sprintf(buf+len, "SCSI Remote Direct Memory Access"
1060				" Protocol\n");
1061		break;
1062	case 0x50:
1063		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1064		break;
1065	case 0x60:
1066		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1067		break;
1068	case 0x70:
1069		sprintf(buf+len, "Automation/Drive Interface Transport"
1070				" Protocol\n");
1071		break;
1072	case 0x80:
1073		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1074		break;
1075	default:
1076		sprintf(buf+len, "Unknown 0x%02x\n",
1077				vpd->protocol_identifier);
1078		break;
1079	}
1080
1081	if (p_buf)
1082		strncpy(p_buf, buf, p_buf_len);
1083	else
1084		pr_debug("%s", buf);
1085}
1086
1087void
1088transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1089{
1090	/*
1091	 * Check if the Protocol Identifier Valid (PIV) bit is set..
1092	 *
1093	 * from spc3r23.pdf section 7.5.1
1094	 */
1095	 if (page_83[1] & 0x80) {
1096		vpd->protocol_identifier = (page_83[0] & 0xf0);
1097		vpd->protocol_identifier_set = 1;
1098		transport_dump_vpd_proto_id(vpd, NULL, 0);
1099	}
1100}
1101EXPORT_SYMBOL(transport_set_vpd_proto_id);
1102
1103int transport_dump_vpd_assoc(
1104	struct t10_vpd *vpd,
1105	unsigned char *p_buf,
1106	int p_buf_len)
1107{
1108	unsigned char buf[VPD_TMP_BUF_SIZE];
1109	int ret = 0;
1110	int len;
1111
1112	memset(buf, 0, VPD_TMP_BUF_SIZE);
1113	len = sprintf(buf, "T10 VPD Identifier Association: ");
1114
1115	switch (vpd->association) {
1116	case 0x00:
1117		sprintf(buf+len, "addressed logical unit\n");
1118		break;
1119	case 0x10:
1120		sprintf(buf+len, "target port\n");
1121		break;
1122	case 0x20:
1123		sprintf(buf+len, "SCSI target device\n");
1124		break;
1125	default:
1126		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1127		ret = -EINVAL;
1128		break;
1129	}
1130
1131	if (p_buf)
1132		strncpy(p_buf, buf, p_buf_len);
1133	else
1134		pr_debug("%s", buf);
1135
1136	return ret;
1137}
1138
1139int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1140{
1141	/*
1142	 * The VPD identification association..
1143	 *
1144	 * from spc3r23.pdf Section 7.6.3.1 Table 297
1145	 */
1146	vpd->association = (page_83[1] & 0x30);
1147	return transport_dump_vpd_assoc(vpd, NULL, 0);
1148}
1149EXPORT_SYMBOL(transport_set_vpd_assoc);
1150
1151int transport_dump_vpd_ident_type(
1152	struct t10_vpd *vpd,
1153	unsigned char *p_buf,
1154	int p_buf_len)
1155{
1156	unsigned char buf[VPD_TMP_BUF_SIZE];
1157	int ret = 0;
1158	int len;
1159
1160	memset(buf, 0, VPD_TMP_BUF_SIZE);
1161	len = sprintf(buf, "T10 VPD Identifier Type: ");
1162
1163	switch (vpd->device_identifier_type) {
1164	case 0x00:
1165		sprintf(buf+len, "Vendor specific\n");
1166		break;
1167	case 0x01:
1168		sprintf(buf+len, "T10 Vendor ID based\n");
1169		break;
1170	case 0x02:
1171		sprintf(buf+len, "EUI-64 based\n");
1172		break;
1173	case 0x03:
1174		sprintf(buf+len, "NAA\n");
1175		break;
1176	case 0x04:
1177		sprintf(buf+len, "Relative target port identifier\n");
1178		break;
1179	case 0x08:
1180		sprintf(buf+len, "SCSI name string\n");
1181		break;
1182	default:
1183		sprintf(buf+len, "Unsupported: 0x%02x\n",
1184				vpd->device_identifier_type);
1185		ret = -EINVAL;
1186		break;
1187	}
1188
1189	if (p_buf) {
1190		if (p_buf_len < strlen(buf)+1)
1191			return -EINVAL;
1192		strncpy(p_buf, buf, p_buf_len);
1193	} else {
1194		pr_debug("%s", buf);
1195	}
1196
1197	return ret;
1198}
1199
1200int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1201{
1202	/*
1203	 * The VPD identifier type..
1204	 *
1205	 * from spc3r23.pdf Section 7.6.3.1 Table 298
1206	 */
1207	vpd->device_identifier_type = (page_83[1] & 0x0f);
1208	return transport_dump_vpd_ident_type(vpd, NULL, 0);
1209}
1210EXPORT_SYMBOL(transport_set_vpd_ident_type);
1211
1212int transport_dump_vpd_ident(
1213	struct t10_vpd *vpd,
1214	unsigned char *p_buf,
1215	int p_buf_len)
1216{
1217	unsigned char buf[VPD_TMP_BUF_SIZE];
1218	int ret = 0;
1219
1220	memset(buf, 0, VPD_TMP_BUF_SIZE);
1221
1222	switch (vpd->device_identifier_code_set) {
1223	case 0x01: /* Binary */
1224		snprintf(buf, sizeof(buf),
1225			"T10 VPD Binary Device Identifier: %s\n",
1226			&vpd->device_identifier[0]);
1227		break;
1228	case 0x02: /* ASCII */
1229		snprintf(buf, sizeof(buf),
1230			"T10 VPD ASCII Device Identifier: %s\n",
1231			&vpd->device_identifier[0]);
1232		break;
1233	case 0x03: /* UTF-8 */
1234		snprintf(buf, sizeof(buf),
1235			"T10 VPD UTF-8 Device Identifier: %s\n",
1236			&vpd->device_identifier[0]);
1237		break;
1238	default:
1239		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1240			" 0x%02x", vpd->device_identifier_code_set);
1241		ret = -EINVAL;
1242		break;
1243	}
1244
1245	if (p_buf)
1246		strncpy(p_buf, buf, p_buf_len);
1247	else
1248		pr_debug("%s", buf);
1249
1250	return ret;
1251}
1252
1253int
1254transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1255{
1256	static const char hex_str[] = "0123456789abcdef";
1257	int j = 0, i = 4; /* offset to start of the identifier */
1258
1259	/*
1260	 * The VPD Code Set (encoding)
1261	 *
1262	 * from spc3r23.pdf Section 7.6.3.1 Table 296
1263	 */
1264	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1265	switch (vpd->device_identifier_code_set) {
1266	case 0x01: /* Binary */
1267		vpd->device_identifier[j++] =
1268				hex_str[vpd->device_identifier_type];
1269		while (i < (4 + page_83[3])) {
1270			vpd->device_identifier[j++] =
1271				hex_str[(page_83[i] & 0xf0) >> 4];
1272			vpd->device_identifier[j++] =
1273				hex_str[page_83[i] & 0x0f];
1274			i++;
1275		}
1276		break;
1277	case 0x02: /* ASCII */
1278	case 0x03: /* UTF-8 */
1279		while (i < (4 + page_83[3]))
1280			vpd->device_identifier[j++] = page_83[i++];
1281		break;
1282	default:
1283		break;
1284	}
1285
1286	return transport_dump_vpd_ident(vpd, NULL, 0);
1287}
1288EXPORT_SYMBOL(transport_set_vpd_ident);
1289
1290static sense_reason_t
1291target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
1292			       unsigned int size)
1293{
1294	u32 mtl;
1295
1296	if (!cmd->se_tfo->max_data_sg_nents)
1297		return TCM_NO_SENSE;
1298	/*
 1299	 * Check if se_cmd->data_length exceeds the fabric enforced maximum
 1300	 * transfer length (max_data_sg_nents single PAGE_SIZE SGL entries per
 1301	 * I/O descriptor).  If so, set SCF_UNDERFLOW_BIT + residual_count and
 1302	 * reduce the original cmd->data_length to that maximum length.
1303	 */
1304	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
1305	if (cmd->data_length > mtl) {
1306		/*
1307		 * If an existing CDB overflow is present, calculate new residual
1308		 * based on CDB size minus fabric maximum transfer length.
1309		 *
1310		 * If an existing CDB underflow is present, calculate new residual
1311		 * based on original cmd->data_length minus fabric maximum transfer
1312		 * length.
1313		 *
1314		 * Otherwise, set the underflow residual based on cmd->data_length
1315		 * minus fabric maximum transfer length.
1316		 */
1317		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1318			cmd->residual_count = (size - mtl);
1319		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1320			u32 orig_dl = size + cmd->residual_count;
1321			cmd->residual_count = (orig_dl - mtl);
1322		} else {
1323			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1324			cmd->residual_count = (cmd->data_length - mtl);
1325		}
1326		cmd->data_length = mtl;
1327		/*
1328		 * Reset sbc_check_prot() calculated protection payload
1329		 * length based upon the new smaller MTL.
1330		 */
1331		if (cmd->prot_length) {
1332			u32 sectors = (mtl / dev->dev_attrib.block_size);
1333			cmd->prot_length = dev->prot_length * sectors;
1334		}
1335	}
1336	return TCM_NO_SENSE;
1337}
1338
1339/**
1340 * target_cmd_size_check - Check whether there will be a residual.
1341 * @cmd: SCSI command.
1342 * @size: Data buffer size derived from CDB. The data buffer size provided by
1343 *   the SCSI transport driver is available in @cmd->data_length.
1344 *
1345 * Compare the data buffer size from the CDB with the data buffer limit from the transport
1346 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
1347 *
1348 * Note: target drivers set @cmd->data_length by calling __target_init_cmd().
1349 *
1350 * Return: TCM_NO_SENSE
1351 */
1352sense_reason_t
1353target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1354{
1355	struct se_device *dev = cmd->se_dev;
1356
1357	if (cmd->unknown_data_length) {
1358		cmd->data_length = size;
1359	} else if (size != cmd->data_length) {
1360		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
1361			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
1362			" 0x%02x\n", cmd->se_tfo->fabric_name,
1363				cmd->data_length, size, cmd->t_task_cdb[0]);
1364		/*
1365		 * For READ command for the overflow case keep the existing
1366		 * fabric provided ->data_length. Otherwise for the underflow
1367		 * case, reset ->data_length to the smaller SCSI expected data
1368		 * transfer length.
1369		 */
1370		if (size > cmd->data_length) {
1371			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1372			cmd->residual_count = (size - cmd->data_length);
1373		} else {
1374			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1375			cmd->residual_count = (cmd->data_length - size);
1376			/*
1377			 * Do not truncate ->data_length for WRITE command to
1378			 * dump all payload
1379			 */
1380			if (cmd->data_direction == DMA_FROM_DEVICE) {
1381				cmd->data_length = size;
1382			}
1383		}
1384
1385		if (cmd->data_direction == DMA_TO_DEVICE) {
1386			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1387				pr_err_ratelimited("Rejecting underflow/overflow"
1388						   " for WRITE data CDB\n");
1389				return TCM_INVALID_FIELD_IN_COMMAND_IU;
1390			}
1391			/*
1392			 * Some fabric drivers like iscsi-target still expect to
1393			 * always reject overflow writes.  Reject this case until
1394			 * full fabric driver level support for overflow writes
1395			 * is introduced tree-wide.
1396			 */
1397			if (size > cmd->data_length) {
1398				pr_err_ratelimited("Rejecting overflow for"
1399						   " WRITE control CDB\n");
1400				return TCM_INVALID_CDB_FIELD;
1401			}
1402		}
1403	}
1404
1405	return target_check_max_data_sg_nents(cmd, dev, size);
1406
1407}
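/*
 * Worked example (illustrative) for target_cmd_size_check() above: the
 * fabric reported cmd->data_length = 512 for a READ while the CDB works out
 * to @size = 1024.  Since size > data_length this is an overflow, so
 * SCF_OVERFLOW_BIT is set, residual_count = 1024 - 512 = 512, and the
 * fabric-provided data_length of 512 is kept.  In the underflow case
 * (@size = 256, data_length = 512) SCF_UNDERFLOW_BIT is set,
 * residual_count = 256, and for DMA_FROM_DEVICE data_length is reduced to
 * the smaller expected transfer length of 256.
 */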
1408
1409/*
1410 * Used by fabric modules containing a local struct se_cmd within their
1411 * fabric dependent per I/O descriptor.
1412 *
1413 * Preserves the value of @cmd->tag.
1414 */
1415void __target_init_cmd(
1416	struct se_cmd *cmd,
1417	const struct target_core_fabric_ops *tfo,
1418	struct se_session *se_sess,
1419	u32 data_length,
1420	int data_direction,
1421	int task_attr,
1422	unsigned char *sense_buffer, u64 unpacked_lun)
1423{
1424	INIT_LIST_HEAD(&cmd->se_delayed_node);
1425	INIT_LIST_HEAD(&cmd->se_qf_node);
1426	INIT_LIST_HEAD(&cmd->state_list);
1427	init_completion(&cmd->t_transport_stop_comp);
1428	cmd->free_compl = NULL;
1429	cmd->abrt_compl = NULL;
1430	spin_lock_init(&cmd->t_state_lock);
1431	INIT_WORK(&cmd->work, NULL);
1432	kref_init(&cmd->cmd_kref);
1433
1434	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1435	cmd->se_tfo = tfo;
1436	cmd->se_sess = se_sess;
1437	cmd->data_length = data_length;
1438	cmd->data_direction = data_direction;
1439	cmd->sam_task_attr = task_attr;
1440	cmd->sense_buffer = sense_buffer;
1441	cmd->orig_fe_lun = unpacked_lun;
1442
1443	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
1444		cmd->cpuid = raw_smp_processor_id();
1445
1446	cmd->state_active = false;
1447}
1448EXPORT_SYMBOL(__target_init_cmd);
1449
1450static sense_reason_t
1451transport_check_alloc_task_attr(struct se_cmd *cmd)
1452{
1453	struct se_device *dev = cmd->se_dev;
1454
1455	/*
1456	 * Check if SAM Task Attribute emulation is enabled for this
1457	 * struct se_device storage object
1458	 */
1459	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1460		return 0;
1461
1462	if (cmd->sam_task_attr == TCM_ACA_TAG) {
1463		pr_debug("SAM Task Attribute ACA"
1464			" emulation is not supported\n");
1465		return TCM_INVALID_CDB_FIELD;
1466	}
1467
1468	return 0;
1469}
1470
1471sense_reason_t
1472target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
1473{
1474	sense_reason_t ret;
1475
1476	/*
1477	 * Ensure that the received CDB is less than the max (252 + 8) bytes
1478	 * for VARIABLE_LENGTH_CMD
1479	 */
1480	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1481		pr_err("Received SCSI CDB with command_size: %d that"
1482			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1483			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1484		ret = TCM_INVALID_CDB_FIELD;
1485		goto err;
1486	}
1487	/*
1488	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1489	 * allocate the additional extended CDB buffer now..  Otherwise
1490	 * setup the pointer from __t_task_cdb to t_task_cdb.
1491	 */
1492	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1493		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
1494		if (!cmd->t_task_cdb) {
1495			pr_err("Unable to allocate cmd->t_task_cdb"
1496				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1497				scsi_command_size(cdb),
1498				(unsigned long)sizeof(cmd->__t_task_cdb));
1499			ret = TCM_OUT_OF_RESOURCES;
1500			goto err;
1501		}
1502	}
1503	/*
 1504	 * Copy the original CDB into cmd->t_task_cdb.
1505	 */
1506	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1507
1508	trace_target_sequencer_start(cmd);
1509	return 0;
1510
1511err:
1512	/*
1513	 * Copy the CDB here to allow trace_target_cmd_complete() to
1514	 * print the cdb to the trace buffers.
1515	 */
1516	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
1517					 (unsigned int)TCM_MAX_COMMAND_SIZE));
1518	return ret;
1519}
1520EXPORT_SYMBOL(target_cmd_init_cdb);
1521
1522sense_reason_t
1523target_cmd_parse_cdb(struct se_cmd *cmd)
1524{
1525	struct se_device *dev = cmd->se_dev;
1526	sense_reason_t ret;
1527
1528	ret = dev->transport->parse_cdb(cmd);
1529	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1530		pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1531				     cmd->se_tfo->fabric_name,
1532				     cmd->se_sess->se_node_acl->initiatorname,
1533				     cmd->t_task_cdb[0]);
1534	if (ret)
1535		return ret;
1536
1537	ret = transport_check_alloc_task_attr(cmd);
1538	if (ret)
1539		return ret;
1540
1541	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1542	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
1543	return 0;
1544}
1545EXPORT_SYMBOL(target_cmd_parse_cdb);
1546
1547/*
1548 * Used by fabric module frontends to queue tasks directly.
1549 * May only be used from process context.
1550 */
1551int transport_handle_cdb_direct(
1552	struct se_cmd *cmd)
1553{
1554	sense_reason_t ret;
1555
1556	might_sleep();
1557
1558	if (!cmd->se_lun) {
1559		dump_stack();
1560		pr_err("cmd->se_lun is NULL\n");
1561		return -EINVAL;
1562	}
1563
1564	/*
1565	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
1566	 * outstanding descriptors are handled correctly during shutdown via
1567	 * transport_wait_for_tasks()
1568	 *
1569	 * Also, we don't take cmd->t_state_lock here as we only expect
1570	 * this to be called for initial descriptor submission.
1571	 */
1572	cmd->t_state = TRANSPORT_NEW_CMD;
1573	cmd->transport_state |= CMD_T_ACTIVE;
1574
1575	/*
1576	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1577	 * so follow TRANSPORT_NEW_CMD processing thread context usage
1578	 * and call transport_generic_request_failure() if necessary..
1579	 */
1580	ret = transport_generic_new_cmd(cmd);
1581	if (ret)
1582		transport_generic_request_failure(cmd, ret);
1583	return 0;
1584}
1585EXPORT_SYMBOL(transport_handle_cdb_direct);
1586
1587sense_reason_t
1588transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1589		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1590{
1591	if (!sgl || !sgl_count)
1592		return 0;
1593
1594	/*
1595	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1596	 * scatterlists already have been set to follow what the fabric
1597	 * passes for the original expected data transfer length.
1598	 */
1599	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1600		pr_warn("Rejecting SCSI DATA overflow for fabric using"
1601			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1602		return TCM_INVALID_CDB_FIELD;
1603	}
1604
1605	cmd->t_data_sg = sgl;
1606	cmd->t_data_nents = sgl_count;
1607	cmd->t_bidi_data_sg = sgl_bidi;
1608	cmd->t_bidi_data_nents = sgl_bidi_count;
1609
1610	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1611	return 0;
1612}
1613
1614/**
1615 * target_init_cmd - initialize se_cmd
1616 * @se_cmd: command descriptor to init
1617 * @se_sess: associated se_sess for endpoint
1618 * @sense: pointer to SCSI sense buffer
1619 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1620 * @data_length: fabric expected data transfer length
1621 * @task_attr: SAM task attribute
1622 * @data_dir: DMA data direction
1623 * @flags: flags for command submission from target_sc_flags_tables
1624 *
1625 * Task tags are supported if the caller has set @se_cmd->tag.
1626 *
1627 * Returns:
1628 *	- less than zero to signal active I/O shutdown failure.
1629 *	- zero on success.
1630 *
1631 * If the fabric driver calls target_stop_session, then it must check the
1632 * return code and handle failures. This will never fail for other drivers,
1633 * and the return code can be ignored.
1634 */
1635int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1636		    unsigned char *sense, u64 unpacked_lun,
1637		    u32 data_length, int task_attr, int data_dir, int flags)
1638{
1639	struct se_portal_group *se_tpg;
1640
1641	se_tpg = se_sess->se_tpg;
1642	BUG_ON(!se_tpg);
1643	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1644
1645	if (flags & TARGET_SCF_USE_CPUID)
1646		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1647	/*
1648	 * Signal bidirectional data payloads to target-core
1649	 */
1650	if (flags & TARGET_SCF_BIDI_OP)
1651		se_cmd->se_cmd_flags |= SCF_BIDI;
1652
1653	if (flags & TARGET_SCF_UNKNOWN_SIZE)
1654		se_cmd->unknown_data_length = 1;
1655	/*
1656	 * Initialize se_cmd for target operation.  From this point
1657	 * exceptions are handled by sending exception status via
1658	 * target_core_fabric_ops->queue_status() callback
1659	 */
1660	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
1661			  data_dir, task_attr, sense, unpacked_lun);
1662
1663	/*
1664	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
1665	 * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
1666	 * kref_put() to happen during fabric packet acknowledgement.
1667	 */
1668	return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1669}
1670EXPORT_SYMBOL_GPL(target_init_cmd);
1671
1672/**
1673 * target_submit_prep - prepare cmd for submission
1674 * @se_cmd: command descriptor to prep
1675 * @cdb: pointer to SCSI CDB
1676 * @sgl: struct scatterlist memory for unidirectional mapping
1677 * @sgl_count: scatterlist count for unidirectional mapping
1678 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1679 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1680 * @sgl_prot: struct scatterlist memory protection information
1681 * @sgl_prot_count: scatterlist count for protection information
1682 * @gfp: gfp allocation type
1683 *
1684 * Returns:
1685 *	- less than zero to signal failure.
1686 *	- zero on success.
1687 *
 1688 * If a failure is returned, LIO will use the caller's queue_status
 1689 * callback to complete the cmd.
1690 */
1691int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
1692		       struct scatterlist *sgl, u32 sgl_count,
1693		       struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1694		       struct scatterlist *sgl_prot, u32 sgl_prot_count,
1695		       gfp_t gfp)
1696{
1697	sense_reason_t rc;
1698
1699	rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
1700	if (rc)
1701		goto send_cc_direct;
1702
1703	/*
1704	 * Locate se_lun pointer and attach it to struct se_cmd
1705	 */
1706	rc = transport_lookup_cmd_lun(se_cmd);
1707	if (rc)
1708		goto send_cc_direct;
1709
1710	rc = target_cmd_parse_cdb(se_cmd);
1711	if (rc != 0)
1712		goto generic_fail;
1713
1714	/*
1715	 * Save pointers for SGLs containing protection information,
1716	 * if present.
1717	 */
1718	if (sgl_prot_count) {
1719		se_cmd->t_prot_sg = sgl_prot;
1720		se_cmd->t_prot_nents = sgl_prot_count;
1721		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1722	}
1723
1724	/*
1725	 * When a non zero sgl_count has been passed perform SGL passthrough
1726	 * mapping for pre-allocated fabric memory instead of having target
1727	 * core perform an internal SGL allocation..
1728	 */
1729	if (sgl_count != 0) {
1730		BUG_ON(!sgl);
1731
1732		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1733				sgl_bidi, sgl_bidi_count);
1734		if (rc != 0)
1735			goto generic_fail;
1736	}
1737
1738	return 0;
1739
1740send_cc_direct:
1741	transport_send_check_condition_and_sense(se_cmd, rc, 0);
1742	target_put_sess_cmd(se_cmd);
1743	return -EIO;
1744
1745generic_fail:
1746	transport_generic_request_failure(se_cmd, rc);
1747	return -EIO;
1748}
1749EXPORT_SYMBOL_GPL(target_submit_prep);
1750
1751/**
1752 * target_submit - perform final initialization and submit cmd to LIO core
1753 * @se_cmd: command descriptor to submit
1754 *
1755 * target_submit_prep must have been called on the cmd, and this must be
1756 * called from process context.
1757 */
1758void target_submit(struct se_cmd *se_cmd)
1759{
1760	struct scatterlist *sgl = se_cmd->t_data_sg;
1761	unsigned char *buf = NULL;
1762
1763	might_sleep();
1764
1765	if (se_cmd->t_data_nents != 0) {
1766		BUG_ON(!sgl);
1767		/*
 1768		 * A work-around for tcm_loop, as some userspace code via
 1769		 * scsi-generic does not memset its associated read buffers,
1770		 * so go ahead and do that here for type non-data CDBs.  Also
1771		 * note that this is currently guaranteed to be a single SGL
1772		 * for this case by target core in target_setup_cmd_from_cdb()
1773		 * -> transport_generic_cmd_sequencer().
1774		 */
1775		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1776		     se_cmd->data_direction == DMA_FROM_DEVICE) {
1777			if (sgl)
1778				buf = kmap(sg_page(sgl)) + sgl->offset;
1779
1780			if (buf) {
1781				memset(buf, 0, sgl->length);
1782				kunmap(sg_page(sgl));
1783			}
1784		}
1785
1786	}
1787
1788	/*
1789	 * Check if we need to delay processing because of ALUA
1790	 * Active/NonOptimized primary access state.
1791	 */
1792	core_alua_check_nonop_delay(se_cmd);
1793
1794	transport_handle_cdb_direct(se_cmd);
1795}
1796EXPORT_SYMBOL_GPL(target_submit);
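/*
 * Illustrative sketch of the split submission flow for a fabric driver that
 * pre-allocates its own SGLs (not taken from an in-tree driver; sense_buf,
 * cdb, sgl and the other locals are hypothetical, and error handling is
 * reduced to the minimum):
 *
 *	if (target_init_cmd(se_cmd, se_sess, sense_buf, unpacked_lun,
 *			    data_length, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			    TARGET_SCF_ACK_KREF))
 *		return;		(session is being shut down)
 *
 *	if (target_submit_prep(se_cmd, cdb, sgl, sgl_count, NULL, 0,
 *			       NULL, 0, GFP_KERNEL))
 *		return;		(cmd was already completed via queue_status)
 *
 *	target_submit(se_cmd);
 */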
1797
1798/**
1799 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1800 *
1801 * @se_cmd: command descriptor to submit
1802 * @se_sess: associated se_sess for endpoint
1803 * @cdb: pointer to SCSI CDB
1804 * @sense: pointer to SCSI sense buffer
1805 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1806 * @data_length: fabric expected data transfer length
1807 * @task_attr: SAM task attribute
1808 * @data_dir: DMA data direction
1809 * @flags: flags for command submission from target_sc_flags_tables
1810 *
1811 * Task tags are supported if the caller has set @se_cmd->tag.
1812 *
1813 * This may only be called from process context, and also currently
1814 * assumes internal allocation of fabric payload buffer by target-core.
1815 *
1816 * It also assumes internal target core SGL memory allocation.
1817 *
1818 * This function must only be used by drivers that do their own
1819 * sync during shutdown and do not use target_stop_session. If there
1820 * is a failure this function will call into the fabric driver's
1821 * queue_status with a CHECK_CONDITION.
1822 */
1823void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1824		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1825		u32 data_length, int task_attr, int data_dir, int flags)
1826{
1827	int rc;
1828
1829	rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
1830			     task_attr, data_dir, flags);
1831	WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n");
1832	if (rc)
1833		return;
1834
1835	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
1836			       GFP_KERNEL))
1837		return;
1838
1839	target_submit(se_cmd);
1840}
1841EXPORT_SYMBOL(target_submit_cmd);
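/*
 * Minimal sketch of the one-shot helper above, for drivers that let target
 * core allocate the data buffer (the locals below are hypothetical):
 *
 *	se_cmd->tag = request_tag;
 *	target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
 *			  data_length, TCM_SIMPLE_TAG, DMA_TO_DEVICE,
 *			  TARGET_SCF_ACK_KREF);
 */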
1842
1843
1844static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
1845{
1846	struct se_dev_plug *se_plug;
1847
1848	if (!se_dev->transport->plug_device)
1849		return NULL;
1850
1851	se_plug = se_dev->transport->plug_device(se_dev);
1852	if (!se_plug)
1853		return NULL;
1854
1855	se_plug->se_dev = se_dev;
1856	/*
1857	 * We have a ref to the lun at this point, but the cmds could
1858	 * complete before we unplug, so grab a ref to the se_device so we
1859	 * can call back into the backend.
1860	 */
1861	config_group_get(&se_dev->dev_group);
1862	return se_plug;
1863}
1864
1865static void target_unplug_device(struct se_dev_plug *se_plug)
1866{
1867	struct se_device *se_dev = se_plug->se_dev;
1868
1869	se_dev->transport->unplug_device(se_plug);
1870	config_group_put(&se_dev->dev_group);
1871}
1872
1873void target_queued_submit_work(struct work_struct *work)
1874{
1875	struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
1876	struct se_cmd *se_cmd, *next_cmd;
1877	struct se_dev_plug *se_plug = NULL;
1878	struct se_device *se_dev = NULL;
1879	struct llist_node *cmd_list;
1880
1881	cmd_list = llist_del_all(&sq->cmd_list);
1882	if (!cmd_list)
1883		/* Previous call took what we were queued to submit */
1884		return;
1885
1886	cmd_list = llist_reverse_order(cmd_list);
1887	llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
1888		if (!se_dev) {
1889			se_dev = se_cmd->se_dev;
1890			se_plug = target_plug_device(se_dev);
1891		}
1892
1893		target_submit(se_cmd);
1894	}
1895
1896	if (se_plug)
1897		target_unplug_device(se_plug);
1898}
1899
1900/**
1901 * target_queue_submission - queue the cmd to run on the LIO workqueue
1902 * @se_cmd: command descriptor to submit
1903 */
1904void target_queue_submission(struct se_cmd *se_cmd)
1905{
1906	struct se_device *se_dev = se_cmd->se_dev;
1907	int cpu = se_cmd->cpuid;
1908	struct se_cmd_queue *sq;
1909
1910	sq = &se_dev->queues[cpu].sq;
1911	llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
1912	queue_work_on(cpu, target_submission_wq, &sq->work);
1913}
1914EXPORT_SYMBOL_GPL(target_queue_submission);
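/*
 * A fabric driver that prefers to run the final submission step from the
 * LIO submission workqueue (for example to keep its own receive path short)
 * simply replaces the direct call with the queued variant; minimal sketch,
 * assuming target_init_cmd()/target_submit_prep() have already succeeded:
 *
 *	target_queue_submission(se_cmd);	(instead of target_submit(se_cmd))
 */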
1915
1916static void target_complete_tmr_failure(struct work_struct *work)
1917{
1918	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1919
1920	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1921	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1922
1923	transport_lun_remove_cmd(se_cmd);
1924	transport_cmd_check_stop_to_fabric(se_cmd);
1925}
1926
1927/**
1928 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1929 *                     for TMR CDBs
1930 *
1931 * @se_cmd: command descriptor to submit
1932 * @se_sess: associated se_sess for endpoint
1933 * @sense: pointer to SCSI sense buffer
1934 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1935 * @fabric_tmr_ptr: fabric context for TMR req
1936 * @tm_type: Type of TM request
1937 * @gfp: gfp type for caller
1938 * @tag: referenced task tag for TMR_ABORT_TASK
1939 * @flags: submit cmd flags
1940 *
1941 * Callable from all contexts.
1942 **/
1943
1944int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1945		unsigned char *sense, u64 unpacked_lun,
1946		void *fabric_tmr_ptr, unsigned char tm_type,
1947		gfp_t gfp, u64 tag, int flags)
1948{
1949	struct se_portal_group *se_tpg;
1950	int ret;
1951
1952	se_tpg = se_sess->se_tpg;
1953	BUG_ON(!se_tpg);
1954
1955	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1956			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
1957	/*
1958	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1959	 * allocation failure.
1960	 */
1961	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1962	if (ret < 0)
1963		return -ENOMEM;
1964
1965	if (tm_type == TMR_ABORT_TASK)
1966		se_cmd->se_tmr_req->ref_task_tag = tag;
1967
1968	/* See target_submit_cmd for commentary */
1969	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1970	if (ret) {
1971		core_tmr_release_req(se_cmd->se_tmr_req);
1972		return ret;
1973	}
1974
1975	ret = transport_lookup_tmr_lun(se_cmd);
1976	if (ret)
1977		goto failure;
1978
1979	transport_generic_handle_tmr(se_cmd);
1980	return 0;
1981
1982	/*
1983	 * For callback during failure handling, push this work off
1984	 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1985	 */
1986failure:
1987	INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1988	schedule_work(&se_cmd->work);
1989	return 0;
1990}
1991EXPORT_SYMBOL(target_submit_tmr);
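/*
 * Illustrative sketch of an ABORT TASK submission (tmr_cmd, fabric_tmr and
 * aborted_tag are hypothetical fabric driver state; passing a NULL sense
 * buffer is common for TMRs):
 *
 *	if (target_submit_tmr(tmr_cmd, se_sess, NULL, unpacked_lun,
 *			      fabric_tmr, TMR_ABORT_TASK, GFP_KERNEL,
 *			      aborted_tag, TARGET_SCF_ACK_KREF) < 0)
 *		(fail the TMR in the fabric driver; no TMR request was set up)
 */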
1992
1993/*
1994 * Handle SAM-esque emulation for generic transport request failures.
1995 */
1996void transport_generic_request_failure(struct se_cmd *cmd,
1997		sense_reason_t sense_reason)
1998{
1999	int ret = 0, post_ret;
2000
2001	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
2002		 sense_reason);
2003	target_show_cmd("-----[ ", cmd);
2004
2005	/*
2006	 * For SAM Task Attribute emulation for failed struct se_cmd
2007	 */
2008	transport_complete_task_attr(cmd);
2009
2010	if (cmd->transport_complete_callback)
2011		cmd->transport_complete_callback(cmd, false, &post_ret);
2012
2013	if (cmd->transport_state & CMD_T_ABORTED) {
2014		INIT_WORK(&cmd->work, target_abort_work);
2015		queue_work(target_completion_wq, &cmd->work);
2016		return;
2017	}
2018
2019	switch (sense_reason) {
2020	case TCM_NON_EXISTENT_LUN:
2021	case TCM_UNSUPPORTED_SCSI_OPCODE:
2022	case TCM_INVALID_CDB_FIELD:
2023	case TCM_INVALID_PARAMETER_LIST:
2024	case TCM_PARAMETER_LIST_LENGTH_ERROR:
2025	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
2026	case TCM_UNKNOWN_MODE_PAGE:
2027	case TCM_WRITE_PROTECTED:
2028	case TCM_ADDRESS_OUT_OF_RANGE:
2029	case TCM_CHECK_CONDITION_ABORT_CMD:
2030	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
2031	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
2032	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
2033	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
2034	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
2035	case TCM_TOO_MANY_TARGET_DESCS:
2036	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
2037	case TCM_TOO_MANY_SEGMENT_DESCS:
2038	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
2039	case TCM_INVALID_FIELD_IN_COMMAND_IU:
2040	case TCM_ALUA_TG_PT_STANDBY:
2041	case TCM_ALUA_TG_PT_UNAVAILABLE:
2042	case TCM_ALUA_STATE_TRANSITION:
2043	case TCM_ALUA_OFFLINE:
2044		break;
2045	case TCM_OUT_OF_RESOURCES:
2046		cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
2047		goto queue_status;
2048	case TCM_LUN_BUSY:
2049		cmd->scsi_status = SAM_STAT_BUSY;
2050		goto queue_status;
2051	case TCM_RESERVATION_CONFLICT:
2052		/*
2053		 * No SENSE Data payload for this case, set SCSI Status
2054		 * and queue the response to $FABRIC_MOD.
2055		 *
2056		 * Uses linux/include/scsi/scsi.h SAM status codes defs
2057		 */
2058		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2059		/*
2060		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2061		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2062		 * CONFLICT STATUS.
2063		 *
2064		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2065		 */
2066		if (cmd->se_sess &&
2067		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
2068					== TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
2069			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
2070					       cmd->orig_fe_lun, 0x2C,
2071					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2072		}
2073
2074		goto queue_status;
2075	default:
2076		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
2077			cmd->t_task_cdb[0], sense_reason);
2078		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2079		break;
2080	}
2081
2082	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
2083	if (ret)
2084		goto queue_full;
2085
2086check_stop:
2087	transport_lun_remove_cmd(cmd);
2088	transport_cmd_check_stop_to_fabric(cmd);
2089	return;
2090
2091queue_status:
2092	trace_target_cmd_complete(cmd);
2093	ret = cmd->se_tfo->queue_status(cmd);
2094	if (!ret)
2095		goto check_stop;
2096queue_full:
2097	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2098}
2099EXPORT_SYMBOL(transport_generic_request_failure);
2100
2101void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
2102{
2103	sense_reason_t ret;
2104
2105	if (!cmd->execute_cmd) {
2106		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2107		goto err;
2108	}
2109	if (do_checks) {
2110		/*
2111		 * Check for an existing UNIT ATTENTION condition after
2112		 * target_handle_task_attr() has done SAM task attr
2113		 * checking, and may already have deferred execution
2114		 * out to target_restart_delayed_cmds() context.
2115		 */
2116		ret = target_scsi3_ua_check(cmd);
2117		if (ret)
2118			goto err;
2119
2120		ret = target_alua_state_check(cmd);
2121		if (ret)
2122			goto err;
2123
2124		ret = target_check_reservation(cmd);
2125		if (ret) {
2126			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2127			goto err;
2128		}
2129	}
2130
2131	ret = cmd->execute_cmd(cmd);
2132	if (!ret)
2133		return;
2134err:
2135	spin_lock_irq(&cmd->t_state_lock);
2136	cmd->transport_state &= ~CMD_T_SENT;
2137	spin_unlock_irq(&cmd->t_state_lock);
2138
2139	transport_generic_request_failure(cmd, ret);
2140}
2141
2142static int target_write_prot_action(struct se_cmd *cmd)
2143{
2144	u32 sectors;
2145	/*
2146	 * Perform WRITE_INSERT of PI using software emulation when backend
2147	 * device has PI enabled, if the transport has not already generated
2148	 * PI using hardware WRITE_INSERT offload.
2149	 */
2150	switch (cmd->prot_op) {
2151	case TARGET_PROT_DOUT_INSERT:
2152		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
2153			sbc_dif_generate(cmd);
2154		break;
2155	case TARGET_PROT_DOUT_STRIP:
2156		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
2157			break;
2158
2159		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
2160		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2161					     sectors, 0, cmd->t_prot_sg, 0);
2162		if (unlikely(cmd->pi_err)) {
2163			spin_lock_irq(&cmd->t_state_lock);
2164			cmd->transport_state &= ~CMD_T_SENT;
2165			spin_unlock_irq(&cmd->t_state_lock);
2166			transport_generic_request_failure(cmd, cmd->pi_err);
2167			return -1;
2168		}
2169		break;
2170	default:
2171		break;
2172	}
2173
2174	return 0;
2175}
2176
2177static bool target_handle_task_attr(struct se_cmd *cmd)
2178{
2179	struct se_device *dev = cmd->se_dev;
2180
2181	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2182		return false;
2183
2184	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
2185
2186	/*
2187	 * Check for HEAD_OF_QUEUE, which is never delayed, so that the passed
2188	 * struct se_cmd can run ahead of the tasks queued on the device.
2189	 */
2190	switch (cmd->sam_task_attr) {
2191	case TCM_HEAD_TAG:
2192		atomic_inc_mb(&dev->non_ordered);
2193		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
2194			 cmd->t_task_cdb[0]);
2195		return false;
2196	case TCM_ORDERED_TAG:
2197		atomic_inc_mb(&dev->delayed_cmd_count);
2198
2199		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
2200			 cmd->t_task_cdb[0]);
2201		break;
2202	default:
2203		/*
2204		 * For SIMPLE and UNTAGGED Task Attribute commands
2205		 */
2206		atomic_inc_mb(&dev->non_ordered);
2207
2208		if (atomic_read(&dev->delayed_cmd_count) == 0)
2209			return false;
2210		break;
2211	}
2212
2213	if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
2214		atomic_inc_mb(&dev->delayed_cmd_count);
2215		/*
2216		 * We will account for this when we dequeue from the delayed
2217		 * list.
2218		 */
2219		atomic_dec_mb(&dev->non_ordered);
2220	}
2221
2222	spin_lock_irq(&cmd->t_state_lock);
2223	cmd->transport_state &= ~CMD_T_SENT;
2224	spin_unlock_irq(&cmd->t_state_lock);
2225
2226	spin_lock(&dev->delayed_cmd_lock);
2227	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
2228	spin_unlock(&dev->delayed_cmd_lock);
2229
2230	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
2231		cmd->t_task_cdb[0], cmd->sam_task_attr);
2232	/*
2233	 * We may have no non ordered cmds when this function started or we
2234	 * could have raced with the last simple/head cmd completing, so kick
2235	 * the delayed handler here.
2236	 */
2237	schedule_work(&dev->delayed_cmd_work);
2238	return true;
2239}
2240
2241void target_execute_cmd(struct se_cmd *cmd)
2242{
2243	/*
2244	 * Determine if frontend context caller is requesting the stopping of
2245	 * this command for frontend exceptions.
2246	 *
2247	 * If the received CDB has already been aborted stop processing it here.
2248	 */
2249	if (target_cmd_interrupted(cmd))
2250		return;
2251
2252	spin_lock_irq(&cmd->t_state_lock);
2253	cmd->t_state = TRANSPORT_PROCESSING;
2254	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
2255	spin_unlock_irq(&cmd->t_state_lock);
2256
2257	if (target_write_prot_action(cmd))
2258		return;
2259
2260	if (target_handle_task_attr(cmd))
2261		return;
2262
2263	__target_execute_cmd(cmd, true);
2264}
2265EXPORT_SYMBOL(target_execute_cmd);
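/*
 * Fabric drivers call target_execute_cmd() once all WRITE payload requested
 * via ->write_pending() has been received; minimal sketch with a
 * hypothetical completion handler:
 *
 *	static void my_fabric_write_data_done(struct se_cmd *se_cmd)
 *	{
 *		target_execute_cmd(se_cmd);
 *	}
 */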
2266
2267/*
2268 * Process all commands up to the last received ORDERED task attribute which
2269 * requires another blocking boundary
2270 */
2271void target_do_delayed_work(struct work_struct *work)
2272{
2273	struct se_device *dev = container_of(work, struct se_device,
2274					     delayed_cmd_work);
2275
2276	spin_lock(&dev->delayed_cmd_lock);
2277	while (!dev->ordered_sync_in_progress) {
2278		struct se_cmd *cmd;
2279
2280		if (list_empty(&dev->delayed_cmd_list))
2281			break;
2282
2283		cmd = list_entry(dev->delayed_cmd_list.next,
2284				 struct se_cmd, se_delayed_node);
2285
2286		if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2287			/*
2288			 * Check if we started with:
2289			 * [ordered] [simple] [ordered]
2290			 * and we are now at the last ordered so we have to wait
2291			 * for the simple cmd.
2292			 */
2293			if (atomic_read(&dev->non_ordered) > 0)
2294				break;
2295
2296			dev->ordered_sync_in_progress = true;
2297		}
2298
2299		list_del(&cmd->se_delayed_node);
2300		atomic_dec_mb(&dev->delayed_cmd_count);
2301		spin_unlock(&dev->delayed_cmd_lock);
2302
2303		if (cmd->sam_task_attr != TCM_ORDERED_TAG)
2304			atomic_inc_mb(&dev->non_ordered);
2305
2306		cmd->transport_state |= CMD_T_SENT;
2307
2308		__target_execute_cmd(cmd, true);
2309
2310		spin_lock(&dev->delayed_cmd_lock);
2311	}
2312	spin_unlock(&dev->delayed_cmd_lock);
2313}
2314
2315/*
2316 * Called from I/O completion to determine which dormant/delayed
2317 * and ordered cmds need to have their tasks added to the execution queue.
2318 */
2319static void transport_complete_task_attr(struct se_cmd *cmd)
2320{
2321	struct se_device *dev = cmd->se_dev;
2322
2323	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2324		return;
2325
2326	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2327		goto restart;
2328
2329	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2330		atomic_dec_mb(&dev->non_ordered);
2331		dev->dev_cur_ordered_id++;
2332	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2333		atomic_dec_mb(&dev->non_ordered);
2334		dev->dev_cur_ordered_id++;
2335		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2336			 dev->dev_cur_ordered_id);
2337	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2338		spin_lock(&dev->delayed_cmd_lock);
2339		dev->ordered_sync_in_progress = false;
2340		spin_unlock(&dev->delayed_cmd_lock);
2341
2342		dev->dev_cur_ordered_id++;
2343		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2344			 dev->dev_cur_ordered_id);
2345	}
2346	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2347
2348restart:
2349	if (atomic_read(&dev->delayed_cmd_count) > 0)
2350		schedule_work(&dev->delayed_cmd_work);
2351}
2352
2353static void transport_complete_qf(struct se_cmd *cmd)
2354{
2355	int ret = 0;
2356
2357	transport_complete_task_attr(cmd);
2358	/*
2359	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
2360	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
2361	 * the same callbacks should not be retried.  Return CHECK_CONDITION
2362	 * if a scsi_status is not already set.
2363	 *
2364	 * If a fabric driver ->queue_status() has returned non-zero, always
2365	 * keep retrying no matter what.
2366	 */
2367	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2368		if (cmd->scsi_status)
2369			goto queue_status;
2370
2371		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2372		goto queue_status;
2373	}
2374
2375	/*
2376	 * Check if we need to send a sense buffer from
2377	 * the struct se_cmd in question. We do NOT want
2378	 * to take this path if the IO has been marked as
2379	 * needing to be treated like a "normal read". This
2380	 * is the case if it's a tape read, and either the
2381	 * FM, EOM, or ILI bits are set, but there is no
2382	 * sense data.
2383	 */
2384	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2385	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2386		goto queue_status;
2387
2388	switch (cmd->data_direction) {
2389	case DMA_FROM_DEVICE:
2390		/* queue status if not treating this as a normal read */
2391		if (cmd->scsi_status &&
2392		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2393			goto queue_status;
2394
2395		trace_target_cmd_complete(cmd);
2396		ret = cmd->se_tfo->queue_data_in(cmd);
2397		break;
2398	case DMA_TO_DEVICE:
2399		if (cmd->se_cmd_flags & SCF_BIDI) {
2400			ret = cmd->se_tfo->queue_data_in(cmd);
2401			break;
2402		}
2403		fallthrough;
2404	case DMA_NONE:
2405queue_status:
2406		trace_target_cmd_complete(cmd);
2407		ret = cmd->se_tfo->queue_status(cmd);
2408		break;
2409	default:
2410		break;
2411	}
2412
2413	if (ret < 0) {
2414		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2415		return;
2416	}
2417	transport_lun_remove_cmd(cmd);
2418	transport_cmd_check_stop_to_fabric(cmd);
2419}
2420
2421static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2422					int err, bool write_pending)
2423{
2424	/*
2425	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2426	 * ->queue_data_in() callbacks from new process context.
2427	 *
2428	 * Otherwise for other errors, transport_complete_qf() will send
2429	 * CHECK_CONDITION via ->queue_status() instead of attempting to
2430	 * retry associated fabric driver data-transfer callbacks.
2431	 */
2432	if (err == -EAGAIN || err == -ENOMEM) {
2433		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2434						 TRANSPORT_COMPLETE_QF_OK;
2435	} else {
2436		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2437		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2438	}
2439
2440	spin_lock_irq(&dev->qf_cmd_lock);
2441	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2442	atomic_inc_mb(&dev->dev_qf_count);
2443	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2444
2445	schedule_work(&cmd->se_dev->qf_work_queue);
2446}
2447
2448static bool target_read_prot_action(struct se_cmd *cmd)
2449{
2450	switch (cmd->prot_op) {
2451	case TARGET_PROT_DIN_STRIP:
2452		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2453			u32 sectors = cmd->data_length >>
2454				  ilog2(cmd->se_dev->dev_attrib.block_size);
2455
2456			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2457						     sectors, 0, cmd->t_prot_sg,
2458						     0);
2459			if (cmd->pi_err)
2460				return true;
2461		}
2462		break;
2463	case TARGET_PROT_DIN_INSERT:
2464		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2465			break;
2466
2467		sbc_dif_generate(cmd);
2468		break;
2469	default:
2470		break;
2471	}
2472
2473	return false;
2474}
2475
2476static void target_complete_ok_work(struct work_struct *work)
2477{
2478	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2479	int ret;
2480
2481	/*
2482	 * Check if we need to move delayed/dormant tasks from cmds on the
2483	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2484	 * Attribute.
2485	 */
2486	transport_complete_task_attr(cmd);
2487
2488	/*
2489	 * Check to schedule QUEUE_FULL work, or execute an existing
2490	 * cmd->transport_qf_callback()
2491	 */
2492	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2493		schedule_work(&cmd->se_dev->qf_work_queue);
2494
2495	/*
2496	 * Check if we need to send a sense buffer from
2497	 * the struct se_cmd in question. We do NOT want
2498	 * to take this path if the IO has been marked as
2499	 * needing to be treated like a "normal read". This
2500	 * is the case if it's a tape read, and either the
2501	 * FM, EOM, or ILI bits are set, but there is no
2502	 * sense data.
2503	 */
2504	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2505	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2506		WARN_ON(!cmd->scsi_status);
2507		ret = transport_send_check_condition_and_sense(
2508					cmd, 0, 1);
2509		if (ret)
2510			goto queue_full;
2511
2512		transport_lun_remove_cmd(cmd);
2513		transport_cmd_check_stop_to_fabric(cmd);
2514		return;
2515	}
2516	/*
2517	 * Check for a callback, used by, amongst other things,
2518	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2519	 */
2520	if (cmd->transport_complete_callback) {
2521		sense_reason_t rc;
2522		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2523		bool zero_dl = !(cmd->data_length);
2524		int post_ret = 0;
2525
2526		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2527		if (!rc && !post_ret) {
2528			if (caw && zero_dl)
2529				goto queue_rsp;
2530
2531			return;
2532		} else if (rc) {
2533			ret = transport_send_check_condition_and_sense(cmd,
2534						rc, 0);
2535			if (ret)
2536				goto queue_full;
2537
2538			transport_lun_remove_cmd(cmd);
2539			transport_cmd_check_stop_to_fabric(cmd);
2540			return;
2541		}
2542	}
2543
2544queue_rsp:
2545	switch (cmd->data_direction) {
2546	case DMA_FROM_DEVICE:
2547		/*
2548		 * if this is a READ-type IO, but SCSI status
2549		 * is set, then skip returning data and just
2550		 * return the status -- unless this IO is marked
2551		 * as needing to be treated as a normal read,
2552		 * in which case we want to go ahead and return
2553		 * the data. This happens, for example, for tape
2554		 * reads with the FM, EOM, or ILI bits set, with
2555		 * no sense data.
2556		 */
2557		if (cmd->scsi_status &&
2558		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2559			goto queue_status;
2560
2561		atomic_long_add(cmd->data_length,
2562				&cmd->se_lun->lun_stats.tx_data_octets);
2563		/*
2564		 * Perform READ_STRIP of PI using software emulation when
2565		 * backend had PI enabled, if the transport will not be
2566		 * performing hardware READ_STRIP offload.
2567		 */
2568		if (target_read_prot_action(cmd)) {
2569			ret = transport_send_check_condition_and_sense(cmd,
2570						cmd->pi_err, 0);
2571			if (ret)
2572				goto queue_full;
2573
2574			transport_lun_remove_cmd(cmd);
2575			transport_cmd_check_stop_to_fabric(cmd);
2576			return;
2577		}
2578
2579		trace_target_cmd_complete(cmd);
2580		ret = cmd->se_tfo->queue_data_in(cmd);
2581		if (ret)
2582			goto queue_full;
2583		break;
2584	case DMA_TO_DEVICE:
2585		atomic_long_add(cmd->data_length,
2586				&cmd->se_lun->lun_stats.rx_data_octets);
2587		/*
2588		 * Check if we need to send READ payload for BIDI-COMMAND
2589		 */
2590		if (cmd->se_cmd_flags & SCF_BIDI) {
2591			atomic_long_add(cmd->data_length,
2592					&cmd->se_lun->lun_stats.tx_data_octets);
2593			ret = cmd->se_tfo->queue_data_in(cmd);
2594			if (ret)
2595				goto queue_full;
2596			break;
2597		}
2598		fallthrough;
2599	case DMA_NONE:
2600queue_status:
2601		trace_target_cmd_complete(cmd);
2602		ret = cmd->se_tfo->queue_status(cmd);
2603		if (ret)
2604			goto queue_full;
2605		break;
2606	default:
2607		break;
2608	}
2609
2610	transport_lun_remove_cmd(cmd);
2611	transport_cmd_check_stop_to_fabric(cmd);
2612	return;
2613
2614queue_full:
2615	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2616		" data_direction: %d\n", cmd, cmd->data_direction);
2617
2618	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2619}
2620
2621void target_free_sgl(struct scatterlist *sgl, int nents)
2622{
2623	sgl_free_n_order(sgl, nents, 0);
2624}
2625EXPORT_SYMBOL(target_free_sgl);
2626
2627static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2628{
2629	/*
2630	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2631	 * emulation, and free + reset pointers if necessary.
2632	 */
2633	if (!cmd->t_data_sg_orig)
2634		return;
2635
2636	kfree(cmd->t_data_sg);
2637	cmd->t_data_sg = cmd->t_data_sg_orig;
2638	cmd->t_data_sg_orig = NULL;
2639	cmd->t_data_nents = cmd->t_data_nents_orig;
2640	cmd->t_data_nents_orig = 0;
2641}
2642
2643static inline void transport_free_pages(struct se_cmd *cmd)
2644{
2645	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2646		target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2647		cmd->t_prot_sg = NULL;
2648		cmd->t_prot_nents = 0;
2649	}
2650
2651	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2652		/*
2653		 * Release special case READ buffer payload required for
2654		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2655		 */
2656		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2657			target_free_sgl(cmd->t_bidi_data_sg,
2658					   cmd->t_bidi_data_nents);
2659			cmd->t_bidi_data_sg = NULL;
2660			cmd->t_bidi_data_nents = 0;
2661		}
2662		transport_reset_sgl_orig(cmd);
2663		return;
2664	}
2665	transport_reset_sgl_orig(cmd);
2666
2667	target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2668	cmd->t_data_sg = NULL;
2669	cmd->t_data_nents = 0;
2670
2671	target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2672	cmd->t_bidi_data_sg = NULL;
2673	cmd->t_bidi_data_nents = 0;
2674}
2675
2676void *transport_kmap_data_sg(struct se_cmd *cmd)
2677{
2678	struct scatterlist *sg = cmd->t_data_sg;
2679	struct page **pages;
2680	int i;
2681
2682	/*
2683	 * We need to take into account a possible offset here for fabrics like
2684	 * tcm_loop that may be using a contiguous buffer from the SCSI midlayer for
2685	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2686	 */
2687	if (!cmd->t_data_nents)
2688		return NULL;
2689
2690	BUG_ON(!sg);
2691	if (cmd->t_data_nents == 1)
2692		return kmap(sg_page(sg)) + sg->offset;
2693
2694	/* >1 page. use vmap */
2695	pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
2696	if (!pages)
2697		return NULL;
2698
2699	/* convert sg[] to pages[] */
2700	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2701		pages[i] = sg_page(sg);
2702	}
2703
2704	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
2705	kfree(pages);
2706	if (!cmd->t_data_vmap)
2707		return NULL;
2708
2709	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2710}
2711EXPORT_SYMBOL(transport_kmap_data_sg);
2712
2713void transport_kunmap_data_sg(struct se_cmd *cmd)
2714{
2715	if (!cmd->t_data_nents) {
2716		return;
2717	} else if (cmd->t_data_nents == 1) {
2718		kunmap(sg_page(cmd->t_data_sg));
2719		return;
2720	}
2721
2722	vunmap(cmd->t_data_vmap);
2723	cmd->t_data_vmap = NULL;
2724}
2725EXPORT_SYMBOL(transport_kunmap_data_sg);
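/*
 * Typical pairing of the two helpers above in a CDB emulation path that
 * needs a linear view of the data buffer (sketch):
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *
 *	if (!buf)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	(parse or fill up to cmd->data_length bytes at buf)
 *	transport_kunmap_data_sg(cmd);
 */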
2726
2727int
2728target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2729		 bool zero_page, bool chainable)
2730{
2731	gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
2732
2733	*sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
2734	return *sgl ? 0 : -ENOMEM;
2735}
2736EXPORT_SYMBOL(target_alloc_sgl);
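/*
 * target_alloc_sgl() pairs with target_free_sgl() above; a caller that needs
 * a private zeroed table would use it roughly as follows (sketch, with
 * hypothetical locals):
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *
 *	if (target_alloc_sgl(&sgl, &nents, length, true, false) < 0)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	(... fill and use sgl ...)
 *	target_free_sgl(sgl, nents);
 */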
2737
2738/*
2739 * Allocate any required resources to execute the command.  For writes we
2740 * might not have the payload yet, so notify the fabric via a call to
2741 * ->write_pending instead. Otherwise place it on the execution queue.
2742 */
2743sense_reason_t
2744transport_generic_new_cmd(struct se_cmd *cmd)
2745{
2746	unsigned long flags;
2747	int ret = 0;
2748	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2749
2750	if (cmd->prot_op != TARGET_PROT_NORMAL &&
2751	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2752		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2753				       cmd->prot_length, true, false);
2754		if (ret < 0)
2755			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2756	}
2757
2758	/*
2759	 * Determine if the TCM fabric module has already allocated physical
2760	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
2761	 * beforehand.
2762	 */
2763	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2764	    cmd->data_length) {
2765
2766		if ((cmd->se_cmd_flags & SCF_BIDI) ||
2767		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2768			u32 bidi_length;
2769
2770			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2771				bidi_length = cmd->t_task_nolb *
2772					      cmd->se_dev->dev_attrib.block_size;
2773			else
2774				bidi_length = cmd->data_length;
2775
2776			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2777					       &cmd->t_bidi_data_nents,
2778					       bidi_length, zero_flag, false);
2779			if (ret < 0)
2780				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2781		}
2782
2783		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2784				       cmd->data_length, zero_flag, false);
2785		if (ret < 0)
2786			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2787	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2788		    cmd->data_length) {
2789		/*
2790		 * Special case for COMPARE_AND_WRITE with fabrics
2791		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2792		 */
2793		u32 caw_length = cmd->t_task_nolb *
2794				 cmd->se_dev->dev_attrib.block_size;
2795
2796		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2797				       &cmd->t_bidi_data_nents,
2798				       caw_length, zero_flag, false);
2799		if (ret < 0)
2800			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2801	}
2802	/*
2803	 * If this command is not a write we can execute it right here;
2804	 * for write buffers we need to notify the fabric driver first
2805	 * and let it call back once the write buffers are ready.
2806	 */
2807	target_add_to_state_list(cmd);
2808	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2809		target_execute_cmd(cmd);
2810		return 0;
2811	}
2812
2813	spin_lock_irqsave(&cmd->t_state_lock, flags);
2814	cmd->t_state = TRANSPORT_WRITE_PENDING;
2815	/*
2816	 * Determine if frontend context caller is requesting the stopping of
2817	 * this command for frontend exceptions.
2818	 */
2819	if (cmd->transport_state & CMD_T_STOP &&
2820	    !cmd->se_tfo->write_pending_must_be_called) {
2821		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2822			 __func__, __LINE__, cmd->tag);
2823
2824		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2825
2826		complete_all(&cmd->t_transport_stop_comp);
2827		return 0;
2828	}
2829	cmd->transport_state &= ~CMD_T_ACTIVE;
2830	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2831
2832	ret = cmd->se_tfo->write_pending(cmd);
2833	if (ret)
2834		goto queue_full;
2835
2836	return 0;
2837
2838queue_full:
2839	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2840	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2841	return 0;
2842}
2843EXPORT_SYMBOL(transport_generic_new_cmd);
2844
2845static void transport_write_pending_qf(struct se_cmd *cmd)
2846{
2847	unsigned long flags;
2848	int ret;
2849	bool stop;
2850
2851	spin_lock_irqsave(&cmd->t_state_lock, flags);
2852	stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
2853	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2854
2855	if (stop) {
2856		pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
2857			__func__, __LINE__, cmd->tag);
2858		complete_all(&cmd->t_transport_stop_comp);
2859		return;
2860	}
2861
2862	ret = cmd->se_tfo->write_pending(cmd);
2863	if (ret) {
2864		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2865			 cmd);
2866		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2867	}
2868}
2869
2870static bool
2871__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2872			   unsigned long *flags);
2873
2874static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2875{
2876	unsigned long flags;
2877
2878	spin_lock_irqsave(&cmd->t_state_lock, flags);
2879	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2880	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2881}
2882
2883/*
2884 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
2885 * finished.
2886 */
2887void target_put_cmd_and_wait(struct se_cmd *cmd)
2888{
2889	DECLARE_COMPLETION_ONSTACK(compl);
2890
2891	WARN_ON_ONCE(cmd->abrt_compl);
2892	cmd->abrt_compl = &compl;
2893	target_put_sess_cmd(cmd);
2894	wait_for_completion(&compl);
2895}
2896
2897/*
2898 * This function is called by frontend drivers after processing of a command
2899 * has finished.
2900 *
2901 * The protocol for ensuring that either the regular frontend command
2902 * processing flow or target_handle_abort() code drops one reference is as
2903 * follows:
2904 * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
2905 *   the frontend driver to call this function synchronously or asynchronously.
2906 *   That will cause one reference to be dropped.
2907 * - During regular command processing the target core sets CMD_T_COMPLETE
2908 *   before invoking one of the .queue_*() functions.
2909 * - The code that aborts commands skips commands and TMFs for which
2910 *   CMD_T_COMPLETE has been set.
2911 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
2912 *   commands that will be aborted.
2913 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
2914 *   transport_generic_free_cmd() skips its call to target_put_sess_cmd().
2915 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2916 *   be called and will drop a reference.
2917 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2918 *   will be called. target_handle_abort() will drop the final reference.
2919 */
2920int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2921{
2922	DECLARE_COMPLETION_ONSTACK(compl);
2923	int ret = 0;
2924	bool aborted = false, tas = false;
2925
2926	if (wait_for_tasks)
2927		target_wait_free_cmd(cmd, &aborted, &tas);
2928
2929	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
2930		/*
2931		 * Handle WRITE failure case where transport_generic_new_cmd()
2932		 * has already added se_cmd to state_list, but fabric has
2933		 * failed command before I/O submission.
2934		 */
2935		if (cmd->state_active)
2936			target_remove_from_state_list(cmd);
2937
2938		if (cmd->se_lun)
2939			transport_lun_remove_cmd(cmd);
2940	}
2941	if (aborted)
2942		cmd->free_compl = &compl;
2943	ret = target_put_sess_cmd(cmd);
2944	if (aborted) {
2945		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2946		wait_for_completion(&compl);
2947		ret = 1;
2948	}
2949	return ret;
2950}
2951EXPORT_SYMBOL(transport_generic_free_cmd);
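/*
 * Rough sketch of how a fabric driver drops the two references held on a
 * command submitted with TARGET_SCF_ACK_KREF once the response has been
 * sent and acknowledged (exact ordering depends on the transport):
 *
 *	target_put_sess_cmd(se_cmd);			(initiator acked the response)
 *	transport_generic_free_cmd(se_cmd, 0);		(final put, no wait)
 */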
2952
2953/**
2954 * target_get_sess_cmd - Verify the session is accepting cmds and take ref
2955 * @se_cmd:	command descriptor to add
2956 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
2957 */
2958int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2959{
2960	struct se_session *se_sess = se_cmd->se_sess;
2961	int ret = 0;
2962
2963	/*
2964	 * Add a second kref if the fabric caller is expecting to handle
2965	 * fabric acknowledgement that requires two target_put_sess_cmd()
2966	 * invocations before se_cmd descriptor release.
2967	 */
2968	if (ack_kref) {
2969		kref_get(&se_cmd->cmd_kref);
2970		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2971	}
2972
2973	if (!percpu_ref_tryget_live(&se_sess->cmd_count))
2974		ret = -ESHUTDOWN;
2975
2976	if (ret && ack_kref)
2977		target_put_sess_cmd(se_cmd);
2978
2979	return ret;
2980}
2981EXPORT_SYMBOL(target_get_sess_cmd);
2982
2983static void target_free_cmd_mem(struct se_cmd *cmd)
2984{
2985	transport_free_pages(cmd);
2986
2987	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2988		core_tmr_release_req(cmd->se_tmr_req);
2989	if (cmd->t_task_cdb != cmd->__t_task_cdb)
2990		kfree(cmd->t_task_cdb);
2991}
2992
2993static void target_release_cmd_kref(struct kref *kref)
2994{
2995	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2996	struct se_session *se_sess = se_cmd->se_sess;
2997	struct completion *free_compl = se_cmd->free_compl;
2998	struct completion *abrt_compl = se_cmd->abrt_compl;
2999
3000	target_free_cmd_mem(se_cmd);
3001	se_cmd->se_tfo->release_cmd(se_cmd);
3002	if (free_compl)
3003		complete(free_compl);
3004	if (abrt_compl)
3005		complete(abrt_compl);
3006
3007	percpu_ref_put(&se_sess->cmd_count);
3008}
3009
3010/**
3011 * target_put_sess_cmd - decrease the command reference count
3012 * @se_cmd:	command to drop a reference from
3013 *
3014 * Returns 1 if and only if this target_put_sess_cmd() call caused the
3015 * refcount to drop to zero. Returns zero otherwise.
3016 */
3017int target_put_sess_cmd(struct se_cmd *se_cmd)
3018{
3019	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
3020}
3021EXPORT_SYMBOL(target_put_sess_cmd);
3022
3023static const char *data_dir_name(enum dma_data_direction d)
3024{
3025	switch (d) {
3026	case DMA_BIDIRECTIONAL:	return "BIDI";
3027	case DMA_TO_DEVICE:	return "WRITE";
3028	case DMA_FROM_DEVICE:	return "READ";
3029	case DMA_NONE:		return "NONE";
3030	}
3031
3032	return "(?)";
3033}
3034
3035static const char *cmd_state_name(enum transport_state_table t)
3036{
3037	switch (t) {
3038	case TRANSPORT_NO_STATE:	return "NO_STATE";
3039	case TRANSPORT_NEW_CMD:		return "NEW_CMD";
3040	case TRANSPORT_WRITE_PENDING:	return "WRITE_PENDING";
3041	case TRANSPORT_PROCESSING:	return "PROCESSING";
3042	case TRANSPORT_COMPLETE:	return "COMPLETE";
3043	case TRANSPORT_ISTATE_PROCESSING:
3044					return "ISTATE_PROCESSING";
3045	case TRANSPORT_COMPLETE_QF_WP:	return "COMPLETE_QF_WP";
3046	case TRANSPORT_COMPLETE_QF_OK:	return "COMPLETE_QF_OK";
3047	case TRANSPORT_COMPLETE_QF_ERR:	return "COMPLETE_QF_ERR";
3048	}
3049
3050	return "(?)";
3051}
3052
3053static void target_append_str(char **str, const char *txt)
3054{
3055	char *prev = *str;
3056
3057	*str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
3058		kstrdup(txt, GFP_ATOMIC);
3059	kfree(prev);
3060}
3061
3062/*
3063 * Convert a transport state bitmask into a string. The caller is
3064 * responsible for freeing the returned pointer.
3065 */
3066static char *target_ts_to_str(u32 ts)
3067{
3068	char *str = NULL;
3069
3070	if (ts & CMD_T_ABORTED)
3071		target_append_str(&str, "aborted");
3072	if (ts & CMD_T_ACTIVE)
3073		target_append_str(&str, "active");
3074	if (ts & CMD_T_COMPLETE)
3075		target_append_str(&str, "complete");
3076	if (ts & CMD_T_SENT)
3077		target_append_str(&str, "sent");
3078	if (ts & CMD_T_STOP)
3079		target_append_str(&str, "stop");
3080	if (ts & CMD_T_FABRIC_STOP)
3081		target_append_str(&str, "fabric_stop");
3082
3083	return str;
3084}
3085
3086static const char *target_tmf_name(enum tcm_tmreq_table tmf)
3087{
3088	switch (tmf) {
3089	case TMR_ABORT_TASK:		return "ABORT_TASK";
3090	case TMR_ABORT_TASK_SET:	return "ABORT_TASK_SET";
3091	case TMR_CLEAR_ACA:		return "CLEAR_ACA";
3092	case TMR_CLEAR_TASK_SET:	return "CLEAR_TASK_SET";
3093	case TMR_LUN_RESET:		return "LUN_RESET";
3094	case TMR_TARGET_WARM_RESET:	return "TARGET_WARM_RESET";
3095	case TMR_TARGET_COLD_RESET:	return "TARGET_COLD_RESET";
3096	case TMR_LUN_RESET_PRO:		return "LUN_RESET_PRO";
3097	case TMR_UNKNOWN:		break;
3098	}
3099	return "(?)";
3100}
3101
3102void target_show_cmd(const char *pfx, struct se_cmd *cmd)
3103{
3104	char *ts_str = target_ts_to_str(cmd->transport_state);
3105	const u8 *cdb = cmd->t_task_cdb;
3106	struct se_tmr_req *tmf = cmd->se_tmr_req;
3107
3108	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
3109		pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
3110			 pfx, cdb[0], cdb[1], cmd->tag,
3111			 data_dir_name(cmd->data_direction),
3112			 cmd->se_tfo->get_cmd_state(cmd),
3113			 cmd_state_name(cmd->t_state), cmd->data_length,
3114			 kref_read(&cmd->cmd_kref), ts_str);
3115	} else {
3116		pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
3117			 pfx, target_tmf_name(tmf->function), cmd->tag,
3118			 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
3119			 cmd_state_name(cmd->t_state),
3120			 kref_read(&cmd->cmd_kref), ts_str);
3121	}
3122	kfree(ts_str);
3123}
3124EXPORT_SYMBOL(target_show_cmd);
3125
3126static void target_stop_session_confirm(struct percpu_ref *ref)
3127{
3128	struct se_session *se_sess = container_of(ref, struct se_session,
3129						  cmd_count);
3130	complete_all(&se_sess->stop_done);
3131}
3132
3133/**
3134 * target_stop_session - Stop new IO from being queued on the session.
3135 * @se_sess:    session to stop
3136 */
3137void target_stop_session(struct se_session *se_sess)
3138{
3139	pr_debug("Stopping session queue.\n");
3140	if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0)
3141		percpu_ref_kill_and_confirm(&se_sess->cmd_count,
3142					    target_stop_session_confirm);
3143}
3144EXPORT_SYMBOL(target_stop_session);
3145
3146/**
3147 * target_wait_for_sess_cmds - Wait for outstanding commands
3148 * @se_sess:    session to wait for active I/O
3149 */
3150void target_wait_for_sess_cmds(struct se_session *se_sess)
3151{
3152	int ret;
3153
3154	WARN_ON_ONCE(!atomic_read(&se_sess->stopped));
3155
3156	do {
3157		pr_debug("Waiting for running cmds to complete.\n");
3158		ret = wait_event_timeout(se_sess->cmd_count_wq,
3159				percpu_ref_is_zero(&se_sess->cmd_count),
3160				180 * HZ);
3161	} while (ret <= 0);
3162
3163	wait_for_completion(&se_sess->stop_done);
3164	pr_debug("Waiting for cmds done.\n");
3165}
3166EXPORT_SYMBOL(target_wait_for_sess_cmds);
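/*
 * The two helpers above are normally used together during session teardown;
 * sketch of the usual shutdown sequence in a fabric driver:
 *
 *	target_stop_session(se_sess);		(fail new command submissions)
 *	target_wait_for_sess_cmds(se_sess);	(drain outstanding commands)
 *	target_remove_session(se_sess);		(then free the session)
 */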
3167
3168/*
3169 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
3170 * all references to the LUN have been released. Called during LUN shutdown.
3171 */
3172void transport_clear_lun_ref(struct se_lun *lun)
3173{
3174	percpu_ref_kill(&lun->lun_ref);
3175	wait_for_completion(&lun->lun_shutdown_comp);
3176}
3177
3178static bool
3179__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
3180			   bool *aborted, bool *tas, unsigned long *flags)
3181	__releases(&cmd->t_state_lock)
3182	__acquires(&cmd->t_state_lock)
3183{
3184	lockdep_assert_held(&cmd->t_state_lock);
3185
3186	if (fabric_stop)
3187		cmd->transport_state |= CMD_T_FABRIC_STOP;
3188
3189	if (cmd->transport_state & CMD_T_ABORTED)
3190		*aborted = true;
3191
3192	if (cmd->transport_state & CMD_T_TAS)
3193		*tas = true;
3194
3195	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
3196	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
3197		return false;
3198
3199	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
3200	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
3201		return false;
3202
3203	if (!(cmd->transport_state & CMD_T_ACTIVE))
3204		return false;
3205
3206	if (fabric_stop && *aborted)
3207		return false;
3208
3209	cmd->transport_state |= CMD_T_STOP;
3210
3211	target_show_cmd("wait_for_tasks: Stopping ", cmd);
3212
3213	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
3214
3215	while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
3216					    180 * HZ))
3217		target_show_cmd("wait for tasks: ", cmd);
3218
3219	spin_lock_irqsave(&cmd->t_state_lock, *flags);
3220	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
3221
3222	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
3223		 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
3224
3225	return true;
3226}
3227
3228/**
3229 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
3230 * @cmd: command to wait on
3231 */
3232bool transport_wait_for_tasks(struct se_cmd *cmd)
3233{
3234	unsigned long flags;
3235	bool ret, aborted = false, tas = false;
3236
3237	spin_lock_irqsave(&cmd->t_state_lock, flags);
3238	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
3239	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3240
3241	return ret;
3242}
3243EXPORT_SYMBOL(transport_wait_for_tasks);
3244
3245struct sense_detail {
3246	u8 key;
3247	u8 asc;
3248	u8 ascq;
3249	bool add_sense_info;
3250};
3251
3252static const struct sense_detail sense_detail_table[] = {
3253	[TCM_NO_SENSE] = {
3254		.key = NOT_READY
3255	},
3256	[TCM_NON_EXISTENT_LUN] = {
3257		.key = ILLEGAL_REQUEST,
3258		.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
3259	},
3260	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
3261		.key = ILLEGAL_REQUEST,
3262		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3263	},
3264	[TCM_SECTOR_COUNT_TOO_MANY] = {
3265		.key = ILLEGAL_REQUEST,
3266		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3267	},
3268	[TCM_UNKNOWN_MODE_PAGE] = {
3269		.key = ILLEGAL_REQUEST,
3270		.asc = 0x24, /* INVALID FIELD IN CDB */
3271	},
3272	[TCM_CHECK_CONDITION_ABORT_CMD] = {
3273		.key = ABORTED_COMMAND,
3274		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
3275		.ascq = 0x03,
3276	},
3277	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
3278		.key = ABORTED_COMMAND,
3279		.asc = 0x0c, /* WRITE ERROR */
3280		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
3281	},
3282	[TCM_INVALID_CDB_FIELD] = {
3283		.key = ILLEGAL_REQUEST,
3284		.asc = 0x24, /* INVALID FIELD IN CDB */
3285	},
3286	[TCM_INVALID_PARAMETER_LIST] = {
3287		.key = ILLEGAL_REQUEST,
3288		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
3289	},
3290	[TCM_TOO_MANY_TARGET_DESCS] = {
3291		.key = ILLEGAL_REQUEST,
3292		.asc = 0x26,
3293		.ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
3294	},
3295	[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
3296		.key = ILLEGAL_REQUEST,
3297		.asc = 0x26,
3298		.ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
3299	},
3300	[TCM_TOO_MANY_SEGMENT_DESCS] = {
3301		.key = ILLEGAL_REQUEST,
3302		.asc = 0x26,
3303		.ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
3304	},
3305	[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
3306		.key = ILLEGAL_REQUEST,
3307		.asc = 0x26,
3308		.ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
3309	},
3310	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
3311		.key = ILLEGAL_REQUEST,
3312		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
3313	},
3314	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
3315		.key = ILLEGAL_REQUEST,
3316		.asc = 0x0c, /* WRITE ERROR */
3317		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
3318	},
3319	[TCM_SERVICE_CRC_ERROR] = {
3320		.key = ABORTED_COMMAND,
3321		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
3322		.ascq = 0x05, /* N/A */
3323	},
3324	[TCM_SNACK_REJECTED] = {
3325		.key = ABORTED_COMMAND,
3326		.asc = 0x11, /* READ ERROR */
3327		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
3328	},
3329	[TCM_WRITE_PROTECTED] = {
3330		.key = DATA_PROTECT,
3331		.asc = 0x27, /* WRITE PROTECTED */
3332	},
3333	[TCM_ADDRESS_OUT_OF_RANGE] = {
3334		.key = ILLEGAL_REQUEST,
3335		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
3336	},
3337	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
3338		.key = UNIT_ATTENTION,
3339	},
3340	[TCM_MISCOMPARE_VERIFY] = {
3341		.key = MISCOMPARE,
3342		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
3343		.ascq = 0x00,
3344		.add_sense_info = true,
3345	},
3346	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
3347		.key = ABORTED_COMMAND,
3348		.asc = 0x10,
3349		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
3350		.add_sense_info = true,
3351	},
3352	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
3353		.key = ABORTED_COMMAND,
3354		.asc = 0x10,
3355		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
3356		.add_sense_info = true,
3357	},
3358	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
3359		.key = ABORTED_COMMAND,
3360		.asc = 0x10,
3361		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
3362		.add_sense_info = true,
3363	},
3364	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
3365		.key = COPY_ABORTED,
3366		.asc = 0x0d,
3367		.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
3368
3369	},
3370	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
3371		/*
3372		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
3373		 * Solaris initiators.  Returning NOT READY instead means the
3374		 * operations will be retried a finite number of times and we
3375		 * can survive intermittent errors.
3376		 */
3377		.key = NOT_READY,
3378		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
3379	},
3380	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
3381		/*
3382		 * From spc4r22 sections 5.7.7 and 5.7.8:
3383		 * If a PERSISTENT RESERVE OUT command with a REGISTER service action
3384		 * or a REGISTER AND IGNORE EXISTING KEY service action or
3385		 * REGISTER AND MOVE service action is attempted,
3386		 * but there are insufficient device server resources to complete the
3387		 * operation, then the command shall be terminated with CHECK CONDITION
3388		 * status, with the sense key set to ILLEGAL REQUEST, and the additional
3389		 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
3390		 */
3391		.key = ILLEGAL_REQUEST,
3392		.asc = 0x55,
3393		.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
3394	},
3395	[TCM_INVALID_FIELD_IN_COMMAND_IU] = {
3396		.key = ILLEGAL_REQUEST,
3397		.asc = 0x0e,
3398		.ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
3399	},
3400	[TCM_ALUA_TG_PT_STANDBY] = {
3401		.key = NOT_READY,
3402		.asc = 0x04,
3403		.ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
3404	},
3405	[TCM_ALUA_TG_PT_UNAVAILABLE] = {
3406		.key = NOT_READY,
3407		.asc = 0x04,
3408		.ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
3409	},
3410	[TCM_ALUA_STATE_TRANSITION] = {
3411		.key = NOT_READY,
3412		.asc = 0x04,
3413		.ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
3414	},
3415	[TCM_ALUA_OFFLINE] = {
3416		.key = NOT_READY,
3417		.asc = 0x04,
3418		.ascq = ASCQ_04H_ALUA_OFFLINE,
3419	},
3420};
3421
3422/**
3423 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
3424 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
3425 *   be stored.
3426 * @reason: LIO sense reason code. If this argument has the value
3427 *   TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
3428 *   dequeuing a unit attention fails due to multiple commands being processed
3429 *   concurrently, set the command status to BUSY.
3432 */
3433static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
3434{
3435	const struct sense_detail *sd;
3436	u8 *buffer = cmd->sense_buffer;
3437	int r = (__force int)reason;
3438	u8 key, asc, ascq;
3439	bool desc_format = target_sense_desc_format(cmd->se_dev);
3440
3441	if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
3442		sd = &sense_detail_table[r];
3443	else
3444		sd = &sense_detail_table[(__force int)
3445				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
3446
3447	key = sd->key;
3448	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
3449		if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
3450						       &ascq)) {
3451			cmd->scsi_status = SAM_STAT_BUSY;
3452			return;
3453		}
3454	} else {
3455		WARN_ON_ONCE(sd->asc == 0);
3456		asc = sd->asc;
3457		ascq = sd->ascq;
3458	}
3459
3460	cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
3461	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
3462	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
3463	scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
3464	if (sd->add_sense_info)
3465		WARN_ON_ONCE(scsi_set_sense_information(buffer,
3466							cmd->scsi_sense_length,
3467							cmd->sense_info) < 0);
3468}
3469
3470int
3471transport_send_check_condition_and_sense(struct se_cmd *cmd,
3472		sense_reason_t reason, int from_transport)
3473{
3474	unsigned long flags;
3475
3476	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3477
3478	spin_lock_irqsave(&cmd->t_state_lock, flags);
3479	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3480		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3481		return 0;
3482	}
3483	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
3484	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3485
3486	if (!from_transport)
3487		translate_sense_reason(cmd, reason);
3488
3489	trace_target_cmd_complete(cmd);
3490	return cmd->se_tfo->queue_status(cmd);
3491}
3492EXPORT_SYMBOL(transport_send_check_condition_and_sense);
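/*
 * Callers use the helper above to fail a command with a CHECK CONDITION and
 * a translated sense code, e.g. (sketch):
 *
 *	transport_send_check_condition_and_sense(se_cmd,
 *			TCM_UNSUPPORTED_SCSI_OPCODE, 0);
 */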
3493
3494/**
3495 * target_send_busy - Send SCSI BUSY status back to the initiator
3496 * @cmd: SCSI command for which to send a BUSY reply.
3497 *
3498 * Note: Only call this function if target_submit_cmd*() failed.
3499 */
3500int target_send_busy(struct se_cmd *cmd)
3501{
3502	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3503
3504	cmd->scsi_status = SAM_STAT_BUSY;
3505	trace_target_cmd_complete(cmd);
3506	return cmd->se_tfo->queue_status(cmd);
3507}
3508EXPORT_SYMBOL(target_send_busy);
3509
3510static void target_tmr_work(struct work_struct *work)
3511{
3512	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3513	struct se_device *dev = cmd->se_dev;
3514	struct se_tmr_req *tmr = cmd->se_tmr_req;
3515	int ret;
3516
3517	if (cmd->transport_state & CMD_T_ABORTED)
3518		goto aborted;
3519
3520	switch (tmr->function) {
3521	case TMR_ABORT_TASK:
3522		core_tmr_abort_task(dev, tmr, cmd->se_sess);
3523		break;
3524	case TMR_ABORT_TASK_SET:
3525	case TMR_CLEAR_ACA:
3526	case TMR_CLEAR_TASK_SET:
3527		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3528		break;
3529	case TMR_LUN_RESET:
3530		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3531		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3532					 TMR_FUNCTION_REJECTED;
3533		if (tmr->response == TMR_FUNCTION_COMPLETE) {
3534			target_dev_ua_allocate(dev, 0x29,
3535					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3536		}
3537		break;
3538	case TMR_TARGET_WARM_RESET:
3539		tmr->response = TMR_FUNCTION_REJECTED;
3540		break;
3541	case TMR_TARGET_COLD_RESET:
3542		tmr->response = TMR_FUNCTION_REJECTED;
3543		break;
3544	default:
3545		pr_err("Unknown TMR function: 0x%02x.\n",
3546				tmr->function);
3547		tmr->response = TMR_FUNCTION_REJECTED;
3548		break;
3549	}
3550
3551	if (cmd->transport_state & CMD_T_ABORTED)
3552		goto aborted;
3553
3554	cmd->se_tfo->queue_tm_rsp(cmd);
3555
3556	transport_lun_remove_cmd(cmd);
3557	transport_cmd_check_stop_to_fabric(cmd);
3558	return;
3559
3560aborted:
3561	target_handle_abort(cmd);
3562}
3563
3564int transport_generic_handle_tmr(
3565	struct se_cmd *cmd)
3566{
3567	unsigned long flags;
3568	bool aborted = false;
3569
3570	spin_lock_irqsave(&cmd->t_state_lock, flags);
3571	if (cmd->transport_state & CMD_T_ABORTED) {
3572		aborted = true;
3573	} else {
3574		cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3575		cmd->transport_state |= CMD_T_ACTIVE;
3576	}
3577	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3578
3579	if (aborted) {
3580		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
3581				    cmd->se_tmr_req->function,
3582				    cmd->se_tmr_req->ref_task_tag, cmd->tag);
3583		target_handle_abort(cmd);
3584		return 0;
3585	}
3586
3587	INIT_WORK(&cmd->work, target_tmr_work);
3588	schedule_work(&cmd->work);
3589	return 0;
3590}
3591EXPORT_SYMBOL(transport_generic_handle_tmr);
3592
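/*
 * target_check_wce - report whether writeback caching is enabled for @dev,
 * either as reported by the backend via get_write_cache() or through the
 * emulate_write_cache device attribute.
 */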
3593bool
3594target_check_wce(struct se_device *dev)
3595{
3596	bool wce = false;
3597
3598	if (dev->transport->get_write_cache)
3599		wce = dev->transport->get_write_cache(dev);
3600	else if (dev->dev_attrib.emulate_write_cache > 0)
3601		wce = true;
3602
3603	return wce;
3604}
3605
3606bool
3607target_check_fua(struct se_device *dev)
3608{
3609	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3610}
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * Filename:  target_core_transport.c
   4 *
   5 * This file contains the Generic Target Engine Core.
   6 *
   7 * (c) Copyright 2002-2013 Datera, Inc.
   8 *
   9 * Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 ******************************************************************************/
  12
  13#include <linux/net.h>
  14#include <linux/delay.h>
  15#include <linux/string.h>
  16#include <linux/timer.h>
  17#include <linux/slab.h>
  18#include <linux/spinlock.h>
  19#include <linux/kthread.h>
  20#include <linux/in.h>
  21#include <linux/cdrom.h>
  22#include <linux/module.h>
  23#include <linux/ratelimit.h>
  24#include <linux/vmalloc.h>
  25#include <asm/unaligned.h>
  26#include <net/sock.h>
  27#include <net/tcp.h>
  28#include <scsi/scsi_proto.h>
  29#include <scsi/scsi_common.h>
  30
  31#include <target/target_core_base.h>
  32#include <target/target_core_backend.h>
  33#include <target/target_core_fabric.h>
  34
  35#include "target_core_internal.h"
  36#include "target_core_alua.h"
  37#include "target_core_pr.h"
  38#include "target_core_ua.h"
  39
  40#define CREATE_TRACE_POINTS
  41#include <trace/events/target.h>
  42
  43static struct workqueue_struct *target_completion_wq;
 
  44static struct kmem_cache *se_sess_cache;
  45struct kmem_cache *se_ua_cache;
  46struct kmem_cache *t10_pr_reg_cache;
  47struct kmem_cache *t10_alua_lu_gp_cache;
  48struct kmem_cache *t10_alua_lu_gp_mem_cache;
  49struct kmem_cache *t10_alua_tg_pt_gp_cache;
  50struct kmem_cache *t10_alua_lba_map_cache;
  51struct kmem_cache *t10_alua_lba_map_mem_cache;
  52
  53static void transport_complete_task_attr(struct se_cmd *cmd);
  54static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
  55static void transport_handle_queue_full(struct se_cmd *cmd,
  56		struct se_device *dev, int err, bool write_pending);
  57static void target_complete_ok_work(struct work_struct *work);
  58
  59int init_se_kmem_caches(void)
  60{
  61	se_sess_cache = kmem_cache_create("se_sess_cache",
  62			sizeof(struct se_session), __alignof__(struct se_session),
  63			0, NULL);
  64	if (!se_sess_cache) {
  65		pr_err("kmem_cache_create() for struct se_session"
  66				" failed\n");
  67		goto out;
  68	}
  69	se_ua_cache = kmem_cache_create("se_ua_cache",
  70			sizeof(struct se_ua), __alignof__(struct se_ua),
  71			0, NULL);
  72	if (!se_ua_cache) {
  73		pr_err("kmem_cache_create() for struct se_ua failed\n");
  74		goto out_free_sess_cache;
  75	}
  76	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
  77			sizeof(struct t10_pr_registration),
  78			__alignof__(struct t10_pr_registration), 0, NULL);
  79	if (!t10_pr_reg_cache) {
  80		pr_err("kmem_cache_create() for struct t10_pr_registration"
  81				" failed\n");
  82		goto out_free_ua_cache;
  83	}
  84	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
  85			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
  86			0, NULL);
  87	if (!t10_alua_lu_gp_cache) {
  88		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
  89				" failed\n");
  90		goto out_free_pr_reg_cache;
  91	}
  92	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
  93			sizeof(struct t10_alua_lu_gp_member),
  94			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
  95	if (!t10_alua_lu_gp_mem_cache) {
  96		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
  97				"cache failed\n");
  98		goto out_free_lu_gp_cache;
  99	}
 100	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
 101			sizeof(struct t10_alua_tg_pt_gp),
 102			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
 103	if (!t10_alua_tg_pt_gp_cache) {
 104		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
 105				"cache failed\n");
 106		goto out_free_lu_gp_mem_cache;
 107	}
 108	t10_alua_lba_map_cache = kmem_cache_create(
 109			"t10_alua_lba_map_cache",
 110			sizeof(struct t10_alua_lba_map),
 111			__alignof__(struct t10_alua_lba_map), 0, NULL);
 112	if (!t10_alua_lba_map_cache) {
 113		pr_err("kmem_cache_create() for t10_alua_lba_map_"
 114				"cache failed\n");
 115		goto out_free_tg_pt_gp_cache;
 116	}
 117	t10_alua_lba_map_mem_cache = kmem_cache_create(
 118			"t10_alua_lba_map_mem_cache",
 119			sizeof(struct t10_alua_lba_map_member),
 120			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
 121	if (!t10_alua_lba_map_mem_cache) {
 122		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
 123				"cache failed\n");
 124		goto out_free_lba_map_cache;
 125	}
 126
 127	target_completion_wq = alloc_workqueue("target_completion",
 128					       WQ_MEM_RECLAIM, 0);
 129	if (!target_completion_wq)
 130		goto out_free_lba_map_mem_cache;
 131
 
 
 
 
 
 132	return 0;
 133
 
 
 134out_free_lba_map_mem_cache:
 135	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 136out_free_lba_map_cache:
 137	kmem_cache_destroy(t10_alua_lba_map_cache);
 138out_free_tg_pt_gp_cache:
 139	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
 140out_free_lu_gp_mem_cache:
 141	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
 142out_free_lu_gp_cache:
 143	kmem_cache_destroy(t10_alua_lu_gp_cache);
 144out_free_pr_reg_cache:
 145	kmem_cache_destroy(t10_pr_reg_cache);
 146out_free_ua_cache:
 147	kmem_cache_destroy(se_ua_cache);
 148out_free_sess_cache:
 149	kmem_cache_destroy(se_sess_cache);
 150out:
 151	return -ENOMEM;
 152}
 153
 154void release_se_kmem_caches(void)
 155{
 
 156	destroy_workqueue(target_completion_wq);
 157	kmem_cache_destroy(se_sess_cache);
 158	kmem_cache_destroy(se_ua_cache);
 159	kmem_cache_destroy(t10_pr_reg_cache);
 160	kmem_cache_destroy(t10_alua_lu_gp_cache);
 161	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
 162	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
 163	kmem_cache_destroy(t10_alua_lba_map_cache);
 164	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 165}
 166
 167/* This code ensures unique mib indexes are handed out. */
 168static DEFINE_SPINLOCK(scsi_mib_index_lock);
 169static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
 170
 171/*
 172 * Allocate a new row index for the entry type specified
 173 */
 174u32 scsi_get_new_index(scsi_index_t type)
 175{
 176	u32 new_index;
 177
 178	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
 179
 180	spin_lock(&scsi_mib_index_lock);
 181	new_index = ++scsi_mib_index[type];
 182	spin_unlock(&scsi_mib_index_lock);
 183
 184	return new_index;
 185}
 186
 187void transport_subsystem_check_init(void)
 188{
 189	int ret;
 190	static int sub_api_initialized;
 191
 192	if (sub_api_initialized)
 193		return;
 194
 195	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
 196	if (ret != 0)
 197		pr_err("Unable to load target_core_iblock\n");
 198
 199	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
 200	if (ret != 0)
 201		pr_err("Unable to load target_core_file\n");
 202
 203	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
 204	if (ret != 0)
 205		pr_err("Unable to load target_core_pscsi\n");
 206
 207	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
 208	if (ret != 0)
 209		pr_err("Unable to load target_core_user\n");
 210
 211	sub_api_initialized = 1;
 212}
 213
 214static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
 215{
 216	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
 217
 218	wake_up(&sess->cmd_list_wq);
 219}
 220
 221/**
 222 * transport_init_session - initialize a session object
 223 * @se_sess: Session object pointer.
 224 *
 225 * The caller must have zero-initialized @se_sess before calling this function.
 226 */
 227int transport_init_session(struct se_session *se_sess)
 228{
 229	INIT_LIST_HEAD(&se_sess->sess_list);
 230	INIT_LIST_HEAD(&se_sess->sess_acl_list);
 231	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
 232	spin_lock_init(&se_sess->sess_cmd_lock);
 233	init_waitqueue_head(&se_sess->cmd_list_wq);
 
 
 234	return percpu_ref_init(&se_sess->cmd_count,
 235			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
 236}
 237EXPORT_SYMBOL(transport_init_session);
 238
 239void transport_uninit_session(struct se_session *se_sess)
 240{
 
 
 
 
 
 
 
 
 241	percpu_ref_exit(&se_sess->cmd_count);
 242}
 243
 244/**
 245 * transport_alloc_session - allocate a session object and initialize it
 246 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 247 */
 248struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
 249{
 250	struct se_session *se_sess;
 251	int ret;
 252
 253	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
 254	if (!se_sess) {
 255		pr_err("Unable to allocate struct se_session from"
 256				" se_sess_cache\n");
 257		return ERR_PTR(-ENOMEM);
 258	}
 259	ret = transport_init_session(se_sess);
 260	if (ret < 0) {
 261		kmem_cache_free(se_sess_cache, se_sess);
 262		return ERR_PTR(ret);
 263	}
 264	se_sess->sup_prot_ops = sup_prot_ops;
 265
 266	return se_sess;
 267}
 268EXPORT_SYMBOL(transport_alloc_session);
 269
 270/**
 271 * transport_alloc_session_tags - allocate target driver private data
 272 * @se_sess:  Session pointer.
 273 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 274 * @tag_size: Size in bytes of the private data a target driver associates with
 275 *	      each command.
 276 */
 277int transport_alloc_session_tags(struct se_session *se_sess,
 278			         unsigned int tag_num, unsigned int tag_size)
 279{
 280	int rc;
 281
 282	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
 283					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 284	if (!se_sess->sess_cmd_map) {
 285		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
 286		return -ENOMEM;
 287	}
 288
 289	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
 290			false, GFP_KERNEL, NUMA_NO_NODE);
 291	if (rc < 0) {
 292		pr_err("Unable to init se_sess->sess_tag_pool,"
 293			" tag_num: %u\n", tag_num);
 294		kvfree(se_sess->sess_cmd_map);
 295		se_sess->sess_cmd_map = NULL;
 296		return -ENOMEM;
 297	}
 298
 299	return 0;
 300}
 301EXPORT_SYMBOL(transport_alloc_session_tags);
 302
 303/**
 304 * transport_init_session_tags - allocate a session and target driver private data
 305 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 306 * @tag_size: Size in bytes of the private data a target driver associates with
 307 *	      each command.
 308 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 309 */
 310static struct se_session *
 311transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
 312			    enum target_prot_op sup_prot_ops)
 313{
 314	struct se_session *se_sess;
 315	int rc;
 316
 317	if (tag_num != 0 && !tag_size) {
 318		pr_err("init_session_tags called with percpu-ida tag_num:"
 319		       " %u, but zero tag_size\n", tag_num);
 320		return ERR_PTR(-EINVAL);
 321	}
 322	if (!tag_num && tag_size) {
 323		pr_err("init_session_tags called with percpu-ida tag_size:"
 324		       " %u, but zero tag_num\n", tag_size);
 325		return ERR_PTR(-EINVAL);
 326	}
 327
 328	se_sess = transport_alloc_session(sup_prot_ops);
 329	if (IS_ERR(se_sess))
 330		return se_sess;
 331
 332	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
 333	if (rc < 0) {
 334		transport_free_session(se_sess);
 335		return ERR_PTR(-ENOMEM);
 336	}
 337
 338	return se_sess;
 339}
 340
 341/*
 342 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called.
 343 */
 344void __transport_register_session(
 345	struct se_portal_group *se_tpg,
 346	struct se_node_acl *se_nacl,
 347	struct se_session *se_sess,
 348	void *fabric_sess_ptr)
 349{
 350	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
 351	unsigned char buf[PR_REG_ISID_LEN];
 352	unsigned long flags;
 353
 354	se_sess->se_tpg = se_tpg;
 355	se_sess->fabric_sess_ptr = fabric_sess_ptr;
 356	/*
 357	 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t
 358	 *
 359	 * Only set for struct se_session's that will actually be moving I/O.
 360	 * eg: *NOT* discovery sessions.
 361	 */
 362	if (se_nacl) {
 363		/*
 364		 *
 365		 * Determine if fabric allows for T10-PI feature bits exposed to
 366		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
 367		 *
 368		 * If so, then always save prot_type on a per se_node_acl node
 369		 * basis and re-instate the previous sess_prot_type to avoid
 370		 * disabling PI from below any previously initiator side
 371		 * registered LUNs.
 372		 */
 373		if (se_nacl->saved_prot_type)
 374			se_sess->sess_prot_type = se_nacl->saved_prot_type;
 375		else if (tfo->tpg_check_prot_fabric_only)
 376			se_sess->sess_prot_type = se_nacl->saved_prot_type =
 377					tfo->tpg_check_prot_fabric_only(se_tpg);
 378		/*
 379		 * If the fabric module supports an ISID based TransportID,
 380		 * save this value in binary from the fabric I_T Nexus now.
 381		 */
 382		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
 383			memset(&buf[0], 0, PR_REG_ISID_LEN);
 384			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
 385					&buf[0], PR_REG_ISID_LEN);
 386			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
 387		}
 388
 389		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 390		/*
 391		 * The se_nacl->nacl_sess pointer will be set to the
 392		 * last active I_T Nexus for each struct se_node_acl.
 393		 */
 394		se_nacl->nacl_sess = se_sess;
 395
 396		list_add_tail(&se_sess->sess_acl_list,
 397			      &se_nacl->acl_sess_list);
 398		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 399	}
 400	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
 401
 402	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
 403		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
 404}
 405EXPORT_SYMBOL(__transport_register_session);
 406
 407void transport_register_session(
 408	struct se_portal_group *se_tpg,
 409	struct se_node_acl *se_nacl,
 410	struct se_session *se_sess,
 411	void *fabric_sess_ptr)
 412{
 413	unsigned long flags;
 414
 415	spin_lock_irqsave(&se_tpg->session_lock, flags);
 416	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 417	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 418}
 419EXPORT_SYMBOL(transport_register_session);
 420
 421struct se_session *
 422target_setup_session(struct se_portal_group *tpg,
 423		     unsigned int tag_num, unsigned int tag_size,
 424		     enum target_prot_op prot_op,
 425		     const char *initiatorname, void *private,
 426		     int (*callback)(struct se_portal_group *,
 427				     struct se_session *, void *))
 428{
 429	struct se_session *sess;
 430
 431	/*
 432	 * If the fabric driver is using percpu-ida based pre allocation
 433	 * of I/O descriptor tags, go ahead and perform that setup now..
 434	 */
 435	if (tag_num != 0)
 436		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
 437	else
 438		sess = transport_alloc_session(prot_op);
 439
 440	if (IS_ERR(sess))
 441		return sess;
 442
 443	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
 444					(unsigned char *)initiatorname);
 445	if (!sess->se_node_acl) {
 446		transport_free_session(sess);
 447		return ERR_PTR(-EACCES);
 448	}
 449	/*
 450	 * Go ahead and perform any remaining fabric setup that is
 451	 * required before transport_register_session().
 452	 */
 453	if (callback != NULL) {
 454		int rc = callback(tpg, sess, private);
 455		if (rc) {
 456			transport_free_session(sess);
 457			return ERR_PTR(rc);
 458		}
 459	}
 460
 461	transport_register_session(tpg, sess->se_node_acl, sess, private);
 462	return sess;
 463}
 464EXPORT_SYMBOL(target_setup_session);
 465
 466ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
 467{
 468	struct se_session *se_sess;
 469	ssize_t len = 0;
 470
 471	spin_lock_bh(&se_tpg->session_lock);
 472	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
 473		if (!se_sess->se_node_acl)
 474			continue;
 475		if (!se_sess->se_node_acl->dynamic_node_acl)
 476			continue;
 477		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
 478			break;
 479
 480		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
 481				se_sess->se_node_acl->initiatorname);
 482		len += 1; /* Include NULL terminator */
 483	}
 484	spin_unlock_bh(&se_tpg->session_lock);
 485
 486	return len;
 487}
 488EXPORT_SYMBOL(target_show_dynamic_sessions);
 489
 490static void target_complete_nacl(struct kref *kref)
 491{
 492	struct se_node_acl *nacl = container_of(kref,
 493				struct se_node_acl, acl_kref);
 494	struct se_portal_group *se_tpg = nacl->se_tpg;
 495
 496	if (!nacl->dynamic_stop) {
 497		complete(&nacl->acl_free_comp);
 498		return;
 499	}
 500
 501	mutex_lock(&se_tpg->acl_node_mutex);
 502	list_del_init(&nacl->acl_list);
 503	mutex_unlock(&se_tpg->acl_node_mutex);
 504
 505	core_tpg_wait_for_nacl_pr_ref(nacl);
 506	core_free_device_list_for_node(nacl, se_tpg);
 507	kfree(nacl);
 508}
 509
 510void target_put_nacl(struct se_node_acl *nacl)
 511{
 512	kref_put(&nacl->acl_kref, target_complete_nacl);
 513}
 514EXPORT_SYMBOL(target_put_nacl);
 515
 516void transport_deregister_session_configfs(struct se_session *se_sess)
 517{
 518	struct se_node_acl *se_nacl;
 519	unsigned long flags;
 520	/*
 521	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
 522	 */
 523	se_nacl = se_sess->se_node_acl;
 524	if (se_nacl) {
 525		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 526		if (!list_empty(&se_sess->sess_acl_list))
 527			list_del_init(&se_sess->sess_acl_list);
 528		/*
 529		 * If the session list is empty, then clear the pointer.
 530		 * Otherwise, set the struct se_session pointer from the tail
 531		 * element of the per struct se_node_acl active session list.
 532		 */
 533		if (list_empty(&se_nacl->acl_sess_list))
 534			se_nacl->nacl_sess = NULL;
 535		else {
 536			se_nacl->nacl_sess = container_of(
 537					se_nacl->acl_sess_list.prev,
 538					struct se_session, sess_acl_list);
 539		}
 540		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 541	}
 542}
 543EXPORT_SYMBOL(transport_deregister_session_configfs);
 544
 545void transport_free_session(struct se_session *se_sess)
 546{
 547	struct se_node_acl *se_nacl = se_sess->se_node_acl;
 548
 549	/*
 550	 * Drop the se_node_acl->nacl_kref obtained from within
 551	 * core_tpg_get_initiator_node_acl().
 552	 */
 553	if (se_nacl) {
 554		struct se_portal_group *se_tpg = se_nacl->se_tpg;
 555		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
 556		unsigned long flags;
 557
 558		se_sess->se_node_acl = NULL;
 559
 560		/*
 561		 * Also determine if we need to drop the extra ->cmd_kref if
 562		 * it had been previously dynamically generated, and
 563		 * the endpoint is not caching dynamic ACLs.
 564		 */
 565		mutex_lock(&se_tpg->acl_node_mutex);
 566		if (se_nacl->dynamic_node_acl &&
 567		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
 568			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 569			if (list_empty(&se_nacl->acl_sess_list))
 570				se_nacl->dynamic_stop = true;
 571			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 572
 573			if (se_nacl->dynamic_stop)
 574				list_del_init(&se_nacl->acl_list);
 575		}
 576		mutex_unlock(&se_tpg->acl_node_mutex);
 577
 578		if (se_nacl->dynamic_stop)
 579			target_put_nacl(se_nacl);
 580
 581		target_put_nacl(se_nacl);
 582	}
 583	if (se_sess->sess_cmd_map) {
 584		sbitmap_queue_free(&se_sess->sess_tag_pool);
 585		kvfree(se_sess->sess_cmd_map);
 586	}
 587	transport_uninit_session(se_sess);
 588	kmem_cache_free(se_sess_cache, se_sess);
 589}
 590EXPORT_SYMBOL(transport_free_session);
 591
 592static int target_release_res(struct se_device *dev, void *data)
 593{
 594	struct se_session *sess = data;
 595
 596	if (dev->reservation_holder == sess)
 597		target_release_reservation(dev);
 598	return 0;
 599}
 600
 601void transport_deregister_session(struct se_session *se_sess)
 602{
 603	struct se_portal_group *se_tpg = se_sess->se_tpg;
 604	unsigned long flags;
 605
 606	if (!se_tpg) {
 607		transport_free_session(se_sess);
 608		return;
 609	}
 610
 611	spin_lock_irqsave(&se_tpg->session_lock, flags);
 612	list_del(&se_sess->sess_list);
 613	se_sess->se_tpg = NULL;
 614	se_sess->fabric_sess_ptr = NULL;
 615	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 616
 617	/*
 618	 * Since the session is being removed, release SPC-2
 619	 * reservations held by the session that is disappearing.
 620	 */
 621	target_for_each_device(target_release_res, se_sess);
 622
 623	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
 624		se_tpg->se_tpg_tfo->fabric_name);
 625	/*
 626	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
 627	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
 628	 * removal context from within transport_free_session() code.
 629	 *
 630	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
 631	 * to release all remaining generate_node_acl=1 created ACL resources.
 632	 */
 633
 634	transport_free_session(se_sess);
 635}
 636EXPORT_SYMBOL(transport_deregister_session);
 637
 638void target_remove_session(struct se_session *se_sess)
 639{
 640	transport_deregister_session_configfs(se_sess);
 641	transport_deregister_session(se_sess);
 642}
 643EXPORT_SYMBOL(target_remove_session);
 644
 645static void target_remove_from_state_list(struct se_cmd *cmd)
 646{
 647	struct se_device *dev = cmd->se_dev;
 648	unsigned long flags;
 649
 650	if (!dev)
 651		return;
 652
 653	spin_lock_irqsave(&dev->execute_task_lock, flags);
 654	if (cmd->state_active) {
 655		list_del(&cmd->state_list);
 656		cmd->state_active = false;
 657	}
 658	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 659}
 660
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 661/*
 662 * This function is called by the target core after the target core has
 663 * finished processing a SCSI command or SCSI TMF. Both the regular command
 664 * processing code and the code for aborting commands can call this
 665 * function. CMD_T_STOP is set if and only if another thread is waiting
 666 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 667 */
 668static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 669{
 670	unsigned long flags;
 671
 672	target_remove_from_state_list(cmd);
 673
 674	/*
 675	 * Clear struct se_cmd->se_lun before the handoff to FE.
 676	 */
 677	cmd->se_lun = NULL;
 678
 679	spin_lock_irqsave(&cmd->t_state_lock, flags);
 680	/*
 681	 * Determine if frontend context caller is requesting the stopping of
 682	 * this command for frontend exceptions.
 683	 */
 684	if (cmd->transport_state & CMD_T_STOP) {
 685		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
 686			__func__, __LINE__, cmd->tag);
 687
 688		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 689
 690		complete_all(&cmd->t_transport_stop_comp);
 691		return 1;
 692	}
 693	cmd->transport_state &= ~CMD_T_ACTIVE;
 694	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 695
 696	/*
 697	 * Some fabric modules like tcm_loop can release their internally
 698	 * allocated I/O reference and struct se_cmd now.
 699	 *
 700	 * Fabric modules are expected to return '1' here if the se_cmd being
 701	 * passed is released at this point, or zero if not being released.
 702	 */
 703	return cmd->se_tfo->check_stop_free(cmd);
 704}
 705
 706static void transport_lun_remove_cmd(struct se_cmd *cmd)
 707{
 708	struct se_lun *lun = cmd->se_lun;
 709
 710	if (!lun)
 711		return;
 712
 
 
 
 713	if (cmpxchg(&cmd->lun_ref_active, true, false))
 714		percpu_ref_put(&lun->lun_ref);
 
 
 
 
 
 715}
 716
 717static void target_complete_failure_work(struct work_struct *work)
 718{
 719	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 720
 721	transport_generic_request_failure(cmd,
 722			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
 723}
 724
 725/*
 726 * Used when asking transport to copy Sense Data from the underlying
 727 * Linux/SCSI struct scsi_cmnd
 728 */
 729static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
 730{
 731	struct se_device *dev = cmd->se_dev;
 732
 733	WARN_ON(!cmd->se_lun);
 734
 735	if (!dev)
 736		return NULL;
 737
 738	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
 739		return NULL;
 740
 741	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
 742
 743	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
 744		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
 745	return cmd->sense_buffer;
 746}
 747
 748void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
 749{
 750	unsigned char *cmd_sense_buf;
 751	unsigned long flags;
 752
 753	spin_lock_irqsave(&cmd->t_state_lock, flags);
 754	cmd_sense_buf = transport_get_sense_buffer(cmd);
 755	if (!cmd_sense_buf) {
 756		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 757		return;
 758	}
 759
 760	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
 761	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
 762	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 763}
 764EXPORT_SYMBOL(transport_copy_sense_to_cmd);
 765
 766static void target_handle_abort(struct se_cmd *cmd)
 767{
 768	bool tas = cmd->transport_state & CMD_T_TAS;
 769	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
 770	int ret;
 771
 772	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
 773
 774	if (tas) {
 775		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
 776			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 777			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
 778				 cmd->t_task_cdb[0], cmd->tag);
 779			trace_target_cmd_complete(cmd);
 780			ret = cmd->se_tfo->queue_status(cmd);
 781			if (ret) {
 782				transport_handle_queue_full(cmd, cmd->se_dev,
 783							    ret, false);
 784				return;
 785			}
 786		} else {
 787			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
 788			cmd->se_tfo->queue_tm_rsp(cmd);
 789		}
 790	} else {
 791		/*
 792		 * Allow the fabric driver to unmap any resources before
 793		 * releasing the descriptor via TFO->release_cmd().
 794		 */
 795		cmd->se_tfo->aborted_task(cmd);
 796		if (ack_kref)
 797			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
 798		/*
 799		 * To do: establish a unit attention condition on the I_T
 800		 * nexus associated with cmd. See also the paragraph "Aborting
 801		 * commands" in SAM.
 802		 */
 803	}
 804
 805	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
 806
 807	transport_lun_remove_cmd(cmd);
 808
 809	transport_cmd_check_stop_to_fabric(cmd);
 810}
 811
 812static void target_abort_work(struct work_struct *work)
 813{
 814	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 815
 816	target_handle_abort(cmd);
 817}
 818
 819static bool target_cmd_interrupted(struct se_cmd *cmd)
 820{
 821	int post_ret;
 822
 823	if (cmd->transport_state & CMD_T_ABORTED) {
 824		if (cmd->transport_complete_callback)
 825			cmd->transport_complete_callback(cmd, false, &post_ret);
 826		INIT_WORK(&cmd->work, target_abort_work);
 827		queue_work(target_completion_wq, &cmd->work);
 828		return true;
 829	} else if (cmd->transport_state & CMD_T_STOP) {
 830		if (cmd->transport_complete_callback)
 831			cmd->transport_complete_callback(cmd, false, &post_ret);
 832		complete_all(&cmd->t_transport_stop_comp);
 833		return true;
 834	}
 835
 836	return false;
 837}
 838
 839/* May be called from interrupt context so must not sleep. */
 840void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 
 841{
 842	int success;
 
 843	unsigned long flags;
 844
 845	if (target_cmd_interrupted(cmd))
 846		return;
 847
 848	cmd->scsi_status = scsi_status;
 
 849
 850	spin_lock_irqsave(&cmd->t_state_lock, flags);
 851	switch (cmd->scsi_status) {
 852	case SAM_STAT_CHECK_CONDITION:
 853		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
 854			success = 1;
 855		else
 856			success = 0;
 857		break;
 858	default:
 859		success = 1;
 860		break;
 861	}
 862
 863	cmd->t_state = TRANSPORT_COMPLETE;
 864	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
 865	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 866
 867	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
 868		  target_complete_failure_work);
 869	if (cmd->se_cmd_flags & SCF_USE_CPUID)
 870		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
 
 871	else
 872		queue_work(target_completion_wq, &cmd->work);
 
 
 
 
 
 
 
 
 
 
 873}
 874EXPORT_SYMBOL(target_complete_cmd);
 875
 876void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
 877{
 878	if ((scsi_status == SAM_STAT_GOOD ||
 879	     cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
 880	    length < cmd->data_length) {
 881		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
 882			cmd->residual_count += cmd->data_length - length;
 883		} else {
 884			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
 885			cmd->residual_count = cmd->data_length - length;
 886		}
 887
 888		cmd->data_length = length;
 889	}
 
 
 
 
 
 
 
 
 
 890
 891	target_complete_cmd(cmd, scsi_status);
 892}
 893EXPORT_SYMBOL(target_complete_cmd_with_length);
 894
 895static void target_add_to_state_list(struct se_cmd *cmd)
 896{
 897	struct se_device *dev = cmd->se_dev;
 898	unsigned long flags;
 899
 900	spin_lock_irqsave(&dev->execute_task_lock, flags);
 901	if (!cmd->state_active) {
 902		list_add_tail(&cmd->state_list, &dev->state_list);
 
 903		cmd->state_active = true;
 904	}
 905	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 906}
 907
 908/*
 909 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 910 */
 911static void transport_write_pending_qf(struct se_cmd *cmd);
 912static void transport_complete_qf(struct se_cmd *cmd);
 913
 914void target_qf_do_work(struct work_struct *work)
 915{
 916	struct se_device *dev = container_of(work, struct se_device,
 917					qf_work_queue);
 918	LIST_HEAD(qf_cmd_list);
 919	struct se_cmd *cmd, *cmd_tmp;
 920
 921	spin_lock_irq(&dev->qf_cmd_lock);
 922	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
 923	spin_unlock_irq(&dev->qf_cmd_lock);
 924
 925	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
 926		list_del(&cmd->se_qf_node);
 927		atomic_dec_mb(&dev->dev_qf_count);
 928
 929		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
 930			" context: %s\n", cmd->se_tfo->fabric_name, cmd,
 931			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
 932			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
 933			: "UNKNOWN");
 934
 935		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
 936			transport_write_pending_qf(cmd);
 937		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
 938			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
 939			transport_complete_qf(cmd);
 940	}
 941}
 942
 943unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
 944{
 945	switch (cmd->data_direction) {
 946	case DMA_NONE:
 947		return "NONE";
 948	case DMA_FROM_DEVICE:
 949		return "READ";
 950	case DMA_TO_DEVICE:
 951		return "WRITE";
 952	case DMA_BIDIRECTIONAL:
 953		return "BIDI";
 954	default:
 955		break;
 956	}
 957
 958	return "UNKNOWN";
 959}
 960
 961void transport_dump_dev_state(
 962	struct se_device *dev,
 963	char *b,
 964	int *bl)
 965{
 966	*bl += sprintf(b + *bl, "Status: ");
 967	if (dev->export_count)
 968		*bl += sprintf(b + *bl, "ACTIVATED");
 969	else
 970		*bl += sprintf(b + *bl, "DEACTIVATED");
 971
 972	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
 973	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
 974		dev->dev_attrib.block_size,
 975		dev->dev_attrib.hw_max_sectors);
 976	*bl += sprintf(b + *bl, "        ");
 977}
 978
 979void transport_dump_vpd_proto_id(
 980	struct t10_vpd *vpd,
 981	unsigned char *p_buf,
 982	int p_buf_len)
 983{
 984	unsigned char buf[VPD_TMP_BUF_SIZE];
 985	int len;
 986
 987	memset(buf, 0, VPD_TMP_BUF_SIZE);
 988	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
 989
 990	switch (vpd->protocol_identifier) {
 991	case 0x00:
 992		sprintf(buf+len, "Fibre Channel\n");
 993		break;
 994	case 0x10:
 995		sprintf(buf+len, "Parallel SCSI\n");
 996		break;
 997	case 0x20:
 998		sprintf(buf+len, "SSA\n");
 999		break;
1000	case 0x30:
1001		sprintf(buf+len, "IEEE 1394\n");
1002		break;
1003	case 0x40:
1004		sprintf(buf+len, "SCSI Remote Direct Memory Access"
1005				" Protocol\n");
1006		break;
1007	case 0x50:
1008		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1009		break;
1010	case 0x60:
1011		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1012		break;
1013	case 0x70:
1014		sprintf(buf+len, "Automation/Drive Interface Transport"
1015				" Protocol\n");
1016		break;
1017	case 0x80:
1018		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1019		break;
1020	default:
1021		sprintf(buf+len, "Unknown 0x%02x\n",
1022				vpd->protocol_identifier);
1023		break;
1024	}
1025
1026	if (p_buf)
1027		strncpy(p_buf, buf, p_buf_len);
1028	else
1029		pr_debug("%s", buf);
1030}
1031
1032void
1033transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1034{
1035	/*
1036	 * Check if the Protocol Identifier Valid (PIV) bit is set..
1037	 *
1038	 * from spc3r23.pdf section 7.5.1
1039	 */
1040	 if (page_83[1] & 0x80) {
1041		vpd->protocol_identifier = (page_83[0] & 0xf0);
1042		vpd->protocol_identifier_set = 1;
1043		transport_dump_vpd_proto_id(vpd, NULL, 0);
1044	}
1045}
1046EXPORT_SYMBOL(transport_set_vpd_proto_id);
1047
1048int transport_dump_vpd_assoc(
1049	struct t10_vpd *vpd,
1050	unsigned char *p_buf,
1051	int p_buf_len)
1052{
1053	unsigned char buf[VPD_TMP_BUF_SIZE];
1054	int ret = 0;
1055	int len;
1056
1057	memset(buf, 0, VPD_TMP_BUF_SIZE);
1058	len = sprintf(buf, "T10 VPD Identifier Association: ");
1059
1060	switch (vpd->association) {
1061	case 0x00:
1062		sprintf(buf+len, "addressed logical unit\n");
1063		break;
1064	case 0x10:
1065		sprintf(buf+len, "target port\n");
1066		break;
1067	case 0x20:
1068		sprintf(buf+len, "SCSI target device\n");
1069		break;
1070	default:
1071		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1072		ret = -EINVAL;
1073		break;
1074	}
1075
1076	if (p_buf)
1077		strncpy(p_buf, buf, p_buf_len);
1078	else
1079		pr_debug("%s", buf);
1080
1081	return ret;
1082}
1083
1084int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1085{
1086	/*
1087	 * The VPD identification association..
1088	 *
1089	 * from spc3r23.pdf Section 7.6.3.1 Table 297
1090	 */
1091	vpd->association = (page_83[1] & 0x30);
1092	return transport_dump_vpd_assoc(vpd, NULL, 0);
1093}
1094EXPORT_SYMBOL(transport_set_vpd_assoc);
1095
1096int transport_dump_vpd_ident_type(
1097	struct t10_vpd *vpd,
1098	unsigned char *p_buf,
1099	int p_buf_len)
1100{
1101	unsigned char buf[VPD_TMP_BUF_SIZE];
1102	int ret = 0;
1103	int len;
1104
1105	memset(buf, 0, VPD_TMP_BUF_SIZE);
1106	len = sprintf(buf, "T10 VPD Identifier Type: ");
1107
1108	switch (vpd->device_identifier_type) {
1109	case 0x00:
1110		sprintf(buf+len, "Vendor specific\n");
1111		break;
1112	case 0x01:
1113		sprintf(buf+len, "T10 Vendor ID based\n");
1114		break;
1115	case 0x02:
1116		sprintf(buf+len, "EUI-64 based\n");
1117		break;
1118	case 0x03:
1119		sprintf(buf+len, "NAA\n");
1120		break;
1121	case 0x04:
1122		sprintf(buf+len, "Relative target port identifier\n");
1123		break;
1124	case 0x08:
1125		sprintf(buf+len, "SCSI name string\n");
1126		break;
1127	default:
1128		sprintf(buf+len, "Unsupported: 0x%02x\n",
1129				vpd->device_identifier_type);
1130		ret = -EINVAL;
1131		break;
1132	}
1133
1134	if (p_buf) {
1135		if (p_buf_len < strlen(buf)+1)
1136			return -EINVAL;
1137		strncpy(p_buf, buf, p_buf_len);
1138	} else {
1139		pr_debug("%s", buf);
1140	}
1141
1142	return ret;
1143}
1144
1145int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1146{
1147	/*
1148	 * The VPD identifier type..
1149	 *
1150	 * from spc3r23.pdf Section 7.6.3.1 Table 298
1151	 */
1152	vpd->device_identifier_type = (page_83[1] & 0x0f);
1153	return transport_dump_vpd_ident_type(vpd, NULL, 0);
1154}
1155EXPORT_SYMBOL(transport_set_vpd_ident_type);
1156
1157int transport_dump_vpd_ident(
1158	struct t10_vpd *vpd,
1159	unsigned char *p_buf,
1160	int p_buf_len)
1161{
1162	unsigned char buf[VPD_TMP_BUF_SIZE];
1163	int ret = 0;
1164
1165	memset(buf, 0, VPD_TMP_BUF_SIZE);
1166
1167	switch (vpd->device_identifier_code_set) {
1168	case 0x01: /* Binary */
1169		snprintf(buf, sizeof(buf),
1170			"T10 VPD Binary Device Identifier: %s\n",
1171			&vpd->device_identifier[0]);
1172		break;
1173	case 0x02: /* ASCII */
1174		snprintf(buf, sizeof(buf),
1175			"T10 VPD ASCII Device Identifier: %s\n",
1176			&vpd->device_identifier[0]);
1177		break;
1178	case 0x03: /* UTF-8 */
1179		snprintf(buf, sizeof(buf),
1180			"T10 VPD UTF-8 Device Identifier: %s\n",
1181			&vpd->device_identifier[0]);
1182		break;
1183	default:
1184		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1185			" 0x%02x", vpd->device_identifier_code_set);
1186		ret = -EINVAL;
1187		break;
1188	}
1189
1190	if (p_buf)
1191		strncpy(p_buf, buf, p_buf_len);
1192	else
1193		pr_debug("%s", buf);
1194
1195	return ret;
1196}
1197
1198int
1199transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1200{
1201	static const char hex_str[] = "0123456789abcdef";
1202	int j = 0, i = 4; /* offset to start of the identifier */
1203
1204	/*
1205	 * The VPD Code Set (encoding)
1206	 *
1207	 * from spc3r23.pdf Section 7.6.3.1 Table 296
1208	 */
1209	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1210	switch (vpd->device_identifier_code_set) {
1211	case 0x01: /* Binary */
1212		vpd->device_identifier[j++] =
1213				hex_str[vpd->device_identifier_type];
1214		while (i < (4 + page_83[3])) {
1215			vpd->device_identifier[j++] =
1216				hex_str[(page_83[i] & 0xf0) >> 4];
1217			vpd->device_identifier[j++] =
1218				hex_str[page_83[i] & 0x0f];
1219			i++;
1220		}
1221		break;
1222	case 0x02: /* ASCII */
1223	case 0x03: /* UTF-8 */
1224		while (i < (4 + page_83[3]))
1225			vpd->device_identifier[j++] = page_83[i++];
1226		break;
1227	default:
1228		break;
1229	}
1230
1231	return transport_dump_vpd_ident(vpd, NULL, 0);
1232}
1233EXPORT_SYMBOL(transport_set_vpd_ident);
1234
1235static sense_reason_t
1236target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
1237			       unsigned int size)
1238{
1239	u32 mtl;
1240
1241	if (!cmd->se_tfo->max_data_sg_nents)
1242		return TCM_NO_SENSE;
1243	/*
1244	 * Check if fabric enforced maximum SGL entries per I/O descriptor
1245	 * exceeds se_cmd->data_length.  If true, set SCF_UNDERFLOW_BIT +
1246	 * residual_count and reduce original cmd->data_length to maximum
1247	 * length based on single PAGE_SIZE entry scatter-lists.
1248	 */
1249	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
1250	if (cmd->data_length > mtl) {
1251		/*
1252		 * If an existing CDB overflow is present, calculate new residual
1253		 * based on CDB size minus fabric maximum transfer length.
1254		 *
1255		 * If an existing CDB underflow is present, calculate new residual
1256		 * based on original cmd->data_length minus fabric maximum transfer
1257		 * length.
1258		 *
1259		 * Otherwise, set the underflow residual based on cmd->data_length
1260		 * minus fabric maximum transfer length.
1261		 */
1262		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1263			cmd->residual_count = (size - mtl);
1264		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1265			u32 orig_dl = size + cmd->residual_count;
1266			cmd->residual_count = (orig_dl - mtl);
1267		} else {
1268			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1269			cmd->residual_count = (cmd->data_length - mtl);
1270		}
1271		cmd->data_length = mtl;
1272		/*
1273		 * Reset sbc_check_prot() calculated protection payload
1274		 * length based upon the new smaller MTL.
1275		 */
1276		if (cmd->prot_length) {
1277			u32 sectors = (mtl / dev->dev_attrib.block_size);
1278			cmd->prot_length = dev->prot_length * sectors;
1279		}
1280	}
1281	return TCM_NO_SENSE;
1282}
1283
1284/**
1285 * target_cmd_size_check - Check whether there will be a residual.
1286 * @cmd: SCSI command.
1287 * @size: Data buffer size derived from CDB. The data buffer size provided by
1288 *   the SCSI transport driver is available in @cmd->data_length.
1289 *
1290 * Compare the data buffer size from the CDB with the data buffer limit from the transport
1291 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
1292 *
1293 * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
1294 *
1295 * Return: TCM_NO_SENSE
1296 */
1297sense_reason_t
1298target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1299{
1300	struct se_device *dev = cmd->se_dev;
1301
1302	if (cmd->unknown_data_length) {
1303		cmd->data_length = size;
1304	} else if (size != cmd->data_length) {
1305		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
1306			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
1307			" 0x%02x\n", cmd->se_tfo->fabric_name,
1308				cmd->data_length, size, cmd->t_task_cdb[0]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1309
1310		if (cmd->data_direction == DMA_TO_DEVICE) {
1311			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1312				pr_err_ratelimited("Rejecting underflow/overflow"
1313						   " for WRITE data CDB\n");
1314				return TCM_INVALID_CDB_FIELD;
1315			}
1316			/*
1317			 * Some fabric drivers like iscsi-target still expect to
1318			 * always reject overflow writes.  Reject this case until
1319			 * full fabric driver level support for overflow writes
1320			 * is introduced tree-wide.
1321			 */
1322			if (size > cmd->data_length) {
1323				pr_err_ratelimited("Rejecting overflow for"
1324						   " WRITE control CDB\n");
1325				return TCM_INVALID_CDB_FIELD;
1326			}
1327		}
1328		/*
1329		 * Reject READ_* or WRITE_* with overflow/underflow for
1330		 * type SCF_SCSI_DATA_CDB.
1331		 */
1332		if (dev->dev_attrib.block_size != 512)  {
1333			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
1334				" CDB on non 512-byte sector setup subsystem"
1335				" plugin: %s\n", dev->transport->name);
1336			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
1337			return TCM_INVALID_CDB_FIELD;
1338		}
1339		/*
1340		 * For the overflow case keep the existing fabric provided
1341		 * ->data_length.  Otherwise for the underflow case, reset
1342		 * ->data_length to the smaller SCSI expected data transfer
1343		 * length.
1344		 */
1345		if (size > cmd->data_length) {
1346			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1347			cmd->residual_count = (size - cmd->data_length);
1348		} else {
1349			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1350			cmd->residual_count = (cmd->data_length - size);
1351			cmd->data_length = size;
1352		}
1353	}
1354
1355	return target_check_max_data_sg_nents(cmd, dev, size);
1356
1357}
1358
1359/*
1360 * Used by fabric modules containing a local struct se_cmd within their
1361 * fabric dependent per I/O descriptor.
1362 *
1363 * Preserves the value of @cmd->tag.
1364 */
1365void transport_init_se_cmd(
1366	struct se_cmd *cmd,
1367	const struct target_core_fabric_ops *tfo,
1368	struct se_session *se_sess,
1369	u32 data_length,
1370	int data_direction,
1371	int task_attr,
1372	unsigned char *sense_buffer, u64 unpacked_lun)
1373{
1374	INIT_LIST_HEAD(&cmd->se_delayed_node);
1375	INIT_LIST_HEAD(&cmd->se_qf_node);
1376	INIT_LIST_HEAD(&cmd->se_cmd_list);
1377	INIT_LIST_HEAD(&cmd->state_list);
1378	init_completion(&cmd->t_transport_stop_comp);
1379	cmd->free_compl = NULL;
1380	cmd->abrt_compl = NULL;
1381	spin_lock_init(&cmd->t_state_lock);
1382	INIT_WORK(&cmd->work, NULL);
1383	kref_init(&cmd->cmd_kref);
1384
 
1385	cmd->se_tfo = tfo;
1386	cmd->se_sess = se_sess;
1387	cmd->data_length = data_length;
1388	cmd->data_direction = data_direction;
1389	cmd->sam_task_attr = task_attr;
1390	cmd->sense_buffer = sense_buffer;
1391	cmd->orig_fe_lun = unpacked_lun;
1392
 
 
 
1393	cmd->state_active = false;
1394}
1395EXPORT_SYMBOL(transport_init_se_cmd);
1396
1397static sense_reason_t
1398transport_check_alloc_task_attr(struct se_cmd *cmd)
1399{
1400	struct se_device *dev = cmd->se_dev;
1401
1402	/*
1403	 * Check if SAM Task Attribute emulation is enabled for this
1404	 * struct se_device storage object
1405	 */
1406	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1407		return 0;
1408
1409	if (cmd->sam_task_attr == TCM_ACA_TAG) {
1410		pr_debug("SAM Task Attribute ACA"
1411			" emulation is not supported\n");
1412		return TCM_INVALID_CDB_FIELD;
1413	}
1414
1415	return 0;
1416}
1417
1418sense_reason_t
1419target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
1420{
1421	sense_reason_t ret;
1422
1423	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1424	/*
1425	 * Ensure that the received CDB is less than the max (252 + 8) bytes
1426	 * for VARIABLE_LENGTH_CMD
1427	 */
1428	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1429		pr_err("Received SCSI CDB with command_size: %d that"
1430			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1431			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1432		ret = TCM_INVALID_CDB_FIELD;
1433		goto err;
1434	}
1435	/*
1436	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1437	 * allocate the additional extended CDB buffer now..  Otherwise
1438	 * setup the pointer from __t_task_cdb to t_task_cdb.
1439	 */
1440	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1441		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1442						GFP_KERNEL);
1443		if (!cmd->t_task_cdb) {
1444			pr_err("Unable to allocate cmd->t_task_cdb"
1445				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1446				scsi_command_size(cdb),
1447				(unsigned long)sizeof(cmd->__t_task_cdb));
1448			ret = TCM_OUT_OF_RESOURCES;
1449			goto err;
1450		}
1451	}
1452	/*
1453	 * Copy the original CDB into cmd->
1454	 */
1455	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1456
1457	trace_target_sequencer_start(cmd);
1458	return 0;
1459
1460err:
1461	/*
1462	 * Copy the CDB here to allow trace_target_cmd_complete() to
1463	 * print the cdb to the trace buffers.
1464	 */
1465	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
1466					 (unsigned int)TCM_MAX_COMMAND_SIZE));
1467	return ret;
1468}
1469EXPORT_SYMBOL(target_cmd_init_cdb);
1470
1471sense_reason_t
1472target_cmd_parse_cdb(struct se_cmd *cmd)
1473{
1474	struct se_device *dev = cmd->se_dev;
1475	sense_reason_t ret;
1476
1477	ret = dev->transport->parse_cdb(cmd);
1478	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1479		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1480				    cmd->se_tfo->fabric_name,
1481				    cmd->se_sess->se_node_acl->initiatorname,
1482				    cmd->t_task_cdb[0]);
1483	if (ret)
1484		return ret;
1485
1486	ret = transport_check_alloc_task_attr(cmd);
1487	if (ret)
1488		return ret;
1489
1490	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1491	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
1492	return 0;
1493}
1494EXPORT_SYMBOL(target_cmd_parse_cdb);
1495
1496/*
1497 * Used by fabric module frontends to queue tasks directly.
1498 * May only be used from process context.
1499 */
1500int transport_handle_cdb_direct(
1501	struct se_cmd *cmd)
1502{
1503	sense_reason_t ret;
1504
 
 
1505	if (!cmd->se_lun) {
1506		dump_stack();
1507		pr_err("cmd->se_lun is NULL\n");
1508		return -EINVAL;
1509	}
1510	if (in_interrupt()) {
1511		dump_stack();
1512		pr_err("transport_generic_handle_cdb cannot be called"
1513				" from interrupt context\n");
1514		return -EINVAL;
1515	}
1516	/*
1517	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
1518	 * outstanding descriptors are handled correctly during shutdown via
1519	 * transport_wait_for_tasks()
1520	 *
1521	 * Also, we don't take cmd->t_state_lock here as we only expect
1522	 * this to be called for initial descriptor submission.
1523	 */
1524	cmd->t_state = TRANSPORT_NEW_CMD;
1525	cmd->transport_state |= CMD_T_ACTIVE;
1526
1527	/*
1528	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1529	 * so follow TRANSPORT_NEW_CMD processing thread context usage
1530	 * and call transport_generic_request_failure() if necessary..
1531	 */
1532	ret = transport_generic_new_cmd(cmd);
1533	if (ret)
1534		transport_generic_request_failure(cmd, ret);
1535	return 0;
1536}
1537EXPORT_SYMBOL(transport_handle_cdb_direct);
1538
1539sense_reason_t
1540transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1541		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1542{
1543	if (!sgl || !sgl_count)
1544		return 0;
1545
1546	/*
1547	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1548	 * scatterlists already have been set to follow what the fabric
1549	 * passes for the original expected data transfer length.
1550	 */
1551	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1552		pr_warn("Rejecting SCSI DATA overflow for fabric using"
1553			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1554		return TCM_INVALID_CDB_FIELD;
1555	}
1556
1557	cmd->t_data_sg = sgl;
1558	cmd->t_data_nents = sgl_count;
1559	cmd->t_bidi_data_sg = sgl_bidi;
1560	cmd->t_bidi_data_nents = sgl_bidi_count;
1561
1562	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1563	return 0;
1564}
1565
1566/**
1567 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1568 * 			 se_cmd + use pre-allocated SGL memory.
1569 *
1570 * @se_cmd: command descriptor to submit
1571 * @se_sess: associated se_sess for endpoint
1572 * @cdb: pointer to SCSI CDB
1573 * @sense: pointer to SCSI sense buffer
1574 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1575 * @data_length: fabric expected data transfer length
1576 * @task_attr: SAM task attribute
1577 * @data_dir: DMA data direction
1578 * @flags: flags for command submission from target_sc_flags_tables
1579 * @sgl: struct scatterlist memory for unidirectional mapping
1580 * @sgl_count: scatterlist count for unidirectional mapping
1581 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1582 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1583 * @sgl_prot: struct scatterlist memory protection information
1584 * @sgl_prot_count: scatterlist count for protection information
1585 *
1586 * Task tags are supported if the caller has set @se_cmd->tag.
1587 *
1588 * Returns non zero to signal active I/O shutdown failure.  All other
1589 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1590 * but still return zero here.
1591 *
1592 * This may only be called from process context, and also currently
1593 * assumes internal allocation of fabric payload buffer by target-core.
1594 */
1595int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
1596		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1597		u32 data_length, int task_attr, int data_dir, int flags,
1598		struct scatterlist *sgl, u32 sgl_count,
1599		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1600		struct scatterlist *sgl_prot, u32 sgl_prot_count)
1601{
1602	struct se_portal_group *se_tpg;
1603	sense_reason_t rc;
1604	int ret;
1605
1606	se_tpg = se_sess->se_tpg;
1607	BUG_ON(!se_tpg);
1608	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1609	BUG_ON(in_interrupt());
1610	/*
1611	 * Initialize se_cmd for target operation.  From this point
1612	 * exceptions are handled by sending exception status via
1613	 * target_core_fabric_ops->queue_status() callback
1614	 */
1615	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1616				data_length, data_dir, task_attr, sense,
1617				unpacked_lun);
1618
1619	if (flags & TARGET_SCF_USE_CPUID)
1620		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1621	else
1622		se_cmd->cpuid = WORK_CPU_UNBOUND;
 
 
 
1623
1624	if (flags & TARGET_SCF_UNKNOWN_SIZE)
1625		se_cmd->unknown_data_length = 1;
1626	/*
1627	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1628	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
1629	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1630	 * kref_put() to happen during fabric packet acknowledgement.
1631	 */
1632	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1633	if (ret)
1634		return ret;
1635	/*
1636	 * Signal bidirectional data payloads to target-core
 
 
1637	 */
1638	if (flags & TARGET_SCF_BIDI_OP)
1639		se_cmd->se_cmd_flags |= SCF_BIDI;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1640
1641	rc = target_cmd_init_cdb(se_cmd, cdb);
1642	if (rc) {
1643		transport_send_check_condition_and_sense(se_cmd, rc, 0);
1644		target_put_sess_cmd(se_cmd);
1645		return 0;
1646	}
1647
1648	/*
1649	 * Locate se_lun pointer and attach it to struct se_cmd
1650	 */
1651	rc = transport_lookup_cmd_lun(se_cmd);
1652	if (rc) {
1653		transport_send_check_condition_and_sense(se_cmd, rc, 0);
1654		target_put_sess_cmd(se_cmd);
1655		return 0;
1656	}
1657
1658	rc = target_cmd_parse_cdb(se_cmd);
1659	if (rc != 0) {
1660		transport_generic_request_failure(se_cmd, rc);
1661		return 0;
1662	}
1663
1664	/*
1665	 * Save pointers for SGLs containing protection information,
1666	 * if present.
1667	 */
1668	if (sgl_prot_count) {
1669		se_cmd->t_prot_sg = sgl_prot;
1670		se_cmd->t_prot_nents = sgl_prot_count;
1671		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1672	}
1673
1674	/*
1675	 * When a non zero sgl_count has been passed perform SGL passthrough
1676	 * mapping for pre-allocated fabric memory instead of having target
1677	 * core perform an internal SGL allocation..
1678	 */
1679	if (sgl_count != 0) {
1680		BUG_ON(!sgl);
1681
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1682		/*
1683		 * A work-around for tcm_loop as some userspace code via
1684		 * scsi-generic do not memset their associated read buffers,
1685		 * so go ahead and do that here for type non-data CDBs.  Also
1686		 * note that this is currently guaranteed to be a single SGL
1687		 * for this case by target core in target_setup_cmd_from_cdb()
1688		 * -> transport_generic_cmd_sequencer().
1689		 */
1690		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1691		     se_cmd->data_direction == DMA_FROM_DEVICE) {
1692			unsigned char *buf = NULL;
1693
1694			if (sgl)
1695				buf = kmap(sg_page(sgl)) + sgl->offset;
1696
1697			if (buf) {
1698				memset(buf, 0, sgl->length);
1699				kunmap(sg_page(sgl));
1700			}
1701		}
1702
1703		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1704				sgl_bidi, sgl_bidi_count);
1705		if (rc != 0) {
1706			transport_generic_request_failure(se_cmd, rc);
1707			return 0;
1708		}
1709	}
1710
1711	/*
1712	 * Check if we need to delay processing because of ALUA
1713	 * Active/NonOptimized primary access state..
1714	 */
1715	core_alua_check_nonop_delay(se_cmd);
1716
1717	transport_handle_cdb_direct(se_cmd);
1718	return 0;
1719}
1720EXPORT_SYMBOL(target_submit_cmd_map_sgls);
1721
1722/**
1723 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1724 *
1725 * @se_cmd: command descriptor to submit
1726 * @se_sess: associated se_sess for endpoint
1727 * @cdb: pointer to SCSI CDB
1728 * @sense: pointer to SCSI sense buffer
1729 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1730 * @data_length: fabric expected data transfer length
1731 * @task_attr: SAM task attribute
1732 * @data_dir: DMA data direction
1733 * @flags: flags for command submission from target_sc_flags_tables
1734 *
1735 * Task tags are supported if the caller has set @se_cmd->tag.
1736 *
1737 * Returns non zero to signal active I/O shutdown failure.  All other
1738 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1739 * but still return zero here.
1740 *
1741 * This may only be called from process context, and also currently
1742 * assumes internal allocation of fabric payload buffer by target-core.
1743 *
1744 * It also assumes internal target core SGL memory allocation.
1745 */
1746int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1747		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1748		u32 data_length, int task_attr, int data_dir, int flags)
1749{
1750	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1751			unpacked_lun, data_length, task_attr, data_dir,
1752			flags, NULL, 0, NULL, 0, NULL, 0);
1753}
1754EXPORT_SYMBOL(target_submit_cmd);
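
/*
 * Example (illustrative sketch): a minimal fabric submission path built on
 * target_submit_cmd().  The example_fabric_submit() name is hypothetical;
 * the types, flags and target-core calls are the ones exported above.
 */
static void __maybe_unused example_fabric_submit(struct se_cmd *se_cmd,
		struct se_session *se_sess, unsigned char *cdb,
		unsigned char *sense, u64 unpacked_lun, u32 data_length,
		int data_dir, u64 wire_tag)
{
	/* The fabric assigns its task tag before submission. */
	se_cmd->tag = wire_tag;

	/*
	 * A non-zero return only signals active I/O shutdown; all other
	 * setup failures have already been reported to the initiator as
	 * CHECK_CONDITION by target-core.
	 */
	if (target_submit_cmd(se_cmd, se_sess, cdb, sense, unpacked_lun,
			      data_length, TCM_SIMPLE_TAG, data_dir, 0))
		target_send_busy(se_cmd);
}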
1755
1756static void target_complete_tmr_failure(struct work_struct *work)
1757{
1758	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1759
1760	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1761	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1762
1763	transport_lun_remove_cmd(se_cmd);
1764	transport_cmd_check_stop_to_fabric(se_cmd);
1765}
1766
1767static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
1768				       u64 *unpacked_lun)
1769{
1770	struct se_cmd *se_cmd;
1771	unsigned long flags;
1772	bool ret = false;
1773
1774	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1775	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1776		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1777			continue;
1778
1779		if (se_cmd->tag == tag) {
1780			*unpacked_lun = se_cmd->orig_fe_lun;
1781			ret = true;
1782			break;
1783		}
1784	}
1785	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1786
1787	return ret;
1788}
1789
1790/**
1791 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1792 *                     for TMR CDBs
1793 *
1794 * @se_cmd: command descriptor to submit
1795 * @se_sess: associated se_sess for endpoint
1796 * @sense: pointer to SCSI sense buffer
1797 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1798 * @fabric_tmr_ptr: fabric context for TMR req
1799 * @tm_type: Type of TM request
1800 * @gfp: gfp type for caller
1801 * @tag: referenced task tag for TMR_ABORT_TASK
1802 * @flags: submit cmd flags
1803 *
1804 * Callable from all contexts.
1805 */
1806
1807int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1808		unsigned char *sense, u64 unpacked_lun,
1809		void *fabric_tmr_ptr, unsigned char tm_type,
1810		gfp_t gfp, u64 tag, int flags)
1811{
1812	struct se_portal_group *se_tpg;
1813	int ret;
1814
1815	se_tpg = se_sess->se_tpg;
1816	BUG_ON(!se_tpg);
1817
1818	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1819			      0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
1820	/*
1821	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1822	 * allocation failure.
1823	 */
1824	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1825	if (ret < 0)
1826		return -ENOMEM;
1827
1828	if (tm_type == TMR_ABORT_TASK)
1829		se_cmd->se_tmr_req->ref_task_tag = tag;
1830
1831	/* See target_submit_cmd for commentary */
1832	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1833	if (ret) {
1834		core_tmr_release_req(se_cmd->se_tmr_req);
1835		return ret;
1836	}
1837	/*
1838	 * If this is ABORT_TASK with no explicit fabric provided LUN,
1839	 * go ahead and search active session tags for a match to figure
1840	 * out unpacked_lun for the original se_cmd.
1841	 */
1842	if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
1843		if (!target_lookup_lun_from_tag(se_sess, tag,
1844						&se_cmd->orig_fe_lun))
1845			goto failure;
1846	}
1847
1848	ret = transport_lookup_tmr_lun(se_cmd);
1849	if (ret)
1850		goto failure;
1851
1852	transport_generic_handle_tmr(se_cmd);
1853	return 0;
1854
1855	/*
1856	 * For callback during failure handling, push this work off
1857	 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1858	 */
1859failure:
1860	INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1861	schedule_work(&se_cmd->work);
1862	return 0;
1863}
1864EXPORT_SYMBOL(target_submit_tmr);
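
/*
 * Example (illustrative sketch): submitting an ABORT_TASK TMR and letting
 * target-core resolve the LUN from the referenced task tag.  The
 * example_fabric_abort_task() name is hypothetical.
 */
static int __maybe_unused example_fabric_abort_task(struct se_cmd *tmr_cmd,
		struct se_session *se_sess, unsigned char *sense,
		u64 ref_task_tag)
{
	/*
	 * unpacked_lun is passed as 0 because TARGET_SCF_LOOKUP_LUN_FROM_TAG
	 * asks the core to derive it from the aborted command's tag.  A
	 * negative return means the request could not be queued; otherwise
	 * completion arrives through ->queue_tm_rsp().
	 */
	return target_submit_tmr(tmr_cmd, se_sess, sense, 0, NULL,
				 TMR_ABORT_TASK, GFP_KERNEL, ref_task_tag,
				 TARGET_SCF_LOOKUP_LUN_FROM_TAG);
}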
1865
1866/*
1867 * Handle SAM-esque emulation for generic transport request failures.
1868 */
1869void transport_generic_request_failure(struct se_cmd *cmd,
1870		sense_reason_t sense_reason)
1871{
1872	int ret = 0, post_ret;
1873
1874	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
1875		 sense_reason);
1876	target_show_cmd("-----[ ", cmd);
1877
1878	/*
1879	 * For SAM Task Attribute emulation for failed struct se_cmd
1880	 */
1881	transport_complete_task_attr(cmd);
1882
1883	if (cmd->transport_complete_callback)
1884		cmd->transport_complete_callback(cmd, false, &post_ret);
1885
1886	if (cmd->transport_state & CMD_T_ABORTED) {
1887		INIT_WORK(&cmd->work, target_abort_work);
1888		queue_work(target_completion_wq, &cmd->work);
1889		return;
1890	}
1891
1892	switch (sense_reason) {
1893	case TCM_NON_EXISTENT_LUN:
1894	case TCM_UNSUPPORTED_SCSI_OPCODE:
1895	case TCM_INVALID_CDB_FIELD:
1896	case TCM_INVALID_PARAMETER_LIST:
1897	case TCM_PARAMETER_LIST_LENGTH_ERROR:
1898	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1899	case TCM_UNKNOWN_MODE_PAGE:
1900	case TCM_WRITE_PROTECTED:
1901	case TCM_ADDRESS_OUT_OF_RANGE:
1902	case TCM_CHECK_CONDITION_ABORT_CMD:
1903	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1904	case TCM_CHECK_CONDITION_NOT_READY:
1905	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1906	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1907	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1908	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1909	case TCM_TOO_MANY_TARGET_DESCS:
1910	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
1911	case TCM_TOO_MANY_SEGMENT_DESCS:
1912	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
1913		break;
1914	case TCM_OUT_OF_RESOURCES:
1915		cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
1916		goto queue_status;
1917	case TCM_LUN_BUSY:
1918		cmd->scsi_status = SAM_STAT_BUSY;
1919		goto queue_status;
1920	case TCM_RESERVATION_CONFLICT:
1921		/*
1922		 * No SENSE Data payload for this case, set SCSI Status
1923		 * and queue the response to $FABRIC_MOD.
1924		 *
1925		 * Uses linux/include/scsi/scsi.h SAM status codes defs
1926		 */
1927		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1928		/*
1929		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1930		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1931		 * CONFLICT STATUS.
1932		 *
1933		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1934		 */
1935		if (cmd->se_sess &&
1936		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
1937					== TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
1938			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1939					       cmd->orig_fe_lun, 0x2C,
1940					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1941		}
1942
1943		goto queue_status;
1944	default:
1945		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1946			cmd->t_task_cdb[0], sense_reason);
1947		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1948		break;
1949	}
1950
1951	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1952	if (ret)
1953		goto queue_full;
1954
1955check_stop:
1956	transport_lun_remove_cmd(cmd);
1957	transport_cmd_check_stop_to_fabric(cmd);
1958	return;
1959
1960queue_status:
1961	trace_target_cmd_complete(cmd);
1962	ret = cmd->se_tfo->queue_status(cmd);
1963	if (!ret)
1964		goto check_stop;
1965queue_full:
1966	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1967}
1968EXPORT_SYMBOL(transport_generic_request_failure);
1969
1970void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1971{
1972	sense_reason_t ret;
1973
1974	if (!cmd->execute_cmd) {
1975		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1976		goto err;
1977	}
1978	if (do_checks) {
1979		/*
1980		 * Check for an existing UNIT ATTENTION condition after
1981		 * target_handle_task_attr() has done SAM task attr
1982		 * checking, and may already have deferred execution
1983		 * out to target_restart_delayed_cmds() context.
1984		 */
1985		ret = target_scsi3_ua_check(cmd);
1986		if (ret)
1987			goto err;
1988
1989		ret = target_alua_state_check(cmd);
1990		if (ret)
1991			goto err;
1992
1993		ret = target_check_reservation(cmd);
1994		if (ret) {
1995			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1996			goto err;
1997		}
1998	}
1999
2000	ret = cmd->execute_cmd(cmd);
2001	if (!ret)
2002		return;
2003err:
2004	spin_lock_irq(&cmd->t_state_lock);
2005	cmd->transport_state &= ~CMD_T_SENT;
2006	spin_unlock_irq(&cmd->t_state_lock);
2007
2008	transport_generic_request_failure(cmd, ret);
2009}
2010
2011static int target_write_prot_action(struct se_cmd *cmd)
2012{
2013	u32 sectors;
2014	/*
2015	 * Perform WRITE_INSERT of PI using software emulation when backend
2016	 * device has PI enabled, if the transport has not already generated
2017	 * PI using hardware WRITE_INSERT offload.
2018	 */
2019	switch (cmd->prot_op) {
2020	case TARGET_PROT_DOUT_INSERT:
2021		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
2022			sbc_dif_generate(cmd);
2023		break;
2024	case TARGET_PROT_DOUT_STRIP:
2025		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
2026			break;
2027
2028		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
2029		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2030					     sectors, 0, cmd->t_prot_sg, 0);
2031		if (unlikely(cmd->pi_err)) {
2032			spin_lock_irq(&cmd->t_state_lock);
2033			cmd->transport_state &= ~CMD_T_SENT;
2034			spin_unlock_irq(&cmd->t_state_lock);
2035			transport_generic_request_failure(cmd, cmd->pi_err);
2036			return -1;
2037		}
2038		break;
2039	default:
2040		break;
2041	}
2042
2043	return 0;
2044}
2045
2046static bool target_handle_task_attr(struct se_cmd *cmd)
2047{
2048	struct se_device *dev = cmd->se_dev;
2049
2050	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2051		return false;
2052
2053	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
2054
2055	/*
2056	 * Check for a HEAD_OF_QUEUE attribute, and if present return false
2057	 * so the passed struct se_cmd is executed ahead of the queued list.
2058	 */
2059	switch (cmd->sam_task_attr) {
2060	case TCM_HEAD_TAG:
2061		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
2062			 cmd->t_task_cdb[0]);
2063		return false;
2064	case TCM_ORDERED_TAG:
2065		atomic_inc_mb(&dev->dev_ordered_sync);
2066
2067		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
2068			 cmd->t_task_cdb[0]);
2069
2070		/*
2071		 * Execute an ORDERED command if no other older commands
2072		 * exist that need to be completed first.
2073		 */
2074		if (!atomic_read(&dev->simple_cmds))
2075			return false;
2076		break;
2077	default:
2078		/*
2079		 * For SIMPLE and UNTAGGED Task Attribute commands
2080		 */
2081		atomic_inc_mb(&dev->simple_cmds);
2082		break;
2083	}
2084
2085	if (atomic_read(&dev->dev_ordered_sync) == 0)
2086		return false;
2087
2088	spin_lock(&dev->delayed_cmd_lock);
2089	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
2090	spin_unlock(&dev->delayed_cmd_lock);
2091
2092	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
2093		cmd->t_task_cdb[0], cmd->sam_task_attr);
2094	return true;
2095}
2096
2097void target_execute_cmd(struct se_cmd *cmd)
2098{
2099	/*
2100	 * Determine if frontend context caller is requesting the stopping of
2101	 * this command for frontend exceptions.
2102	 *
2103	 * If the received CDB has already been aborted stop processing it here.
2104	 */
2105	if (target_cmd_interrupted(cmd))
2106		return;
2107
2108	spin_lock_irq(&cmd->t_state_lock);
2109	cmd->t_state = TRANSPORT_PROCESSING;
2110	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
2111	spin_unlock_irq(&cmd->t_state_lock);
2112
2113	if (target_write_prot_action(cmd))
2114		return;
2115
2116	if (target_handle_task_attr(cmd)) {
2117		spin_lock_irq(&cmd->t_state_lock);
2118		cmd->transport_state &= ~CMD_T_SENT;
2119		spin_unlock_irq(&cmd->t_state_lock);
2120		return;
2121	}
2122
2123	__target_execute_cmd(cmd, true);
2124}
2125EXPORT_SYMBOL(target_execute_cmd);
2126
2127/*
2128 * Process all commands up to the last received ORDERED task attribute which
2129 * requires another blocking boundary
2130 */
2131static void target_restart_delayed_cmds(struct se_device *dev)
2132{
2133	for (;;) {
2134		struct se_cmd *cmd;
2135
2136		spin_lock(&dev->delayed_cmd_lock);
2137		if (list_empty(&dev->delayed_cmd_list)) {
2138			spin_unlock(&dev->delayed_cmd_lock);
2139			break;
2140		}
2141
2142		cmd = list_entry(dev->delayed_cmd_list.next,
2143				 struct se_cmd, se_delayed_node);
2144		list_del(&cmd->se_delayed_node);
2145		spin_unlock(&dev->delayed_cmd_lock);
2146
2147		cmd->transport_state |= CMD_T_SENT;
2148
2149		__target_execute_cmd(cmd, true);
2150
2151		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
2152			break;
2153	}
2154}
2155
2156/*
2157 * Called from I/O completion to determine which dormant/delayed
2158 * and ordered cmds need to have their tasks added to the execution queue.
2159 */
2160static void transport_complete_task_attr(struct se_cmd *cmd)
2161{
2162	struct se_device *dev = cmd->se_dev;
2163
2164	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2165		return;
2166
2167	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2168		goto restart;
2169
2170	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2171		atomic_dec_mb(&dev->simple_cmds);
2172		dev->dev_cur_ordered_id++;
2173	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2174		dev->dev_cur_ordered_id++;
2175		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2176			 dev->dev_cur_ordered_id);
2177	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2178		atomic_dec_mb(&dev->dev_ordered_sync);
2179
2180		dev->dev_cur_ordered_id++;
2181		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2182			 dev->dev_cur_ordered_id);
2183	}
2184	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2185
2186restart:
2187	target_restart_delayed_cmds(dev);
2188}
2189
2190static void transport_complete_qf(struct se_cmd *cmd)
2191{
2192	int ret = 0;
2193
2194	transport_complete_task_attr(cmd);
2195	/*
2196	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
2197	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
2198	 * the same callbacks should not be retried.  Return CHECK_CONDITION
2199	 * if a scsi_status is not already set.
2200	 *
2201	 * If a fabric driver ->queue_status() has returned non zero, always
2202	 * keep retrying no matter what.
2203	 */
2204	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2205		if (cmd->scsi_status)
2206			goto queue_status;
2207
2208		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2209		goto queue_status;
2210	}
2211
2212	/*
2213	 * Check if we need to send a sense buffer from
2214	 * the struct se_cmd in question. We do NOT want
2215	 * to take this path if the IO has been marked as
2216	 * needing to be treated like a "normal read". This
2217	 * is the case if it's a tape read, and either the
2218	 * FM, EOM, or ILI bits are set, but there is no
2219	 * sense data.
2220	 */
2221	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2222	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2223		goto queue_status;
2224
2225	switch (cmd->data_direction) {
2226	case DMA_FROM_DEVICE:
2227		/* queue status if not treating this as a normal read */
2228		if (cmd->scsi_status &&
2229		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2230			goto queue_status;
2231
2232		trace_target_cmd_complete(cmd);
2233		ret = cmd->se_tfo->queue_data_in(cmd);
2234		break;
2235	case DMA_TO_DEVICE:
2236		if (cmd->se_cmd_flags & SCF_BIDI) {
2237			ret = cmd->se_tfo->queue_data_in(cmd);
2238			break;
2239		}
2240		fallthrough;
2241	case DMA_NONE:
2242queue_status:
2243		trace_target_cmd_complete(cmd);
2244		ret = cmd->se_tfo->queue_status(cmd);
2245		break;
2246	default:
2247		break;
2248	}
2249
2250	if (ret < 0) {
2251		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2252		return;
2253	}
2254	transport_lun_remove_cmd(cmd);
2255	transport_cmd_check_stop_to_fabric(cmd);
2256}
2257
2258static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2259					int err, bool write_pending)
2260{
2261	/*
2262	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2263	 * ->queue_data_in() callbacks from new process context.
2264	 *
2265	 * Otherwise for other errors, transport_complete_qf() will send
2266	 * CHECK_CONDITION via ->queue_status() instead of attempting to
2267	 * retry associated fabric driver data-transfer callbacks.
2268	 */
2269	if (err == -EAGAIN || err == -ENOMEM) {
2270		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2271						 TRANSPORT_COMPLETE_QF_OK;
2272	} else {
2273		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2274		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2275	}
2276
2277	spin_lock_irq(&dev->qf_cmd_lock);
2278	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2279	atomic_inc_mb(&dev->dev_qf_count);
2280	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2281
2282	schedule_work(&cmd->se_dev->qf_work_queue);
2283}
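
/*
 * Example (illustrative sketch): the queue-full retry contract seen from a
 * fabric driver.  The example_queue_data_in() name and the ring_full
 * parameter are hypothetical stand-ins for a real ->queue_data_in()
 * implementation and its hardware-queue check.
 */
static int __maybe_unused example_queue_data_in(struct se_cmd *cmd,
						bool ring_full)
{
	/*
	 * Returning -EAGAIN or -ENOMEM makes target-core requeue this
	 * callback later via transport_complete_qf(); any other error is
	 * treated as fatal and turned into CHECK_CONDITION.
	 */
	if (ring_full)
		return -EAGAIN;

	/* ... hand cmd->t_data_sg to the hardware here ... */
	return 0;
}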
2284
2285static bool target_read_prot_action(struct se_cmd *cmd)
2286{
2287	switch (cmd->prot_op) {
2288	case TARGET_PROT_DIN_STRIP:
2289		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2290			u32 sectors = cmd->data_length >>
2291				  ilog2(cmd->se_dev->dev_attrib.block_size);
2292
2293			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2294						     sectors, 0, cmd->t_prot_sg,
2295						     0);
2296			if (cmd->pi_err)
2297				return true;
2298		}
2299		break;
2300	case TARGET_PROT_DIN_INSERT:
2301		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2302			break;
2303
2304		sbc_dif_generate(cmd);
2305		break;
2306	default:
2307		break;
2308	}
2309
2310	return false;
2311}
2312
2313static void target_complete_ok_work(struct work_struct *work)
2314{
2315	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2316	int ret;
2317
2318	/*
2319	 * Check if we need to move delayed/dormant tasks from cmds on the
2320	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2321	 * Attribute.
2322	 */
2323	transport_complete_task_attr(cmd);
2324
2325	/*
2326	 * Check to schedule QUEUE_FULL work, or execute an existing
2327	 * cmd->transport_qf_callback()
2328	 */
2329	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2330		schedule_work(&cmd->se_dev->qf_work_queue);
2331
2332	/*
2333	 * Check if we need to send a sense buffer from
2334	 * the struct se_cmd in question. We do NOT want
2335	 * to take this path if the IO has been marked as
2336	 * needing to be treated like a "normal read". This
2337	 * is the case if it's a tape read, and either the
2338	 * FM, EOM, or ILI bits are set, but there is no
2339	 * sense data.
2340	 */
2341	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2342	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2343		WARN_ON(!cmd->scsi_status);
2344		ret = transport_send_check_condition_and_sense(
2345					cmd, 0, 1);
2346		if (ret)
2347			goto queue_full;
2348
2349		transport_lun_remove_cmd(cmd);
2350		transport_cmd_check_stop_to_fabric(cmd);
2351		return;
2352	}
2353	/*
2354	 * Check for a callback, used by, amongst other things,
2355	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2356	 */
2357	if (cmd->transport_complete_callback) {
2358		sense_reason_t rc;
2359		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2360		bool zero_dl = !(cmd->data_length);
2361		int post_ret = 0;
2362
2363		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2364		if (!rc && !post_ret) {
2365			if (caw && zero_dl)
2366				goto queue_rsp;
2367
2368			return;
2369		} else if (rc) {
2370			ret = transport_send_check_condition_and_sense(cmd,
2371						rc, 0);
2372			if (ret)
2373				goto queue_full;
2374
2375			transport_lun_remove_cmd(cmd);
2376			transport_cmd_check_stop_to_fabric(cmd);
2377			return;
2378		}
2379	}
2380
2381queue_rsp:
2382	switch (cmd->data_direction) {
2383	case DMA_FROM_DEVICE:
2384		/*
2385		 * if this is a READ-type IO, but SCSI status
2386		 * is set, then skip returning data and just
2387		 * return the status -- unless this IO is marked
2388		 * as needing to be treated as a normal read,
2389		 * in which case we want to go ahead and return
2390		 * the data. This happens, for example, for tape
2391		 * reads with the FM, EOM, or ILI bits set, with
2392		 * no sense data.
2393		 */
2394		if (cmd->scsi_status &&
2395		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2396			goto queue_status;
2397
2398		atomic_long_add(cmd->data_length,
2399				&cmd->se_lun->lun_stats.tx_data_octets);
2400		/*
2401		 * Perform READ_STRIP of PI using software emulation when
2402		 * backend had PI enabled, if the transport will not be
2403		 * performing hardware READ_STRIP offload.
2404		 */
2405		if (target_read_prot_action(cmd)) {
2406			ret = transport_send_check_condition_and_sense(cmd,
2407						cmd->pi_err, 0);
2408			if (ret)
2409				goto queue_full;
2410
2411			transport_lun_remove_cmd(cmd);
2412			transport_cmd_check_stop_to_fabric(cmd);
2413			return;
2414		}
2415
2416		trace_target_cmd_complete(cmd);
2417		ret = cmd->se_tfo->queue_data_in(cmd);
2418		if (ret)
2419			goto queue_full;
2420		break;
2421	case DMA_TO_DEVICE:
2422		atomic_long_add(cmd->data_length,
2423				&cmd->se_lun->lun_stats.rx_data_octets);
2424		/*
2425		 * Check if we need to send READ payload for BIDI-COMMAND
2426		 */
2427		if (cmd->se_cmd_flags & SCF_BIDI) {
2428			atomic_long_add(cmd->data_length,
2429					&cmd->se_lun->lun_stats.tx_data_octets);
2430			ret = cmd->se_tfo->queue_data_in(cmd);
2431			if (ret)
2432				goto queue_full;
2433			break;
2434		}
2435		fallthrough;
2436	case DMA_NONE:
2437queue_status:
2438		trace_target_cmd_complete(cmd);
2439		ret = cmd->se_tfo->queue_status(cmd);
2440		if (ret)
2441			goto queue_full;
2442		break;
2443	default:
2444		break;
2445	}
2446
2447	transport_lun_remove_cmd(cmd);
2448	transport_cmd_check_stop_to_fabric(cmd);
2449	return;
2450
2451queue_full:
2452	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2453		" data_direction: %d\n", cmd, cmd->data_direction);
2454
2455	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2456}
2457
2458void target_free_sgl(struct scatterlist *sgl, int nents)
2459{
2460	sgl_free_n_order(sgl, nents, 0);
2461}
2462EXPORT_SYMBOL(target_free_sgl);
2463
2464static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2465{
2466	/*
2467	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2468	 * emulation, and free + reset pointers if necessary.
2469	 */
2470	if (!cmd->t_data_sg_orig)
2471		return;
2472
2473	kfree(cmd->t_data_sg);
2474	cmd->t_data_sg = cmd->t_data_sg_orig;
2475	cmd->t_data_sg_orig = NULL;
2476	cmd->t_data_nents = cmd->t_data_nents_orig;
2477	cmd->t_data_nents_orig = 0;
2478}
2479
2480static inline void transport_free_pages(struct se_cmd *cmd)
2481{
2482	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2483		target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2484		cmd->t_prot_sg = NULL;
2485		cmd->t_prot_nents = 0;
2486	}
2487
2488	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2489		/*
2490		 * Release special case READ buffer payload required for
2491		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2492		 */
2493		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2494			target_free_sgl(cmd->t_bidi_data_sg,
2495					   cmd->t_bidi_data_nents);
2496			cmd->t_bidi_data_sg = NULL;
2497			cmd->t_bidi_data_nents = 0;
2498		}
2499		transport_reset_sgl_orig(cmd);
2500		return;
2501	}
2502	transport_reset_sgl_orig(cmd);
2503
2504	target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2505	cmd->t_data_sg = NULL;
2506	cmd->t_data_nents = 0;
2507
2508	target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2509	cmd->t_bidi_data_sg = NULL;
2510	cmd->t_bidi_data_nents = 0;
2511}
2512
2513void *transport_kmap_data_sg(struct se_cmd *cmd)
2514{
2515	struct scatterlist *sg = cmd->t_data_sg;
2516	struct page **pages;
2517	int i;
2518
2519	/*
2520	 * We need to take into account a possible offset here for fabrics like
2521	 * tcm_loop that may be using a contiguous buffer from the SCSI midlayer for
2522	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2523	 */
2524	if (!cmd->t_data_nents)
2525		return NULL;
2526
2527	BUG_ON(!sg);
2528	if (cmd->t_data_nents == 1)
2529		return kmap(sg_page(sg)) + sg->offset;
2530
2531	/* >1 page. use vmap */
2532	pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
2533	if (!pages)
2534		return NULL;
2535
2536	/* convert sg[] to pages[] */
2537	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2538		pages[i] = sg_page(sg);
2539	}
2540
2541	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
2542	kfree(pages);
2543	if (!cmd->t_data_vmap)
2544		return NULL;
2545
2546	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2547}
2548EXPORT_SYMBOL(transport_kmap_data_sg);
2549
2550void transport_kunmap_data_sg(struct se_cmd *cmd)
2551{
2552	if (!cmd->t_data_nents) {
2553		return;
2554	} else if (cmd->t_data_nents == 1) {
2555		kunmap(sg_page(cmd->t_data_sg));
2556		return;
2557	}
2558
2559	vunmap(cmd->t_data_vmap);
2560	cmd->t_data_vmap = NULL;
2561}
2562EXPORT_SYMBOL(transport_kunmap_data_sg);
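
/*
 * Example (illustrative sketch): the kmap/kunmap pairing emulation code
 * uses to get a linear view of cmd->t_data_sg.  The example_zero_data_buf()
 * name is hypothetical.
 */
static sense_reason_t __maybe_unused example_zero_data_buf(struct se_cmd *cmd)
{
	unsigned char *buf;

	/* NULL here means either no data segments or a vmap failure. */
	buf = transport_kmap_data_sg(cmd);
	if (buf) {
		memset(buf, 0, cmd->data_length);
		transport_kunmap_data_sg(cmd);
	}
	return 0;
}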
2563
2564int
2565target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2566		 bool zero_page, bool chainable)
2567{
2568	gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
2569
2570	*sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
2571	return *sgl ? 0 : -ENOMEM;
2572}
2573EXPORT_SYMBOL(target_alloc_sgl);
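
/*
 * Example (illustrative sketch): pairing target_alloc_sgl() with
 * target_free_sgl() for a private, pre-zeroed scatterlist.  The
 * example_with_private_sgl() name is hypothetical.
 */
static int __maybe_unused example_with_private_sgl(u32 length)
{
	struct scatterlist *sgl;
	unsigned int nents;
	int ret;

	/* zero_page = true, chainable = false: a flat, zeroed table. */
	ret = target_alloc_sgl(&sgl, &nents, length, true, false);
	if (ret < 0)
		return ret;

	/* ... fill or map the scatterlist here ... */

	target_free_sgl(sgl, nents);
	return 0;
}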
2574
2575/*
2576 * Allocate any required resources to execute the command.  For writes we
2577 * might not have the payload yet, so notify the fabric via a call to
2578 * ->write_pending instead. Otherwise place it on the execution queue.
2579 */
2580sense_reason_t
2581transport_generic_new_cmd(struct se_cmd *cmd)
2582{
2583	unsigned long flags;
2584	int ret = 0;
2585	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2586
2587	if (cmd->prot_op != TARGET_PROT_NORMAL &&
2588	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2589		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2590				       cmd->prot_length, true, false);
2591		if (ret < 0)
2592			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2593	}
2594
2595	/*
2596	 * Determine if the TCM fabric module has already allocated physical
2597	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
2598	 * beforehand.
2599	 */
2600	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2601	    cmd->data_length) {
2602
2603		if ((cmd->se_cmd_flags & SCF_BIDI) ||
2604		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2605			u32 bidi_length;
2606
2607			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2608				bidi_length = cmd->t_task_nolb *
2609					      cmd->se_dev->dev_attrib.block_size;
2610			else
2611				bidi_length = cmd->data_length;
2612
2613			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2614					       &cmd->t_bidi_data_nents,
2615					       bidi_length, zero_flag, false);
2616			if (ret < 0)
2617				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2618		}
2619
2620		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2621				       cmd->data_length, zero_flag, false);
2622		if (ret < 0)
2623			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2624	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2625		    cmd->data_length) {
2626		/*
2627		 * Special case for COMPARE_AND_WRITE with fabrics
2628		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2629		 */
2630		u32 caw_length = cmd->t_task_nolb *
2631				 cmd->se_dev->dev_attrib.block_size;
2632
2633		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2634				       &cmd->t_bidi_data_nents,
2635				       caw_length, zero_flag, false);
2636		if (ret < 0)
2637			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2638	}
2639	/*
2640	 * If this command is not a write we can execute it right here,
2641	 * for write buffers we need to notify the fabric driver first
2642	 * and let it call back once the write buffers are ready.
2643	 */
2644	target_add_to_state_list(cmd);
2645	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2646		target_execute_cmd(cmd);
2647		return 0;
2648	}
2649
2650	spin_lock_irqsave(&cmd->t_state_lock, flags);
2651	cmd->t_state = TRANSPORT_WRITE_PENDING;
2652	/*
2653	 * Determine if frontend context caller is requesting the stopping of
2654	 * this command for frontend exceptions.
2655	 */
2656	if (cmd->transport_state & CMD_T_STOP &&
2657	    !cmd->se_tfo->write_pending_must_be_called) {
2658		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2659			 __func__, __LINE__, cmd->tag);
2660
2661		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2662
2663		complete_all(&cmd->t_transport_stop_comp);
2664		return 0;
2665	}
2666	cmd->transport_state &= ~CMD_T_ACTIVE;
2667	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2668
2669	ret = cmd->se_tfo->write_pending(cmd);
2670	if (ret)
2671		goto queue_full;
2672
2673	return 0;
2674
2675queue_full:
2676	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2677	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2678	return 0;
2679}
2680EXPORT_SYMBOL(transport_generic_new_cmd);
2681
2682static void transport_write_pending_qf(struct se_cmd *cmd)
2683{
2684	unsigned long flags;
2685	int ret;
2686	bool stop;
2687
2688	spin_lock_irqsave(&cmd->t_state_lock, flags);
2689	stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
2690	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2691
2692	if (stop) {
2693		pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
2694			__func__, __LINE__, cmd->tag);
2695		complete_all(&cmd->t_transport_stop_comp);
2696		return;
2697	}
2698
2699	ret = cmd->se_tfo->write_pending(cmd);
2700	if (ret) {
2701		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2702			 cmd);
2703		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2704	}
2705}
2706
2707static bool
2708__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2709			   unsigned long *flags);
2710
2711static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2712{
2713	unsigned long flags;
2714
2715	spin_lock_irqsave(&cmd->t_state_lock, flags);
2716	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2717	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2718}
2719
2720/*
2721 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
2722 * finished.
2723 */
2724void target_put_cmd_and_wait(struct se_cmd *cmd)
2725{
2726	DECLARE_COMPLETION_ONSTACK(compl);
2727
2728	WARN_ON_ONCE(cmd->abrt_compl);
2729	cmd->abrt_compl = &compl;
2730	target_put_sess_cmd(cmd);
2731	wait_for_completion(&compl);
2732}
2733
2734/*
2735 * This function is called by frontend drivers after processing of a command
2736 * has finished.
2737 *
2738 * The protocol for ensuring that either the regular frontend command
2739 * processing flow or target_handle_abort() code drops one reference is as
2740 * follows:
2741 * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
2742 *   the frontend driver to call this function synchronously or asynchronously.
2743 *   That will cause one reference to be dropped.
2744 * - During regular command processing the target core sets CMD_T_COMPLETE
2745 *   before invoking one of the .queue_*() functions.
2746 * - The code that aborts commands skips commands and TMFs for which
2747 *   CMD_T_COMPLETE has been set.
2748 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
2749 *   commands that will be aborted.
2750 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
2751 *   transport_generic_free_cmd() skips its call to target_put_sess_cmd().
2752 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2753 *   be called and will drop a reference.
2754 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2755 *   will be called. target_handle_abort() will drop the final reference.
2756 */
2757int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2758{
2759	DECLARE_COMPLETION_ONSTACK(compl);
2760	int ret = 0;
2761	bool aborted = false, tas = false;
2762
2763	if (wait_for_tasks)
2764		target_wait_free_cmd(cmd, &aborted, &tas);
2765
2766	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
2767		/*
2768		 * Handle WRITE failure case where transport_generic_new_cmd()
2769		 * has already added se_cmd to state_list, but fabric has
2770		 * failed command before I/O submission.
2771		 */
2772		if (cmd->state_active)
2773			target_remove_from_state_list(cmd);
2774
2775		if (cmd->se_lun)
2776			transport_lun_remove_cmd(cmd);
2777	}
2778	if (aborted)
2779		cmd->free_compl = &compl;
2780	ret = target_put_sess_cmd(cmd);
2781	if (aborted) {
2782		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2783		wait_for_completion(&compl);
2784		ret = 1;
2785	}
2786	return ret;
2787}
2788EXPORT_SYMBOL(transport_generic_free_cmd);
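
/*
 * Example (illustrative sketch): the typical fabric completion path that
 * drops its command reference once the response has been queued.  The
 * example_fabric_release() name is hypothetical.
 */
static void __maybe_unused example_fabric_release(struct se_cmd *se_cmd)
{
	/*
	 * wait_for_tasks = 0: do not block waiting for outstanding backend
	 * I/O before dropping the reference.
	 */
	transport_generic_free_cmd(se_cmd, 0);
}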
2789
2790/**
2791 * target_get_sess_cmd - Add command to active ->sess_cmd_list
2792 * @se_cmd:	command descriptor to add
2793 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
2794 */
2795int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2796{
2797	struct se_session *se_sess = se_cmd->se_sess;
2798	unsigned long flags;
2799	int ret = 0;
2800
2801	/*
2802	 * Add a second kref if the fabric caller is expecting to handle
2803	 * fabric acknowledgement that requires two target_put_sess_cmd()
2804	 * invocations before se_cmd descriptor release.
2805	 */
2806	if (ack_kref) {
2807		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2808			return -EINVAL;
2809
2810		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2811	}
2812
2813	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2814	if (se_sess->sess_tearing_down) {
2815		ret = -ESHUTDOWN;
2816		goto out;
2817	}
2818	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2819	percpu_ref_get(&se_sess->cmd_count);
2820out:
2821	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2822
2823	if (ret && ack_kref)
2824		target_put_sess_cmd(se_cmd);
2825
2826	return ret;
2827}
2828EXPORT_SYMBOL(target_get_sess_cmd);
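
/*
 * Example (illustrative sketch): the extra put required when a command was
 * set up with ack_kref = true (SCF_ACK_KREF).  The
 * example_fabric_response_acked() name is hypothetical.
 */
static void __maybe_unused example_fabric_response_acked(struct se_cmd *se_cmd)
{
	/*
	 * Balances the second kref taken in target_get_sess_cmd(); the
	 * descriptor is released only after both puts have happened.
	 */
	target_put_sess_cmd(se_cmd);
}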
2829
2830static void target_free_cmd_mem(struct se_cmd *cmd)
2831{
2832	transport_free_pages(cmd);
2833
2834	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2835		core_tmr_release_req(cmd->se_tmr_req);
2836	if (cmd->t_task_cdb != cmd->__t_task_cdb)
2837		kfree(cmd->t_task_cdb);
2838}
2839
2840static void target_release_cmd_kref(struct kref *kref)
2841{
2842	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2843	struct se_session *se_sess = se_cmd->se_sess;
2844	struct completion *free_compl = se_cmd->free_compl;
2845	struct completion *abrt_compl = se_cmd->abrt_compl;
2846	unsigned long flags;
2847
2848	if (se_sess) {
2849		spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2850		list_del_init(&se_cmd->se_cmd_list);
2851		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2852	}
2853
2854	target_free_cmd_mem(se_cmd);
2855	se_cmd->se_tfo->release_cmd(se_cmd);
2856	if (free_compl)
2857		complete(free_compl);
2858	if (abrt_compl)
2859		complete(abrt_compl);
2860
2861	percpu_ref_put(&se_sess->cmd_count);
2862}
2863
2864/**
2865 * target_put_sess_cmd - decrease the command reference count
2866 * @se_cmd:	command to drop a reference from
2867 *
2868 * Returns 1 if and only if this target_put_sess_cmd() call caused the
2869 * refcount to drop to zero. Returns zero otherwise.
2870 */
2871int target_put_sess_cmd(struct se_cmd *se_cmd)
2872{
2873	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2874}
2875EXPORT_SYMBOL(target_put_sess_cmd);
2876
2877static const char *data_dir_name(enum dma_data_direction d)
2878{
2879	switch (d) {
2880	case DMA_BIDIRECTIONAL:	return "BIDI";
2881	case DMA_TO_DEVICE:	return "WRITE";
2882	case DMA_FROM_DEVICE:	return "READ";
2883	case DMA_NONE:		return "NONE";
2884	}
2885
2886	return "(?)";
2887}
2888
2889static const char *cmd_state_name(enum transport_state_table t)
2890{
2891	switch (t) {
2892	case TRANSPORT_NO_STATE:	return "NO_STATE";
2893	case TRANSPORT_NEW_CMD:		return "NEW_CMD";
2894	case TRANSPORT_WRITE_PENDING:	return "WRITE_PENDING";
2895	case TRANSPORT_PROCESSING:	return "PROCESSING";
2896	case TRANSPORT_COMPLETE:	return "COMPLETE";
2897	case TRANSPORT_ISTATE_PROCESSING:
2898					return "ISTATE_PROCESSING";
2899	case TRANSPORT_COMPLETE_QF_WP:	return "COMPLETE_QF_WP";
2900	case TRANSPORT_COMPLETE_QF_OK:	return "COMPLETE_QF_OK";
2901	case TRANSPORT_COMPLETE_QF_ERR:	return "COMPLETE_QF_ERR";
2902	}
2903
2904	return "(?)";
2905}
2906
2907static void target_append_str(char **str, const char *txt)
2908{
2909	char *prev = *str;
2910
2911	*str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
2912		kstrdup(txt, GFP_ATOMIC);
2913	kfree(prev);
2914}
2915
2916/*
2917 * Convert a transport state bitmask into a string. The caller is
2918 * responsible for freeing the returned pointer.
2919 */
2920static char *target_ts_to_str(u32 ts)
2921{
2922	char *str = NULL;
2923
2924	if (ts & CMD_T_ABORTED)
2925		target_append_str(&str, "aborted");
2926	if (ts & CMD_T_ACTIVE)
2927		target_append_str(&str, "active");
2928	if (ts & CMD_T_COMPLETE)
2929		target_append_str(&str, "complete");
2930	if (ts & CMD_T_SENT)
2931		target_append_str(&str, "sent");
2932	if (ts & CMD_T_STOP)
2933		target_append_str(&str, "stop");
2934	if (ts & CMD_T_FABRIC_STOP)
2935		target_append_str(&str, "fabric_stop");
2936
2937	return str;
2938}
2939
2940static const char *target_tmf_name(enum tcm_tmreq_table tmf)
2941{
2942	switch (tmf) {
2943	case TMR_ABORT_TASK:		return "ABORT_TASK";
2944	case TMR_ABORT_TASK_SET:	return "ABORT_TASK_SET";
2945	case TMR_CLEAR_ACA:		return "CLEAR_ACA";
2946	case TMR_CLEAR_TASK_SET:	return "CLEAR_TASK_SET";
2947	case TMR_LUN_RESET:		return "LUN_RESET";
2948	case TMR_TARGET_WARM_RESET:	return "TARGET_WARM_RESET";
2949	case TMR_TARGET_COLD_RESET:	return "TARGET_COLD_RESET";
2950	case TMR_LUN_RESET_PRO:		return "LUN_RESET_PRO";
2951	case TMR_UNKNOWN:		break;
2952	}
2953	return "(?)";
2954}
2955
2956void target_show_cmd(const char *pfx, struct se_cmd *cmd)
2957{
2958	char *ts_str = target_ts_to_str(cmd->transport_state);
2959	const u8 *cdb = cmd->t_task_cdb;
2960	struct se_tmr_req *tmf = cmd->se_tmr_req;
2961
2962	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2963		pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
2964			 pfx, cdb[0], cdb[1], cmd->tag,
2965			 data_dir_name(cmd->data_direction),
2966			 cmd->se_tfo->get_cmd_state(cmd),
2967			 cmd_state_name(cmd->t_state), cmd->data_length,
2968			 kref_read(&cmd->cmd_kref), ts_str);
2969	} else {
2970		pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
2971			 pfx, target_tmf_name(tmf->function), cmd->tag,
2972			 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
2973			 cmd_state_name(cmd->t_state),
2974			 kref_read(&cmd->cmd_kref), ts_str);
2975	}
2976	kfree(ts_str);
2977}
2978EXPORT_SYMBOL(target_show_cmd);
2979
2980/**
2981 * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
2982 * @se_sess:	session to flag
2983 */
2984void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2985{
2986	unsigned long flags;
2987
2988	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2989	se_sess->sess_tearing_down = 1;
2990	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2991
2992	percpu_ref_kill(&se_sess->cmd_count);
2993}
2994EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2995
2996/**
2997 * target_wait_for_sess_cmds - Wait for outstanding commands
2998 * @se_sess:    session to wait for active I/O
2999 */
3000void target_wait_for_sess_cmds(struct se_session *se_sess)
3001{
3002	struct se_cmd *cmd;
3003	int ret;
3004
3005	WARN_ON_ONCE(!se_sess->sess_tearing_down);
3006
3007	do {
3008		ret = wait_event_timeout(se_sess->cmd_list_wq,
3009				percpu_ref_is_zero(&se_sess->cmd_count),
3010				180 * HZ);
3011		list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
3012			target_show_cmd("session shutdown: still waiting for ",
3013					cmd);
3014	} while (ret <= 0);
3015}
3016EXPORT_SYMBOL(target_wait_for_sess_cmds);
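
/*
 * Example (illustrative sketch): the session shutdown sequence a fabric
 * driver follows before freeing its session.  The
 * example_fabric_close_session() name is hypothetical.
 */
static void __maybe_unused example_fabric_close_session(struct se_session *se_sess)
{
	/* Stop new commands from being added to ->sess_cmd_list. */
	target_sess_cmd_list_set_waiting(se_sess);

	/* Block until cmd_count drops to zero, logging any stragglers. */
	target_wait_for_sess_cmds(se_sess);
}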
3017
3018/*
3019 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
3020 * all references to the LUN have been released. Called during LUN shutdown.
3021 */
3022void transport_clear_lun_ref(struct se_lun *lun)
3023{
3024	percpu_ref_kill(&lun->lun_ref);
3025	wait_for_completion(&lun->lun_shutdown_comp);
3026}
3027
3028static bool
3029__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
3030			   bool *aborted, bool *tas, unsigned long *flags)
3031	__releases(&cmd->t_state_lock)
3032	__acquires(&cmd->t_state_lock)
3033{
3034
3035	assert_spin_locked(&cmd->t_state_lock);
3036	WARN_ON_ONCE(!irqs_disabled());
3037
3038	if (fabric_stop)
3039		cmd->transport_state |= CMD_T_FABRIC_STOP;
3040
3041	if (cmd->transport_state & CMD_T_ABORTED)
3042		*aborted = true;
3043
3044	if (cmd->transport_state & CMD_T_TAS)
3045		*tas = true;
3046
3047	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
3048	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
3049		return false;
3050
3051	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
3052	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
3053		return false;
3054
3055	if (!(cmd->transport_state & CMD_T_ACTIVE))
3056		return false;
3057
3058	if (fabric_stop && *aborted)
3059		return false;
3060
3061	cmd->transport_state |= CMD_T_STOP;
3062
3063	target_show_cmd("wait_for_tasks: Stopping ", cmd);
3064
3065	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
3066
3067	while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
3068					    180 * HZ))
3069		target_show_cmd("wait for tasks: ", cmd);
3070
3071	spin_lock_irqsave(&cmd->t_state_lock, *flags);
3072	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
3073
3074	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
3075		 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
3076
3077	return true;
3078}
3079
3080/**
3081 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
3082 * @cmd: command to wait on
3083 */
3084bool transport_wait_for_tasks(struct se_cmd *cmd)
3085{
3086	unsigned long flags;
3087	bool ret, aborted = false, tas = false;
3088
3089	spin_lock_irqsave(&cmd->t_state_lock, flags);
3090	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
3091	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3092
3093	return ret;
3094}
3095EXPORT_SYMBOL(transport_wait_for_tasks);
3096
3097struct sense_info {
3098	u8 key;
3099	u8 asc;
3100	u8 ascq;
3101	bool add_sector_info;
3102};
3103
3104static const struct sense_info sense_info_table[] = {
3105	[TCM_NO_SENSE] = {
3106		.key = NOT_READY
3107	},
3108	[TCM_NON_EXISTENT_LUN] = {
3109		.key = ILLEGAL_REQUEST,
3110		.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
3111	},
3112	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
3113		.key = ILLEGAL_REQUEST,
3114		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3115	},
3116	[TCM_SECTOR_COUNT_TOO_MANY] = {
3117		.key = ILLEGAL_REQUEST,
3118		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3119	},
3120	[TCM_UNKNOWN_MODE_PAGE] = {
3121		.key = ILLEGAL_REQUEST,
3122		.asc = 0x24, /* INVALID FIELD IN CDB */
3123	},
3124	[TCM_CHECK_CONDITION_ABORT_CMD] = {
3125		.key = ABORTED_COMMAND,
3126		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
3127		.ascq = 0x03,
3128	},
3129	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
3130		.key = ABORTED_COMMAND,
3131		.asc = 0x0c, /* WRITE ERROR */
3132		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
3133	},
3134	[TCM_INVALID_CDB_FIELD] = {
3135		.key = ILLEGAL_REQUEST,
3136		.asc = 0x24, /* INVALID FIELD IN CDB */
3137	},
3138	[TCM_INVALID_PARAMETER_LIST] = {
3139		.key = ILLEGAL_REQUEST,
3140		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
3141	},
3142	[TCM_TOO_MANY_TARGET_DESCS] = {
3143		.key = ILLEGAL_REQUEST,
3144		.asc = 0x26,
3145		.ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
3146	},
3147	[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
3148		.key = ILLEGAL_REQUEST,
3149		.asc = 0x26,
3150		.ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
3151	},
3152	[TCM_TOO_MANY_SEGMENT_DESCS] = {
3153		.key = ILLEGAL_REQUEST,
3154		.asc = 0x26,
3155		.ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
3156	},
3157	[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
3158		.key = ILLEGAL_REQUEST,
3159		.asc = 0x26,
3160		.ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
3161	},
3162	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
3163		.key = ILLEGAL_REQUEST,
3164		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
3165	},
3166	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
3167		.key = ILLEGAL_REQUEST,
3168		.asc = 0x0c, /* WRITE ERROR */
3169		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
3170	},
3171	[TCM_SERVICE_CRC_ERROR] = {
3172		.key = ABORTED_COMMAND,
3173		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
3174		.ascq = 0x05, /* N/A */
3175	},
3176	[TCM_SNACK_REJECTED] = {
3177		.key = ABORTED_COMMAND,
3178		.asc = 0x11, /* READ ERROR */
3179		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
3180	},
3181	[TCM_WRITE_PROTECTED] = {
3182		.key = DATA_PROTECT,
3183		.asc = 0x27, /* WRITE PROTECTED */
3184	},
3185	[TCM_ADDRESS_OUT_OF_RANGE] = {
3186		.key = ILLEGAL_REQUEST,
3187		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
3188	},
3189	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
3190		.key = UNIT_ATTENTION,
3191	},
3192	[TCM_CHECK_CONDITION_NOT_READY] = {
3193		.key = NOT_READY,
3194	},
3195	[TCM_MISCOMPARE_VERIFY] = {
3196		.key = MISCOMPARE,
3197		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
3198		.ascq = 0x00,
3199	},
3200	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
3201		.key = ABORTED_COMMAND,
3202		.asc = 0x10,
3203		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
3204		.add_sector_info = true,
3205	},
3206	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
3207		.key = ABORTED_COMMAND,
3208		.asc = 0x10,
3209		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
3210		.add_sector_info = true,
3211	},
3212	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
3213		.key = ABORTED_COMMAND,
3214		.asc = 0x10,
3215		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
3216		.add_sector_info = true,
3217	},
3218	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
3219		.key = COPY_ABORTED,
3220		.asc = 0x0d,
3221		.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
3222
3223	},
3224	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
3225		/*
3226		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
3227		 * Solaris initiators.  Returning NOT READY instead means the
3228		 * operations will be retried a finite number of times and we
3229		 * can survive intermittent errors.
3230		 */
3231		.key = NOT_READY,
3232		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
3233	},
3234	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
3235		/*
3236		 * From spc4r22 sections 5.7.7 and 5.7.8:
3237		 * If a PERSISTENT RESERVE OUT command with a REGISTER service action
3238		 * or a REGISTER AND IGNORE EXISTING KEY service action or
3239		 * REGISTER AND MOVE service action is attempted,
3240		 * but there are insufficient device server resources to complete the
3241		 * operation, then the command shall be terminated with CHECK CONDITION
3242		 * status, with the sense key set to ILLEGAL REQUEST, and the additional
3243		 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
3244		 */
3245		.key = ILLEGAL_REQUEST,
3246		.asc = 0x55,
3247		.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
3248	},
3249};
3250
3251/**
3252 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
3253 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
3254 *   be stored.
3255 * @reason: LIO sense reason code. If this argument has the value
3256 *   TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
3257 *   dequeuing a unit attention fails due to multiple commands being processed
3258 *   concurrently, set the command status to BUSY.
3261 */
3262static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
3263{
3264	const struct sense_info *si;
3265	u8 *buffer = cmd->sense_buffer;
3266	int r = (__force int)reason;
3267	u8 key, asc, ascq;
3268	bool desc_format = target_sense_desc_format(cmd->se_dev);
3269
3270	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
3271		si = &sense_info_table[r];
3272	else
3273		si = &sense_info_table[(__force int)
3274				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
3275
3276	key = si->key;
3277	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
3278		if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
3279						       &ascq)) {
3280			cmd->scsi_status = SAM_STAT_BUSY;
3281			return;
3282		}
3283	} else if (si->asc == 0) {
3284		WARN_ON_ONCE(cmd->scsi_asc == 0);
3285		asc = cmd->scsi_asc;
3286		ascq = cmd->scsi_ascq;
3287	} else {
3288		asc = si->asc;
3289		ascq = si->ascq;
3290	}
3291
3292	cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
3293	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
3294	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
3295	scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
3296	if (si->add_sector_info)
3297		WARN_ON_ONCE(scsi_set_sense_information(buffer,
3298							cmd->scsi_sense_length,
3299							cmd->bad_sector) < 0);
3300}
3301
3302int
3303transport_send_check_condition_and_sense(struct se_cmd *cmd,
3304		sense_reason_t reason, int from_transport)
3305{
3306	unsigned long flags;
3307
3308	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3309
3310	spin_lock_irqsave(&cmd->t_state_lock, flags);
3311	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3312		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3313		return 0;
3314	}
3315	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
3316	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3317
3318	if (!from_transport)
3319		translate_sense_reason(cmd, reason);
3320
3321	trace_target_cmd_complete(cmd);
3322	return cmd->se_tfo->queue_status(cmd);
3323}
3324EXPORT_SYMBOL(transport_send_check_condition_and_sense);
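
/*
 * Example (illustrative sketch): reporting a fabric-detected setup error as
 * CHECK_CONDITION with sense data built from a sense_reason_t.  The
 * example_reject_cmd() name is hypothetical.
 */
static int __maybe_unused example_reject_cmd(struct se_cmd *cmd)
{
	/* from_transport = 0: translate the reason into key/asc/ascq here. */
	return transport_send_check_condition_and_sense(cmd,
						TCM_INVALID_CDB_FIELD, 0);
}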
3325
3326/**
3327 * target_send_busy - Send SCSI BUSY status back to the initiator
3328 * @cmd: SCSI command for which to send a BUSY reply.
3329 *
3330 * Note: Only call this function if target_submit_cmd*() failed.
3331 */
3332int target_send_busy(struct se_cmd *cmd)
3333{
3334	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3335
3336	cmd->scsi_status = SAM_STAT_BUSY;
3337	trace_target_cmd_complete(cmd);
3338	return cmd->se_tfo->queue_status(cmd);
3339}
3340EXPORT_SYMBOL(target_send_busy);
3341
3342static void target_tmr_work(struct work_struct *work)
3343{
3344	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3345	struct se_device *dev = cmd->se_dev;
3346	struct se_tmr_req *tmr = cmd->se_tmr_req;
3347	int ret;
3348
3349	if (cmd->transport_state & CMD_T_ABORTED)
3350		goto aborted;
3351
3352	switch (tmr->function) {
3353	case TMR_ABORT_TASK:
3354		core_tmr_abort_task(dev, tmr, cmd->se_sess);
3355		break;
3356	case TMR_ABORT_TASK_SET:
3357	case TMR_CLEAR_ACA:
3358	case TMR_CLEAR_TASK_SET:
3359		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3360		break;
3361	case TMR_LUN_RESET:
3362		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3363		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3364					 TMR_FUNCTION_REJECTED;
3365		if (tmr->response == TMR_FUNCTION_COMPLETE) {
3366			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3367					       cmd->orig_fe_lun, 0x29,
3368					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3369		}
3370		break;
3371	case TMR_TARGET_WARM_RESET:
3372		tmr->response = TMR_FUNCTION_REJECTED;
3373		break;
3374	case TMR_TARGET_COLD_RESET:
3375		tmr->response = TMR_FUNCTION_REJECTED;
3376		break;
3377	default:
3378		pr_err("Unknown TMR function: 0x%02x.\n",
3379				tmr->function);
3380		tmr->response = TMR_FUNCTION_REJECTED;
3381		break;
3382	}
3383
3384	if (cmd->transport_state & CMD_T_ABORTED)
3385		goto aborted;
3386
3387	cmd->se_tfo->queue_tm_rsp(cmd);
3388
3389	transport_lun_remove_cmd(cmd);
3390	transport_cmd_check_stop_to_fabric(cmd);
3391	return;
3392
3393aborted:
3394	target_handle_abort(cmd);
3395}
3396
3397int transport_generic_handle_tmr(
3398	struct se_cmd *cmd)
3399{
3400	unsigned long flags;
3401	bool aborted = false;
3402
3403	spin_lock_irqsave(&cmd->t_state_lock, flags);
3404	if (cmd->transport_state & CMD_T_ABORTED) {
3405		aborted = true;
3406	} else {
3407		cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3408		cmd->transport_state |= CMD_T_ACTIVE;
3409	}
3410	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3411
3412	if (aborted) {
3413		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
3414				    cmd->se_tmr_req->function,
3415				    cmd->se_tmr_req->ref_task_tag, cmd->tag);
3416		target_handle_abort(cmd);
3417		return 0;
3418	}
3419
3420	INIT_WORK(&cmd->work, target_tmr_work);
3421	schedule_work(&cmd->work);
3422	return 0;
3423}
3424EXPORT_SYMBOL(transport_generic_handle_tmr);
3425
3426bool
3427target_check_wce(struct se_device *dev)
3428{
3429	bool wce = false;
3430
3431	if (dev->transport->get_write_cache)
3432		wce = dev->transport->get_write_cache(dev);
3433	else if (dev->dev_attrib.emulate_write_cache > 0)
3434		wce = true;
3435
3436	return wce;
3437}
3438
3439bool
3440target_check_fua(struct se_device *dev)
3441{
3442	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3443}
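
/*
 * Example (illustrative sketch): how a backend might consult the WCE/FUA
 * helpers above when deciding whether a completed WRITE still needs an
 * explicit flush.  The example_write_needs_flush() name and the policy it
 * encodes are hypothetical.
 */
static bool __maybe_unused example_write_needs_flush(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/* With the write cache off, completed data is already stable. */
	if (!target_check_wce(dev))
		return false;

	/* An FUA write was already forced to media when FUA is honoured. */
	if ((cmd->se_cmd_flags & SCF_FUA) && target_check_fua(dev))
		return false;

	return true;
}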