v5.9
   1/*******************************************************************************
   2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
   3 *
   4 * (C) Copyright 2010-2013 Datera, Inc.
   5 * (C) Copyright 2010-2012 IBM Corp.
   6 *
   7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   8 *
   9 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
  10 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 ****************************************************************************/
  23
  24#include <linux/module.h>
  25#include <linux/moduleparam.h>
  26#include <generated/utsrelease.h>
  27#include <linux/utsname.h>
  28#include <linux/init.h>
  29#include <linux/slab.h>
  30#include <linux/kthread.h>
  31#include <linux/types.h>
  32#include <linux/string.h>
  33#include <linux/configfs.h>
  34#include <linux/ctype.h>
  35#include <linux/compat.h>
  36#include <linux/eventfd.h>
  37#include <linux/fs.h>
  38#include <linux/vmalloc.h>
  39#include <linux/miscdevice.h>
  40#include <asm/unaligned.h>
  41#include <scsi/scsi_common.h>
  42#include <scsi/scsi_proto.h>
  43#include <target/target_core_base.h>
  44#include <target/target_core_fabric.h>
  45#include <linux/vhost.h>
  46#include <linux/virtio_scsi.h>
  47#include <linux/llist.h>
  48#include <linux/bitmap.h>
  49
  50#include "vhost.h"
  51
  52#define VHOST_SCSI_VERSION  "v0.1"
  53#define VHOST_SCSI_NAMELEN 256
  54#define VHOST_SCSI_MAX_CDB_SIZE 32
  55#define VHOST_SCSI_DEFAULT_TAGS 256
  56#define VHOST_SCSI_PREALLOC_SGLS 2048
  57#define VHOST_SCSI_PREALLOC_UPAGES 2048
  58#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
  59
  60/* Max number of requests before requeueing the job.
  61 * Using this limit prevents one virtqueue from starving others with
  62 * requests.
  63 */
  64#define VHOST_SCSI_WEIGHT 256
  65
  66struct vhost_scsi_inflight {
  67	/* Wait for the flush operation to finish */
  68	struct completion comp;
  69	/* Refcount for the inflight reqs */
  70	struct kref kref;
  71};
  72
  73struct vhost_scsi_cmd {
  74	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
  75	int tvc_vq_desc;
  76	/* virtio-scsi initiator task attribute */
  77	int tvc_task_attr;
  78	/* virtio-scsi response incoming iovecs */
  79	int tvc_in_iovs;
  80	/* virtio-scsi initiator data direction */
  81	enum dma_data_direction tvc_data_direction;
  82	/* Expected data transfer length from virtio-scsi header */
  83	u32 tvc_exp_data_len;
  84	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
  85	u64 tvc_tag;
  86	/* The number of scatterlists associated with this cmd */
  87	u32 tvc_sgl_count;
  88	u32 tvc_prot_sgl_count;
  89	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
  90	u32 tvc_lun;
  91	/* Pointer to the SGL formatted memory from virtio-scsi */
  92	struct scatterlist *tvc_sgl;
  93	struct scatterlist *tvc_prot_sgl;
  94	struct page **tvc_upages;
  95	/* Copy of the response header iovec from the virtqueue */
  96	struct iovec tvc_resp_iov;
  97	/* Pointer to vhost_scsi for our device */
  98	struct vhost_scsi *tvc_vhost;
  99	/* Pointer to vhost_virtqueue for the cmd */
 100	struct vhost_virtqueue *tvc_vq;
 101	/* Pointer to vhost nexus memory */
 102	struct vhost_scsi_nexus *tvc_nexus;
 103	/* The TCM I/O descriptor that is accessed via container_of() */
 104	struct se_cmd tvc_se_cmd;
 105	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
 106	struct work_struct work;
 107	/* Copy of the incoming SCSI command descriptor block (CDB) */
 108	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
 109	/* Sense buffer that will be mapped into outgoing status */
 110	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 111	/* Completed commands list, serviced from vhost worker thread */
 112	struct llist_node tvc_completion_list;
 113	/* Used to track inflight cmd */
 114	struct vhost_scsi_inflight *inflight;
 115};
 116
 117struct vhost_scsi_nexus {
 118	/* Pointer to TCM session for I_T Nexus */
 119	struct se_session *tvn_se_sess;
 120};
 121
 122struct vhost_scsi_tpg {
 123	/* Vhost port target portal group tag for TCM */
 124	u16 tport_tpgt;
 125	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
 126	int tv_tpg_port_count;
 127	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
 128	int tv_tpg_vhost_count;
 129	/* Used for enabling T10-PI with legacy devices */
 130	int tv_fabric_prot_type;
 131	/* list for vhost_scsi_list */
 132	struct list_head tv_tpg_list;
 133	/* Used to protect access for tpg_nexus */
 134	struct mutex tv_tpg_mutex;
 135	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
 136	struct vhost_scsi_nexus *tpg_nexus;
 137	/* Pointer back to vhost_scsi_tport */
 138	struct vhost_scsi_tport *tport;
 139	/* Returned by vhost_scsi_make_tpg() */
 140	struct se_portal_group se_tpg;
 141	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 142	struct vhost_scsi *vhost_scsi;
 143};
 144
 145struct vhost_scsi_tport {
 146	/* SCSI protocol the tport is providing */
 147	u8 tport_proto_id;
 148	/* Binary World Wide unique Port Name for Vhost Target port */
 149	u64 tport_wwpn;
 150	/* ASCII formatted WWPN for Vhost Target port */
 151	char tport_name[VHOST_SCSI_NAMELEN];
 152	/* Returned by vhost_scsi_make_tport() */
 153	struct se_wwn tport_wwn;
 154};
 155
 156struct vhost_scsi_evt {
 157	/* event to be sent to guest */
 158	struct virtio_scsi_event event;
 159	/* event list, serviced from vhost worker thread */
 160	struct llist_node list;
 161};
 162
 163enum {
 164	VHOST_SCSI_VQ_CTL = 0,
 165	VHOST_SCSI_VQ_EVT = 1,
 166	VHOST_SCSI_VQ_IO = 2,
 167};
 168
 169/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 170enum {
 171	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
 172					       (1ULL << VIRTIO_SCSI_F_T10_PI)
 173};
 174
 175#define VHOST_SCSI_MAX_TARGET	256
 176#define VHOST_SCSI_MAX_VQ	128
 177#define VHOST_SCSI_MAX_EVENT	128
 178
 179struct vhost_scsi_virtqueue {
 180	struct vhost_virtqueue vq;
 181	/*
 182	 * Reference counting for inflight reqs, used for flush operation. At
 183	 * any time, one reference tracks new commands submitted, while we
 184	 * wait for the other one to reach 0.
 185	 */
 186	struct vhost_scsi_inflight inflights[2];
 187	/*
 188	 * Indicate current inflight in use, protected by vq->mutex.
 189	 * Writers must also take dev mutex and flush under it.
 190	 */
 191	int inflight_idx;
 192};
 193
 194struct vhost_scsi {
 195	/* Protected by vhost_scsi->dev.mutex */
 196	struct vhost_scsi_tpg **vs_tpg;
 197	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 198
 199	struct vhost_dev dev;
 200	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 201
 202	struct vhost_work vs_completion_work; /* cmd completion work item */
 203	struct llist_head vs_completion_list; /* cmd completion queue */
 204
 205	struct vhost_work vs_event_work; /* evt injection work item */
 206	struct llist_head vs_event_list; /* evt injection queue */
 207
 208	bool vs_events_missed; /* any missed events, protected by vq->mutex */
 209	int vs_events_nr; /* num of pending events, protected by vq->mutex */
 210};
 211
 212/*
 213 * Context for processing request and control queue operations.
 214 */
 215struct vhost_scsi_ctx {
 216	int head;
 217	unsigned int out, in;
 218	size_t req_size, rsp_size;
 219	size_t out_size, in_size;
 220	u8 *target, *lunp;
 221	void *req;
 222	struct iov_iter out_iter;
 223};
 224
 225static struct workqueue_struct *vhost_scsi_workqueue;
 226
 227/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
 228static DEFINE_MUTEX(vhost_scsi_mutex);
 229static LIST_HEAD(vhost_scsi_list);
 230
 231static void vhost_scsi_done_inflight(struct kref *kref)
 232{
 233	struct vhost_scsi_inflight *inflight;
 234
 235	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
 236	complete(&inflight->comp);
 237}
 238
 239static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
 240				    struct vhost_scsi_inflight *old_inflight[])
 241{
 242	struct vhost_scsi_inflight *new_inflight;
 243	struct vhost_virtqueue *vq;
 244	int idx, i;
 245
 246	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 247		vq = &vs->vqs[i].vq;
 248
 249		mutex_lock(&vq->mutex);
 250
 251		/* store old inflight */
 252		idx = vs->vqs[i].inflight_idx;
 253		if (old_inflight)
 254			old_inflight[i] = &vs->vqs[i].inflights[idx];
 255
 256		/* set up new inflight */
 257		vs->vqs[i].inflight_idx = idx ^ 1;
 258		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
 259		kref_init(&new_inflight->kref);
 260		init_completion(&new_inflight->comp);
 261
 262		mutex_unlock(&vq->mutex);
 263	}
 264}
 265
 266static struct vhost_scsi_inflight *
 267vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
 268{
 269	struct vhost_scsi_inflight *inflight;
 270	struct vhost_scsi_virtqueue *svq;
 271
 272	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
 273	inflight = &svq->inflights[svq->inflight_idx];
 274	kref_get(&inflight->kref);
 275
 276	return inflight;
 277}
 278
 279static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
 280{
 281	kref_put(&inflight->kref, vhost_scsi_done_inflight);
 282}
 283
 284static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 285{
 286	return 1;
 287}
 288
 289static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
 290{
 291	return 0;
 292}
 293
 294static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 295{
 296	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 297				struct vhost_scsi_tpg, se_tpg);
 298	struct vhost_scsi_tport *tport = tpg->tport;
 299
 300	return &tport->tport_name[0];
 301}
 302
 303static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
 304{
 305	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 306				struct vhost_scsi_tpg, se_tpg);
 307	return tpg->tport_tpgt;
 308}
 309
 310static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
 311{
 312	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 313				struct vhost_scsi_tpg, se_tpg);
 314
 315	return tpg->tv_fabric_prot_type;
 316}
 317
 318static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 319{
 320	return 1;
 321}
 322
 323static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 324{
 325	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
 326				struct vhost_scsi_cmd, tvc_se_cmd);
 327	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
 328	int i;
 329
 330	if (tv_cmd->tvc_sgl_count) {
 331		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
 332			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
 333	}
 334	if (tv_cmd->tvc_prot_sgl_count) {
 335		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
 336			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
 337	}
 338
 339	vhost_scsi_put_inflight(tv_cmd->inflight);
 340	target_free_tag(se_sess, se_cmd);
 341}
 342
 343static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 344{
 345	return 0;
 346}
 347
 348static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 349{
 350	/* Go ahead and process the write immediately */
 351	target_execute_cmd(se_cmd);
 352	return 0;
 353}
 354
 355static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 356{
 357	return;
 358}
 359
 360static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 361{
 362	return 0;
 363}
 364
 365static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
 366{
 367	struct vhost_scsi *vs = cmd->tvc_vhost;
 368
 369	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
 370
 371	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 372}
 373
 374static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 375{
 376	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
 377				struct vhost_scsi_cmd, tvc_se_cmd);
 378	vhost_scsi_complete_cmd(cmd);
 379	return 0;
 380}
 381
 382static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 383{
 384	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
 385				struct vhost_scsi_cmd, tvc_se_cmd);
 386	vhost_scsi_complete_cmd(cmd);
 387	return 0;
 388}
 389
 390static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 391{
 392	return;
 393}
 394
 395static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
 396{
 397	return;
 398}
 399
 400static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 401{
 402	vs->vs_events_nr--;
 403	kfree(evt);
 404}
 405
 406static struct vhost_scsi_evt *
 407vhost_scsi_allocate_evt(struct vhost_scsi *vs,
 408		       u32 event, u32 reason)
 409{
 410	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 411	struct vhost_scsi_evt *evt;
 412
 413	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
 414		vs->vs_events_missed = true;
 415		return NULL;
 416	}
 417
 418	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
 419	if (!evt) {
 420		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
 421		vs->vs_events_missed = true;
 422		return NULL;
 423	}
 424
 425	evt->event.event = cpu_to_vhost32(vq, event);
 426	evt->event.reason = cpu_to_vhost32(vq, reason);
 427	vs->vs_events_nr++;
 428
 429	return evt;
 430}
 431
 432static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 433{
 434	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 435
 436	/* TODO locking against target/backend threads? */
 437	transport_generic_free_cmd(se_cmd, 0);
 438
 439}
 440
 441static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 442{
 443	return target_put_sess_cmd(se_cmd);
 444}
 445
 446static void
 447vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 448{
 449	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 450	struct virtio_scsi_event *event = &evt->event;
 451	struct virtio_scsi_event __user *eventp;
 452	unsigned out, in;
 453	int head, ret;
 454
 455	if (!vhost_vq_get_backend(vq)) {
 456		vs->vs_events_missed = true;
 457		return;
 458	}
 459
 460again:
 461	vhost_disable_notify(&vs->dev, vq);
 462	head = vhost_get_vq_desc(vq, vq->iov,
 463			ARRAY_SIZE(vq->iov), &out, &in,
 464			NULL, NULL);
 465	if (head < 0) {
 466		vs->vs_events_missed = true;
 467		return;
 468	}
 469	if (head == vq->num) {
 470		if (vhost_enable_notify(&vs->dev, vq))
 471			goto again;
 472		vs->vs_events_missed = true;
 473		return;
 474	}
 475
 476	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
 477		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
 478				vq->iov[out].iov_len);
 479		vs->vs_events_missed = true;
 480		return;
 481	}
 482
 483	if (vs->vs_events_missed) {
 484		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
 485		vs->vs_events_missed = false;
 486	}
 487
 488	eventp = vq->iov[out].iov_base;
 489	ret = __copy_to_user(eventp, event, sizeof(*event));
 490	if (!ret)
 491		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
 492	else
 493		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 494}
 495
 496static void vhost_scsi_evt_work(struct vhost_work *work)
 497{
 498	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 499					vs_event_work);
 500	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 501	struct vhost_scsi_evt *evt, *t;
 502	struct llist_node *llnode;
 503
 504	mutex_lock(&vq->mutex);
 505	llnode = llist_del_all(&vs->vs_event_list);
 506	llist_for_each_entry_safe(evt, t, llnode, list) {
 507		vhost_scsi_do_evt_work(vs, evt);
 508		vhost_scsi_free_evt(vs, evt);
 509	}
 510	mutex_unlock(&vq->mutex);
 511}
 512
 513/* Fill in status and signal that we are done processing this command
 514 *
 515 * This is scheduled in the vhost work queue so we are called with the owner
 516 * process mm and can access the vring.
 517 */
 518static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 519{
 520	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 521					vs_completion_work);
 522	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
 523	struct virtio_scsi_cmd_resp v_rsp;
 524	struct vhost_scsi_cmd *cmd, *t;
 525	struct llist_node *llnode;
 526	struct se_cmd *se_cmd;
 527	struct iov_iter iov_iter;
 528	int ret, vq;
 529
 530	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
 531	llnode = llist_del_all(&vs->vs_completion_list);
 532	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
 533		se_cmd = &cmd->tvc_se_cmd;
 534
 535		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
 536			cmd, se_cmd->residual_count, se_cmd->scsi_status);
 537
 538		memset(&v_rsp, 0, sizeof(v_rsp));
 539		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
 540		/* TODO is status_qualifier field needed? */
 541		v_rsp.status = se_cmd->scsi_status;
 542		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
 543						 se_cmd->scsi_sense_length);
 544		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
 545		       se_cmd->scsi_sense_length);
 546
 547		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
 548			      cmd->tvc_in_iovs, sizeof(v_rsp));
 549		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
 550		if (likely(ret == sizeof(v_rsp))) {
 551			struct vhost_scsi_virtqueue *q;
 552			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
 553			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
 554			vq = q - vs->vqs;
 555			__set_bit(vq, signal);
 556		} else
 557			pr_err("Faulted on virtio_scsi_cmd_resp\n");
 558
 559		vhost_scsi_free_cmd(cmd);
 560	}
 561
 562	vq = -1;
 563	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
 564		< VHOST_SCSI_MAX_VQ)
 565		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 566}
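
/*
 * Editor's note: a stand-alone user-space sketch of the completion
 * batching above -- mark each virtqueue that completed at least one
 * command in a bitmap, then kick every marked queue exactly once per
 * batch. Hypothetical names; a plain unsigned long stands in for
 * DECLARE_BITMAP()/__set_bit()/find_next_bit().
 */
#include <stdio.h>

#define MAX_VQ 64				/* sketch limit: one machine word */

static void signal_vq(unsigned long vq)
{
	printf("signal vq %lu\n", vq);		/* stands in for vhost_signal() */
}

int main(void)
{
	unsigned long signal = 0, i;
	int completed[] = { 2, 5, 2, 2, 5 };	/* vq index of each finished cmd */

	for (i = 0; i < sizeof(completed) / sizeof(completed[0]); i++)
		signal |= 1UL << completed[i];	/* __set_bit(vq, signal) */

	for (i = 0; i < MAX_VQ; i++)		/* the find_next_bit() loop */
		if (signal & (1UL << i))
			signal_vq(i);		/* one kick per vq, not per cmd */
	return 0;
}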
 567
 568static struct vhost_scsi_cmd *
 569vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 570		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
 571		   u32 exp_data_len, int data_direction)
 572{
 573	struct vhost_scsi_cmd *cmd;
 574	struct vhost_scsi_nexus *tv_nexus;
 575	struct se_session *se_sess;
 576	struct scatterlist *sg, *prot_sg;
 577	struct page **pages;
 578	int tag, cpu;
 579
 580	tv_nexus = tpg->tpg_nexus;
 581	if (!tv_nexus) {
 582		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
 583		return ERR_PTR(-EIO);
 584	}
 585	se_sess = tv_nexus->tvn_se_sess;
 586
 587	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
 588	if (tag < 0) {
 589		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
 590		return ERR_PTR(-ENOMEM);
 591	}
 592
 593	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
 594	sg = cmd->tvc_sgl;
 595	prot_sg = cmd->tvc_prot_sgl;
 596	pages = cmd->tvc_upages;
 597	memset(cmd, 0, sizeof(*cmd));
 598	cmd->tvc_sgl = sg;
 599	cmd->tvc_prot_sgl = prot_sg;
 600	cmd->tvc_upages = pages;
 601	cmd->tvc_se_cmd.map_tag = tag;
 602	cmd->tvc_se_cmd.map_cpu = cpu;
 603	cmd->tvc_tag = scsi_tag;
 604	cmd->tvc_lun = lun;
 605	cmd->tvc_task_attr = task_attr;
 606	cmd->tvc_exp_data_len = exp_data_len;
 607	cmd->tvc_data_direction = data_direction;
 608	cmd->tvc_nexus = tv_nexus;
 609	cmd->inflight = vhost_scsi_get_inflight(vq);
 610
 611	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
 612
 613	return cmd;
 614}
 615
 616/*
 617 * Map a user memory range into a scatterlist
 618 *
 619 * Returns the number of scatterlist entries used or -errno on error.
 620 */
 621static int
 622vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 623		      struct iov_iter *iter,
 624		      struct scatterlist *sgl,
 625		      bool write)
 626{
 627	struct page **pages = cmd->tvc_upages;
 628	struct scatterlist *sg = sgl;
 629	ssize_t bytes;
 630	size_t offset;
 631	unsigned int npages = 0;
 632
 633	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
 634				VHOST_SCSI_PREALLOC_UPAGES, &offset);
 635	/* No pages were pinned */
 636	if (bytes <= 0)
 637		return bytes < 0 ? bytes : -EFAULT;
 638
 639	iov_iter_advance(iter, bytes);
 640
 641	while (bytes) {
 642		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
 643		sg_set_page(sg++, pages[npages++], n, offset);
 644		bytes -= n;
 645		offset = 0;
 646	}
 647	return npages;
 648}
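
/*
 * Editor's note: user-space sketch of the chunking loop above -- split a
 * pinned byte range (offset into the first page, total bytes) into the
 * per-page (page index, offset, length) triples that sg_set_page() is
 * fed with. Hypothetical helper name; assumes 4 KiB pages.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL

static unsigned int map_range_to_segments(unsigned long offset, long bytes)
{
	unsigned int npages = 0;

	while (bytes) {
		unsigned long n = SKETCH_PAGE_SIZE - offset;

		if (n > (unsigned long)bytes)
			n = bytes;
		printf("page %u: offset %lu len %lu\n", npages, offset, n);
		npages++;		/* one scatterlist entry per page */
		bytes -= n;
		offset = 0;		/* only the first page may start mid-page */
	}
	return npages;			/* what vhost_scsi_map_to_sgl() returns */
}

int main(void)
{
	/* 10000 bytes starting 100 bytes into the first pinned page */
	map_range_to_segments(100, 10000);
	return 0;
}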
 649
 650static int
 651vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 652{
 653	int sgl_count = 0;
 654
 655	if (!iter || !iter->iov) {
 656		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
 657		       " present\n", __func__, bytes);
 658		return -EINVAL;
 659	}
 660
 661	sgl_count = iov_iter_npages(iter, 0xffff);
 662	if (sgl_count > max_sgls) {
 663		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
 664		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
 665		return -EINVAL;
 666	}
 667	return sgl_count;
 668}
 669
 670static int
 671vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
 672		      struct iov_iter *iter,
 673		      struct scatterlist *sg, int sg_count)
 674{
 675	struct scatterlist *p = sg;
 676	int ret;
 677
 678	while (iov_iter_count(iter)) {
 679		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
 680		if (ret < 0) {
 681			while (p < sg) {
 682				struct page *page = sg_page(p++);
 683				if (page)
 684					put_page(page);
 685			}
 686			return ret;
 687		}
 688		sg += ret;
 689	}
 690	return 0;
 691}
 692
 693static int
 694vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
 695		 size_t prot_bytes, struct iov_iter *prot_iter,
 696		 size_t data_bytes, struct iov_iter *data_iter)
 697{
 698	int sgl_count, ret;
 699	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
 700
 701	if (prot_bytes) {
 702		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
 703						 VHOST_SCSI_PREALLOC_PROT_SGLS);
 704		if (sgl_count < 0)
 705			return sgl_count;
 706
 707		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
 708		cmd->tvc_prot_sgl_count = sgl_count;
 709		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
 710			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
 711
 712		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
 713					    cmd->tvc_prot_sgl,
 714					    cmd->tvc_prot_sgl_count);
 715		if (ret < 0) {
 716			cmd->tvc_prot_sgl_count = 0;
 717			return ret;
 718		}
 719	}
 720	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
 721					 VHOST_SCSI_PREALLOC_SGLS);
 722	if (sgl_count < 0)
 723		return sgl_count;
 724
 725	sg_init_table(cmd->tvc_sgl, sgl_count);
 726	cmd->tvc_sgl_count = sgl_count;
 727	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
 728		  cmd->tvc_sgl, cmd->tvc_sgl_count);
 729
 730	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
 731				    cmd->tvc_sgl, cmd->tvc_sgl_count);
 732	if (ret < 0) {
 733		cmd->tvc_sgl_count = 0;
 734		return ret;
 735	}
 736	return 0;
 737}
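
/*
 * Editor's note: user-space sketch of the PI/data split that
 * vhost_scsi_mapal() and its caller perform -- the guest places any
 * protection bytes ahead of the data payload, so the protection view is
 * the payload truncated to prot_bytes and the data view is the payload
 * advanced past them. Plain pointer/length pairs stand in for
 * struct iov_iter; names are hypothetical.
 */
#include <assert.h>
#include <stddef.h>

struct span {
	const char *p;
	size_t len;
};

static void split_pi(struct span payload, size_t prot_bytes,
		     struct span *prot, struct span *data)
{
	*prot = payload;
	prot->len = prot_bytes;		/* iov_iter_truncate(&prot_iter, prot_bytes) */
	*data = payload;
	data->p += prot_bytes;		/* iov_iter_advance(&data_iter, prot_bytes) */
	data->len -= prot_bytes;	/* exp_data_len -= prot_bytes */
}

int main(void)
{
	char buf[8 + 512];		/* 8 PI bytes ahead of one 512-byte sector */
	struct span payload = { buf, sizeof(buf) }, prot, data;

	split_pi(payload, 8, &prot, &data);
	assert(prot.len == 8);
	assert(data.p == buf + 8 && data.len == 512);
	return 0;
}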
 738
 739static int vhost_scsi_to_tcm_attr(int attr)
 740{
 741	switch (attr) {
 742	case VIRTIO_SCSI_S_SIMPLE:
 743		return TCM_SIMPLE_TAG;
 744	case VIRTIO_SCSI_S_ORDERED:
 745		return TCM_ORDERED_TAG;
 746	case VIRTIO_SCSI_S_HEAD:
 747		return TCM_HEAD_TAG;
 748	case VIRTIO_SCSI_S_ACA:
 749		return TCM_ACA_TAG;
 750	default:
 751		break;
 752	}
 753	return TCM_SIMPLE_TAG;
 754}
 755
 756static void vhost_scsi_submission_work(struct work_struct *work)
 757{
 758	struct vhost_scsi_cmd *cmd =
 759		container_of(work, struct vhost_scsi_cmd, work);
 760	struct vhost_scsi_nexus *tv_nexus;
 761	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 762	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
 763	int rc;
 764
 765	/* FIXME: BIDI operation */
 766	if (cmd->tvc_sgl_count) {
 767		sg_ptr = cmd->tvc_sgl;
 768
 769		if (cmd->tvc_prot_sgl_count)
 770			sg_prot_ptr = cmd->tvc_prot_sgl;
 771		else
 772			se_cmd->prot_pto = true;
 773	} else {
 774		sg_ptr = NULL;
 775	}
 776	tv_nexus = cmd->tvc_nexus;
 777
 778	se_cmd->tag = 0;
 779	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
 780			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
 781			cmd->tvc_lun, cmd->tvc_exp_data_len,
 782			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
 783			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
 784			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
 785			cmd->tvc_prot_sgl_count);
 786	if (rc < 0) {
 787		transport_send_check_condition_and_sense(se_cmd,
 788				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 789		transport_generic_free_cmd(se_cmd, 0);
 790	}
 791}
 792
 793static void
 794vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 795			   struct vhost_virtqueue *vq,
 796			   int head, unsigned out)
 797{
 798	struct virtio_scsi_cmd_resp __user *resp;
 799	struct virtio_scsi_cmd_resp rsp;
 800	int ret;
 801
 802	memset(&rsp, 0, sizeof(rsp));
 803	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
 804	resp = vq->iov[out].iov_base;
 805	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
 806	if (!ret)
 807		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
 808	else
 809		pr_err("Faulted on virtio_scsi_cmd_resp\n");
 810}
 811
 812static int
 813vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
 814		    struct vhost_scsi_ctx *vc)
 815{
 816	int ret = -ENXIO;
 817
 818	vc->head = vhost_get_vq_desc(vq, vq->iov,
 819				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
 820				     NULL, NULL);
 821
 822	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
 823		 vc->head, vc->out, vc->in);
 824
 825	/* On error, stop handling until the next kick. */
 826	if (unlikely(vc->head < 0))
 827		goto done;
 828
 829	/* Nothing new?  Wait for eventfd to tell us they refilled. */
 830	if (vc->head == vq->num) {
 831		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
 832			vhost_disable_notify(&vs->dev, vq);
 833			ret = -EAGAIN;
 834		}
 835		goto done;
 836	}
 837
 838	/*
 839	 * Get the size of request and response buffers.
 840	 * FIXME: Not correct for BIDI operation
 841	 */
 842	vc->out_size = iov_length(vq->iov, vc->out);
 843	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
 844
 845	/*
 846	 * Copy over the virtio-scsi request header, which for an
 847	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
 848	 * single iovec may contain both the header + outgoing
 849	 * WRITE payloads.
 850	 *
 851	 * copy_from_iter() will advance out_iter, so that it will
 852	 * point at the start of the outgoing WRITE payload, if
 853	 * DMA_TO_DEVICE is set.
 854	 */
 855	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
 856	ret = 0;
 857
 858done:
 859	return ret;
 860}
 861
 862static int
 863vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
 864{
 865	if (unlikely(vc->in_size < vc->rsp_size)) {
 866		vq_err(vq,
 867		       "Response buf too small, need min %zu bytes got %zu",
 868		       vc->rsp_size, vc->in_size);
 869		return -EINVAL;
 870	} else if (unlikely(vc->out_size < vc->req_size)) {
 871		vq_err(vq,
 872		       "Request buf too small, need min %zu bytes got %zu",
 873		       vc->req_size, vc->out_size);
 874		return -EIO;
 875	}
 876
 877	return 0;
 878}
 879
 880static int
 881vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
 882		   struct vhost_scsi_tpg **tpgp)
 883{
 884	int ret = -EIO;
 885
 886	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
 887					  &vc->out_iter))) {
 888		vq_err(vq, "Faulted on copy_from_iter_full\n");
 889	} else if (unlikely(*vc->lunp != 1)) {
 890		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
 891		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
 892	} else {
 893		struct vhost_scsi_tpg **vs_tpg, *tpg;
 894
 895		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */
 896
 897		tpg = READ_ONCE(vs_tpg[*vc->target]);
 898		if (unlikely(!tpg)) {
 899			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
 900		} else {
 901			if (tpgp)
 902				*tpgp = tpg;
 903			ret = 0;
 904		}
 905	}
 906
 907	return ret;
 908}
 909
 910static void
 911vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 912{
 913	struct vhost_scsi_tpg **vs_tpg, *tpg;
 914	struct virtio_scsi_cmd_req v_req;
 915	struct virtio_scsi_cmd_req_pi v_req_pi;
 916	struct vhost_scsi_ctx vc;
 917	struct vhost_scsi_cmd *cmd;
 918	struct iov_iter in_iter, prot_iter, data_iter;
 919	u64 tag;
 920	u32 exp_data_len, data_direction;
 921	int ret, prot_bytes, c = 0;
 922	u16 lun;
 923	u8 task_attr;
 924	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
 925	void *cdb;
 926
 927	mutex_lock(&vq->mutex);
 928	/*
 929	 * We can handle the vq only after the endpoint is set up by calling the
 930	 * VHOST_SCSI_SET_ENDPOINT ioctl.
 931	 */
 932	vs_tpg = vhost_vq_get_backend(vq);
 933	if (!vs_tpg)
 934		goto out;
 935
 936	memset(&vc, 0, sizeof(vc));
 937	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
 938
 939	vhost_disable_notify(&vs->dev, vq);
 940
 941	do {
 942		ret = vhost_scsi_get_desc(vs, vq, &vc);
 943		if (ret)
 944			goto err;
 945
 946		/*
 947		 * Set up pointers and values based upon the different virtio-scsi
 948		 * request header if T10_PI is enabled in the KVM guest.
 949		 */
 950		if (t10_pi) {
 951			vc.req = &v_req_pi;
 952			vc.req_size = sizeof(v_req_pi);
 953			vc.lunp = &v_req_pi.lun[0];
 954			vc.target = &v_req_pi.lun[1];
 955		} else {
 956			vc.req = &v_req;
 957			vc.req_size = sizeof(v_req);
 958			vc.lunp = &v_req.lun[0];
 959			vc.target = &v_req.lun[1];
 960		}
 961
 962		/*
 963		 * Validate the size of request and response buffers.
 964		 * Check for a sane response buffer so we can report
 965		 * early errors back to the guest.
 966		 */
 967		ret = vhost_scsi_chk_size(vq, &vc);
 968		if (ret)
 969			goto err;
 970
 971		ret = vhost_scsi_get_req(vq, &vc, &tpg);
 972		if (ret)
 973			goto err;
 974
 975		ret = -EIO;	/* bad target on any error from here on */
 976
 977		/*
 978		 * Determine data_direction by calculating the total outgoing
 979		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
 980		 * response headers respectively.
 981		 *
 982		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
 983		 * to the right place.
 984		 *
 985		 * For DMA_FROM_DEVICE, the iovec will be just past the end
 986		 * of the virtio-scsi response header in either the same
 987		 * or immediately following iovec.
 988		 *
 989		 * Any associated T10_PI bytes for the outgoing / incoming
 990		 * payloads are included in calculation of exp_data_len here.
 991		 */
 992		prot_bytes = 0;
 993
 994		if (vc.out_size > vc.req_size) {
 995			data_direction = DMA_TO_DEVICE;
 996			exp_data_len = vc.out_size - vc.req_size;
 997			data_iter = vc.out_iter;
 998		} else if (vc.in_size > vc.rsp_size) {
 999			data_direction = DMA_FROM_DEVICE;
1000			exp_data_len = vc.in_size - vc.rsp_size;
1001
1002			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
1003				      vc.rsp_size + exp_data_len);
1004			iov_iter_advance(&in_iter, vc.rsp_size);
1005			data_iter = in_iter;
1006		} else {
1007			data_direction = DMA_NONE;
1008			exp_data_len = 0;
1009		}
1010		/*
1011		 * If T10_PI header + payload is present, setup prot_iter values
1012		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1013		 * host scatterlists via get_user_pages_fast().
1014		 */
1015		if (t10_pi) {
1016			if (v_req_pi.pi_bytesout) {
1017				if (data_direction != DMA_TO_DEVICE) {
1018					vq_err(vq, "Received non-zero pi_bytesout,"
1019						" but wrong data_direction\n");
1020					goto err;
1021				}
1022				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1023			} else if (v_req_pi.pi_bytesin) {
1024				if (data_direction != DMA_FROM_DEVICE) {
1025					vq_err(vq, "Received non-zero pi_bytesin,"
1026						" but wrong data_direction\n");
1027					goto err;
1028				}
1029				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1030			}
1031			/*
1032			 * Set prot_iter to data_iter and truncate it to
1033			 * prot_bytes, and advance data_iter past any
1034			 * preceding prot_bytes that may be present.
1035			 *
1036			 * Also fix up the exp_data_len to reflect only the
1037			 * actual data payload length.
1038			 */
1039			if (prot_bytes) {
1040				exp_data_len -= prot_bytes;
1041				prot_iter = data_iter;
1042				iov_iter_truncate(&prot_iter, prot_bytes);
1043				iov_iter_advance(&data_iter, prot_bytes);
1044			}
1045			tag = vhost64_to_cpu(vq, v_req_pi.tag);
1046			task_attr = v_req_pi.task_attr;
1047			cdb = &v_req_pi.cdb[0];
1048			lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1049		} else {
1050			tag = vhost64_to_cpu(vq, v_req.tag);
1051			task_attr = v_req.task_attr;
1052			cdb = &v_req.cdb[0];
1053			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1054		}
1055		/*
1056		 * Check that the received CDB size does not exceed our
1057		 * hardcoded max for vhost-scsi, then get a pre-allocated
1058		 * cmd descriptor for the new virtio-scsi tag.
1059		 *
1060		 * TODO what if cdb was too small for varlen cdb header?
1061		 */
1062		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1063			vq_err(vq, "Received SCSI CDB with command_size: %d that"
1064				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1065				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1066			goto err;
1067		}
1068		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1069					 exp_data_len + prot_bytes,
1070					 data_direction);
1071		if (IS_ERR(cmd)) {
1072			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1073			       PTR_ERR(cmd));
1074			goto err;
1075		}
1076		cmd->tvc_vhost = vs;
1077		cmd->tvc_vq = vq;
1078		cmd->tvc_resp_iov = vq->iov[vc.out];
1079		cmd->tvc_in_iovs = vc.in;
1080
1081		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1082			 cmd->tvc_cdb[0], cmd->tvc_lun);
1083		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1084			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1085
1086		if (data_direction != DMA_NONE) {
1087			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1088						      &prot_iter, exp_data_len,
1089						      &data_iter))) {
1090				vq_err(vq, "Failed to map iov to sgl\n");
1091				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1092				goto err;
1093			}
1094		}
1095		/*
1096		 * Save the descriptor from vhost_get_vq_desc() to be used to
1097		 * complete the virtio-scsi request in TCM callback context via
1098		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1099		 */
1100		cmd->tvc_vq_desc = vc.head;
1101		/*
1102		 * Dispatch cmd descriptor for cmwq execution in process
1103		 * context provided by vhost_scsi_workqueue.  This also ensures
1104		 * cmd is executed on the same kworker CPU as this vhost
1105		 * thread to gain positive L2 cache locality effects.
1106		 */
1107		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1108		queue_work(vhost_scsi_workqueue, &cmd->work);
1109		ret = 0;
1110err:
1111		/*
1112		 * ENXIO:  No more requests, or read error, wait for next kick
1113		 * EINVAL: Invalid response buffer, drop the request
1114		 * EIO:    Respond with bad target
1115		 * EAGAIN: Pending request
1116		 */
1117		if (ret == -ENXIO)
1118			break;
1119		else if (ret == -EIO)
1120			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1121	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1122out:
1123	mutex_unlock(&vq->mutex);
1124}
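
/*
 * Editor's note: user-space sketch of the 8-byte virtio-scsi LUN
 * convention handled above -- byte 0 must be 1, byte 1 is the target,
 * bytes 2-3 carry the LUN (with the 0x40 flat-addressing flag in byte 2
 * for LUNs >= 256, matching vhost_scsi_send_evt()), bytes 4-7 are zero.
 * Helper names are hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void encode_lun(uint8_t lun[8], uint8_t target, uint16_t unpacked)
{
	memset(lun, 0, 8);
	lun[0] = 0x01;			/* required by the virtio-scsi spec */
	lun[1] = target;
	if (unpacked >= 256)
		lun[2] = (unpacked >> 8) | 0x40;	/* flat addressing flag */
	lun[3] = unpacked & 0xFF;
}

static uint16_t decode_lun(const uint8_t lun[8])
{
	/* mirrors: ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF */
	return (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF;
}

int main(void)
{
	uint8_t lun[8];

	encode_lun(lun, 5, 300);
	assert(lun[0] == 0x01 && lun[1] == 5);
	assert(decode_lun(lun) == 300);
	return 0;
}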
1125
1126static void
1127vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
1128			   struct vhost_virtqueue *vq,
1129			   struct vhost_scsi_ctx *vc)
1130{
1131	struct virtio_scsi_ctrl_tmf_resp rsp;
1132	struct iov_iter iov_iter;
1133	int ret;
1134
1135	pr_debug("%s\n", __func__);
1136	memset(&rsp, 0, sizeof(rsp));
1137	rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1138
1139	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1140
1141	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1142	if (likely(ret == sizeof(rsp)))
1143		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1144	else
1145		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1146}
1147
1148static void
1149vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1150			struct vhost_virtqueue *vq,
1151			struct vhost_scsi_ctx *vc)
1152{
1153	struct virtio_scsi_ctrl_an_resp rsp;
1154	struct iov_iter iov_iter;
1155	int ret;
1156
1157	pr_debug("%s\n", __func__);
1158	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
1159	rsp.response = VIRTIO_SCSI_S_OK;
1160
1161	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1162
1163	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1164	if (likely(ret == sizeof(rsp)))
1165		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1166	else
1167		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1168}
1169
1170static void
1171vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1172{
1173	union {
1174		__virtio32 type;
1175		struct virtio_scsi_ctrl_an_req an;
1176		struct virtio_scsi_ctrl_tmf_req tmf;
1177	} v_req;
1178	struct vhost_scsi_ctx vc;
1179	size_t typ_size;
1180	int ret, c = 0;
1181
1182	mutex_lock(&vq->mutex);
1183	/*
1184	 * We can handle the vq only after the endpoint is set up by calling the
1185	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1186	 */
1187	if (!vhost_vq_get_backend(vq))
1188		goto out;
1189
1190	memset(&vc, 0, sizeof(vc));
1191
1192	vhost_disable_notify(&vs->dev, vq);
1193
1194	do {
1195		ret = vhost_scsi_get_desc(vs, vq, &vc);
1196		if (ret)
1197			goto err;
1198
1199		/*
1200		 * Get the request type first in order to set up
1201		 * other parameters dependent on the type.
1202		 */
1203		vc.req = &v_req.type;
1204		typ_size = sizeof(v_req.type);
1205
1206		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1207						  &vc.out_iter))) {
1208			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1209			/*
1210			 * The size of the response buffer depends on the
1211			 * request type and must be validated against it.
1212			 * Since the request type is not known, don't send
1213			 * a response.
1214			 */
1215			continue;
1216		}
1217
1218		switch (vhost32_to_cpu(vq, v_req.type)) {
1219		case VIRTIO_SCSI_T_TMF:
1220			vc.req = &v_req.tmf;
1221			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1222			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1223			vc.lunp = &v_req.tmf.lun[0];
1224			vc.target = &v_req.tmf.lun[1];
1225			break;
1226		case VIRTIO_SCSI_T_AN_QUERY:
1227		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1228			vc.req = &v_req.an;
1229			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1230			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1231			vc.lunp = &v_req.an.lun[0];
1232			vc.target = NULL;
1233			break;
1234		default:
1235			vq_err(vq, "Unknown control request %d", vhost32_to_cpu(vq, v_req.type));
1236			continue;
1237		}
1238
1239		/*
1240		 * Validate the size of request and response buffers.
1241		 * Check for a sane response buffer so we can report
1242		 * early errors back to the guest.
1243		 */
1244		ret = vhost_scsi_chk_size(vq, &vc);
1245		if (ret)
1246			goto err;
1247
1248		/*
1249		 * Get the rest of the request now that its size is known.
1250		 */
1251		vc.req += typ_size;
1252		vc.req_size -= typ_size;
1253
1254		ret = vhost_scsi_get_req(vq, &vc, NULL);
1255		if (ret)
1256			goto err;
1257
1258		if (vhost32_to_cpu(vq, v_req.type) == VIRTIO_SCSI_T_TMF)
1259			vhost_scsi_send_tmf_reject(vs, vq, &vc);
1260		else
1261			vhost_scsi_send_an_resp(vs, vq, &vc);
1262err:
1263		/*
1264		 * ENXIO:  No more requests, or read error, wait for next kick
1265		 * EINVAL: Invalid response buffer, drop the request
1266		 * EIO:    Respond with bad target
1267		 * EAGAIN: Pending request
1268		 */
1269		if (ret == -ENXIO)
1270			break;
1271		else if (ret == -EIO)
1272			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1273	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1274out:
1275	mutex_unlock(&vq->mutex);
1276}
1277
1278static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1279{
1280	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1281						poll.work);
1282	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1283
1284	pr_debug("%s: The handling func for control queue.\n", __func__);
1285	vhost_scsi_ctl_handle_vq(vs, vq);
1286}
1287
1288static void
1289vhost_scsi_send_evt(struct vhost_scsi *vs,
1290		   struct vhost_scsi_tpg *tpg,
1291		   struct se_lun *lun,
1292		   u32 event,
1293		   u32 reason)
1294{
1295	struct vhost_scsi_evt *evt;
1296
1297	evt = vhost_scsi_allocate_evt(vs, event, reason);
1298	if (!evt)
1299		return;
1300
1301	if (tpg && lun) {
1302		/* TODO: share lun setup code with virtio-scsi.ko */
1303		/*
1304		 * Note: evt->event is zeroed when we allocate it and
1305		 * lun[4-7] need to be zero according to virtio-scsi spec.
1306		 */
1307		evt->event.lun[0] = 0x01;
1308		evt->event.lun[1] = tpg->tport_tpgt;
1309		if (lun->unpacked_lun >= 256)
1310			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1311		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1312	}
1313
1314	llist_add(&evt->list, &vs->vs_event_list);
1315	vhost_work_queue(&vs->dev, &vs->vs_event_work);
1316}
1317
1318static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1319{
1320	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1321						poll.work);
1322	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1323
1324	mutex_lock(&vq->mutex);
1325	if (!vhost_vq_get_backend(vq))
1326		goto out;
1327
1328	if (vs->vs_events_missed)
1329		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1330out:
1331	mutex_unlock(&vq->mutex);
1332}
1333
1334static void vhost_scsi_handle_kick(struct vhost_work *work)
1335{
1336	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1337						poll.work);
1338	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1339
1340	vhost_scsi_handle_vq(vs, vq);
1341}
1342
1343static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1344{
1345	vhost_poll_flush(&vs->vqs[index].vq.poll);
1346}
1347
1348/* Callers must hold dev mutex */
1349static void vhost_scsi_flush(struct vhost_scsi *vs)
1350{
1351	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1352	int i;
1353
1354	/* Init new inflight and remember the old inflight */
1355	vhost_scsi_init_inflight(vs, old_inflight);
1356
1357	/*
1358	 * The inflight->kref was initialized to 1. We decrement it here to
1359	 * indicate the start of the flush operation so that it will reach 0
1360	 * when all the reqs are finished.
1361	 */
1362	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1363		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1364
1365	/* Flush both the vhost poll and vhost work */
1366	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1367		vhost_scsi_flush_vq(vs, i);
1368	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1369	vhost_work_flush(&vs->dev, &vs->vs_event_work);
1370
1371	/* Wait for all reqs issued before the flush to be finished */
1372	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1373		wait_for_completion(&old_inflight[i]->comp);
1374}
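
/*
 * Editor's note: a stand-alone user-space sketch of the two-slot
 * inflight protocol implemented by vhost_scsi_init_inflight(),
 * vhost_scsi_get_inflight(), vhost_scsi_put_inflight() and
 * vhost_scsi_flush() above. A plain counter and flag stand in for
 * struct kref and struct completion; single-threaded by assumption,
 * whereas the kernel relies on vq->mutex and the dev mutex.
 */
#include <assert.h>
#include <stdio.h>

struct inflight {
	int refs;	/* commands still attached to this slot (kref) */
	int drained;	/* set when refs hits 0 (complete(&comp)) */
};

static struct inflight slots[2];
static int active;	/* slot that new commands attach to (inflight_idx) */

static void slot_init(struct inflight *s)
{
	s->refs = 1;	/* kref_init(): the flush path owns one reference */
	s->drained = 0;
}

static struct inflight *cmd_get(void)	/* vhost_scsi_get_inflight() */
{
	slots[active].refs++;
	return &slots[active];
}

static void cmd_put(struct inflight *s)	/* vhost_scsi_put_inflight() */
{
	if (--s->refs == 0)
		s->drained = 1;
}

static void flush(void)			/* vhost_scsi_flush() */
{
	struct inflight *old = &slots[active];

	active ^= 1;			/* inflight_idx = idx ^ 1 */
	slot_init(&slots[active]);	/* new cmds now use the other slot */
	cmd_put(old);			/* drop the initial reference */
	/* the kernel now wait_for_completion()s until old->drained */
}

int main(void)
{
	struct inflight *a;

	slot_init(&slots[0]);
	a = cmd_get();				/* one command in flight */
	flush();				/* swap slots, drop init ref */
	assert(!slots[active ^ 1].drained);	/* old slot still holds 'a' */
	cmd_put(a);				/* the command completes */
	assert(slots[active ^ 1].drained);	/* flush may now return */
	printf("old inflight drained\n");
	return 0;
}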
1375
1376/*
1377 * Called from vhost_scsi_ioctl() context to walk the list of available
1378 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1379 *
1380 *  The lock nesting rule is:
1381 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1382 */
1383static int
1384vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1385			struct vhost_scsi_target *t)
1386{
1387	struct se_portal_group *se_tpg;
1388	struct vhost_scsi_tport *tv_tport;
1389	struct vhost_scsi_tpg *tpg;
1390	struct vhost_scsi_tpg **vs_tpg;
1391	struct vhost_virtqueue *vq;
1392	int index, ret, i, len;
1393	bool match = false;
1394
1395	mutex_lock(&vhost_scsi_mutex);
1396	mutex_lock(&vs->dev.mutex);
1397
1398	/* Verify that ring has been set up correctly. */
1399	for (index = 0; index < vs->dev.nvqs; ++index) {
1401		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1402			ret = -EFAULT;
1403			goto out;
1404		}
1405	}
1406
1407	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1408	vs_tpg = kzalloc(len, GFP_KERNEL);
1409	if (!vs_tpg) {
1410		ret = -ENOMEM;
1411		goto out;
1412	}
1413	if (vs->vs_tpg)
1414		memcpy(vs_tpg, vs->vs_tpg, len);
1415
1416	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1417		mutex_lock(&tpg->tv_tpg_mutex);
1418		if (!tpg->tpg_nexus) {
1419			mutex_unlock(&tpg->tv_tpg_mutex);
1420			continue;
1421		}
1422		if (tpg->tv_tpg_vhost_count != 0) {
1423			mutex_unlock(&tpg->tv_tpg_mutex);
1424			continue;
1425		}
1426		tv_tport = tpg->tport;
1427
1428		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1429			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1430				kfree(vs_tpg);
1431				mutex_unlock(&tpg->tv_tpg_mutex);
1432				ret = -EEXIST;
1433				goto out;
1434			}
1435			/*
1436			 * In order to ensure individual vhost-scsi configfs
1437			 * groups cannot be removed while in use by vhost ioctl,
1438			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1439			 * dependency now.
1440			 */
1441			se_tpg = &tpg->se_tpg;
1442			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1443			if (ret) {
1444				pr_warn("target_depend_item() failed: %d\n", ret);
1445				kfree(vs_tpg);
1446				mutex_unlock(&tpg->tv_tpg_mutex);
1447				goto out;
1448			}
1449			tpg->tv_tpg_vhost_count++;
1450			tpg->vhost_scsi = vs;
1451			vs_tpg[tpg->tport_tpgt] = tpg;
1452			match = true;
1453		}
1454		mutex_unlock(&tpg->tv_tpg_mutex);
1455	}
1456
1457	if (match) {
1458		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1459		       sizeof(vs->vs_vhost_wwpn));
1460		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1461			vq = &vs->vqs[i].vq;
1462			mutex_lock(&vq->mutex);
1463			vhost_vq_set_backend(vq, vs_tpg);
1464			vhost_vq_init_access(vq);
1465			mutex_unlock(&vq->mutex);
1466		}
1467		ret = 0;
1468	} else {
1469		ret = -EEXIST;
1470	}
1471
1472	/*
1473	 * Act as synchronize_rcu to make sure access to
1474	 * old vs->vs_tpg is finished.
1475	 */
1476	vhost_scsi_flush(vs);
1477	kfree(vs->vs_tpg);
1478	vs->vs_tpg = vs_tpg;
1479
1480out:
1481	mutex_unlock(&vs->dev.mutex);
1482	mutex_unlock(&vhost_scsi_mutex);
1483	return ret;
1484}
1485
1486static int
1487vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1488			  struct vhost_scsi_target *t)
1489{
1490	struct se_portal_group *se_tpg;
1491	struct vhost_scsi_tport *tv_tport;
1492	struct vhost_scsi_tpg *tpg;
1493	struct vhost_virtqueue *vq;
1494	bool match = false;
1495	int index, ret, i;
1496	u8 target;
1497
1498	mutex_lock(&vhost_scsi_mutex);
1499	mutex_lock(&vs->dev.mutex);
1500	/* Verify that ring has been setup correctly. */
1501	for (index = 0; index < vs->dev.nvqs; ++index) {
1502		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1503			ret = -EFAULT;
1504			goto err_dev;
1505		}
1506	}
1507
1508	if (!vs->vs_tpg) {
1509		ret = 0;
1510		goto err_dev;
1511	}
1512
1513	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1514		target = i;
1515		tpg = vs->vs_tpg[target];
1516		if (!tpg)
1517			continue;
1518
1519		mutex_lock(&tpg->tv_tpg_mutex);
1520		tv_tport = tpg->tport;
1521		if (!tv_tport) {
1522			ret = -ENODEV;
1523			goto err_tpg;
1524		}
1525
1526		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1527			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1528				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1529				tv_tport->tport_name, tpg->tport_tpgt,
1530				t->vhost_wwpn, t->vhost_tpgt);
1531			ret = -EINVAL;
1532			goto err_tpg;
1533		}
1534		tpg->tv_tpg_vhost_count--;
1535		tpg->vhost_scsi = NULL;
1536		vs->vs_tpg[target] = NULL;
1537		match = true;
1538		mutex_unlock(&tpg->tv_tpg_mutex);
1539		/*
1540		 * Release se_tpg->tpg_group.cg_item configfs dependency now
1541		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1542		 */
1543		se_tpg = &tpg->se_tpg;
1544		target_undepend_item(&se_tpg->tpg_group.cg_item);
1545	}
1546	if (match) {
1547		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1548			vq = &vs->vqs[i].vq;
1549			mutex_lock(&vq->mutex);
1550			vhost_vq_set_backend(vq, NULL);
1551			mutex_unlock(&vq->mutex);
1552		}
1553	}
1554	/*
1555	 * Act as synchronize_rcu to make sure access to
1556	 * old vs->vs_tpg is finished.
1557	 */
1558	vhost_scsi_flush(vs);
1559	kfree(vs->vs_tpg);
1560	vs->vs_tpg = NULL;
1561	WARN_ON(vs->vs_events_nr);
1562	mutex_unlock(&vs->dev.mutex);
1563	mutex_unlock(&vhost_scsi_mutex);
1564	return 0;
1565
1566err_tpg:
1567	mutex_unlock(&tpg->tv_tpg_mutex);
1568err_dev:
1569	mutex_unlock(&vs->dev.mutex);
1570	mutex_unlock(&vhost_scsi_mutex);
1571	return ret;
1572}
1573
1574static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1575{
1576	struct vhost_virtqueue *vq;
1577	int i;
1578
1579	if (features & ~VHOST_SCSI_FEATURES)
1580		return -EOPNOTSUPP;
1581
1582	mutex_lock(&vs->dev.mutex);
1583	if ((features & (1 << VHOST_F_LOG_ALL)) &&
1584	    !vhost_log_access_ok(&vs->dev)) {
1585		mutex_unlock(&vs->dev.mutex);
1586		return -EFAULT;
1587	}
1588
1589	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1590		vq = &vs->vqs[i].vq;
1591		mutex_lock(&vq->mutex);
1592		vq->acked_features = features;
1593		mutex_unlock(&vq->mutex);
1594	}
1595	mutex_unlock(&vs->dev.mutex);
1596	return 0;
1597}
1598
1599static int vhost_scsi_open(struct inode *inode, struct file *f)
1600{
1601	struct vhost_scsi *vs;
1602	struct vhost_virtqueue **vqs;
1603	int r = -ENOMEM, i;
1604
1605	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
1606	if (!vs) {
1607		vs = vzalloc(sizeof(*vs));
1608		if (!vs)
1609			goto err_vs;
1610	}
1611
1612	vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1613	if (!vqs)
1614		goto err_vqs;
1615
1616	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1617	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1618
1619	vs->vs_events_nr = 0;
1620	vs->vs_events_missed = false;
1621
1622	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1623	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1624	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1625	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1626	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1627		vqs[i] = &vs->vqs[i].vq;
1628		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1629	}
1630	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
1631		       VHOST_SCSI_WEIGHT, 0, true, NULL);
1632
1633	vhost_scsi_init_inflight(vs, NULL);
1634
1635	f->private_data = vs;
1636	return 0;
1637
1638err_vqs:
1639	kvfree(vs);
1640err_vs:
1641	return r;
1642}
1643
1644static int vhost_scsi_release(struct inode *inode, struct file *f)
1645{
1646	struct vhost_scsi *vs = f->private_data;
1647	struct vhost_scsi_target t;
1648
1649	mutex_lock(&vs->dev.mutex);
1650	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1651	mutex_unlock(&vs->dev.mutex);
1652	vhost_scsi_clear_endpoint(vs, &t);
1653	vhost_dev_stop(&vs->dev);
1654	vhost_dev_cleanup(&vs->dev);
1655	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1656	vhost_scsi_flush(vs);
1657	kfree(vs->dev.vqs);
1658	kvfree(vs);
1659	return 0;
1660}
1661
1662static long
1663vhost_scsi_ioctl(struct file *f,
1664		 unsigned int ioctl,
1665		 unsigned long arg)
1666{
1667	struct vhost_scsi *vs = f->private_data;
1668	struct vhost_scsi_target backend;
1669	void __user *argp = (void __user *)arg;
1670	u64 __user *featurep = argp;
1671	u32 __user *eventsp = argp;
1672	u32 events_missed;
1673	u64 features;
1674	int r, abi_version = VHOST_SCSI_ABI_VERSION;
1675	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1676
1677	switch (ioctl) {
1678	case VHOST_SCSI_SET_ENDPOINT:
1679		if (copy_from_user(&backend, argp, sizeof backend))
1680			return -EFAULT;
1681		if (backend.reserved != 0)
1682			return -EOPNOTSUPP;
1683
1684		return vhost_scsi_set_endpoint(vs, &backend);
1685	case VHOST_SCSI_CLEAR_ENDPOINT:
1686		if (copy_from_user(&backend, argp, sizeof backend))
1687			return -EFAULT;
1688		if (backend.reserved != 0)
1689			return -EOPNOTSUPP;
1690
1691		return vhost_scsi_clear_endpoint(vs, &backend);
1692	case VHOST_SCSI_GET_ABI_VERSION:
1693		if (copy_to_user(argp, &abi_version, sizeof abi_version))
1694			return -EFAULT;
1695		return 0;
1696	case VHOST_SCSI_SET_EVENTS_MISSED:
1697		if (get_user(events_missed, eventsp))
1698			return -EFAULT;
1699		mutex_lock(&vq->mutex);
1700		vs->vs_events_missed = events_missed;
1701		mutex_unlock(&vq->mutex);
1702		return 0;
1703	case VHOST_SCSI_GET_EVENTS_MISSED:
1704		mutex_lock(&vq->mutex);
1705		events_missed = vs->vs_events_missed;
1706		mutex_unlock(&vq->mutex);
1707		if (put_user(events_missed, eventsp))
1708			return -EFAULT;
1709		return 0;
1710	case VHOST_GET_FEATURES:
1711		features = VHOST_SCSI_FEATURES;
1712		if (copy_to_user(featurep, &features, sizeof features))
1713			return -EFAULT;
1714		return 0;
1715	case VHOST_SET_FEATURES:
1716		if (copy_from_user(&features, featurep, sizeof features))
1717			return -EFAULT;
1718		return vhost_scsi_set_features(vs, features);
1719	default:
1720		mutex_lock(&vs->dev.mutex);
1721		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1722		/* TODO: flush backend after dev ioctl. */
1723		if (r == -ENOIOCTLCMD)
1724			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1725		mutex_unlock(&vs->dev.mutex);
1726		return r;
1727	}
1728}
1729
1730static const struct file_operations vhost_scsi_fops = {
1731	.owner          = THIS_MODULE,
1732	.release        = vhost_scsi_release,
1733	.unlocked_ioctl = vhost_scsi_ioctl,
1734	.compat_ioctl	= compat_ptr_ioctl,
1735	.open           = vhost_scsi_open,
1736	.llseek		= noop_llseek,
1737};
1738
1739static struct miscdevice vhost_scsi_misc = {
1740	MISC_DYNAMIC_MINOR,
1741	"vhost-scsi",
1742	&vhost_scsi_fops,
1743};
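
/*
 * Editor's note: a minimal user-space sketch (not a complete vhost
 * setup) of how a VMM talks to the /dev/vhost-scsi node registered
 * above: take ownership, query the ABI version, then bind a TCM WWPN
 * as the endpoint. The WWPN string is an example value; real users
 * such as QEMU must also program the memory table, vrings and eventfds
 * before the queues are usable, all of which is omitted here.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
	struct vhost_scsi_target backend;
	int abi, fd = open("/dev/vhost-scsi", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-scsi");
		return 1;
	}
	if (ioctl(fd, VHOST_SET_OWNER) < 0 ||
	    ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &abi) < 0) {
		perror("ioctl");
		return 1;
	}
	printf("vhost-scsi ABI version %d\n", abi);

	memset(&backend, 0, sizeof(backend));	/* backend.reserved must be 0 */
	strncpy(backend.vhost_wwpn, "naa.5001405dedd73b91",	/* example */
		sizeof(backend.vhost_wwpn) - 1);
	if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend) < 0)
		perror("VHOST_SCSI_SET_ENDPOINT");

	close(fd);
	return 0;
}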
1744
1745static int __init vhost_scsi_register(void)
1746{
1747	return misc_register(&vhost_scsi_misc);
1748}
1749
1750static void vhost_scsi_deregister(void)
1751{
1752	misc_deregister(&vhost_scsi_misc);
1753}
1754
1755static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1756{
1757	switch (tport->tport_proto_id) {
1758	case SCSI_PROTOCOL_SAS:
1759		return "SAS";
1760	case SCSI_PROTOCOL_FCP:
1761		return "FCP";
1762	case SCSI_PROTOCOL_ISCSI:
1763		return "iSCSI";
1764	default:
1765		break;
1766	}
1767
1768	return "Unknown";
1769}
1770
1771static void
1772vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1773		  struct se_lun *lun, bool plug)
1774{
1775
1776	struct vhost_scsi *vs = tpg->vhost_scsi;
1777	struct vhost_virtqueue *vq;
1778	u32 reason;
1779
1780	if (!vs)
1781		return;
1782
1783	mutex_lock(&vs->dev.mutex);
1784
1785	if (plug)
1786		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1787	else
1788		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1789
1790	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1791	mutex_lock(&vq->mutex);
1792	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1793		vhost_scsi_send_evt(vs, tpg, lun,
1794				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1795	mutex_unlock(&vq->mutex);
1796	mutex_unlock(&vs->dev.mutex);
1797}
1798
1799static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1800{
1801	vhost_scsi_do_plug(tpg, lun, true);
1802}
1803
1804static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1805{
1806	vhost_scsi_do_plug(tpg, lun, false);
1807}
1808
1809static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1810			       struct se_lun *lun)
1811{
1812	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1813				struct vhost_scsi_tpg, se_tpg);
1814
1815	mutex_lock(&vhost_scsi_mutex);
1816
1817	mutex_lock(&tpg->tv_tpg_mutex);
1818	tpg->tv_tpg_port_count++;
1819	mutex_unlock(&tpg->tv_tpg_mutex);
1820
1821	vhost_scsi_hotplug(tpg, lun);
1822
1823	mutex_unlock(&vhost_scsi_mutex);
1824
1825	return 0;
1826}
1827
1828static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1829				  struct se_lun *lun)
1830{
1831	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1832				struct vhost_scsi_tpg, se_tpg);
1833
1834	mutex_lock(&vhost_scsi_mutex);
1835
1836	mutex_lock(&tpg->tv_tpg_mutex);
1837	tpg->tv_tpg_port_count--;
1838	mutex_unlock(&tpg->tv_tpg_mutex);
1839
1840	vhost_scsi_hotunplug(tpg, lun);
1841
1842	mutex_unlock(&vhost_scsi_mutex);
1843}
1844
1845static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1846{
1847	struct vhost_scsi_cmd *tv_cmd;
1848	unsigned int i;
1849
1850	if (!se_sess->sess_cmd_map)
1851		return;
1852
1853	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1854		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1855
1856		kfree(tv_cmd->tvc_sgl);
1857		kfree(tv_cmd->tvc_prot_sgl);
1858		kfree(tv_cmd->tvc_upages);
1859	}
1860}
1861
1862static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1863		struct config_item *item, const char *page, size_t count)
1864{
1865	struct se_portal_group *se_tpg = attrib_to_tpg(item);
1866	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1867				struct vhost_scsi_tpg, se_tpg);
1868	unsigned long val;
1869	int ret = kstrtoul(page, 0, &val);
1870
1871	if (ret) {
1872		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1873		return ret;
1874	}
1875	if (val != 0 && val != 1 && val != 3) {
1876		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1877		return -EINVAL;
1878	}
1879	tpg->tv_fabric_prot_type = val;
1880
1881	return count;
1882}
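
/*
 * For illustration only: a sketch of setting this attribute from
 * userspace. The configfs mount point and WWPN are assumptions; as the
 * store method above enforces, the only accepted values are 0, 1 and 3.
 */
#include <stdio.h>

static int set_fabric_prot_type_example(int val)
{
	FILE *f = fopen("/sys/kernel/config/target/vhost/"
			"naa.600140554cf3a18e/tpgt_1/attrib/fabric_prot_type",
			"w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}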
1883
1884static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1885		struct config_item *item, char *page)
1886{
1887	struct se_portal_group *se_tpg = attrib_to_tpg(item);
1888	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1889				struct vhost_scsi_tpg, se_tpg);
1890
1891	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1892}
1893
1894CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1895
1896static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1897	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1898	NULL,
1899};
1900
1901static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1902			       struct se_session *se_sess, void *p)
1903{
1904	struct vhost_scsi_cmd *tv_cmd;
1905	unsigned int i;
1906
1907	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1908		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1909
1910		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1911					  sizeof(struct scatterlist),
1912					  GFP_KERNEL);
1913		if (!tv_cmd->tvc_sgl) {
1914			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1915			goto out;
1916		}
1917
1918		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1919					     sizeof(struct page *),
1920					     GFP_KERNEL);
1921		if (!tv_cmd->tvc_upages) {
1922			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1923			goto out;
1924		}
1925
1926		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1927					       sizeof(struct scatterlist),
1928					       GFP_KERNEL);
1929		if (!tv_cmd->tvc_prot_sgl) {
1930			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1931			goto out;
1932		}
1933	}
1934	return 0;
1935out:
1936	vhost_scsi_free_cmd_map_res(se_sess);
1937	return -ENOMEM;
1938}
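
/*
 * Rough sizing of the preallocation above, assuming a 64-bit build with
 * a 32-byte struct scatterlist and 8-byte pointers:
 *
 *	tvc_sgl:      256 tags * 2048 entries * 32 B = 16 MiB
 *	tvc_prot_sgl: 256 tags * 2048 entries * 32 B = 16 MiB
 *	tvc_upages:   256 tags * 2048 ptrs    *  8 B =  4 MiB
 *
 * i.e. roughly 36 MiB per I_T nexus, the price paid to avoid
 * allocations in the I/O fast path.
 */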
1939
1940static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1941				const char *name)
1942{
1943	struct vhost_scsi_nexus *tv_nexus;
1944
1945	mutex_lock(&tpg->tv_tpg_mutex);
1946	if (tpg->tpg_nexus) {
1947		mutex_unlock(&tpg->tv_tpg_mutex);
1948		pr_debug("tpg->tpg_nexus already exists\n");
1949		return -EEXIST;
1950	}
1951
1952	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1953	if (!tv_nexus) {
1954		mutex_unlock(&tpg->tv_tpg_mutex);
1955		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1956		return -ENOMEM;
1957	}
1958	/*
1959	 * Since we are running in 'demo mode' this call will generate a
1960	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
1961	 * the SCSI Initiator port name of the passed configfs group 'name'.
1962	 */
1963	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
1964					VHOST_SCSI_DEFAULT_TAGS,
1965					sizeof(struct vhost_scsi_cmd),
1966					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1967					(unsigned char *)name, tv_nexus,
1968					vhost_scsi_nexus_cb);
1969	if (IS_ERR(tv_nexus->tvn_se_sess)) {
1970		mutex_unlock(&tpg->tv_tpg_mutex);
1971		kfree(tv_nexus);
1972		return -ENOMEM;
1973	}
1974	tpg->tpg_nexus = tv_nexus;
1975
1976	mutex_unlock(&tpg->tv_tpg_mutex);
1977	return 0;
1978}
1979
1980static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1981{
1982	struct se_session *se_sess;
1983	struct vhost_scsi_nexus *tv_nexus;
1984
1985	mutex_lock(&tpg->tv_tpg_mutex);
1986	tv_nexus = tpg->tpg_nexus;
1987	if (!tv_nexus) {
1988		mutex_unlock(&tpg->tv_tpg_mutex);
1989		return -ENODEV;
1990	}
1991
1992	se_sess = tv_nexus->tvn_se_sess;
1993	if (!se_sess) {
1994		mutex_unlock(&tpg->tv_tpg_mutex);
1995		return -ENODEV;
1996	}
1997
1998	if (tpg->tv_tpg_port_count != 0) {
1999		mutex_unlock(&tpg->tv_tpg_mutex);
2000		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2001			" active TPG port count: %d\n",
2002			tpg->tv_tpg_port_count);
2003		return -EBUSY;
2004	}
2005
2006	if (tpg->tv_tpg_vhost_count != 0) {
2007		mutex_unlock(&tpg->tv_tpg_mutex);
2008		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2009			" active TPG vhost count: %d\n",
2010			tpg->tv_tpg_vhost_count);
2011		return -EBUSY;
2012	}
2013
2014	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2015		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2016		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2017
2018	vhost_scsi_free_cmd_map_res(se_sess);
2019	/*
2020	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2021	 */
2022	target_remove_session(se_sess);
2023	tpg->tpg_nexus = NULL;
2024	mutex_unlock(&tpg->tv_tpg_mutex);
2025
2026	kfree(tv_nexus);
2027	return 0;
2028}
2029
2030static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2031{
2032	struct se_portal_group *se_tpg = to_tpg(item);
2033	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2034				struct vhost_scsi_tpg, se_tpg);
2035	struct vhost_scsi_nexus *tv_nexus;
2036	ssize_t ret;
2037
2038	mutex_lock(&tpg->tv_tpg_mutex);
2039	tv_nexus = tpg->tpg_nexus;
2040	if (!tv_nexus) {
2041		mutex_unlock(&tpg->tv_tpg_mutex);
2042		return -ENODEV;
2043	}
2044	ret = snprintf(page, PAGE_SIZE, "%s\n",
2045			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2046	mutex_unlock(&tpg->tv_tpg_mutex);
2047
2048	return ret;
2049}
2050
2051static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2052		const char *page, size_t count)
2053{
2054	struct se_portal_group *se_tpg = to_tpg(item);
2055	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2056				struct vhost_scsi_tpg, se_tpg);
2057	struct vhost_scsi_tport *tport_wwn = tpg->tport;
2058	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2059	int ret;
2060	/*
2061	 * Shut down the active I_T nexus if 'NULL' is passed.
2062	 */
2063	if (!strncmp(page, "NULL", 4)) {
2064		ret = vhost_scsi_drop_nexus(tpg);
2065		return (!ret) ? count : ret;
2066	}
2067	/*
2068	 * Otherwise make sure the passed virtual Initiator port WWN matches
2069	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2070	 * vhost_scsi_make_nexus().
2071	 */
2072	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2073		pr_err("Emulated NAA SAS Address: %s, exceeds"
2074				" max: %d\n", page, VHOST_SCSI_NAMELEN);
2075		return -EINVAL;
2076	}
2077	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2078
2079	ptr = strstr(i_port, "naa.");
2080	if (ptr) {
2081		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2082			pr_err("Passed SAS Initiator Port %s does not"
2083				" match target port protoid: %s\n", i_port,
2084				vhost_scsi_dump_proto_id(tport_wwn));
2085			return -EINVAL;
2086		}
2087		port_ptr = &i_port[0];
2088		goto check_newline;
2089	}
2090	ptr = strstr(i_port, "fc.");
2091	if (ptr) {
2092		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2093			pr_err("Passed FCP Initiator Port %s does not"
2094				" match target port protoid: %s\n", i_port,
2095				vhost_scsi_dump_proto_id(tport_wwn));
2096			return -EINVAL;
2097		}
2098		port_ptr = &i_port[3]; /* Skip over "fc." */
2099		goto check_newline;
2100	}
2101	ptr = strstr(i_port, "iqn.");
2102	if (ptr) {
2103		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2104			pr_err("Passed iSCSI Initiator Port %s does not"
2105				" match target port protoid: %s\n", i_port,
2106				vhost_scsi_dump_proto_id(tport_wwn));
2107			return -EINVAL;
2108		}
2109		port_ptr = &i_port[0];
2110		goto check_newline;
2111	}
2112	pr_err("Unable to locate prefix for emulated Initiator Port:"
2113			" %s\n", i_port);
2114	return -EINVAL;
2115	/*
2116	 * Clear any trailing newline for the NAA WWN
2117	 */
2118check_newline:
2119	if (i_port[strlen(i_port)-1] == '\n')
2120		i_port[strlen(i_port)-1] = '\0';
2121
2122	ret = vhost_scsi_make_nexus(tpg, port_ptr);
2123	if (ret < 0)
2124		return ret;
2125
2126	return count;
2127}
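
/*
 * For illustration only: the nexus is created by writing an initiator
 * port name whose prefix matches the tport protocol, e.g. (made-up
 * values) "naa.5001405041e9bf35" for SAS, "fc.20000000c9a01f32" for FCP
 * or "iqn.2016-01.org.example:init0" for iSCSI, and torn down by
 * writing "NULL". A sketch with assumed configfs paths:
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int make_nexus_example(const char *initiator_name)
{
	ssize_t n;
	int fd = open("/sys/kernel/config/target/vhost/"
		      "naa.600140554cf3a18e/tpgt_1/nexus", O_WRONLY);

	if (fd < 0)
		return -1;
	n = write(fd, initiator_name, strlen(initiator_name));
	close(fd);
	return n < 0 ? -1 : 0;
}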
2128
2129CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2130
2131static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2132	&vhost_scsi_tpg_attr_nexus,
2133	NULL,
2134};
2135
2136static struct se_portal_group *
2137vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2138{
2139	struct vhost_scsi_tport *tport = container_of(wwn,
2140			struct vhost_scsi_tport, tport_wwn);
2141
2142	struct vhost_scsi_tpg *tpg;
2143	u16 tpgt;
2144	int ret;
2145
2146	if (strstr(name, "tpgt_") != name)
2147		return ERR_PTR(-EINVAL);
2148	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2149		return ERR_PTR(-EINVAL);
2150
2151	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2152	if (!tpg) {
2153		pr_err("Unable to allocate struct vhost_scsi_tpg");
2154		return ERR_PTR(-ENOMEM);
2155	}
2156	mutex_init(&tpg->tv_tpg_mutex);
2157	INIT_LIST_HEAD(&tpg->tv_tpg_list);
2158	tpg->tport = tport;
2159	tpg->tport_tpgt = tpgt;
2160
2161	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2162	if (ret < 0) {
2163		kfree(tpg);
2164		return NULL;
2165	}
2166	mutex_lock(&vhost_scsi_mutex);
2167	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2168	mutex_unlock(&vhost_scsi_mutex);
2169
2170	return &tpg->se_tpg;
2171}
2172
2173static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2174{
2175	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2176				struct vhost_scsi_tpg, se_tpg);
2177
2178	mutex_lock(&vhost_scsi_mutex);
2179	list_del(&tpg->tv_tpg_list);
2180	mutex_unlock(&vhost_scsi_mutex);
2181	/*
2182	 * Release the virtual I_T Nexus for this vhost TPG
2183	 */
2184	vhost_scsi_drop_nexus(tpg);
2185	/*
2186	 * Deregister the se_tpg from TCM.
2187	 */
2188	core_tpg_deregister(se_tpg);
2189	kfree(tpg);
2190}
2191
2192static struct se_wwn *
2193vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2194		     struct config_group *group,
2195		     const char *name)
2196{
2197	struct vhost_scsi_tport *tport;
2198	char *ptr;
2199	u64 wwpn = 0;
2200	int off = 0;
2201
2202	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2203		return ERR_PTR(-EINVAL); */
2204
2205	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2206	if (!tport) {
2207		pr_err("Unable to allocate struct vhost_scsi_tport");
2208		return ERR_PTR(-ENOMEM);
2209	}
2210	tport->tport_wwpn = wwpn;
2211	/*
2212	 * Determine the emulated Protocol Identifier and Target Port Name
2213	 * based on the incoming configfs directory name.
2214	 */
2215	ptr = strstr(name, "naa.");
2216	if (ptr) {
2217		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2218		goto check_len;
2219	}
2220	ptr = strstr(name, "fc.");
2221	if (ptr) {
2222		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2223		off = 3; /* Skip over "fc." */
2224		goto check_len;
2225	}
2226	ptr = strstr(name, "iqn.");
2227	if (ptr) {
2228		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2229		goto check_len;
2230	}
2231
2232	pr_err("Unable to locate prefix for emulated Target Port:"
2233			" %s\n", name);
2234	kfree(tport);
2235	return ERR_PTR(-EINVAL);
2236
2237check_len:
2238	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2239		pr_err("Emulated %s Address: %s, exceeds"
2240			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2241			VHOST_SCSI_NAMELEN);
2242		kfree(tport);
2243		return ERR_PTR(-EINVAL);
2244	}
2245	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2246
2247	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2248		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2249
2250	return &tport->tport_wwn;
2251}
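
/*
 * For illustration only: vhost_scsi_make_tport() and
 * vhost_scsi_make_tpg() run when userspace creates the matching
 * configfs directories, in that order. The WWPN is a made-up example.
 */
#include <sys/stat.h>

static int create_target_example(void)
{
	/* WWN directory -> vhost_scsi_make_tport() */
	if (mkdir("/sys/kernel/config/target/vhost/naa.600140554cf3a18e",
		  0755) < 0)
		return -1;
	/* "tpgt_<n>" directory -> vhost_scsi_make_tpg() */
	return mkdir("/sys/kernel/config/target/vhost/"
		     "naa.600140554cf3a18e/tpgt_1", 0755);
}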
2252
2253static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2254{
2255	struct vhost_scsi_tport *tport = container_of(wwn,
2256				struct vhost_scsi_tport, tport_wwn);
2257
2258	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2259		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2260		tport->tport_name);
2261
2262	kfree(tport);
2263}
2264
2265static ssize_t
2266vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2267{
2268	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2269		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2270		utsname()->machine);
2271}
2272
2273CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2274
2275static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2276	&vhost_scsi_wwn_attr_version,
2277	NULL,
2278};
2279
2280static const struct target_core_fabric_ops vhost_scsi_ops = {
2281	.module				= THIS_MODULE,
2282	.fabric_name			= "vhost",
2283	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
2284	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
2285	.tpg_get_tag			= vhost_scsi_get_tpgt,
2286	.tpg_check_demo_mode		= vhost_scsi_check_true,
2287	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
2288	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2289	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2290	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
2291	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
2292	.release_cmd			= vhost_scsi_release_cmd,
2293	.check_stop_free		= vhost_scsi_check_stop_free,
2294	.sess_get_index			= vhost_scsi_sess_get_index,
2295	.sess_get_initiator_sid		= NULL,
2296	.write_pending			= vhost_scsi_write_pending,
2297	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
2298	.get_cmd_state			= vhost_scsi_get_cmd_state,
2299	.queue_data_in			= vhost_scsi_queue_data_in,
2300	.queue_status			= vhost_scsi_queue_status,
2301	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
2302	.aborted_task			= vhost_scsi_aborted_task,
2303	/*
2304	 * Setup callers for generic logic in target_core_fabric_configfs.c
2305	 */
2306	.fabric_make_wwn		= vhost_scsi_make_tport,
2307	.fabric_drop_wwn		= vhost_scsi_drop_tport,
2308	.fabric_make_tpg		= vhost_scsi_make_tpg,
2309	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
2310	.fabric_post_link		= vhost_scsi_port_link,
2311	.fabric_pre_unlink		= vhost_scsi_port_unlink,
2312
2313	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
2314	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
2315	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
2316};
2317
2318static int __init vhost_scsi_init(void)
2319{
2320	int ret = -ENOMEM;
2321
2322	pr_debug("TCM_VHOST fabric module %s on %s/%s"
2323		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2324		utsname()->machine);
2325
2326	/*
2327	 * Use our own dedicated workqueue for submitting I/O into
2328	 * target core to avoid contention within system_wq.
2329	 */
2330	vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2331	if (!vhost_scsi_workqueue)
2332		goto out;
2333
2334	ret = vhost_scsi_register();
2335	if (ret < 0)
2336		goto out_destroy_workqueue;
2337
2338	ret = target_register_template(&vhost_scsi_ops);
2339	if (ret < 0)
2340		goto out_vhost_scsi_deregister;
2341
2342	return 0;
2343
2344out_vhost_scsi_deregister:
2345	vhost_scsi_deregister();
2346out_destroy_workqueue:
2347	destroy_workqueue(vhost_scsi_workqueue);
2348out:
2349	return ret;
2350	}
2351
2352static void vhost_scsi_exit(void)
2353{
2354	target_unregister_template(&vhost_scsi_ops);
2355	vhost_scsi_deregister();
2356	destroy_workqueue(vhost_scsi_workqueue);
2357	}
2358
2359MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2360MODULE_ALIAS("tcm_vhost");
2361MODULE_LICENSE("GPL");
2362module_init(vhost_scsi_init);
2363module_exit(vhost_scsi_exit);
v5.4
   1/*******************************************************************************
   2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
   3 *
   4 * (C) Copyright 2010-2013 Datera, Inc.
   5 * (C) Copyright 2010-2012 IBM Corp.
   6 *
   7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   8 *
   9 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
  10 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 ****************************************************************************/
  23
  24#include <linux/module.h>
  25#include <linux/moduleparam.h>
  26#include <generated/utsrelease.h>
  27#include <linux/utsname.h>
  28#include <linux/init.h>
  29#include <linux/slab.h>
  30#include <linux/kthread.h>
  31#include <linux/types.h>
  32#include <linux/string.h>
  33#include <linux/configfs.h>
  34#include <linux/ctype.h>
  35#include <linux/compat.h>
  36#include <linux/eventfd.h>
  37#include <linux/fs.h>
  38#include <linux/vmalloc.h>
  39#include <linux/miscdevice.h>
  40#include <asm/unaligned.h>
  41#include <scsi/scsi_common.h>
  42#include <scsi/scsi_proto.h>
  43#include <target/target_core_base.h>
  44#include <target/target_core_fabric.h>
  45#include <linux/vhost.h>
  46#include <linux/virtio_scsi.h>
  47#include <linux/llist.h>
  48#include <linux/bitmap.h>
  49
  50#include "vhost.h"
  51
  52#define VHOST_SCSI_VERSION  "v0.1"
  53#define VHOST_SCSI_NAMELEN 256
  54#define VHOST_SCSI_MAX_CDB_SIZE 32
  55#define VHOST_SCSI_DEFAULT_TAGS 256
  56#define VHOST_SCSI_PREALLOC_SGLS 2048
  57#define VHOST_SCSI_PREALLOC_UPAGES 2048
  58#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
  59
  60/* Max number of requests before requeueing the job.
  61 * Using this limit prevents one virtqueue from starving others with
  62 * request.
  63 */
  64#define VHOST_SCSI_WEIGHT 256
  65
  66struct vhost_scsi_inflight {
  67	/* Wait for the flush operation to finish */
  68	struct completion comp;
  69	/* Refcount for the inflight reqs */
  70	struct kref kref;
  71};
  72
  73struct vhost_scsi_cmd {
  74	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
  75	int tvc_vq_desc;
  76	/* virtio-scsi initiator task attribute */
  77	int tvc_task_attr;
  78	/* virtio-scsi response incoming iovecs */
  79	int tvc_in_iovs;
  80	/* virtio-scsi initiator data direction */
  81	enum dma_data_direction tvc_data_direction;
  82	/* Expected data transfer length from virtio-scsi header */
  83	u32 tvc_exp_data_len;
  84	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
  85	u64 tvc_tag;
  86	/* The number of scatterlists associated with this cmd */
  87	u32 tvc_sgl_count;
  88	u32 tvc_prot_sgl_count;
  89	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
  90	u32 tvc_lun;
  91	/* Pointer to the SGL formatted memory from virtio-scsi */
  92	struct scatterlist *tvc_sgl;
  93	struct scatterlist *tvc_prot_sgl;
  94	struct page **tvc_upages;
  95	/* Response header iovec */
  96	struct iovec tvc_resp_iov;
  97	/* Pointer to vhost_scsi for our device */
  98	struct vhost_scsi *tvc_vhost;
  99	/* Pointer to vhost_virtqueue for the cmd */
 100	struct vhost_virtqueue *tvc_vq;
 101	/* Pointer to vhost nexus memory */
 102	struct vhost_scsi_nexus *tvc_nexus;
 103	/* The TCM I/O descriptor that is accessed via container_of() */
 104	struct se_cmd tvc_se_cmd;
 105	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
 106	struct work_struct work;
 107	/* Copy of the incoming SCSI command descriptor block (CDB) */
 108	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
 109	/* Sense buffer that will be mapped into outgoing status */
 110	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 111	/* Completed commands list, serviced from vhost worker thread */
 112	struct llist_node tvc_completion_list;
 113	/* Used to track inflight cmd */
 114	struct vhost_scsi_inflight *inflight;
 115};
 116
 117struct vhost_scsi_nexus {
 118	/* Pointer to TCM session for I_T Nexus */
 119	struct se_session *tvn_se_sess;
 120};
 121
 122struct vhost_scsi_tpg {
 123	/* Vhost port target portal group tag for TCM */
 124	u16 tport_tpgt;
 125	/* Used to track number of TPG Port/Lun Links w.r.t. explicit I_T Nexus shutdown */
 126	int tv_tpg_port_count;
 127	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
 128	int tv_tpg_vhost_count;
 129	/* Used for enabling T10-PI with legacy devices */
 130	int tv_fabric_prot_type;
 131	/* list for vhost_scsi_list */
 132	struct list_head tv_tpg_list;
 133	/* Used to protect access for tpg_nexus */
 134	struct mutex tv_tpg_mutex;
 135	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
 136	struct vhost_scsi_nexus *tpg_nexus;
 137	/* Pointer back to vhost_scsi_tport */
 138	struct vhost_scsi_tport *tport;
 139	/* Returned by vhost_scsi_make_tpg() */
 140	struct se_portal_group se_tpg;
 141	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 142	struct vhost_scsi *vhost_scsi;
 143};
 144
 145struct vhost_scsi_tport {
 146	/* SCSI protocol the tport is providing */
 147	u8 tport_proto_id;
 148	/* Binary World Wide unique Port Name for Vhost Target port */
 149	u64 tport_wwpn;
 150	/* ASCII formatted WWPN for Vhost Target port */
 151	char tport_name[VHOST_SCSI_NAMELEN];
 152	/* Returned by vhost_scsi_make_tport() */
 153	struct se_wwn tport_wwn;
 154};
 155
 156struct vhost_scsi_evt {
 157	/* event to be sent to guest */
 158	struct virtio_scsi_event event;
 159	/* event list, serviced from vhost worker thread */
 160	struct llist_node list;
 161};
 162
 163enum {
 164	VHOST_SCSI_VQ_CTL = 0,
 165	VHOST_SCSI_VQ_EVT = 1,
 166	VHOST_SCSI_VQ_IO = 2,
 167};
 168
 169/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 170enum {
 171	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
 172					       (1ULL << VIRTIO_SCSI_F_T10_PI)
 173};
 174
 175#define VHOST_SCSI_MAX_TARGET	256
 176#define VHOST_SCSI_MAX_VQ	128
 177#define VHOST_SCSI_MAX_EVENT	128
 178
 179struct vhost_scsi_virtqueue {
 180	struct vhost_virtqueue vq;
 181	/*
 182	 * Reference counting for inflight reqs, used for the flush operation.
 183	 * At any given time, one reference tracks newly submitted commands,
 184	 * while we wait for the other one to reach 0.
 185	 */
 186	struct vhost_scsi_inflight inflights[2];
 187	/*
 188	 * Indicate current inflight in use, protected by vq->mutex.
 189	 * Writers must also take dev mutex and flush under it.
 190	 */
 191	int inflight_idx;
 192};
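
/*
 * A toy userspace model of the two-generation scheme described above,
 * with invented names (the kernel uses kref + completion rather than
 * the spin-wait shown here): requests pin the current generation, and a
 * flush flips generations and drains the old one.
 */
#include <stdatomic.h>

struct toy_vq {
	atomic_int inflights[2];	/* each starts at 1: the flush bias */
	int idx;			/* current generation, under vq lock */
};

static int toy_req_start(struct toy_vq *vq)
{
	int gen = vq->idx;

	atomic_fetch_add(&vq->inflights[gen], 1);
	return gen;			/* remembered by the request */
}

static void toy_req_end(struct toy_vq *vq, int gen)
{
	atomic_fetch_sub(&vq->inflights[gen], 1);
}

static void toy_flush(struct toy_vq *vq)
{
	int old = vq->idx;

	vq->idx ^= 1;			/* new requests use the new generation */
	atomic_store(&vq->inflights[vq->idx], 1);
	atomic_fetch_sub(&vq->inflights[old], 1);	/* drop the bias */
	while (atomic_load(&vq->inflights[old]))
		;			/* wait for old requests to finish */
}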
 193
 194struct vhost_scsi {
 195	/* Protected by vhost_scsi->dev.mutex */
 196	struct vhost_scsi_tpg **vs_tpg;
 197	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 198
 199	struct vhost_dev dev;
 200	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 201
 202	struct vhost_work vs_completion_work; /* cmd completion work item */
 203	struct llist_head vs_completion_list; /* cmd completion queue */
 204
 205	struct vhost_work vs_event_work; /* evt injection work item */
 206	struct llist_head vs_event_list; /* evt injection queue */
 207
 208	bool vs_events_missed; /* any missed events, protected by vq->mutex */
 209	int vs_events_nr; /* num of pending events, protected by vq->mutex */
 210};
 211
 212/*
 213 * Context for processing request and control queue operations.
 214 */
 215struct vhost_scsi_ctx {
 216	int head;
 217	unsigned int out, in;
 218	size_t req_size, rsp_size;
 219	size_t out_size, in_size;
 220	u8 *target, *lunp;
 221	void *req;
 222	struct iov_iter out_iter;
 223};
 224
 225static struct workqueue_struct *vhost_scsi_workqueue;
 226
 227/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
 228static DEFINE_MUTEX(vhost_scsi_mutex);
 229static LIST_HEAD(vhost_scsi_list);
 230
 231static void vhost_scsi_done_inflight(struct kref *kref)
 232{
 233	struct vhost_scsi_inflight *inflight;
 234
 235	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
 236	complete(&inflight->comp);
 237}
 238
 239static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
 240				    struct vhost_scsi_inflight *old_inflight[])
 241{
 242	struct vhost_scsi_inflight *new_inflight;
 243	struct vhost_virtqueue *vq;
 244	int idx, i;
 245
 246	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 247		vq = &vs->vqs[i].vq;
 248
 249		mutex_lock(&vq->mutex);
 250
 251		/* store the old inflight */
 252		idx = vs->vqs[i].inflight_idx;
 253		if (old_inflight)
 254			old_inflight[i] = &vs->vqs[i].inflights[idx];
 255
 256		/* set up the new inflight */
 257		vs->vqs[i].inflight_idx = idx ^ 1;
 258		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
 259		kref_init(&new_inflight->kref);
 260		init_completion(&new_inflight->comp);
 261
 262		mutex_unlock(&vq->mutex);
 263	}
 264}
 265
 266static struct vhost_scsi_inflight *
 267vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
 268{
 269	struct vhost_scsi_inflight *inflight;
 270	struct vhost_scsi_virtqueue *svq;
 271
 272	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
 273	inflight = &svq->inflights[svq->inflight_idx];
 274	kref_get(&inflight->kref);
 275
 276	return inflight;
 277}
 278
 279static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
 280{
 281	kref_put(&inflight->kref, vhost_scsi_done_inflight);
 282}
 283
 284static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 285{
 286	return 1;
 287}
 288
 289static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
 290{
 291	return 0;
 292}
 293
 294static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 295{
 296	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 297				struct vhost_scsi_tpg, se_tpg);
 298	struct vhost_scsi_tport *tport = tpg->tport;
 299
 300	return &tport->tport_name[0];
 301}
 302
 303static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
 304{
 305	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 306				struct vhost_scsi_tpg, se_tpg);
 307	return tpg->tport_tpgt;
 308}
 309
 310static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
 311{
 312	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 313				struct vhost_scsi_tpg, se_tpg);
 314
 315	return tpg->tv_fabric_prot_type;
 316}
 317
 318static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 319{
 320	return 1;
 321}
 322
 323static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 324{
 325	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
 326				struct vhost_scsi_cmd, tvc_se_cmd);
 327	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
 328	int i;
 329
 330	if (tv_cmd->tvc_sgl_count) {
 331		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
 332			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
 333	}
 334	if (tv_cmd->tvc_prot_sgl_count) {
 335		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
 336			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
 337	}
 338
 339	vhost_scsi_put_inflight(tv_cmd->inflight);
 340	target_free_tag(se_sess, se_cmd);
 341}
 342
 343static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 344{
 345	return 0;
 346}
 347
 348static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 349{
 350	/* Go ahead and process the write immediately */
 351	target_execute_cmd(se_cmd);
 352	return 0;
 353}
 354
 355static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 356{
 357	return;
 358}
 359
 360static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 361{
 362	return 0;
 363}
 364
 365static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
 366{
 367	struct vhost_scsi *vs = cmd->tvc_vhost;
 368
 369	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
 370
 371	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 372}
 373
 374static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 375{
 376	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
 377				struct vhost_scsi_cmd, tvc_se_cmd);
 378	vhost_scsi_complete_cmd(cmd);
 379	return 0;
 380}
 381
 382static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 383{
 384	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
 385				struct vhost_scsi_cmd, tvc_se_cmd);
 386	vhost_scsi_complete_cmd(cmd);
 387	return 0;
 388}
 389
 390static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 391{
 392	return;
 393}
 394
 395static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
 396{
 397	return;
 398}
 399
 400static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 401{
 402	vs->vs_events_nr--;
 403	kfree(evt);
 404}
 405
 406static struct vhost_scsi_evt *
 407vhost_scsi_allocate_evt(struct vhost_scsi *vs,
 408		       u32 event, u32 reason)
 409{
 410	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 411	struct vhost_scsi_evt *evt;
 412
 413	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
 414		vs->vs_events_missed = true;
 415		return NULL;
 416	}
 417
 418	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
 419	if (!evt) {
 420		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
 421		vs->vs_events_missed = true;
 422		return NULL;
 423	}
 424
 425	evt->event.event = cpu_to_vhost32(vq, event);
 426	evt->event.reason = cpu_to_vhost32(vq, reason);
 427	vs->vs_events_nr++;
 428
 429	return evt;
 430}
 431
 432static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 433{
 434	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 435
 436	/* TODO locking against target/backend threads? */
 437	transport_generic_free_cmd(se_cmd, 0);
 438
 439}
 440
 441static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 442{
 443	return target_put_sess_cmd(se_cmd);
 444}
 445
 446static void
 447vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 448{
 449	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 450	struct virtio_scsi_event *event = &evt->event;
 451	struct virtio_scsi_event __user *eventp;
 452	unsigned out, in;
 453	int head, ret;
 454
 455	if (!vq->private_data) {
 456		vs->vs_events_missed = true;
 457		return;
 458	}
 459
 460again:
 461	vhost_disable_notify(&vs->dev, vq);
 462	head = vhost_get_vq_desc(vq, vq->iov,
 463			ARRAY_SIZE(vq->iov), &out, &in,
 464			NULL, NULL);
 465	if (head < 0) {
 466		vs->vs_events_missed = true;
 467		return;
 468	}
 469	if (head == vq->num) {
 470		if (vhost_enable_notify(&vs->dev, vq))
 471			goto again;
 472		vs->vs_events_missed = true;
 473		return;
 474	}
 475
 476	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
 477		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
 478				vq->iov[out].iov_len);
 479		vs->vs_events_missed = true;
 480		return;
 481	}
 482
 483	if (vs->vs_events_missed) {
 484		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
 485		vs->vs_events_missed = false;
 486	}
 487
 488	eventp = vq->iov[out].iov_base;
 489	ret = __copy_to_user(eventp, event, sizeof(*event));
 490	if (!ret)
 491		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
 492	else
 493		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 494}
 495
 496static void vhost_scsi_evt_work(struct vhost_work *work)
 497{
 498	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 499					vs_event_work);
 500	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 501	struct vhost_scsi_evt *evt, *t;
 502	struct llist_node *llnode;
 503
 504	mutex_lock(&vq->mutex);
 505	llnode = llist_del_all(&vs->vs_event_list);
 506	llist_for_each_entry_safe(evt, t, llnode, list) {
 507		vhost_scsi_do_evt_work(vs, evt);
 508		vhost_scsi_free_evt(vs, evt);
 509	}
 510	mutex_unlock(&vq->mutex);
 511}
 512
 513/* Fill in status and signal that we are done processing this command
 514 *
 515 * This is scheduled in the vhost work queue so we are called with the owner
 516 * process mm and can access the vring.
 517 */
 518static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 519{
 520	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 521					vs_completion_work);
 522	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
 523	struct virtio_scsi_cmd_resp v_rsp;
 524	struct vhost_scsi_cmd *cmd, *t;
 525	struct llist_node *llnode;
 526	struct se_cmd *se_cmd;
 527	struct iov_iter iov_iter;
 528	int ret, vq;
 529
 530	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
 531	llnode = llist_del_all(&vs->vs_completion_list);
 532	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
 533		se_cmd = &cmd->tvc_se_cmd;
 534
 535		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
 536			cmd, se_cmd->residual_count, se_cmd->scsi_status);
 537
 538		memset(&v_rsp, 0, sizeof(v_rsp));
 539		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
 540		/* TODO is status_qualifier field needed? */
 541		v_rsp.status = se_cmd->scsi_status;
 542		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
 543						 se_cmd->scsi_sense_length);
 544		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
 545		       se_cmd->scsi_sense_length);
 546
 547		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
 548			      cmd->tvc_in_iovs, sizeof(v_rsp));
 549		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
 550		if (likely(ret == sizeof(v_rsp))) {
 551			struct vhost_scsi_virtqueue *q;
 552			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
 553			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
 554			vq = q - vs->vqs;
 555			__set_bit(vq, signal);
 556		} else
 557			pr_err("Faulted on virtio_scsi_cmd_resp\n");
 558
 559		vhost_scsi_free_cmd(cmd);
 560	}
 561
 562	vq = -1;
 563	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
 564		< VHOST_SCSI_MAX_VQ)
 565		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 566}
 567
 568static struct vhost_scsi_cmd *
 569vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 570		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
 571		   u32 exp_data_len, int data_direction)
 572{
 573	struct vhost_scsi_cmd *cmd;
 574	struct vhost_scsi_nexus *tv_nexus;
 575	struct se_session *se_sess;
 576	struct scatterlist *sg, *prot_sg;
 577	struct page **pages;
 578	int tag, cpu;
 579
 580	tv_nexus = tpg->tpg_nexus;
 581	if (!tv_nexus) {
 582		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
 583		return ERR_PTR(-EIO);
 584	}
 585	se_sess = tv_nexus->tvn_se_sess;
 586
 587	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
 588	if (tag < 0) {
 589		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
 590		return ERR_PTR(-ENOMEM);
 591	}
 592
 593	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
 594	sg = cmd->tvc_sgl;
 595	prot_sg = cmd->tvc_prot_sgl;
 596	pages = cmd->tvc_upages;
 597	memset(cmd, 0, sizeof(*cmd));
 598	cmd->tvc_sgl = sg;
 599	cmd->tvc_prot_sgl = prot_sg;
 600	cmd->tvc_upages = pages;
 601	cmd->tvc_se_cmd.map_tag = tag;
 602	cmd->tvc_se_cmd.map_cpu = cpu;
 603	cmd->tvc_tag = scsi_tag;
 604	cmd->tvc_lun = lun;
 605	cmd->tvc_task_attr = task_attr;
 606	cmd->tvc_exp_data_len = exp_data_len;
 607	cmd->tvc_data_direction = data_direction;
 608	cmd->tvc_nexus = tv_nexus;
 609	cmd->inflight = vhost_scsi_get_inflight(vq);
 610
 611	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
 612
 613	return cmd;
 614}
 615
 616/*
 617 * Map a user memory range into a scatterlist
 618 *
 619 * Returns the number of scatterlist entries used or -errno on error.
 620 */
 621static int
 622vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 623		      struct iov_iter *iter,
 624		      struct scatterlist *sgl,
 625		      bool write)
 626{
 627	struct page **pages = cmd->tvc_upages;
 628	struct scatterlist *sg = sgl;
 629	ssize_t bytes;
 630	size_t offset;
 631	unsigned int npages = 0;
 632
 633	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
 634				VHOST_SCSI_PREALLOC_UPAGES, &offset);
 635	/* No pages were pinned */
 636	if (bytes <= 0)
 637		return bytes < 0 ? bytes : -EFAULT;
 638
 639	iov_iter_advance(iter, bytes);
 640
 641	while (bytes) {
 642		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
 643		sg_set_page(sg++, pages[npages++], n, offset);
 644		bytes -= n;
 645		offset = 0;
 646	}
 647	return npages;
 648}
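
/*
 * Worked example for the loop above, assuming 4 KiB pages: pinning
 * 5376 bytes (0x1500) at page offset 768 (0x300) produces two entries,
 *
 *	sg[0]: pages[0], length 0xd00 (4096 - 768), offset 0x300
 *	sg[1]: pages[1], length 0x800, offset 0
 *
 * and the function returns npages == 2.
 */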
 649
 650static int
 651vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 652{
 653	int sgl_count = 0;
 654
 655	if (!iter || !iter->iov) {
 656		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
 657		       " present\n", __func__, bytes);
 658		return -EINVAL;
 659	}
 660
 661	sgl_count = iov_iter_npages(iter, 0xffff);
 662	if (sgl_count > max_sgls) {
 663		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
 664		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
 665		return -EINVAL;
 666	}
 667	return sgl_count;
 668}
 669
 670static int
 671vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
 672		      struct iov_iter *iter,
 673		      struct scatterlist *sg, int sg_count)
 674{
 675	struct scatterlist *p = sg;
 676	int ret;
 677
 678	while (iov_iter_count(iter)) {
 679		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
 680		if (ret < 0) {
 681			while (p < sg) {
 682				struct page *page = sg_page(p++);
 683				if (page)
 684					put_page(page);
 685			}
 686			return ret;
 687		}
 688		sg += ret;
 689	}
 690	return 0;
 691}
 692
 693static int
 694vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
 695		 size_t prot_bytes, struct iov_iter *prot_iter,
 696		 size_t data_bytes, struct iov_iter *data_iter)
 697{
 698	int sgl_count, ret;
 699	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
 700
 701	if (prot_bytes) {
 702		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
 703						 VHOST_SCSI_PREALLOC_PROT_SGLS);
 704		if (sgl_count < 0)
 705			return sgl_count;
 706
 707		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
 708		cmd->tvc_prot_sgl_count = sgl_count;
 709		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
 710			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
 711
 712		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
 713					    cmd->tvc_prot_sgl,
 714					    cmd->tvc_prot_sgl_count);
 715		if (ret < 0) {
 716			cmd->tvc_prot_sgl_count = 0;
 717			return ret;
 718		}
 719	}
 720	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
 721					 VHOST_SCSI_PREALLOC_SGLS);
 722	if (sgl_count < 0)
 723		return sgl_count;
 724
 725	sg_init_table(cmd->tvc_sgl, sgl_count);
 726	cmd->tvc_sgl_count = sgl_count;
 727	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
 728		  cmd->tvc_sgl, cmd->tvc_sgl_count);
 729
 730	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
 731				    cmd->tvc_sgl, cmd->tvc_sgl_count);
 732	if (ret < 0) {
 733		cmd->tvc_sgl_count = 0;
 734		return ret;
 735	}
 736	return 0;
 737}
 738
 739static int vhost_scsi_to_tcm_attr(int attr)
 740{
 741	switch (attr) {
 742	case VIRTIO_SCSI_S_SIMPLE:
 743		return TCM_SIMPLE_TAG;
 744	case VIRTIO_SCSI_S_ORDERED:
 745		return TCM_ORDERED_TAG;
 746	case VIRTIO_SCSI_S_HEAD:
 747		return TCM_HEAD_TAG;
 748	case VIRTIO_SCSI_S_ACA:
 749		return TCM_ACA_TAG;
 750	default:
 751		break;
 752	}
 753	return TCM_SIMPLE_TAG;
 754}
 755
 756static void vhost_scsi_submission_work(struct work_struct *work)
 757{
 758	struct vhost_scsi_cmd *cmd =
 759		container_of(work, struct vhost_scsi_cmd, work);
 760	struct vhost_scsi_nexus *tv_nexus;
 761	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 762	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
 763	int rc;
 764
 765	/* FIXME: BIDI operation */
 766	if (cmd->tvc_sgl_count) {
 767		sg_ptr = cmd->tvc_sgl;
 768
 769		if (cmd->tvc_prot_sgl_count)
 770			sg_prot_ptr = cmd->tvc_prot_sgl;
 771		else
 772			se_cmd->prot_pto = true;
 773	} else {
 774		sg_ptr = NULL;
 775	}
 776	tv_nexus = cmd->tvc_nexus;
 777
 778	se_cmd->tag = 0;
 779	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
 780			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
 781			cmd->tvc_lun, cmd->tvc_exp_data_len,
 782			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
 783			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
 784			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
 785			cmd->tvc_prot_sgl_count);
 786	if (rc < 0) {
 787		transport_send_check_condition_and_sense(se_cmd,
 788				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 789		transport_generic_free_cmd(se_cmd, 0);
 790	}
 791}
 792
 793static void
 794vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 795			   struct vhost_virtqueue *vq,
 796			   int head, unsigned out)
 797{
 798	struct virtio_scsi_cmd_resp __user *resp;
 799	struct virtio_scsi_cmd_resp rsp;
 800	int ret;
 801
 802	memset(&rsp, 0, sizeof(rsp));
 803	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
 804	resp = vq->iov[out].iov_base;
 805	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
 806	if (!ret)
 807		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
 808	else
 809		pr_err("Faulted on virtio_scsi_cmd_resp\n");
 810}
 811
 812static int
 813vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
 814		    struct vhost_scsi_ctx *vc)
 815{
 816	int ret = -ENXIO;
 817
 818	vc->head = vhost_get_vq_desc(vq, vq->iov,
 819				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
 820				     NULL, NULL);
 821
 822	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
 823		 vc->head, vc->out, vc->in);
 824
 825	/* On error, stop handling until the next kick. */
 826	if (unlikely(vc->head < 0))
 827		goto done;
 828
 829	/* Nothing new?  Wait for eventfd to tell us they refilled. */
 830	if (vc->head == vq->num) {
 831		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
 832			vhost_disable_notify(&vs->dev, vq);
 833			ret = -EAGAIN;
 834		}
 835		goto done;
 836	}
 837
 838	/*
 839	 * Get the size of request and response buffers.
 840	 * FIXME: Not correct for BIDI operation
 841	 */
 842	vc->out_size = iov_length(vq->iov, vc->out);
 843	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
 844
 845	/*
 846	 * Copy over the virtio-scsi request header, which for an
 847	 * ANY_LAYOUT-enabled guest may span multiple iovecs, or a
 848	 * single iovec may contain both the header + outgoing
 849	 * WRITE payloads.
 850	 *
 851	 * copy_from_iter() will advance out_iter, so that it will
 852	 * point at the start of the outgoing WRITE payload, if
 853	 * DMA_TO_DEVICE is set.
 854	 */
 855	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
 856	ret = 0;
 857
 858done:
 859	return ret;
 860}
 861
 862static int
 863vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
 864{
 865	if (unlikely(vc->in_size < vc->rsp_size)) {
 866		vq_err(vq,
 867		       "Response buf too small, need min %zu bytes got %zu",
 868		       vc->rsp_size, vc->in_size);
 869		return -EINVAL;
 870	} else if (unlikely(vc->out_size < vc->req_size)) {
 871		vq_err(vq,
 872		       "Request buf too small, need min %zu bytes got %zu",
 873		       vc->req_size, vc->out_size);
 874		return -EIO;
 875	}
 876
 877	return 0;
 878}
 879
 880static int
 881vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
 882		   struct vhost_scsi_tpg **tpgp)
 883{
 884	int ret = -EIO;
 885
 886	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
 887					  &vc->out_iter))) {
 888		vq_err(vq, "Faulted on copy_from_iter_full\n");
 889	} else if (unlikely(*vc->lunp != 1)) {
 890		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
 891		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
 892	} else {
 893		struct vhost_scsi_tpg **vs_tpg, *tpg;
 894
 895		vs_tpg = vq->private_data;	/* validated at handler entry */
 896
 897		tpg = READ_ONCE(vs_tpg[*vc->target]);
 898		if (unlikely(!tpg)) {
 899			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
 900		} else {
 901			if (tpgp)
 902				*tpgp = tpg;
 903			ret = 0;
 904		}
 905	}
 906
 907	return ret;
 908}
 909
 910static void
 911vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 912{
 913	struct vhost_scsi_tpg **vs_tpg, *tpg;
 914	struct virtio_scsi_cmd_req v_req;
 915	struct virtio_scsi_cmd_req_pi v_req_pi;
 916	struct vhost_scsi_ctx vc;
 917	struct vhost_scsi_cmd *cmd;
 918	struct iov_iter in_iter, prot_iter, data_iter;
 919	u64 tag;
 920	u32 exp_data_len, data_direction;
 921	int ret, prot_bytes, c = 0;
 922	u16 lun;
 923	u8 task_attr;
 924	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
 925	void *cdb;
 926
 927	mutex_lock(&vq->mutex);
 928	/*
 929	 * We can handle the vq only after the endpoint is set up by calling the
 930	 * VHOST_SCSI_SET_ENDPOINT ioctl.
 931	 */
 932	vs_tpg = vq->private_data;
 933	if (!vs_tpg)
 934		goto out;
 935
 936	memset(&vc, 0, sizeof(vc));
 937	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
 938
 939	vhost_disable_notify(&vs->dev, vq);
 940
 941	do {
 942		ret = vhost_scsi_get_desc(vs, vq, &vc);
 943		if (ret)
 944			goto err;
 945
 946		/*
 947		 * Set up pointers and values based upon the different virtio-scsi
 948		 * request header used when T10_PI is enabled in the KVM guest.
 949		 */
 950		if (t10_pi) {
 951			vc.req = &v_req_pi;
 952			vc.req_size = sizeof(v_req_pi);
 953			vc.lunp = &v_req_pi.lun[0];
 954			vc.target = &v_req_pi.lun[1];
 955		} else {
 956			vc.req = &v_req;
 957			vc.req_size = sizeof(v_req);
 958			vc.lunp = &v_req.lun[0];
 959			vc.target = &v_req.lun[1];
 960		}
 961
 962		/*
 963		 * Validate the size of request and response buffers.
 964		 * Check for a sane response buffer so we can report
 965		 * early errors back to the guest.
 966		 */
 967		ret = vhost_scsi_chk_size(vq, &vc);
 968		if (ret)
 969			goto err;
 970
 971		ret = vhost_scsi_get_req(vq, &vc, &tpg);
 972		if (ret)
 973			goto err;
 974
 975		ret = -EIO;	/* bad target on any error from here on */
 976
 977		/*
 978		 * Determine data_direction by calculating the total outgoing
 979		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
 980		 * response headers respectively.
 981		 *
 982		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
 983		 * to the right place.
 984		 *
 985		 * For DMA_FROM_DEVICE, the iovec will be just past the end
 986		 * of the virtio-scsi response header in either the same
 987		 * or immediately following iovec.
 988		 *
 989		 * Any associated T10_PI bytes for the outgoing / incoming
 990		 * payloads are included in calculation of exp_data_len here.
 991		 */
 992		prot_bytes = 0;
 993
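		/*
		 * Worked example, assuming no T10_PI and the default CDB and
		 * sense sizes: sizeof(struct virtio_scsi_cmd_req) is 51 and
		 * sizeof(struct virtio_scsi_cmd_resp) is 108 bytes, so a
		 * 4 KiB guest write arrives with out_size == 51 + 4096
		 * (DMA_TO_DEVICE, exp_data_len 4096), while a 4 KiB read has
		 * in_size == 108 + 4096 (DMA_FROM_DEVICE).
		 */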
 994		if (vc.out_size > vc.req_size) {
 995			data_direction = DMA_TO_DEVICE;
 996			exp_data_len = vc.out_size - vc.req_size;
 997			data_iter = vc.out_iter;
 998		} else if (vc.in_size > vc.rsp_size) {
 999			data_direction = DMA_FROM_DEVICE;
1000			exp_data_len = vc.in_size - vc.rsp_size;
1001
1002			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
1003				      vc.rsp_size + exp_data_len);
1004			iov_iter_advance(&in_iter, vc.rsp_size);
1005			data_iter = in_iter;
1006		} else {
1007			data_direction = DMA_NONE;
1008			exp_data_len = 0;
1009		}
1010		/*
1011		 * If T10_PI header + payload is present, setup prot_iter values
1012		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1013		 * host scatterlists via get_user_pages_fast().
1014		 */
1015		if (t10_pi) {
1016			if (v_req_pi.pi_bytesout) {
1017				if (data_direction != DMA_TO_DEVICE) {
1018					vq_err(vq, "Received non zero pi_bytesout,"
1019						" but wrong data_direction\n");
1020					goto err;
1021				}
1022				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1023			} else if (v_req_pi.pi_bytesin) {
1024				if (data_direction != DMA_FROM_DEVICE) {
1025					vq_err(vq, "Received non zero pi_bytesin,"
1026						" but wrong data_direction\n");
1027					goto err;
1028				}
1029				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1030			}
1031			/*
1032			 * Set prot_iter to data_iter and truncate it to
1033			 * prot_bytes, and advance data_iter past any
1034			 * preceding prot_bytes that may be present.
1035			 *
1036			 * Also fix up the exp_data_len to reflect only the
1037			 * actual data payload length.
1038			 */
1039			if (prot_bytes) {
1040				exp_data_len -= prot_bytes;
1041				prot_iter = data_iter;
1042				iov_iter_truncate(&prot_iter, prot_bytes);
1043				iov_iter_advance(&data_iter, prot_bytes);
1044			}
1045			tag = vhost64_to_cpu(vq, v_req_pi.tag);
1046			task_attr = v_req_pi.task_attr;
1047			cdb = &v_req_pi.cdb[0];
1048			lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1049		} else {
1050			tag = vhost64_to_cpu(vq, v_req.tag);
1051			task_attr = v_req.task_attr;
1052			cdb = &v_req.cdb[0];
1053			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1054		}
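		/*
		 * Worked example of the LUN decode above: for LUN 7 on
		 * target 5 the guest sends lun[] = { 1, 5, 0x40, 0x07, ... },
		 * so ((0x40 << 8) | 0x07) & 0x3FFF == 7, while lun[1]
		 * selects vs_tpg[5].
		 */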
1055		/*
1056		 * Check that the received CDB size does not exceed our
1057		 * hardcoded max for vhost-scsi, then get a pre-allocated
1058		 * cmd descriptor for the new virtio-scsi tag.
1059		 *
1060		 * TODO what if cdb was too small for varlen cdb header?
1061		 */
1062		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1063			vq_err(vq, "Received SCSI CDB with command_size: %d that"
1064				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1065				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1066			goto err;
1067		}
1068		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1069					 exp_data_len + prot_bytes,
1070					 data_direction);
1071		if (IS_ERR(cmd)) {
1072			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1073			       PTR_ERR(cmd));
1074			goto err;
1075		}
1076		cmd->tvc_vhost = vs;
1077		cmd->tvc_vq = vq;
1078		cmd->tvc_resp_iov = vq->iov[vc.out];
1079		cmd->tvc_in_iovs = vc.in;
1080
1081		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1082			 cmd->tvc_cdb[0], cmd->tvc_lun);
1083		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1084			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1085
1086		if (data_direction != DMA_NONE) {
1087			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1088						      &prot_iter, exp_data_len,
1089						      &data_iter))) {
1090				vq_err(vq, "Failed to map iov to sgl\n");
1091				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1092				goto err;
1093			}
1094		}
1095		/*
1096		 * Save the descriptor from vhost_get_vq_desc() to be used to
1097		 * complete the virtio-scsi request in TCM callback context via
1098		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1099		 */
1100		cmd->tvc_vq_desc = vc.head;
1101		/*
1102		 * Dispatch cmd descriptor for cmwq execution in process
1103		 * context provided by vhost_scsi_workqueue.  This also ensures
1104		 * cmd is executed on the same kworker CPU as this vhost
1105		 * thread to gain positive L2 cache locality effects.
1106		 */
1107		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1108		queue_work(vhost_scsi_workqueue, &cmd->work);
1109		ret = 0;
1110err:
1111		/*
1112		 * ENXIO:  No more requests, or read error, wait for next kick
1113		 * EINVAL: Invalid response buffer, drop the request
1114		 * EIO:    Respond with bad target
1115		 * EAGAIN: Pending request
1116		 */
1117		if (ret == -ENXIO)
1118			break;
1119		else if (ret == -EIO)
1120			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1121	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1122out:
1123	mutex_unlock(&vq->mutex);
1124}
1125
1126static void
1127vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
1128			   struct vhost_virtqueue *vq,
1129			   struct vhost_scsi_ctx *vc)
1130{
1131	struct virtio_scsi_ctrl_tmf_resp rsp;
1132	struct iov_iter iov_iter;
1133	int ret;
1134
1135	pr_debug("%s\n", __func__);
1136	memset(&rsp, 0, sizeof(rsp));
1137	rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1138
1139	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1140
1141	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1142	if (likely(ret == sizeof(rsp)))
1143		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1144	else
1145		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1146}
1147
1148static void
1149vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1150			struct vhost_virtqueue *vq,
1151			struct vhost_scsi_ctx *vc)
1152{
1153	struct virtio_scsi_ctrl_an_resp rsp;
1154	struct iov_iter iov_iter;
1155	int ret;
1156
1157	pr_debug("%s\n", __func__);
1158	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
1159	rsp.response = VIRTIO_SCSI_S_OK;
1160
1161	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1162
1163	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1164	if (likely(ret == sizeof(rsp)))
1165		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1166	else
1167		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1168}
1169
1170static void
1171vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1172{
1173	union {
1174		__virtio32 type;
1175		struct virtio_scsi_ctrl_an_req an;
1176		struct virtio_scsi_ctrl_tmf_req tmf;
1177	} v_req;
1178	struct vhost_scsi_ctx vc;
1179	size_t typ_size;
1180	int ret, c = 0;
1181
1182	mutex_lock(&vq->mutex);
1183	/*
1184	 * We can handle the vq only after the endpoint is set up by calling the
1185	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1186	 */
1187	if (!vq->private_data)
1188		goto out;
1189
1190	memset(&vc, 0, sizeof(vc));
1191
1192	vhost_disable_notify(&vs->dev, vq);
1193
1194	do {
1195		ret = vhost_scsi_get_desc(vs, vq, &vc);
1196		if (ret)
1197			goto err;
1198
1199		/*
1200		 * Get the request type first in order to set up
1201		 * other parameters dependent on the type.
1202		 */
1203		vc.req = &v_req.type;
1204		typ_size = sizeof(v_req.type);
1205
1206		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1207						  &vc.out_iter))) {
1208			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1209			/*
1210			 * The size of the response buffer depends on the
1211			 * request type and must be validated against it.
1212			 * Since the request type is not known, don't send
1213			 * a response.
1214			 */
1215			continue;
1216		}
1217
1218		switch (v_req.type) {
1219		case VIRTIO_SCSI_T_TMF:
1220			vc.req = &v_req.tmf;
1221			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1222			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1223			vc.lunp = &v_req.tmf.lun[0];
1224			vc.target = &v_req.tmf.lun[1];
1225			break;
1226		case VIRTIO_SCSI_T_AN_QUERY:
1227		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1228			vc.req = &v_req.an;
1229			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1230			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1231			vc.lunp = &v_req.an.lun[0];
1232			vc.target = NULL;
1233			break;
1234		default:
1235			vq_err(vq, "Unknown control request %d", v_req.type);
1236			continue;
1237		}
1238
1239		/*
1240		 * Validate the size of request and response buffers.
1241		 * Check for a sane response buffer so we can report
1242		 * early errors back to the guest.
1243		 */
1244		ret = vhost_scsi_chk_size(vq, &vc);
1245		if (ret)
1246			goto err;
1247
1248		/*
1249		 * Get the rest of the request now that its size is known.
1250		 */
1251		vc.req += typ_size;
1252		vc.req_size -= typ_size;
1253
1254		ret = vhost_scsi_get_req(vq, &vc, NULL);
1255		if (ret)
1256			goto err;
1257
1258		if (v_req.type == VIRTIO_SCSI_T_TMF)
1259			vhost_scsi_send_tmf_reject(vs, vq, &vc);
1260		else
1261			vhost_scsi_send_an_resp(vs, vq, &vc);
1262err:
1263		/*
1264		 * ENXIO:  No more requests, or read error, wait for next kick
1265		 * EINVAL: Invalid response buffer, drop the request
1266		 * EIO:    Respond with bad target
1267		 * EAGAIN: Pending request
1268		 */
1269		if (ret == -ENXIO)
1270			break;
1271		else if (ret == -EIO)
1272			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1273	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1274out:
1275	mutex_unlock(&vq->mutex);
1276}
1277
1278static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1279{
1280	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1281						poll.work);
1282	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1283
1284	pr_debug("%s: The handling function for the control queue.\n", __func__);
1285	vhost_scsi_ctl_handle_vq(vs, vq);
1286}
1287
1288static void
1289vhost_scsi_send_evt(struct vhost_scsi *vs,
1290		   struct vhost_scsi_tpg *tpg,
1291		   struct se_lun *lun,
1292		   u32 event,
1293		   u32 reason)
1294{
1295	struct vhost_scsi_evt *evt;
1296
1297	evt = vhost_scsi_allocate_evt(vs, event, reason);
1298	if (!evt)
1299		return;
1300
1301	if (tpg && lun) {
1302		/* TODO: share lun setup code with virtio-scsi.ko */
1303		/*
1304		 * Note: evt->event is zeroed when we allocate it and
1305		 * lun[4-7] need to be zero according to virtio-scsi spec.
1306		 */
1307		evt->event.lun[0] = 0x01;
1308		evt->event.lun[1] = tpg->tport_tpgt;
1309		if (lun->unpacked_lun >= 256)
1310			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1311		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1312	}
1313
1314	llist_add(&evt->list, &vs->vs_event_list);
1315	vhost_work_queue(&vs->dev, &vs->vs_event_work);
1316}
1317
1318static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1319{
1320	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1321						poll.work);
1322	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1323
1324	mutex_lock(&vq->mutex);
1325	if (!vq->private_data)
1326		goto out;
1327
1328	if (vs->vs_events_missed)
1329		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1330out:
1331	mutex_unlock(&vq->mutex);
1332}
1333
1334static void vhost_scsi_handle_kick(struct vhost_work *work)
1335{
1336	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1337						poll.work);
1338	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1339
1340	vhost_scsi_handle_vq(vs, vq);
1341}
1342
1343static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1344{
1345	vhost_poll_flush(&vs->vqs[index].vq.poll);
1346}
1347
1348/* Callers must hold dev mutex */
1349static void vhost_scsi_flush(struct vhost_scsi *vs)
1350{
1351	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1352	int i;
1353
1354	/* Init new inflight and remember the old inflight */
1355	vhost_scsi_init_inflight(vs, old_inflight);
1356
1357	/*
1358	 * The inflight->kref was initialized to 1. We decrement it here to
1359	 * indicate the start of the flush operation so that it will reach 0
1360	 * when all the reqs are finished.
1361	 */
1362	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1363		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1364
1365	/* Flush both the vhost poll and vhost work */
1366	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1367		vhost_scsi_flush_vq(vs, i);
1368	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1369	vhost_work_flush(&vs->dev, &vs->vs_event_work);
1370
1371	/* Wait for all reqs issued before the flush to be finished */
1372	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1373		wait_for_completion(&old_inflight[i]->comp);
1374}
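/*
 * The inflight scheme above, in miniature (illustrative sketch only;
 * single queue, helper names invented):
 *
 *	issue:    kref_get(&cur->kref);
 *	complete: kref_put(&cur->kref, done);    // done() calls complete()
 *	flush:    old = cur; cur = fresh();      // new reqs pin 'cur'
 *	          kref_put(&old->kref, done);    // drop the initial ref
 *	          wait_for_completion(&old->comp);
 */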
1375
1376/*
1377 * Called from vhost_scsi_ioctl() context to walk the list of available
1378 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1379 *
1380 *  The lock nesting rule is:
1381 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1382 */
1383static int
1384vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1385			struct vhost_scsi_target *t)
1386{
1387	struct se_portal_group *se_tpg;
1388	struct vhost_scsi_tport *tv_tport;
1389	struct vhost_scsi_tpg *tpg;
1390	struct vhost_scsi_tpg **vs_tpg;
1391	struct vhost_virtqueue *vq;
1392	int index, ret, i, len;
1393	bool match = false;
1394
1395	mutex_lock(&vhost_scsi_mutex);
1396	mutex_lock(&vs->dev.mutex);
1397
1398	/* Verify that ring has been setup correctly. */
1399	for (index = 0; index < vs->dev.nvqs; ++index) {
1401		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1402			ret = -EFAULT;
1403			goto out;
1404		}
1405	}
1406
1407	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1408	vs_tpg = kzalloc(len, GFP_KERNEL);
1409	if (!vs_tpg) {
1410		ret = -ENOMEM;
1411		goto out;
1412	}
1413	if (vs->vs_tpg)
1414		memcpy(vs_tpg, vs->vs_tpg, len);
1415
1416	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1417		mutex_lock(&tpg->tv_tpg_mutex);
1418		if (!tpg->tpg_nexus) {
1419			mutex_unlock(&tpg->tv_tpg_mutex);
1420			continue;
1421		}
1422		if (tpg->tv_tpg_vhost_count != 0) {
1423			mutex_unlock(&tpg->tv_tpg_mutex);
1424			continue;
1425		}
1426		tv_tport = tpg->tport;
1427
1428		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1429			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1430				kfree(vs_tpg);
1431				mutex_unlock(&tpg->tv_tpg_mutex);
1432				ret = -EEXIST;
1433				goto out;
1434			}
1435			/*
1436			 * In order to ensure individual vhost-scsi configfs
1437			 * groups cannot be removed while in use by vhost ioctl,
1438			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1439			 * dependency now.
1440			 */
1441			se_tpg = &tpg->se_tpg;
1442			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1443			if (ret) {
1444				pr_warn("target_depend_item() failed: %d\n", ret);
1445				kfree(vs_tpg);
1446				mutex_unlock(&tpg->tv_tpg_mutex);
1447				goto out;
1448			}
1449			tpg->tv_tpg_vhost_count++;
1450			tpg->vhost_scsi = vs;
1451			vs_tpg[tpg->tport_tpgt] = tpg;
1452			match = true;
1453		}
1454		mutex_unlock(&tpg->tv_tpg_mutex);
1455	}
1456
1457	if (match) {
1458		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1459		       sizeof(vs->vs_vhost_wwpn));
1460		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1461			vq = &vs->vqs[i].vq;
1462			mutex_lock(&vq->mutex);
1463			vq->private_data = vs_tpg;
1464			vhost_vq_init_access(vq);
1465			mutex_unlock(&vq->mutex);
1466		}
1467		ret = 0;
1468	} else {
1469		ret = -EEXIST;
1470	}
1471
1472	/*
1473	 * Act as synchronize_rcu to make sure access to
1474	 * old vs->vs_tpg is finished.
1475	 */
1476	vhost_scsi_flush(vs);
1477	kfree(vs->vs_tpg);
1478	vs->vs_tpg = vs_tpg;
1479
1480out:
1481	mutex_unlock(&vs->dev.mutex);
1482	mutex_unlock(&vhost_scsi_mutex);
1483	return ret;
1484}
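/*
 * Illustrative userspace sequence for the ioctl above (a sketch, not
 * part of this driver; the WWPN is made up and the usual
 * VHOST_SET_MEM_TABLE/VHOST_SET_VRING_* setup is elided):
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target t = { 0 };
 *
 *	snprintf(t.vhost_wwpn, sizeof(t.vhost_wwpn), "naa.600140512345678a");
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */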
1485
1486static int
1487vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1488			  struct vhost_scsi_target *t)
1489{
1490	struct se_portal_group *se_tpg;
1491	struct vhost_scsi_tport *tv_tport;
1492	struct vhost_scsi_tpg *tpg;
1493	struct vhost_virtqueue *vq;
1494	bool match = false;
1495	int index, ret, i;
1496	u8 target;
1497
1498	mutex_lock(&vhost_scsi_mutex);
1499	mutex_lock(&vs->dev.mutex);
1500	/* Verify that ring has been setup correctly. */
1501	for (index = 0; index < vs->dev.nvqs; ++index) {
1502		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1503			ret = -EFAULT;
1504			goto err_dev;
1505		}
1506	}
1507
1508	if (!vs->vs_tpg) {
1509		ret = 0;
1510		goto err_dev;
1511	}
1512
1513	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1514		target = i;
1515		tpg = vs->vs_tpg[target];
1516		if (!tpg)
1517			continue;
1518
1519		mutex_lock(&tpg->tv_tpg_mutex);
1520		tv_tport = tpg->tport;
1521		if (!tv_tport) {
1522			ret = -ENODEV;
1523			goto err_tpg;
1524		}
1525
1526		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1527			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1528				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1529				tv_tport->tport_name, tpg->tport_tpgt,
1530				t->vhost_wwpn, t->vhost_tpgt);
1531			ret = -EINVAL;
1532			goto err_tpg;
1533		}
1534		tpg->tv_tpg_vhost_count--;
1535		tpg->vhost_scsi = NULL;
1536		vs->vs_tpg[target] = NULL;
1537		match = true;
1538		mutex_unlock(&tpg->tv_tpg_mutex);
1539		/*
1540		 * Release se_tpg->tpg_group.cg_item configfs dependency now
1541		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1542		 */
1543		se_tpg = &tpg->se_tpg;
1544		target_undepend_item(&se_tpg->tpg_group.cg_item);
1545	}
1546	if (match) {
1547		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1548			vq = &vs->vqs[i].vq;
1549			mutex_lock(&vq->mutex);
1550			vq->private_data = NULL;
1551			mutex_unlock(&vq->mutex);
1552		}
1553	}
1554	/*
1555	 * Act as synchronize_rcu to make sure access to
1556	 * old vs->vs_tpg is finished.
1557	 */
1558	vhost_scsi_flush(vs);
1559	kfree(vs->vs_tpg);
1560	vs->vs_tpg = NULL;
1561	WARN_ON(vs->vs_events_nr);
1562	mutex_unlock(&vs->dev.mutex);
1563	mutex_unlock(&vhost_scsi_mutex);
1564	return 0;
1565
1566err_tpg:
1567	mutex_unlock(&tpg->tv_tpg_mutex);
1568err_dev:
1569	mutex_unlock(&vs->dev.mutex);
1570	mutex_unlock(&vhost_scsi_mutex);
1571	return ret;
1572}
1573
1574static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1575{
1576	struct vhost_virtqueue *vq;
1577	int i;
1578
1579	if (features & ~VHOST_SCSI_FEATURES)
1580		return -EOPNOTSUPP;
1581
1582	mutex_lock(&vs->dev.mutex);
1583	if ((features & (1 << VHOST_F_LOG_ALL)) &&
1584	    !vhost_log_access_ok(&vs->dev)) {
1585		mutex_unlock(&vs->dev.mutex);
1586		return -EFAULT;
1587	}
1588
1589	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1590		vq = &vs->vqs[i].vq;
1591		mutex_lock(&vq->mutex);
1592		vq->acked_features = features;
1593		mutex_unlock(&vq->mutex);
1594	}
1595	mutex_unlock(&vs->dev.mutex);
1596	return 0;
1597}
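/*
 * The matching userspace handshake is the standard vhost one (sketch;
 * error handling elided, guest_acked is whatever the VMM negotiated):
 *
 *	__u64 features;
 *
 *	ioctl(fd, VHOST_GET_FEATURES, &features); // host's VHOST_SCSI_FEATURES
 *	features &= guest_acked;
 *	ioctl(fd, VHOST_SET_FEATURES, &features); // lands in vq->acked_features
 */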
1598
1599static int vhost_scsi_open(struct inode *inode, struct file *f)
1600{
1601	struct vhost_scsi *vs;
1602	struct vhost_virtqueue **vqs;
1603	int r = -ENOMEM, i;
1604
1605	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
1606	if (!vs) {
1607		vs = vzalloc(sizeof(*vs));
1608		if (!vs)
1609			goto err_vs;
1610	}
1611
1612	vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1613	if (!vqs)
1614		goto err_vqs;
1615
1616	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1617	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1618
1619	vs->vs_events_nr = 0;
1620	vs->vs_events_missed = false;
1621
1622	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1623	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1624	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1625	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1626	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1627		vqs[i] = &vs->vqs[i].vq;
1628		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1629	}
1630	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
1631		       VHOST_SCSI_WEIGHT, 0);
1632
1633	vhost_scsi_init_inflight(vs, NULL);
1634
1635	f->private_data = vs;
1636	return 0;
1637
1638err_vqs:
1639	kvfree(vs);
1640err_vs:
1641	return r;
1642}
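/*
 * The two-step allocation above is an open-coded cousin of kvzalloc():
 * try kmalloc quietly, fall back to vmalloc, free either with kvfree().
 * A rough equivalent (modulo the exact reclaim flags) would be:
 *
 *	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
 */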
1643
1644static int vhost_scsi_release(struct inode *inode, struct file *f)
1645{
1646	struct vhost_scsi *vs = f->private_data;
1647	struct vhost_scsi_target t;
1648
1649	mutex_lock(&vs->dev.mutex);
1650	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1651	mutex_unlock(&vs->dev.mutex);
1652	vhost_scsi_clear_endpoint(vs, &t);
1653	vhost_dev_stop(&vs->dev);
1654	vhost_dev_cleanup(&vs->dev);
1655	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1656	vhost_scsi_flush(vs);
1657	kfree(vs->dev.vqs);
1658	kvfree(vs);
1659	return 0;
1660}
1661
1662static long
1663vhost_scsi_ioctl(struct file *f,
1664		 unsigned int ioctl,
1665		 unsigned long arg)
1666{
1667	struct vhost_scsi *vs = f->private_data;
1668	struct vhost_scsi_target backend;
1669	void __user *argp = (void __user *)arg;
1670	u64 __user *featurep = argp;
1671	u32 __user *eventsp = argp;
1672	u32 events_missed;
1673	u64 features;
1674	int r, abi_version = VHOST_SCSI_ABI_VERSION;
1675	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1676
1677	switch (ioctl) {
1678	case VHOST_SCSI_SET_ENDPOINT:
1679		if (copy_from_user(&backend, argp, sizeof backend))
1680			return -EFAULT;
1681		if (backend.reserved != 0)
1682			return -EOPNOTSUPP;
1683
1684		return vhost_scsi_set_endpoint(vs, &backend);
1685	case VHOST_SCSI_CLEAR_ENDPOINT:
1686		if (copy_from_user(&backend, argp, sizeof backend))
1687			return -EFAULT;
1688		if (backend.reserved != 0)
1689			return -EOPNOTSUPP;
1690
1691		return vhost_scsi_clear_endpoint(vs, &backend);
1692	case VHOST_SCSI_GET_ABI_VERSION:
1693		if (copy_to_user(argp, &abi_version, sizeof abi_version))
1694			return -EFAULT;
1695		return 0;
1696	case VHOST_SCSI_SET_EVENTS_MISSED:
1697		if (get_user(events_missed, eventsp))
1698			return -EFAULT;
1699		mutex_lock(&vq->mutex);
1700		vs->vs_events_missed = events_missed;
1701		mutex_unlock(&vq->mutex);
1702		return 0;
1703	case VHOST_SCSI_GET_EVENTS_MISSED:
1704		mutex_lock(&vq->mutex);
1705		events_missed = vs->vs_events_missed;
1706		mutex_unlock(&vq->mutex);
1707		if (put_user(events_missed, eventsp))
1708			return -EFAULT;
1709		return 0;
1710	case VHOST_GET_FEATURES:
1711		features = VHOST_SCSI_FEATURES;
1712		if (copy_to_user(featurep, &features, sizeof features))
1713			return -EFAULT;
1714		return 0;
1715	case VHOST_SET_FEATURES:
1716		if (copy_from_user(&features, featurep, sizeof features))
1717			return -EFAULT;
1718		return vhost_scsi_set_features(vs, features);
1719	default:
1720		mutex_lock(&vs->dev.mutex);
1721		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1722		/* TODO: flush backend after dev ioctl. */
1723		if (r == -ENOIOCTLCMD)
1724			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1725		mutex_unlock(&vs->dev.mutex);
1726		return r;
1727	}
1728}
1729
1730#ifdef CONFIG_COMPAT
1731static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1732				unsigned long arg)
1733{
1734	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1735}
1736#endif
1737
1738static const struct file_operations vhost_scsi_fops = {
1739	.owner          = THIS_MODULE,
1740	.release        = vhost_scsi_release,
1741	.unlocked_ioctl = vhost_scsi_ioctl,
1742#ifdef CONFIG_COMPAT
1743	.compat_ioctl	= vhost_scsi_compat_ioctl,
1744#endif
1745	.open           = vhost_scsi_open,
1746	.llseek		= noop_llseek,
1747};
1748
1749static struct miscdevice vhost_scsi_misc = {
1750	MISC_DYNAMIC_MINOR,
1751	"vhost-scsi",
1752	&vhost_scsi_fops,
1753};
1754
1755static int __init vhost_scsi_register(void)
1756{
1757	return misc_register(&vhost_scsi_misc);
1758}
1759
1760static void vhost_scsi_deregister(void)
1761{
1762	misc_deregister(&vhost_scsi_misc);
1763}
1764
1765static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1766{
1767	switch (tport->tport_proto_id) {
1768	case SCSI_PROTOCOL_SAS:
1769		return "SAS";
1770	case SCSI_PROTOCOL_FCP:
1771		return "FCP";
1772	case SCSI_PROTOCOL_ISCSI:
1773		return "iSCSI";
1774	default:
1775		break;
1776	}
1777
1778	return "Unknown";
1779}
1780
1781static void
1782vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1783		  struct se_lun *lun, bool plug)
1784{
1786	struct vhost_scsi *vs = tpg->vhost_scsi;
1787	struct vhost_virtqueue *vq;
1788	u32 reason;
1789
1790	if (!vs)
1791		return;
1792
1793	mutex_lock(&vs->dev.mutex);
1794
1795	if (plug)
1796		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1797	else
1798		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1799
1800	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1801	mutex_lock(&vq->mutex);
1802	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1803		vhost_scsi_send_evt(vs, tpg, lun,
1804				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1805	mutex_unlock(&vq->mutex);
1806	mutex_unlock(&vs->dev.mutex);
1807}
1808
1809static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1810{
1811	vhost_scsi_do_plug(tpg, lun, true);
1812}
1813
1814static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1815{
1816	vhost_scsi_do_plug(tpg, lun, false);
1817}
1818
1819static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1820			       struct se_lun *lun)
1821{
1822	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1823				struct vhost_scsi_tpg, se_tpg);
1824
1825	mutex_lock(&vhost_scsi_mutex);
1826
1827	mutex_lock(&tpg->tv_tpg_mutex);
1828	tpg->tv_tpg_port_count++;
1829	mutex_unlock(&tpg->tv_tpg_mutex);
1830
1831	vhost_scsi_hotplug(tpg, lun);
1832
1833	mutex_unlock(&vhost_scsi_mutex);
1834
1835	return 0;
1836}
1837
1838static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1839				  struct se_lun *lun)
1840{
1841	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1842				struct vhost_scsi_tpg, se_tpg);
1843
1844	mutex_lock(&vhost_scsi_mutex);
1845
1846	mutex_lock(&tpg->tv_tpg_mutex);
1847	tpg->tv_tpg_port_count--;
1848	mutex_unlock(&tpg->tv_tpg_mutex);
1849
1850	vhost_scsi_hotunplug(tpg, lun);
1851
1852	mutex_unlock(&vhost_scsi_mutex);
1853}
1854
1855static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1856{
1857	struct vhost_scsi_cmd *tv_cmd;
1858	unsigned int i;
1859
1860	if (!se_sess->sess_cmd_map)
1861		return;
1862
1863	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1864		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1865
1866		kfree(tv_cmd->tvc_sgl);
1867		kfree(tv_cmd->tvc_prot_sgl);
1868		kfree(tv_cmd->tvc_upages);
1869	}
1870}
1871
1872static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1873		struct config_item *item, const char *page, size_t count)
1874{
1875	struct se_portal_group *se_tpg = attrib_to_tpg(item);
1876	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1877				struct vhost_scsi_tpg, se_tpg);
1878	unsigned long val;
1879	int ret = kstrtoul(page, 0, &val);
1880
1881	if (ret) {
1882		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1883		return ret;
1884	}
1885	if (val != 0 && val != 1 && val != 3) {
1886		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1887		return -EINVAL;
1888	}
1889	tpg->tv_fabric_prot_type = val;
1890
1891	return count;
1892}
1893
1894static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1895		struct config_item *item, char *page)
1896{
1897	struct se_portal_group *se_tpg = attrib_to_tpg(item);
1898	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1899				struct vhost_scsi_tpg, se_tpg);
1900
1901	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1902}
1903
1904CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1905
1906static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1907	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1908	NULL,
1909};
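/*
 * This attribute surfaces in configfs, e.g. (illustrative path, WWPN
 * made up):
 *
 *	echo 1 > /sys/kernel/config/target/vhost/naa.600140512345678a/tpgt_1/attrib/fabric_prot_type
 */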
1910
1911static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1912			       struct se_session *se_sess, void *p)
1913{
1914	struct vhost_scsi_cmd *tv_cmd;
1915	unsigned int i;
1916
1917	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1918		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1919
1920		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1921					  sizeof(struct scatterlist),
1922					  GFP_KERNEL);
1923		if (!tv_cmd->tvc_sgl) {
1924			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1925			goto out;
1926		}
1927
1928		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1929					     sizeof(struct page *),
1930					     GFP_KERNEL);
1931		if (!tv_cmd->tvc_upages) {
1932			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1933			goto out;
1934		}
1935
1936		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1937					       sizeof(struct scatterlist),
1938					       GFP_KERNEL);
1939		if (!tv_cmd->tvc_prot_sgl) {
1940			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1941			goto out;
1942		}
1943	}
1944	return 0;
1945out:
1946	vhost_scsi_free_cmd_map_res(se_sess);
1947	return -ENOMEM;
1948}
1949
1950static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1951				const char *name)
1952{
1953	struct vhost_scsi_nexus *tv_nexus;
1954
1955	mutex_lock(&tpg->tv_tpg_mutex);
1956	if (tpg->tpg_nexus) {
1957		mutex_unlock(&tpg->tv_tpg_mutex);
1958		pr_debug("tpg->tpg_nexus already exists\n");
1959		return -EEXIST;
1960	}
1961
1962	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1963	if (!tv_nexus) {
1964		mutex_unlock(&tpg->tv_tpg_mutex);
1965		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1966		return -ENOMEM;
1967	}
1968	/*
1969	 * Since we are running in 'demo mode', this call will generate a
1970	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
1971	 * the SCSI Initiator port name of the passed configfs group 'name'.
1972	 */
1973	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
1974					VHOST_SCSI_DEFAULT_TAGS,
1975					sizeof(struct vhost_scsi_cmd),
1976					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1977					(unsigned char *)name, tv_nexus,
1978					vhost_scsi_nexus_cb);
1979	if (IS_ERR(tv_nexus->tvn_se_sess)) {
1980		mutex_unlock(&tpg->tv_tpg_mutex);
1981		kfree(tv_nexus);
1982		return -ENOMEM;
1983	}
1984	tpg->tpg_nexus = tv_nexus;
1985
1986	mutex_unlock(&tpg->tv_tpg_mutex);
1987	return 0;
1988}
1989
1990static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1991{
1992	struct se_session *se_sess;
1993	struct vhost_scsi_nexus *tv_nexus;
1994
1995	mutex_lock(&tpg->tv_tpg_mutex);
1996	tv_nexus = tpg->tpg_nexus;
1997	if (!tv_nexus) {
1998		mutex_unlock(&tpg->tv_tpg_mutex);
1999		return -ENODEV;
2000	}
2001
2002	se_sess = tv_nexus->tvn_se_sess;
2003	if (!se_sess) {
2004		mutex_unlock(&tpg->tv_tpg_mutex);
2005		return -ENODEV;
2006	}
2007
2008	if (tpg->tv_tpg_port_count != 0) {
2009		mutex_unlock(&tpg->tv_tpg_mutex);
2010		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2011			" active TPG port count: %d\n",
2012			tpg->tv_tpg_port_count);
2013		return -EBUSY;
2014	}
2015
2016	if (tpg->tv_tpg_vhost_count != 0) {
2017		mutex_unlock(&tpg->tv_tpg_mutex);
2018		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2019			" active TPG vhost count: %d\n",
2020			tpg->tv_tpg_vhost_count);
2021		return -EBUSY;
2022	}
2023
2024	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2025		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2026		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2027
2028	vhost_scsi_free_cmd_map_res(se_sess);
2029	/*
2030	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2031	 */
2032	target_remove_session(se_sess);
2033	tpg->tpg_nexus = NULL;
2034	mutex_unlock(&tpg->tv_tpg_mutex);
2035
2036	kfree(tv_nexus);
2037	return 0;
2038}
2039
2040static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2041{
2042	struct se_portal_group *se_tpg = to_tpg(item);
2043	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2044				struct vhost_scsi_tpg, se_tpg);
2045	struct vhost_scsi_nexus *tv_nexus;
2046	ssize_t ret;
2047
2048	mutex_lock(&tpg->tv_tpg_mutex);
2049	tv_nexus = tpg->tpg_nexus;
2050	if (!tv_nexus) {
2051		mutex_unlock(&tpg->tv_tpg_mutex);
2052		return -ENODEV;
2053	}
2054	ret = snprintf(page, PAGE_SIZE, "%s\n",
2055			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2056	mutex_unlock(&tpg->tv_tpg_mutex);
2057
2058	return ret;
2059}
2060
2061static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2062		const char *page, size_t count)
2063{
2064	struct se_portal_group *se_tpg = to_tpg(item);
2065	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2066				struct vhost_scsi_tpg, se_tpg);
2067	struct vhost_scsi_tport *tport_wwn = tpg->tport;
2068	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2069	int ret;
2070	/*
2071	 * Shutdown the active I_T nexus if 'NULL' is passed..
2072	 * Shutdown the active I_T nexus if 'NULL' is passed.
2073	if (!strncmp(page, "NULL", 4)) {
2074		ret = vhost_scsi_drop_nexus(tpg);
2075		return (!ret) ? count : ret;
2076	}
2077	/*
2078	 * Otherwise make sure the passed virtual Initiator port WWN matches
2079	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2080	 * vhost_scsi_make_nexus().
2081	 */
2082	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2083		pr_err("Emulated NAA SAS Address: %s, exceeds"
2084				" max: %d\n", page, VHOST_SCSI_NAMELEN);
2085		return -EINVAL;
2086	}
2087	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2088
2089	ptr = strstr(i_port, "naa.");
2090	if (ptr) {
2091		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2092			pr_err("Passed SAS Initiator Port %s does not"
2093				" match target port protoid: %s\n", i_port,
2094				vhost_scsi_dump_proto_id(tport_wwn));
2095			return -EINVAL;
2096		}
2097		port_ptr = &i_port[0];
2098		goto check_newline;
2099	}
2100	ptr = strstr(i_port, "fc.");
2101	if (ptr) {
2102		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2103			pr_err("Passed FCP Initiator Port %s does not"
2104				" match target port protoid: %s\n", i_port,
2105				vhost_scsi_dump_proto_id(tport_wwn));
2106			return -EINVAL;
2107		}
2108		port_ptr = &i_port[3]; /* Skip over "fc." */
2109		goto check_newline;
2110	}
2111	ptr = strstr(i_port, "iqn.");
2112	if (ptr) {
2113		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2114			pr_err("Passed iSCSI Initiator Port %s does not"
2115				" match target port protoid: %s\n", i_port,
2116				vhost_scsi_dump_proto_id(tport_wwn));
2117			return -EINVAL;
2118		}
2119		port_ptr = &i_port[0];
2120		goto check_newline;
2121	}
2122	pr_err("Unable to locate prefix for emulated Initiator Port:"
2123			" %s\n", i_port);
2124	return -EINVAL;
2125	/*
2126	 * Clear any trailing newline for the NAA WWN
2127	 */
2128check_newline:
2129	if (i_port[strlen(i_port)-1] == '\n')
2130		i_port[strlen(i_port)-1] = '\0';
2131
2132	ret = vhost_scsi_make_nexus(tpg, port_ptr);
2133	if (ret < 0)
2134		return ret;
2135
2136	return count;
2137}
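/*
 * Example (illustrative initiator WWN; echo's trailing newline is
 * stripped by the check_newline logic above):
 *
 *	echo naa.60014051234567aa > \
 *		/sys/kernel/config/target/vhost/naa.600140512345678a/tpgt_1/nexus
 */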
2138
2139CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2140
2141static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2142	&vhost_scsi_tpg_attr_nexus,
2143	NULL,
2144};
2145
2146static struct se_portal_group *
2147vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2148{
2149	struct vhost_scsi_tport *tport = container_of(wwn,
2150			struct vhost_scsi_tport, tport_wwn);
2151
2152	struct vhost_scsi_tpg *tpg;
2153	u16 tpgt;
2154	int ret;
2155
2156	if (strstr(name, "tpgt_") != name)
2157		return ERR_PTR(-EINVAL);
2158	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2159		return ERR_PTR(-EINVAL);
2160
2161	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2162	if (!tpg) {
2163		pr_err("Unable to allocate struct vhost_scsi_tpg\n");
2164		return ERR_PTR(-ENOMEM);
2165	}
2166	mutex_init(&tpg->tv_tpg_mutex);
2167	INIT_LIST_HEAD(&tpg->tv_tpg_list);
2168	tpg->tport = tport;
2169	tpg->tport_tpgt = tpgt;
2170
2171	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2172	if (ret < 0) {
2173		kfree(tpg);
2174		return NULL;
2175	}
2176	mutex_lock(&vhost_scsi_mutex);
2177	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2178	mutex_unlock(&vhost_scsi_mutex);
2179
2180	return &tpg->se_tpg;
2181}
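/*
 * Reached via configfs directory creation; the parent mkdir first
 * triggers vhost_scsi_make_tport() below, e.g. (illustrative WWPN):
 *
 *	mkdir -p /sys/kernel/config/target/vhost/naa.600140512345678a/tpgt_1
 */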
2182
2183static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2184{
2185	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2186				struct vhost_scsi_tpg, se_tpg);
2187
2188	mutex_lock(&vhost_scsi_mutex);
2189	list_del(&tpg->tv_tpg_list);
2190	mutex_unlock(&vhost_scsi_mutex);
2191	/*
2192	 * Release the virtual I_T Nexus for this vhost TPG
2193	 */
2194	vhost_scsi_drop_nexus(tpg);
2195	/*
2196	 * Deregister the se_tpg from TCM.
2197	 */
2198	core_tpg_deregister(se_tpg);
2199	kfree(tpg);
2200}
2201
2202static struct se_wwn *
2203vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2204		     struct config_group *group,
2205		     const char *name)
2206{
2207	struct vhost_scsi_tport *tport;
2208	char *ptr;
2209	u64 wwpn = 0;
2210	int off = 0;
2211
2212	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2213		return ERR_PTR(-EINVAL); */
2214
2215	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2216	if (!tport) {
2217		pr_err("Unable to allocate struct vhost_scsi_tport\n");
2218		return ERR_PTR(-ENOMEM);
2219	}
2220	tport->tport_wwpn = wwpn;
2221	/*
2222	 * Determine the emulated Protocol Identifier and Target Port Name
2223	 * based on the incoming configfs directory name.
2224	 */
2225	ptr = strstr(name, "naa.");
2226	if (ptr) {
2227		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2228		goto check_len;
2229	}
2230	ptr = strstr(name, "fc.");
2231	if (ptr) {
2232		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2233		off = 3; /* Skip over "fc." */
2234		goto check_len;
2235	}
2236	ptr = strstr(name, "iqn.");
2237	if (ptr) {
2238		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2239		goto check_len;
2240	}
2241
2242	pr_err("Unable to locate prefix for emulated Target Port:"
2243			" %s\n", name);
2244	kfree(tport);
2245	return ERR_PTR(-EINVAL);
2246
2247check_len:
2248	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2249		pr_err("Emulated %s Address: %s, exceeds"
2250			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2251			VHOST_SCSI_NAMELEN);
2252		kfree(tport);
2253		return ERR_PTR(-EINVAL);
2254	}
2255	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2256
2257	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2258		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2259
2260	return &tport->tport_wwn;
2261}
2262
2263static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2264{
2265	struct vhost_scsi_tport *tport = container_of(wwn,
2266				struct vhost_scsi_tport, tport_wwn);
2267
2268	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2269		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2270		tport->tport_name);
2271
2272	kfree(tport);
2273}
2274
2275static ssize_t
2276vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2277{
2278	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2279		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2280		utsname()->machine);
2281}
2282
2283CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2284
2285static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2286	&vhost_scsi_wwn_attr_version,
2287	NULL,
2288};
2289
2290static const struct target_core_fabric_ops vhost_scsi_ops = {
2291	.module				= THIS_MODULE,
2292	.fabric_name			= "vhost",
2293	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
2294	.tpg_get_tag			= vhost_scsi_get_tpgt,
2295	.tpg_check_demo_mode		= vhost_scsi_check_true,
2296	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
2297	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2298	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2299	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
2300	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
2301	.release_cmd			= vhost_scsi_release_cmd,
2302	.check_stop_free		= vhost_scsi_check_stop_free,
2303	.sess_get_index			= vhost_scsi_sess_get_index,
2304	.sess_get_initiator_sid		= NULL,
2305	.write_pending			= vhost_scsi_write_pending,
2306	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
2307	.get_cmd_state			= vhost_scsi_get_cmd_state,
2308	.queue_data_in			= vhost_scsi_queue_data_in,
2309	.queue_status			= vhost_scsi_queue_status,
2310	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
2311	.aborted_task			= vhost_scsi_aborted_task,
2312	/*
2313	 * Setup callers for generic logic in target_core_fabric_configfs.c
2314	 */
2315	.fabric_make_wwn		= vhost_scsi_make_tport,
2316	.fabric_drop_wwn		= vhost_scsi_drop_tport,
2317	.fabric_make_tpg		= vhost_scsi_make_tpg,
2318	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
2319	.fabric_post_link		= vhost_scsi_port_link,
2320	.fabric_pre_unlink		= vhost_scsi_port_unlink,
2321
2322	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
2323	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
2324	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
2325};
2326
2327static int __init vhost_scsi_init(void)
2328{
2329	int ret = -ENOMEM;
2330
2331	pr_debug("TCM_VHOST fabric module %s on %s/%s"
2332		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2333		utsname()->machine);
2334
2335	/*
2336	 * Use our own dedicated workqueue for submitting I/O into
2337	 * target core to avoid contention within system_wq.
2338	 */
2339	vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2340	if (!vhost_scsi_workqueue)
2341		goto out;
2342
2343	ret = vhost_scsi_register();
2344	if (ret < 0)
2345		goto out_destroy_workqueue;
2346
2347	ret = target_register_template(&vhost_scsi_ops);
2348	if (ret < 0)
2349		goto out_vhost_scsi_deregister;
2350
2351	return 0;
2352
2353out_vhost_scsi_deregister:
2354	vhost_scsi_deregister();
2355out_destroy_workqueue:
2356	destroy_workqueue(vhost_scsi_workqueue);
2357out:
2358	return ret;
2359}
2360
2361static void vhost_scsi_exit(void)
2362{
2363	target_unregister_template(&vhost_scsi_ops);
2364	vhost_scsi_deregister();
2365	destroy_workqueue(vhost_scsi_workqueue);
2366}
2367
2368MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2369MODULE_ALIAS("tcm_vhost");
2370MODULE_LICENSE("GPL");
2371module_init(vhost_scsi_init);
2372module_exit(vhost_scsi_exit);
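/*
 * Typical bring-up, for illustration:
 *
 *	modprobe vhost_scsi    # also reachable via the "tcm_vhost" alias
 *	# build the fabric under /sys/kernel/config/target/vhost/ as above,
 *	# then hand /dev/vhost-scsi to a vhost-aware VMM such as QEMU.
 */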