/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/mempool.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>

/*
 * All wire protocol details (storage protocol between the guest and the host)
 * are consolidated here.
 *
 * Begin protocol definitions.
 */

/*
 * Version history:
 * V1 Beta: 0.1
 * V1 RC < 2008/1/31: 1.0
 * V1 RC > 2008/1/31: 2.0
 * Win7: 4.2
 */

#define VMSTOR_CURRENT_MAJOR  4
#define VMSTOR_CURRENT_MINOR  2

/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
	VSTOR_OPERATION_COMPLETE_IO		= 1,
	VSTOR_OPERATION_REMOVE_DEVICE		= 2,
	VSTOR_OPERATION_EXECUTE_SRB		= 3,
	VSTOR_OPERATION_RESET_LUN		= 4,
	VSTOR_OPERATION_RESET_ADAPTER		= 5,
	VSTOR_OPERATION_RESET_BUS		= 6,
	VSTOR_OPERATION_BEGIN_INITIALIZATION	= 7,
	VSTOR_OPERATION_END_INITIALIZATION	= 8,
	VSTOR_OPERATION_QUERY_PROTOCOL_VERSION	= 9,
	VSTOR_OPERATION_QUERY_PROPERTIES	= 10,
	VSTOR_OPERATION_ENUMERATE_BUS		= 11,
	VSTOR_OPERATION_MAXIMUM			= 11
};

/*
 * Platform-neutral description of a SCSI request -
 * this remains the same across the wire regardless of 32/64 bit.
 * Note: it is patterned after the SCSI_PASS_THROUGH structure.
 */
#define STORVSC_MAX_CMD_LEN			0x10
#define STORVSC_SENSE_BUFFER_SIZE		0x12
#define STORVSC_MAX_BUF_LEN_WITH_PADDING	0x14

struct vmscsi_request {
	u16 length;
	u8  srb_status;
	u8  scsi_status;

	u8  port_number;
	u8  path_id;
	u8  target_id;
	u8  lun;

	u8  cdb_length;
	u8  sense_info_length;
	u8  data_in;
	u8  reserved;

	u32 data_transfer_length;

	union {
		u8 cdb[STORVSC_MAX_CMD_LEN];
		u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
		u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
	};
} __packed;
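
/*
 * Usage sketch (based on the code in this file): the union above is
 * effectively time-multiplexed. On the submit path,
 * storvsc_queuecommand() fills in cdb[]; on the completion path,
 * storvsc_on_io_completion() copies sense_data[] back to the SCSI
 * midlayer when SRB_STATUS_AUTOSENSE_VALID is set. The two views are
 * never live at the same time, which is why a union is safe here.
 */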

/*
 * This structure is sent during the initialization phase to get the
 * different properties of the channel.
 */
struct vmstorage_channel_properties {
	u16 protocol_version;
	u8  path_id;
	u8  target_id;

	/* Note: port number is only really known on the client side */
	u32 port_number;
	u32 flags;
	u32 max_transfer_bytes;

	/*
	 * This id is unique for each channel and will correspond with
	 * vendor specific data in the inquiry data.
	 */
	u64 unique_id;
} __packed;

/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
	/* Major (MSW) and minor (LSW) version numbers. */
	u16 major_minor;

	/*
	 * The revision number is auto-incremented whenever the protocol
	 * definitions change on the host side. A mismatch does not
	 * necessarily indicate incompatibility, but it does indicate
	 * mismatched builds. This field is only used on the Windows side;
	 * just set it to 0.
	 */
	u16 revision;
} __packed;

/* Channel Property Flags */
#define STORAGE_CHANNEL_REMOVABLE_FLAG		0x1
#define STORAGE_CHANNEL_EMULATED_IDE_FLAG	0x2

struct vstor_packet {
	/* Requested operation type */
	enum vstor_packet_operation operation;

	/* Flags - see below for values */
	u32 flags;

	/* Status of the request returned from the server side. */
	u32 status;

	/* Data payload area */
	union {
		/*
		 * Structure used to forward SCSI commands from the
		 * client to the server.
		 */
		struct vmscsi_request vm_srb;

		/* Structure used to query channel properties. */
		struct vmstorage_channel_properties storage_channel_properties;

		/* Used during version negotiations. */
		struct vmstorage_protocol_version version;
	};
} __packed;

/*
 * Packet Flags:
 *
 * This flag indicates that the server should send back a completion for this
 * packet.
 */
#define REQUEST_COMPLETION_FLAG	0x1

/* Matches the Windows end. */
enum storvsc_request_type {
	WRITE_TYPE = 0,
	READ_TYPE,
	UNKNOWN_TYPE,
};

/*
 * SRB status codes and masks; only a subset of the codes is used here.
 */
#define SRB_STATUS_AUTOSENSE_VALID	0x80
#define SRB_STATUS_INVALID_LUN		0x20
#define SRB_STATUS_SUCCESS		0x01
#define SRB_STATUS_ERROR		0x04

/*
 * This is the end of the protocol-specific defines.
 */

/*
 * We set up a mempool to allocate request structures for this driver
 * on a per-LUN basis. The following define specifies the number of
 * elements in the pool.
 */

#define STORVSC_MIN_BUF_NR				64
static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
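
/*
 * The ring buffer size can only be tuned at load time (the parameter is
 * read-only once loaded). A minimal sketch, assuming the module is built
 * as hv_storvsc:
 *
 *   modprobe hv_storvsc storvsc_ringbuffer_size=163840
 *
 * The value is validated in storvsc_drv_init() below: it must be large
 * enough to hold STORVSC_MAX_IO_REQUESTS outstanding requests.
 */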

#define STORVSC_MAX_IO_REQUESTS				128

/*
 * In Hyper-V, each port/path/target maps to one SCSI host adapter. In
 * reality, the path/target is not used (i.e., always set to 0), so our
 * SCSI host adapter essentially has one bus with one target that
 * contains up to 64 LUNs (STORVSC_MAX_LUNS_PER_TARGET).
 */
#define STORVSC_MAX_LUNS_PER_TARGET			64
#define STORVSC_MAX_TARGETS				1
#define STORVSC_MAX_CHANNELS				1

struct storvsc_cmd_request {
	struct list_head entry;
	struct scsi_cmnd *cmd;

	unsigned int bounce_sgl_count;
	struct scatterlist *bounce_sgl;

	struct hv_device *device;

	/* Synchronize the request/response if needed */
	struct completion wait_event;

	unsigned char *sense_buffer;
	struct hv_multipage_buffer data_buffer;
	struct vstor_packet vstor_packet;
};

/* A storvsc device is a device object that contains a vmbus channel */
struct storvsc_device {
	struct hv_device *device;

	bool	 destroy;
	bool	 drain_notify;
	atomic_t num_outstanding_req;
	struct Scsi_Host *host;

	wait_queue_head_t waiting_to_drain;

	/*
	 * Each unique port/path/target represents one channel, i.e., one
	 * SCSI controller. In reality, path_id and target_id are always 0
	 * and the port number is set by us.
	 */
	unsigned int port_number;
	unsigned char path_id;
	unsigned char target_id;

	/* Used for vsc/vsp channel reset process */
	struct storvsc_cmd_request init_request;
	struct storvsc_cmd_request reset_request;
};

struct stor_mem_pools {
	struct kmem_cache *request_pool;
	mempool_t *request_mempool;
};

struct hv_host_device {
	struct hv_device *dev;
	unsigned int port;
	unsigned char path;
	unsigned char target;
};

struct storvsc_scan_work {
	struct work_struct work;
	struct Scsi_Host *host;
	uint lun;
};

static void storvsc_bus_scan(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	int id, order_id;

	wrk = container_of(work, struct storvsc_scan_work, work);
	for (id = 0; id < wrk->host->max_id; ++id) {
		if (wrk->host->reverse_ordering)
			order_id = wrk->host->max_id - id - 1;
		else
			order_id = id;

		scsi_scan_target(&wrk->host->shost_gendev, 0,
				 order_id, SCAN_WILD_CARD, 1);
	}
	kfree(wrk);
}

static void storvsc_remove_lun(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	struct scsi_device *sdev;

	wrk = container_of(work, struct storvsc_scan_work, work);
	if (!scsi_host_get(wrk->host))
		goto done;

	sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
	scsi_host_put(wrk->host);

done:
	kfree(wrk);
}

/*
 * Major/minor version helper. The minor version is in the LSB, meaning
 * that earlier flat version numbers will be interpreted as "0.x" (i.e.,
 * 1 becomes 0.1).
 */

static inline u16 storvsc_get_version(u8 major, u8 minor)
{
	u16 version;

	version = ((major << 8) | minor);
	return version;
}
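
/*
 * Worked example: storvsc_get_version(VMSTOR_CURRENT_MAJOR,
 * VMSTOR_CURRENT_MINOR) == storvsc_get_version(4, 2) == (4 << 8) | 2
 * == 0x0402, i.e., major in the most significant byte, minor in the
 * least significant byte, matching the major_minor field in
 * struct vmstorage_protocol_version.
 */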

/*
 * We can get incoming messages from the host that are not in response to
 * messages that we have sent out. An example of this would be messages
 * received by the guest to notify dynamic addition/removal of LUNs. To
 * deal with potential race conditions where the driver may be in the
 * midst of being unloaded when we receive an unsolicited message from
 * the host, we have implemented a mechanism to guarantee sequential
 * consistency:
 *
 * 1) Once the device is marked as being destroyed, we will fail all
 *    outgoing messages.
 * 2) We permit incoming messages when the device is being destroyed,
 *    only to properly account for messages already sent out.
 */

static inline struct storvsc_device *get_out_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (stor_device && stor_device->destroy)
		stor_device = NULL;

	return stor_device;
}

static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
{
	dev->drain_notify = true;
	wait_event(dev->waiting_to_drain,
		   atomic_read(&dev->num_outstanding_req) == 0);
	dev->drain_notify = false;
}
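
/*
 * Pairing note: storvsc_wait_to_drain() sleeps until the last response
 * arrives; the matching wake_up() is issued at the bottom of
 * storvsc_on_io_completion() once num_outstanding_req drops to zero and
 * drain_notify is set.
 */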

static inline struct storvsc_device *get_in_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (!stor_device)
		goto get_in_err;

	/*
	 * If the device is being destroyed, allow incoming
	 * traffic only to clean up outstanding requests.
	 */
	if (stor_device->destroy &&
	    (atomic_read(&stor_device->num_outstanding_req) == 0))
		stor_device = NULL;

get_in_err:
	return stor_device;
}


static void destroy_bounce_buffer(struct scatterlist *sgl,
				  unsigned int sg_count)
{
	int i;
	struct page *page_buf;

	for (i = 0; i < sg_count; i++) {
		page_buf = sg_page(&sgl[i]);
		if (page_buf != NULL)
			__free_page(page_buf);
	}

	kfree(sgl);
}

static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i;

	/* A single entry cannot have an interior hole; nothing to check. */
	if (sg_count < 2)
		return -1;

	/* We have at least 2 sg entries */
	for (i = 0; i < sg_count; i++) {
		if (i == 0) {
			/* make sure the 1st one does not have a hole */
			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
				return i;
		} else if (i == sg_count - 1) {
			/* make sure the last one does not have a hole */
			if (sgl[i].offset != 0)
				return i;
		} else {
			/* make sure there is no hole in the middle */
			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
				return i;
		}
	}
	return -1;
}
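
/*
 * Worked example (hypothetical layouts, assuming 4 KiB pages, entries
 * written as { offset, length }): a list whose first entry is
 * { 0, 512 } fails the first check (0 + 512 != PAGE_SIZE), so
 * do_bounce_buffer() returns 0 and the request must be bounced. A list
 * of { 2048, 2048 }, { 0, 4096 }, { 0, 1024 } has no interior holes and
 * returns -1: the host can DMA it directly.
 */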

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
						unsigned int sg_count,
						unsigned int len,
						int write)
{
	int i;
	int num_pages;
	struct scatterlist *bounce_sgl;
	struct page *page_buf;
	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);

	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
	if (!bounce_sgl)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		page_buf = alloc_page(GFP_ATOMIC);
		if (!page_buf)
			goto cleanup;
		sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
	}

	return bounce_sgl;

cleanup:
	destroy_bounce_buffer(bounce_sgl, num_pages);
	return NULL;
}
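
/*
 * Design note: buf_len above is 0 for writes and PAGE_SIZE for reads.
 * For writes, copy_to_bounce_buffer() grows each entry's length as data
 * is copied in, so the length must start at 0; for reads, the host
 * fills the pages and copy_from_bounce_buffer() consumes them via the
 * offset field, so the length is preset to a full page.
 */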

/* Disgusting wrapper functions */
static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
{
	void *addr = kmap_atomic(sg_page(sgl + idx));
	return (unsigned long)addr;
}

static inline void sg_kunmap_atomic(unsigned long addr)
{
	kunmap_atomic((void *)addr);
}


/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
					    struct scatterlist *bounce_sgl,
					    unsigned int orig_sgl_count,
					    unsigned int bounce_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long dest_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		dest_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
		dest = dest_addr;
		destlen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr = sg_kmap_atomic(bounce_sgl, j);

		while (destlen) {
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
				/* full */
				sg_kunmap_atomic(bounce_addr);
				j++;

				/*
				 * It is possible that the number of elements
				 * in the bounce buffer may not be equal to
				 * the number of elements in the original
				 * scatter list. Handle this correctly.
				 */
				if (j == bounce_sgl_count) {
					/*
					 * We are done; clean up and return.
					 */
					sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
					local_irq_restore(flags);
					return total_copied;
				}

				/* if we need to use another bounce buffer */
				if (destlen || i != orig_sgl_count - 1)
					bounce_addr = sg_kmap_atomic(bounce_sgl, j);
			} else if (destlen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				sg_kunmap_atomic(bounce_addr);
			}
		}

		sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
	}

	local_irq_restore(flags);

	return total_copied;
}

/*
 * Assume bounce_sgl has enough room, i.e., it was allocated by
 * create_bounce_buffer().
 */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
					  struct scatterlist *bounce_sgl,
					  unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long src_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		src_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
		src = src_addr;
		srclen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr = sg_kmap_atomic(bounce_sgl, j);

		while (srclen) {
			/* assume bounce offset always == 0 */
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) {
				/* full..move to next entry */
				sg_kunmap_atomic(bounce_addr);
				j++;

				/* if we need to use another bounce buffer */
				if (srclen || i != orig_sgl_count - 1)
					bounce_addr = sg_kmap_atomic(bounce_sgl, j);

			} else if (srclen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				sg_kunmap_atomic(bounce_addr);
			}
		}

		sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
	}

	local_irq_restore(flags);

	return total_copied;
}

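/*
 * Initialization handshake with the host, in order:
 *
 *   1. BEGIN_INITIALIZATION   - open the negotiation.
 *   2. QUERY_PROTOCOL_VERSION - propose VMSTOR_CURRENT_MAJOR.MINOR.
 *   3. QUERY_PROPERTIES       - fetch path_id/target_id for the channel.
 *   4. END_INITIALIZATION     - commit and go operational.
 *
 * Every step sends one vstor_packet with REQUEST_COMPLETION_FLAG set and
 * waits (up to 5 seconds) for a COMPLETE_IO response with status 0.
 */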
static int storvsc_channel_init(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return -ENODEV;

	request = &stor_device->init_request;
	vstor_packet = &request->vstor_packet;

	/*
	 * Now, initiate the vsc/vsp initialization protocol on the open
	 * channel.
	 */
	memset(request, 0, sizeof(struct storvsc_cmd_request));
	init_completion(&request->wait_event);
	vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	/* Set ret on failure; otherwise a stale 0 would be returned. */
	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* reuse the packet for the version range supported */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	vstor_packet->version.major_minor =
		storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);

	/*
	 * The revision number is only used in Windows; set it to 0.
	 */
	vstor_packet->version.revision = 0;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->storage_channel_properties.port_number =
					stor_device->port_number;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
	stor_device->target_id
		= vstor_packet->storage_channel_properties.target_id;

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		ret = -EINVAL;

cleanup:
	return ret;
}

static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
{
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	void (*scsi_done_fn)(struct scsi_cmnd *);
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	struct storvsc_scan_work *wrk;
	struct stor_mem_pools *memp = scmnd->device->hostdata;

	vm_srb = &cmd_request->vstor_packet.vm_srb;
	if (cmd_request->bounce_sgl_count) {
		if (vm_srb->data_in == READ_TYPE)
			copy_from_bounce_buffer(scsi_sglist(scmnd),
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd),
					cmd_request->bounce_sgl_count);
		destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);
	}

	/*
	 * If there is an error, offline the device, since all error
	 * recovery strategies would have already been deployed on the
	 * host side. However, if the command was a pass-through command,
	 * deal with it appropriately.
	 */
	scmnd->result = vm_srb->scsi_status;

	if (vm_srb->srb_status == SRB_STATUS_ERROR) {
		switch (scmnd->cmnd[0]) {
		case ATA_16:
		case ATA_12:
			set_host_byte(scmnd, DID_PASSTHROUGH);
			break;
		default:
			set_host_byte(scmnd, DID_TARGET_FAILURE);
		}
	}

	/*
	 * If the LUN is invalid, remove the device.
	 */
	if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
		struct storvsc_device *stor_dev;
		struct hv_device *dev = host_dev->dev;
		struct Scsi_Host *host;

		stor_dev = get_in_stor_device(dev);
		host = stor_dev->host;

		wrk = kmalloc(sizeof(struct storvsc_scan_work),
				GFP_ATOMIC);
		if (!wrk) {
			scmnd->result = DID_TARGET_FAILURE << 16;
		} else {
			wrk->host = host;
			wrk->lun = vm_srb->lun;
			INIT_WORK(&wrk->work, storvsc_remove_lun);
			schedule_work(&wrk->work);
		}
	}

	if (scmnd->result) {
		if (scsi_normalize_sense(scmnd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("storvsc", &sense_hdr);
	}

	scsi_set_resid(scmnd,
		cmd_request->data_buffer.len -
		vm_srb->data_transfer_length);

	scsi_done_fn = scmnd->scsi_done;

	scmnd->host_scribble = NULL;
	scmnd->scsi_done = NULL;

	scsi_done_fn(scmnd);

	mempool_free(cmd_request, memp->request_mempool);
}

static void storvsc_on_io_completion(struct hv_device *device,
				  struct vstor_packet *vstor_packet,
				  struct storvsc_cmd_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *stor_pkt;

	stor_device = hv_get_drvdata(device);
	stor_pkt = &request->vstor_packet;

	/*
	 * The current SCSI handling on the host side does
	 * not correctly handle:
	 * INQUIRY command with page code parameter set to 0x80
	 * MODE_SENSE command with cmd[2] == 0x1c
	 *
	 * Set up the srb and scsi status so this won't be fatal.
	 * We do this so we can distinguish truly fatal failures
	 * (srb status == 0x4) and offline the device in that case.
	 */
	if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
	   (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
		vstor_packet->vm_srb.scsi_status = 0;
		vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
	}

	/* Copy over the status...etc */
	stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
	stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
	stor_pkt->vm_srb.sense_info_length =
		vstor_packet->vm_srb.sense_info_length;

	if (vstor_packet->vm_srb.scsi_status != 0 ||
	    vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
		dev_warn(&device->device,
			 "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
			 stor_pkt->vm_srb.cdb[0],
			 vstor_packet->vm_srb.scsi_status,
			 vstor_packet->vm_srb.srb_status);
	}

	if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
		/* CHECK_CONDITION */
		if (vstor_packet->vm_srb.srb_status &
			SRB_STATUS_AUTOSENSE_VALID) {
			/* autosense data available */
			dev_warn(&device->device,
				 "stor pkt %p autosense data valid - len %d\n",
				 request,
				 vstor_packet->vm_srb.sense_info_length);

			memcpy(request->sense_buffer,
			       vstor_packet->vm_srb.sense_data,
			       vstor_packet->vm_srb.sense_info_length);
		}
	}

	stor_pkt->vm_srb.data_transfer_length =
		vstor_packet->vm_srb.data_transfer_length;

	storvsc_command_completion(request);

	if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
	    stor_device->drain_notify)
		wake_up(&stor_device->waiting_to_drain);
}

static void storvsc_on_receive(struct hv_device *device,
			     struct vstor_packet *vstor_packet,
			     struct storvsc_cmd_request *request)
{
	struct storvsc_scan_work *work;
	struct storvsc_device *stor_device;

	switch (vstor_packet->operation) {
	case VSTOR_OPERATION_COMPLETE_IO:
		storvsc_on_io_completion(device, vstor_packet, request);
		break;

	case VSTOR_OPERATION_REMOVE_DEVICE:
	case VSTOR_OPERATION_ENUMERATE_BUS:
		stor_device = get_in_stor_device(device);
		work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
		if (!work)
			return;

		INIT_WORK(&work->work, storvsc_bus_scan);
		work->host = stor_device->host;
		schedule_work(&work->work);
		break;

	default:
		break;
	}
}

static void storvsc_on_channel_callback(void *context)
{
	struct hv_device *device = (struct hv_device *)context;
	struct storvsc_device *stor_device;
	u32 bytes_recvd;
	u64 request_id;
	unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
	struct storvsc_cmd_request *request;
	int ret;

	stor_device = get_in_stor_device(device);
	if (!stor_device)
		return;

	do {
		ret = vmbus_recvpacket(device->channel, packet,
				       ALIGN(sizeof(struct vstor_packet), 8),
				       &bytes_recvd, &request_id);
		if (ret == 0 && bytes_recvd > 0) {

			request = (struct storvsc_cmd_request *)
					(unsigned long)request_id;

			if ((request == &stor_device->init_request) ||
			    (request == &stor_device->reset_request)) {

				memcpy(&request->vstor_packet, packet,
				       sizeof(struct vstor_packet));
				complete(&request->wait_event);
			} else {
				storvsc_on_receive(device,
						(struct vstor_packet *)packet,
						request);
			}
		} else {
			break;
		}
	} while (1);
}

static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
{
	struct vmstorage_channel_properties props;
	int ret;

	memset(&props, 0, sizeof(struct vmstorage_channel_properties));

	ret = vmbus_open(device->channel,
			 ring_size,
			 ring_size,
			 (void *)&props,
			 sizeof(struct vmstorage_channel_properties),
			 storvsc_on_channel_callback, device);

	if (ret != 0)
		return ret;

	ret = storvsc_channel_init(device);

	return ret;
}

static int storvsc_dev_remove(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	unsigned long flags;

	stor_device = hv_get_drvdata(device);

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	stor_device->destroy = true;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */
	storvsc_wait_to_drain(stor_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device().
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* Close the channel */
	vmbus_close(device->channel);

	kfree(stor_device);
	return 0;
}

static int storvsc_do_io(struct hv_device *device,
			      struct storvsc_cmd_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *vstor_packet;
	int ret = 0;

	vstor_packet = &request->vstor_packet;
	stor_device = get_out_stor_device(device);

	if (!stor_device)
		return -ENODEV;

	request->device = device;

	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

	vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);

	vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;

	vstor_packet->vm_srb.data_transfer_length =
		request->data_buffer.len;

	vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;

	if (request->data_buffer.len) {
		ret = vmbus_sendpacket_multipagebuffer(device->channel,
				&request->data_buffer,
				vstor_packet,
				sizeof(struct vstor_packet),
				(unsigned long)request);
	} else {
		ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret != 0)
		return ret;

	atomic_inc(&stor_device->num_outstanding_req);

	return ret;
}

static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	struct stor_mem_pools *memp;
	int number = STORVSC_MIN_BUF_NR;

	memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
	if (!memp)
		return -ENOMEM;

	memp->request_pool =
		kmem_cache_create(dev_name(&sdevice->sdev_dev),
				sizeof(struct storvsc_cmd_request), 0,
				SLAB_HWCACHE_ALIGN, NULL);

	if (!memp->request_pool)
		goto err0;

	memp->request_mempool = mempool_create(number, mempool_alloc_slab,
						mempool_free_slab,
						memp->request_pool);

	if (!memp->request_mempool)
		goto err1;

	sdevice->hostdata = memp;

	return 0;

err1:
	kmem_cache_destroy(memp->request_pool);

err0:
	kfree(memp);
	return -ENOMEM;
}

static void storvsc_device_destroy(struct scsi_device *sdevice)
{
	struct stor_mem_pools *memp = sdevice->hostdata;

	mempool_destroy(memp->request_mempool);
	kmem_cache_destroy(memp->request_pool);
	kfree(memp);
	sdevice->hostdata = NULL;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
	scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
				STORVSC_MAX_IO_REQUESTS);

	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);

	return 0;
}

static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
			   sector_t capacity, int *info)
{
	sector_t nsect = capacity;
	sector_t cylinders = nsect;
	int heads, sectors_pt;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	heads = 0xff;
	sectors_pt = 0x3f;      /* Sectors per track */
	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;

	info[0] = heads;
	info[1] = sectors_pt;
	info[2] = (int)cylinders;

	return 0;
}
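
/*
 * Worked example: for a hypothetical 1 GiB disk, capacity = 2097152
 * 512-byte sectors; with heads = 255 and sectors_pt = 63, cylinders =
 * 2097152 / (255 * 63) = 130 (integer division), so the reported
 * geometry is 255 heads / 63 sectors / 130 cylinders.
 */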

static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	struct hv_device *device = host_dev->dev;

	struct storvsc_device *stor_device;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return FAILED;

	request = &stor_device->reset_request;
	vstor_packet = &request->vstor_packet;

	init_completion(&request->wait_event);

	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->vm_srb.path_id = stor_device->path_id;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)&stor_device->reset_request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return FAILED;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0)
		return TIMEOUT_ERROR;

	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us.
	 */
	return SUCCESS;
}

static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
	bool allowed = true;
	u8 scsi_op = scmnd->cmnd[0];

	switch (scsi_op) {
	/*
	 * smartd sends this command, which the host does not handle;
	 * don't send it.
	 */
	case SET_WINDOW:
		scmnd->result = ILLEGAL_REQUEST << 16;
		allowed = false;
		break;
	default:
		break;
	}
	return allowed;
}

static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
	int ret;
	struct hv_host_device *host_dev = shost_priv(host);
	struct hv_device *dev = host_dev->dev;
	struct storvsc_cmd_request *cmd_request;
	int i;
	struct scatterlist *sgl;
	unsigned int sg_count = 0;
	struct vmscsi_request *vm_srb;
	struct stor_mem_pools *memp = scmnd->device->hostdata;

	if (!storvsc_scsi_cmd_ok(scmnd)) {
		scmnd->scsi_done(scmnd);
		return 0;
	}

	cmd_request = mempool_alloc(memp->request_mempool,
				       GFP_ATOMIC);

	/*
	 * We might be invoked in an interrupt context; hence
	 * mempool_alloc() can fail.
	 */
	if (!cmd_request)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));

	/* Setup the cmd request */
	cmd_request->cmd = scmnd;

	scmnd->host_scribble = (unsigned char *)cmd_request;

	vm_srb = &cmd_request->vstor_packet.vm_srb;

	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		vm_srb->data_in = WRITE_TYPE;
		break;
	case DMA_FROM_DEVICE:
		vm_srb->data_in = READ_TYPE;
		break;
	default:
		vm_srb->data_in = UNKNOWN_TYPE;
		break;
	}

	vm_srb->port_number = host_dev->port;
	vm_srb->path_id = scmnd->device->channel;
	vm_srb->target_id = scmnd->device->id;
	vm_srb->lun = scmnd->device->lun;

	vm_srb->cdb_length = scmnd->cmd_len;

	memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

	cmd_request->sense_buffer = scmnd->sense_buffer;

	cmd_request->data_buffer.len = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd)) {
		sgl = (struct scatterlist *)scsi_sglist(scmnd);
		sg_count = scsi_sg_count(scmnd);

		/* check if we need to bounce the sgl */
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
			cmd_request->bounce_sgl =
				create_bounce_buffer(sgl, scsi_sg_count(scmnd),
						     scsi_bufflen(scmnd),
						     vm_srb->data_in);
			if (!cmd_request->bounce_sgl) {
				ret = SCSI_MLQUEUE_HOST_BUSY;
				goto queue_error;
			}

			cmd_request->bounce_sgl_count =
				ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
					PAGE_SHIFT;

			if (vm_srb->data_in == WRITE_TYPE)
				copy_to_bounce_buffer(sgl,
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));

			sgl = cmd_request->bounce_sgl;
			sg_count = cmd_request->bounce_sgl_count;
		}

		cmd_request->data_buffer.offset = sgl[0].offset;

		for (i = 0; i < sg_count; i++)
			cmd_request->data_buffer.pfn_array[i] =
				page_to_pfn(sg_page(&sgl[i]));

	} else if (scsi_sglist(scmnd)) {
		cmd_request->data_buffer.offset =
			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		cmd_request->data_buffer.pfn_array[0] =
			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	}

	/* Invoke the vsc to start the IO */
	ret = storvsc_do_io(dev, cmd_request);

	if (ret == -EAGAIN) {
		/* no more space */

		if (cmd_request->bounce_sgl_count) {
			destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);

			ret = SCSI_MLQUEUE_DEVICE_BUSY;
			goto queue_error;
		}
	}

	return 0;

queue_error:
	mempool_free(cmd_request, memp->request_mempool);
	scmnd->host_scribble = NULL;
	return ret;
}

static struct scsi_host_template scsi_driver = {
	.module	=		THIS_MODULE,
	.name =			"storvsc_host_t",
	.bios_param =		storvsc_get_chs,
	.queuecommand =		storvsc_queuecommand,
	.eh_host_reset_handler =	storvsc_host_reset_handler,
	.slave_alloc =		storvsc_device_alloc,
	.slave_destroy =	storvsc_device_destroy,
	.slave_configure =	storvsc_device_configure,
	.cmd_per_lun =		1,
	/* 128 max_queue * 1 target (STORVSC_MAX_IO_REQUESTS) */
	.can_queue =		STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
	.this_id =		-1,
	/* no use setting to 0 since ll_blk_rw resets it to 1 */
	/* currently 32 */
	.sg_tablesize =		MAX_MULTIPAGE_BUFFER_COUNT,
	.use_clustering =	DISABLE_CLUSTERING,
	/* Make sure we don't get an sg segment that crosses a page boundary */
	.dma_boundary =		PAGE_SIZE-1,
};

enum {
	SCSI_GUID,
	IDE_GUID,
};

static const struct hv_vmbus_device_id id_table[] = {
	/* SCSI guid */
	{ VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
		       0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
	  .driver_data = SCSI_GUID },
	/* IDE guid */
	{ VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		       0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
	  .driver_data = IDE_GUID },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static int storvsc_probe(struct hv_device *device,
			const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	struct Scsi_Host *host;
	struct hv_host_device *host_dev;
	bool dev_is_ide = (dev_id->driver_data == IDE_GUID);
	int target = 0;
	struct storvsc_device *stor_device;

	host = scsi_host_alloc(&scsi_driver,
			       sizeof(struct hv_host_device));
	if (!host)
		return -ENOMEM;

	host_dev = shost_priv(host);
	memset(host_dev, 0, sizeof(struct hv_host_device));

	host_dev->port = host->host_no;
	host_dev->dev = device;

	stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
	if (!stor_device) {
		ret = -ENOMEM;
		goto err_out0;
	}

	stor_device->destroy = false;
	init_waitqueue_head(&stor_device->waiting_to_drain);
	stor_device->device = device;
	stor_device->host = host;
	hv_set_drvdata(device, stor_device);

	stor_device->port_number = host->host_no;
	ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
	if (ret)
		goto err_out1;

	host_dev->path = stor_device->path_id;
	host_dev->target = stor_device->target_id;

	/* max # of devices per target */
	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
	/* max # of targets per channel */
	host->max_id = STORVSC_MAX_TARGETS;
	/* max # of channels */
	host->max_channel = STORVSC_MAX_CHANNELS - 1;
	/* max cmd length */
	host->max_cmd_len = STORVSC_MAX_CMD_LEN;

	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, &device->device);
	if (ret != 0)
		goto err_out2;

	if (!dev_is_ide) {
		scsi_scan_host(host);
	} else {
		target = (device->dev_instance.b[5] << 8 |
			 device->dev_instance.b[4]);
		ret = scsi_add_device(host, 0, target, 0);
		if (ret) {
			scsi_remove_host(host);
			goto err_out2;
		}
	}
	return 0;

err_out2:
	/*
	 * Once we have connected with the host, we need to invoke
	 * storvsc_dev_remove() to roll back this state; that call also
	 * frees the stor_device, hence the jump around the err_out1
	 * label.
	 */
	storvsc_dev_remove(device);
	goto err_out0;

err_out1:
	kfree(stor_device);

err_out0:
	scsi_host_put(host);
	return ret;
}

static int storvsc_remove(struct hv_device *dev)
{
	struct storvsc_device *stor_device = hv_get_drvdata(dev);
	struct Scsi_Host *host = stor_device->host;

	scsi_remove_host(host);
	storvsc_dev_remove(dev);
	scsi_host_put(host);

	return 0;
}

static struct hv_driver storvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = storvsc_probe,
	.remove = storvsc_remove,
};

static int __init storvsc_drv_init(void)
{
	u32 max_outstanding_req_per_channel;

	/*
	 * Divide the ring buffer data size (which is 1 page less
	 * than the ring buffer size, since that page is reserved for
	 * the ring buffer indices) by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64).
	 */
	max_outstanding_req_per_channel =
		((storvsc_ringbuffer_size - PAGE_SIZE) /
		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
		sizeof(struct vstor_packet) + sizeof(u64),
		sizeof(u64)));
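
	/*
	 * Worked example, assuming 4 KiB pages: the default ring is
	 * 20 * 4096 = 81920 bytes, of which one page is reserved for
	 * the ring indices, leaving 77824 bytes of data area. The check
	 * below then requires the aligned per-request footprint to be
	 * at most 77824 / 128 = 608 bytes.
	 */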

	if (max_outstanding_req_per_channel <
	    STORVSC_MAX_IO_REQUESTS)
		return -EINVAL;

	return vmbus_driver_register(&storvsc_drv);
}

static void __exit storvsc_drv_exit(void)
{
	vmbus_driver_unregister(&storvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);