   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 *
   4 * Copyright (c) 2011, Microsoft Corporation.
   5 *
   6 * Authors:
   7 *   Haiyang Zhang <haiyangz@microsoft.com>
   8 *   Hank Janssen  <hjanssen@microsoft.com>
   9 *   K. Y. Srinivasan <kys@microsoft.com>
  10 */
  11
  12#ifndef _HYPERV_H
  13#define _HYPERV_H
  14
  15#include <uapi/linux/hyperv.h>
  16
  17#include <linux/mm.h>
  18#include <linux/types.h>
  19#include <linux/scatterlist.h>
  20#include <linux/list.h>
  21#include <linux/timer.h>
  22#include <linux/completion.h>
  23#include <linux/device.h>
  24#include <linux/mod_devicetable.h>
  25#include <linux/interrupt.h>
  26#include <linux/reciprocal_div.h>
  27#include <asm/hyperv-tlfs.h>
  28
  29#define MAX_PAGE_BUFFER_COUNT				32
  30#define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */
  31
  32#pragma pack(push, 1)
  33
   34/*
   35 * Types of GPADL, which decide how the GPADL header is created.
   36 *
   37 * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
   38 * same as HV_HYP_PAGE_SIZE.
   39 *
   40 * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
   41 * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put
   42 * into the gpadl; therefore the number of HV_HYP_PAGEs and the indexes of each
   43 * HV_HYP_PAGE will differ between the types of GPADL. For example,
   44 * if PAGE_SIZE is 64K:
  45 *
  46 * BUFFER:
  47 *
  48 * gva:    |--       64k      --|--       64k      --| ... |
  49 * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
  50 * index:  0    1    2     15   16   17   18 .. 31   32 ...
  51 *         |    |    ...   |    |    |   ...    |   ...
  52 *         v    V          V    V    V          V
  53 * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
  54 * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
  55 *
  56 * RING:
  57 *
  58 *         | header  |           data           | header  |     data      |
  59 * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
  60 * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
  61 * index:  0    1    16   17   18    31   ...   n   n+1  n+16 ...         2n
  62 *         |         /    /          /          |         /               /
  63 *         |        /    /          /           |        /               /
  64 *         |       /    /   ...    /    ...     |       /      ...      /
  65 *         |      /    /          /             |      /               /
  66 *         |     /    /          /              |     /               /
  67 *         V    V    V          V               V    V               v
  68 * gpadl:  | 4k | 4k |   ...    |    ...        | 4k | 4k |  ...     |
  69 * index:  0    1    2   ...    16   ...       n-15 n-14 n-13  ...  2n-30
  70 */
  71enum hv_gpadl_type {
  72	HV_GPADL_BUFFER,
  73	HV_GPADL_RING
  74};
  75
  76/* Single-page buffer */
  77struct hv_page_buffer {
  78	u32 len;
  79	u32 offset;
  80	u64 pfn;
  81};
  82
  83/* Multiple-page buffer */
  84struct hv_multipage_buffer {
   85	/* Length and Offset determine the # of pfns in the array */
  86	u32 len;
  87	u32 offset;
  88	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
  89};
  90
  91/*
  92 * Multiple-page buffer array; the pfn array is variable size:
  93 * The number of entries in the PFN array is determined by
  94 * "len" and "offset".
  95 */
  96struct hv_mpb_array {
   97	/* Length and Offset determine the # of pfns in the array */
  98	u32 len;
  99	u32 offset;
 100	u64 pfn_array[];
 101};
 102
 103/* 0x18 includes the proprietary packet header */
 104#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
 105					(sizeof(struct hv_page_buffer) * \
 106					 MAX_PAGE_BUFFER_COUNT))
 107#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
 108					 sizeof(struct hv_multipage_buffer))
 109
 110
 111#pragma pack(pop)
 112
 113struct hv_ring_buffer {
 114	/* Offset in bytes from the start of ring data below */
 115	u32 write_index;
 116
 117	/* Offset in bytes from the start of ring data below */
 118	u32 read_index;
 119
 120	u32 interrupt_mask;
 121
 122	/*
 123	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
 124	 * driven flow management. The feature bit feat_pending_send_sz
 125	 * is set by the host on the host->guest ring buffer, and by the
  126	 * guest on the guest->host ring buffer.
  127	 *
 128	 * The meaning of the feature bit is a bit complex in that it has
 129	 * semantics that apply to both ring buffers.  If the guest sets
 130	 * the feature bit in the guest->host ring buffer, the guest is
 131	 * telling the host that:
 132	 * 1) It will set the pending_send_sz field in the guest->host ring
 133	 *    buffer when it is waiting for space to become available, and
 134	 * 2) It will read the pending_send_sz field in the host->guest
 135	 *    ring buffer and interrupt the host when it frees enough space
 136	 *
 137	 * Similarly, if the host sets the feature bit in the host->guest
 138	 * ring buffer, the host is telling the guest that:
 139	 * 1) It will set the pending_send_sz field in the host->guest ring
 140	 *    buffer when it is waiting for space to become available, and
 141	 * 2) It will read the pending_send_sz field in the guest->host
 142	 *    ring buffer and interrupt the guest when it frees enough space
 143	 *
 144	 * If either the guest or host does not set the feature bit that it
 145	 * owns, that guest or host must do polling if it encounters a full
 146	 * ring buffer, and not signal the other end with an interrupt.
 147	 */
  148	u32 pending_send_sz;
  149	u32 reserved1[12];
  150	union {
 151		struct {
 152			u32 feat_pending_send_sz:1;
 153		};
 154		u32 value;
 155	} feature_bits;
 156
 157	/* Pad it to PAGE_SIZE so that data starts on page boundary */
 158	u8	reserved2[PAGE_SIZE - 68];
 159
 160	/*
 161	 * Ring data starts here + RingDataStartOffset
 162	 * !!! DO NOT place any fields below this !!!
 163	 */
 164	u8 buffer[];
 165} __packed;
 166
  167/* Calculate the proper size of a ring buffer; it must be page-aligned */
 168#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
 169					       (payload_sz))
 170
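/*
 * Illustrative sketch (not part of the upstream header): sizing a ring
 * buffer with VMBUS_RING_SIZE(). The macro adds room for the shared
 * struct hv_ring_buffer header to the requested payload size and rounds
 * the total up to whole pages. The 16K payload below is an arbitrary
 * example value.
 */
static inline u32 example_ring_size(void)
{
	/* 16 KiB of usable ring data, plus the header, page-aligned. */
	return VMBUS_RING_SIZE(16 * 1024);
}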
 171struct hv_ring_buffer_info {
 172	struct hv_ring_buffer *ring_buffer;
  173	u32 ring_size;			/* Includes the shared header */
 174	struct reciprocal_value ring_size_div10_reciprocal;
 175	spinlock_t ring_lock;
 176
 177	u32 ring_datasize;		/* < ring_size */
 178	u32 priv_read_index;
 179	/*
 180	 * The ring buffer mutex lock. This lock prevents the ring buffer from
 181	 * being freed while the ring buffer is being accessed.
 182	 */
 183	struct mutex ring_buffer_mutex;
 184
 185	/* Buffer that holds a copy of an incoming host packet */
 186	void *pkt_buffer;
 187	u32 pkt_buffer_size;
 188};
 189
 190
  191static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
  192{
 193	u32 read_loc, write_loc, dsize, read;
 194
 195	dsize = rbi->ring_datasize;
 196	read_loc = rbi->ring_buffer->read_index;
 197	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
 198
 199	read = write_loc >= read_loc ? (write_loc - read_loc) :
 200		(dsize - read_loc) + write_loc;
 201
 202	return read;
 203}
 204
 205static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
 206{
 207	u32 read_loc, write_loc, dsize, write;
 208
 209	dsize = rbi->ring_datasize;
 210	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
  211	write_loc = rbi->ring_buffer->write_index;
  212
 213	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
 214		read_loc - write_loc;
 215	return write;
 216}
 217
 218static inline u32 hv_get_avail_to_write_percent(
 219		const struct hv_ring_buffer_info *rbi)
 220{
 221	u32 avail_write = hv_get_bytes_to_write(rbi);
 222
 223	return reciprocal_divide(
 224			(avail_write  << 3) + (avail_write << 1),
 225			rbi->ring_size_div10_reciprocal);
 226}
 227
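/*
 * Illustrative note (not part of the upstream header): in
 * hv_get_avail_to_write_percent() above, (x << 3) + (x << 1) computes
 * x * 10, and the cached reciprocal divides by ring_size / 10, so the
 * result is (avail * 10) / (ring_size / 10) == avail * 100 / ring_size,
 * i.e. the free space as a percentage without a runtime division. A
 * plain equivalent, for exposition only:
 */
static inline u32 example_avail_to_write_percent_slow(
		const struct hv_ring_buffer_info *rbi)
{
	/* Essentially the same percentage (up to integer rounding). */
	return hv_get_bytes_to_write(rbi) * 100 / rbi->ring_size;
}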
 228/*
 229 * VMBUS version is 32 bit entity broken up into
 230 * two 16 bit quantities: major_number. minor_number.
 231 *
 232 * 0 . 13 (Windows Server 2008)
 233 * 1 . 1  (Windows 7)
 234 * 2 . 4  (Windows 8)
 235 * 3 . 0  (Windows 8 R2)
 236 * 4 . 0  (Windows 10)
 237 * 4 . 1  (Windows 10 RS3)
 238 * 5 . 0  (Newer Windows 10)
 239 * 5 . 1  (Windows 10 RS4)
 240 * 5 . 2  (Windows Server 2019, RS5)
 241 * 5 . 3  (Windows Server 2022)
 242 */
 243
 244#define VERSION_WS2008  ((0 << 16) | (13))
 245#define VERSION_WIN7    ((1 << 16) | (1))
 246#define VERSION_WIN8    ((2 << 16) | (4))
 247#define VERSION_WIN8_1    ((3 << 16) | (0))
 248#define VERSION_WIN10 ((4 << 16) | (0))
 249#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
 250#define VERSION_WIN10_V5 ((5 << 16) | (0))
 251#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
 252#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
 253#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
 254
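/*
 * Illustrative sketch (not part of the upstream header): the VERSION_*
 * values pack major/minor as (major << 16) | minor, so they can be
 * decoded as follows. For example, VERSION_WIN10_V5_2 yields major 5,
 * minor 2.
 */
static inline u16 example_vmbus_major(u32 version)
{
	return version >> 16;
}

static inline u16 example_vmbus_minor(u32 version)
{
	return version & 0xffff;
}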
  255/* The maximum size of a pipe payload is 16K */
 256#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)
 257
 258/* Define PipeMode values. */
 259#define VMBUS_PIPE_TYPE_BYTE		0x00000000
 260#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004
 261
 262/* The size of the user defined data buffer for non-pipe offers. */
 263#define MAX_USER_DEFINED_BYTES		120
 264
 265/* The size of the user defined data buffer for pipe offers. */
 266#define MAX_PIPE_USER_DEFINED_BYTES	116
 267
 268/*
 269 * At the center of the Channel Management library is the Channel Offer. This
 270 * struct contains the fundamental information about an offer.
 271 */
 272struct vmbus_channel_offer {
 273	guid_t if_type;
 274	guid_t if_instance;
 275
 276	/*
 277	 * These two fields are not currently used.
 278	 */
 279	u64 reserved1;
 280	u64 reserved2;
 281
 282	u16 chn_flags;
 283	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */
 284
 285	union {
 286		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
 287		struct {
 288			unsigned char user_def[MAX_USER_DEFINED_BYTES];
 289		} std;
 290
 291		/*
 292		 * Pipes:
 293		 * The following structure is an integrated pipe protocol, which
 294		 * is implemented on top of standard user-defined data. Pipe
 295		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
 296		 * use.
 297		 */
 298		struct {
 299			u32  pipe_mode;
 300			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
 301		} pipe;
 302	} u;
 303	/*
 304	 * The sub_channel_index is defined in Win8: a value of zero means a
 305	 * primary channel and a value of non-zero means a sub-channel.
 306	 *
 307	 * Before Win8, the field is reserved, meaning it's always zero.
 308	 */
 309	u16 sub_channel_index;
 310	u16 reserved3;
 311} __packed;
 312
 313/* Server Flags */
 314#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
 315#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
 316#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
 317#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
 318#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
 319#define VMBUS_CHANNEL_PARENT_OFFER			0x200
 320#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
 321#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000
 322
 323struct vmpacket_descriptor {
 324	u16 type;
 325	u16 offset8;
 326	u16 len8;
 327	u16 flags;
 328	u64 trans_id;
 329} __packed;
 330
 331struct vmpacket_header {
 332	u32 prev_pkt_start_offset;
 333	struct vmpacket_descriptor descriptor;
 334} __packed;
 335
 336struct vmtransfer_page_range {
 337	u32 byte_count;
 338	u32 byte_offset;
 339} __packed;
 340
 341struct vmtransfer_page_packet_header {
 342	struct vmpacket_descriptor d;
 343	u16 xfer_pageset_id;
 344	u8  sender_owns_set;
 345	u8 reserved;
 346	u32 range_cnt;
 347	struct vmtransfer_page_range ranges[1];
 348} __packed;
 349
 350struct vmgpadl_packet_header {
 351	struct vmpacket_descriptor d;
 352	u32 gpadl;
 353	u32 reserved;
 354} __packed;
 355
 356struct vmadd_remove_transfer_page_set {
 357	struct vmpacket_descriptor d;
 358	u32 gpadl;
 359	u16 xfer_pageset_id;
 360	u16 reserved;
 361} __packed;
 362
 363/*
 364 * This structure defines a range in guest physical space that can be made to
 365 * look virtually contiguous.
 366 */
 367struct gpa_range {
 368	u32 byte_count;
 369	u32 byte_offset;
 370	u64 pfn_array[];
 371};
 372
 373/*
 374 * This is the format for an Establish Gpadl packet, which contains a handle by
 375 * which this GPADL will be known and a set of GPA ranges associated with it.
  376 * This can be converted to an MDL by the guest OS.  If there are multiple GPA
 377 * ranges, then the resulting MDL will be "chained," representing multiple VA
 378 * ranges.
 379 */
 380struct vmestablish_gpadl {
 381	struct vmpacket_descriptor d;
 382	u32 gpadl;
 383	u32 range_cnt;
 384	struct gpa_range range[1];
 385} __packed;
 386
 387/*
 388 * This is the format for a Teardown Gpadl packet, which indicates that the
 389 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 390 */
 391struct vmteardown_gpadl {
 392	struct vmpacket_descriptor d;
 393	u32 gpadl;
  394	u32 reserved;	/* for alignment to an 8-byte boundary */
 395} __packed;
 396
 397/*
 398 * This is the format for a GPA-Direct packet, which contains a set of GPA
 399 * ranges, in addition to commands and/or data.
 400 */
 401struct vmdata_gpa_direct {
 402	struct vmpacket_descriptor d;
 403	u32 reserved;
 404	u32 range_cnt;
 405	struct gpa_range range[1];
 406} __packed;
 407
  408/* This is the format for an Additional Data Packet. */
 409struct vmadditional_data {
 410	struct vmpacket_descriptor d;
 411	u64 total_bytes;
 412	u32 offset;
 413	u32 byte_cnt;
 414	unsigned char data[1];
 415} __packed;
 416
 417union vmpacket_largest_possible_header {
 418	struct vmpacket_descriptor simple_hdr;
 419	struct vmtransfer_page_packet_header xfer_page_hdr;
 420	struct vmgpadl_packet_header gpadl_hdr;
 421	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
 422	struct vmestablish_gpadl establish_gpadl_hdr;
 423	struct vmteardown_gpadl teardown_gpadl_hdr;
 424	struct vmdata_gpa_direct data_gpa_direct_hdr;
 425};
 426
  427#define VMPACKET_DATA_START_ADDRESS(__packet)	\
  428	(void *)(((unsigned char *)__packet) +	\
  429	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)
  430
  431#define VMPACKET_DATA_LENGTH(__packet)		\
  432	((((struct vmpacket_descriptor *)__packet)->len8 -	\
  433	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)
  434
  435#define VMPACKET_TRANSFER_MODE(__packet)	\
  436	(((struct vmpacket_descriptor *)__packet)->type)
 437
 438enum vmbus_packet_type {
 439	VM_PKT_INVALID				= 0x0,
 440	VM_PKT_SYNCH				= 0x1,
 441	VM_PKT_ADD_XFER_PAGESET			= 0x2,
 442	VM_PKT_RM_XFER_PAGESET			= 0x3,
 443	VM_PKT_ESTABLISH_GPADL			= 0x4,
 444	VM_PKT_TEARDOWN_GPADL			= 0x5,
 445	VM_PKT_DATA_INBAND			= 0x6,
 446	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
 447	VM_PKT_DATA_USING_GPADL			= 0x8,
 448	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
 449	VM_PKT_CANCEL_REQUEST			= 0xa,
 450	VM_PKT_COMP				= 0xb,
 451	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
 452	VM_PKT_ADDITIONAL_DATA			= 0xd
 453};
 454
 455#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1
 456
 457
 458/* Version 1 messages */
 459enum vmbus_channel_message_type {
 460	CHANNELMSG_INVALID			=  0,
 461	CHANNELMSG_OFFERCHANNEL		=  1,
 462	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
 463	CHANNELMSG_REQUESTOFFERS		=  3,
 464	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
 465	CHANNELMSG_OPENCHANNEL		=  5,
 466	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
 467	CHANNELMSG_CLOSECHANNEL		=  7,
 468	CHANNELMSG_GPADL_HEADER		=  8,
 469	CHANNELMSG_GPADL_BODY			=  9,
 470	CHANNELMSG_GPADL_CREATED		= 10,
 471	CHANNELMSG_GPADL_TEARDOWN		= 11,
 472	CHANNELMSG_GPADL_TORNDOWN		= 12,
 473	CHANNELMSG_RELID_RELEASED		= 13,
 474	CHANNELMSG_INITIATE_CONTACT		= 14,
 475	CHANNELMSG_VERSION_RESPONSE		= 15,
 476	CHANNELMSG_UNLOAD			= 16,
 477	CHANNELMSG_UNLOAD_RESPONSE		= 17,
 478	CHANNELMSG_18				= 18,
 479	CHANNELMSG_19				= 19,
 480	CHANNELMSG_20				= 20,
 481	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
 482	CHANNELMSG_MODIFYCHANNEL		= 22,
 483	CHANNELMSG_TL_CONNECT_RESULT		= 23,
 484	CHANNELMSG_MODIFYCHANNEL_RESPONSE	= 24,
 485	CHANNELMSG_COUNT
 486};
 487
 488/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
 489#define INVALID_RELID	U32_MAX
 490
 491struct vmbus_channel_message_header {
 492	enum vmbus_channel_message_type msgtype;
 493	u32 padding;
 494} __packed;
 495
 496/* Query VMBus Version parameters */
 497struct vmbus_channel_query_vmbus_version {
 498	struct vmbus_channel_message_header header;
 499	u32 version;
 500} __packed;
 501
 502/* VMBus Version Supported parameters */
 503struct vmbus_channel_version_supported {
 504	struct vmbus_channel_message_header header;
 505	u8 version_supported;
 506} __packed;
 507
 508/* Offer Channel parameters */
 509struct vmbus_channel_offer_channel {
 510	struct vmbus_channel_message_header header;
 511	struct vmbus_channel_offer offer;
 512	u32 child_relid;
 513	u8 monitorid;
 514	/*
 515	 * win7 and beyond splits this field into a bit field.
 516	 */
 517	u8 monitor_allocated:1;
 518	u8 reserved:7;
 519	/*
 520	 * These are new fields added in win7 and later.
 521	 * Do not access these fields without checking the
 522	 * negotiated protocol.
 523	 *
 524	 * If "is_dedicated_interrupt" is set, we must not set the
 525	 * associated bit in the channel bitmap while sending the
 526	 * interrupt to the host.
 527	 *
 528	 * connection_id is to be used in signaling the host.
 529	 */
 530	u16 is_dedicated_interrupt:1;
 531	u16 reserved1:15;
 532	u32 connection_id;
 533} __packed;
 534
 535/* Rescind Offer parameters */
 536struct vmbus_channel_rescind_offer {
 537	struct vmbus_channel_message_header header;
 538	u32 child_relid;
 539} __packed;
 540
 541static inline u32
 542hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
 543{
 544	return rbi->ring_buffer->pending_send_sz;
 545}
 546
 547/*
 548 * Request Offer -- no parameters, SynIC message contains the partition ID
 549 * Set Snoop -- no parameters, SynIC message contains the partition ID
 550 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 551 * All Offers Delivered -- no parameters, SynIC message contains the partition
 552 *		           ID
 553 * Flush Client -- no parameters, SynIC message contains the partition ID
 554 */
 555
 556/* Open Channel parameters */
 557struct vmbus_channel_open_channel {
 558	struct vmbus_channel_message_header header;
 559
 560	/* Identifies the specific VMBus channel that is being opened. */
 561	u32 child_relid;
 562
 563	/* ID making a particular open request at a channel offer unique. */
 564	u32 openid;
 565
 566	/* GPADL for the channel's ring buffer. */
 567	u32 ringbuffer_gpadlhandle;
 568
 569	/*
 570	 * Starting with win8, this field will be used to specify
 571	 * the target virtual processor on which to deliver the interrupt for
 572	 * the host to guest communication.
 573	 * Prior to win8, incoming channel interrupts would only
 574	 * be delivered on cpu 0. Setting this value to 0 would
 575	 * preserve the earlier behavior.
 576	 */
 577	u32 target_vp;
 578
 579	/*
 580	 * The upstream ring buffer begins at offset zero in the memory
 581	 * described by RingBufferGpadlHandle. The downstream ring buffer
 582	 * follows it at this offset (in pages).
 583	 */
 584	u32 downstream_ringbuffer_pageoffset;
 585
 586	/* User-specific data to be passed along to the server endpoint. */
 587	unsigned char userdata[MAX_USER_DEFINED_BYTES];
 588} __packed;
 589
 590/* Open Channel Result parameters */
 591struct vmbus_channel_open_result {
 592	struct vmbus_channel_message_header header;
 593	u32 child_relid;
 594	u32 openid;
 595	u32 status;
 596} __packed;
 597
 598/* Modify Channel Result parameters */
 599struct vmbus_channel_modifychannel_response {
 600	struct vmbus_channel_message_header header;
 601	u32 child_relid;
 602	u32 status;
 603} __packed;
 604
  605/* Close channel parameters */
 606struct vmbus_channel_close_channel {
 607	struct vmbus_channel_message_header header;
 608	u32 child_relid;
 609} __packed;
 610
 611/* Channel Message GPADL */
 612#define GPADL_TYPE_RING_BUFFER		1
 613#define GPADL_TYPE_SERVER_SAVE_AREA	2
 614#define GPADL_TYPE_TRANSACTION		8
 615
 616/*
 617 * The number of PFNs in a GPADL message is defined by the number of
 618 * pages that would be spanned by ByteCount and ByteOffset.  If the
 619 * implied number of PFNs won't fit in this packet, there will be a
 620 * follow-up packet that contains more.
 621 */
 622struct vmbus_channel_gpadl_header {
 623	struct vmbus_channel_message_header header;
 624	u32 child_relid;
 625	u32 gpadl;
 626	u16 range_buflen;
 627	u16 rangecount;
 628	struct gpa_range range[];
 629} __packed;
 630
 631/* This is the followup packet that contains more PFNs. */
 632struct vmbus_channel_gpadl_body {
 633	struct vmbus_channel_message_header header;
 634	u32 msgnumber;
 635	u32 gpadl;
 636	u64 pfn[];
 637} __packed;
 638
 639struct vmbus_channel_gpadl_created {
 640	struct vmbus_channel_message_header header;
 641	u32 child_relid;
 642	u32 gpadl;
 643	u32 creation_status;
 644} __packed;
 645
 646struct vmbus_channel_gpadl_teardown {
 647	struct vmbus_channel_message_header header;
 648	u32 child_relid;
 649	u32 gpadl;
 650} __packed;
 651
 652struct vmbus_channel_gpadl_torndown {
 653	struct vmbus_channel_message_header header;
 654	u32 gpadl;
 655} __packed;
 656
 657struct vmbus_channel_relid_released {
 658	struct vmbus_channel_message_header header;
 659	u32 child_relid;
 660} __packed;
 661
 662struct vmbus_channel_initiate_contact {
 663	struct vmbus_channel_message_header header;
 664	u32 vmbus_version_requested;
 665	u32 target_vcpu; /* The VCPU the host should respond to */
 666	union {
 667		u64 interrupt_page;
 668		struct {
 669			u8	msg_sint;
 670			u8	padding1[3];
 671			u32	padding2;
 672		};
 673	};
 674	u64 monitor_page1;
 675	u64 monitor_page2;
 676} __packed;
 677
 678/* Hyper-V socket: guest's connect()-ing to host */
 679struct vmbus_channel_tl_connect_request {
 680	struct vmbus_channel_message_header header;
 681	guid_t guest_endpoint_id;
 682	guid_t host_service_id;
 683} __packed;
 684
 685/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
 686struct vmbus_channel_modifychannel {
 687	struct vmbus_channel_message_header header;
 688	u32 child_relid;
 689	u32 target_vp;
 690} __packed;
 691
 692struct vmbus_channel_version_response {
 693	struct vmbus_channel_message_header header;
 694	u8 version_supported;
 695
 696	u8 connection_state;
 697	u16 padding;
 698
 699	/*
 700	 * On new hosts that support VMBus protocol 5.0, we must use
 701	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
 702	 * and for subsequent messages, we must use the Message Connection ID
 703	 * field in the host-returned Version Response Message.
 704	 *
 705	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
 706	 */
 707	u32 msg_conn_id;
 708} __packed;
 709
 710enum vmbus_channel_state {
 711	CHANNEL_OFFER_STATE,
 712	CHANNEL_OPENING_STATE,
 713	CHANNEL_OPEN_STATE,
 714	CHANNEL_OPENED_STATE,
 715};
 716
 717/*
  718 * Represents each channel msg on the vmbus connection. This is a
  719 * variable-size data structure depending on the msg type itself.
 720 */
 721struct vmbus_channel_msginfo {
 722	/* Bookkeeping stuff */
 723	struct list_head msglistentry;
 724
 725	/* So far, this is only used to handle gpadl body message */
 726	struct list_head submsglist;
 727
 728	/* Synchronize the request/response if needed */
 729	struct completion  waitevent;
 730	struct vmbus_channel *waiting_channel;
 731	union {
 732		struct vmbus_channel_version_supported version_supported;
 733		struct vmbus_channel_open_result open_result;
 734		struct vmbus_channel_gpadl_torndown gpadl_torndown;
 735		struct vmbus_channel_gpadl_created gpadl_created;
 736		struct vmbus_channel_version_response version_response;
 737		struct vmbus_channel_modifychannel_response modify_response;
 738	} response;
 739
 740	u32 msgsize;
 741	/*
 742	 * The channel message that goes out on the "wire".
 743	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
 744	 */
 745	unsigned char msg[];
 746};
 747
 748struct vmbus_close_msg {
 749	struct vmbus_channel_msginfo info;
 750	struct vmbus_channel_close_channel msg;
 751};
 752
 753/* Define connection identifier type. */
 754union hv_connection_id {
 755	u32 asu32;
 756	struct {
 757		u32 id:24;
 758		u32 reserved:8;
 759	} u;
 760};
  761
  762enum vmbus_device_type {
 763	HV_IDE = 0,
 764	HV_SCSI,
 765	HV_FC,
 766	HV_NIC,
 767	HV_ND,
 768	HV_PCIE,
 769	HV_FB,
 770	HV_KBD,
 771	HV_MOUSE,
 772	HV_KVP,
 773	HV_TS,
 774	HV_HB,
 775	HV_SHUTDOWN,
 776	HV_FCOPY,
 777	HV_BACKUP,
 778	HV_DM,
 779	HV_UNKNOWN,
 780};
 781
 782/*
 783 * Provides request ids for VMBus. Encapsulates guest memory
 784 * addresses and stores the next available slot in req_arr
 785 * to generate new ids in constant time.
 786 */
 787struct vmbus_requestor {
 788	u64 *req_arr;
 789	unsigned long *req_bitmap; /* is a given slot available? */
 790	u32 size;
 791	u64 next_request_id;
 792	spinlock_t req_lock; /* provides atomicity */
 793};
 794
 795#define VMBUS_NO_RQSTOR U64_MAX
 796#define VMBUS_RQST_ERROR (U64_MAX - 1)
 797/* NetVSC-specific */
 798#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
 799/* StorVSC-specific */
 800#define VMBUS_RQST_INIT (U64_MAX - 2)
 801#define VMBUS_RQST_RESET (U64_MAX - 3)
 802
 803struct vmbus_device {
 804	u16  dev_type;
 805	guid_t guid;
 806	bool perf_device;
 807	bool allowed_in_isolated;
 808};
 809
 810#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
 811
  812struct vmbus_channel {
  813	struct list_head listentry;
 814
 815	struct hv_device *device_obj;
 816
 817	enum vmbus_channel_state state;
 818
 819	struct vmbus_channel_offer_channel offermsg;
 820	/*
 821	 * These are based on the OfferMsg.MonitorId.
 822	 * Save it here for easy access.
 823	 */
 824	u8 monitor_grp;
 825	u8 monitor_bit;
 826
 827	bool rescind; /* got rescind msg */
 828	bool rescind_ref; /* got rescind msg, got channel reference */
 829	struct completion rescind_event;
 830
 831	u32 ringbuffer_gpadlhandle;
 832
 833	/* Allocated memory for ring buffer */
 834	struct page *ringbuffer_page;
 835	u32 ringbuffer_pagecount;
 836	u32 ringbuffer_send_offset;
 837	struct hv_ring_buffer_info outbound;	/* send to parent */
  838	struct hv_ring_buffer_info inbound;	/* receive from parent */
  839
 840	struct vmbus_close_msg close_msg;
 841
 842	/* Statistics */
 843	u64	interrupts;	/* Host to Guest interrupts */
 844	u64	sig_events;	/* Guest to Host events */
 845
 846	/*
 847	 * Guest to host interrupts caused by the outbound ring buffer changing
 848	 * from empty to not empty.
 849	 */
 850	u64 intr_out_empty;
 851
 852	/*
 853	 * Indicates that a full outbound ring buffer was encountered. The flag
 854	 * is set to true when a full outbound ring buffer is encountered and
 855	 * set to false when a write to the outbound ring buffer is completed.
 856	 */
 857	bool out_full_flag;
 858
  859	/* Channel callbacks are invoked in softirq context */
 860	struct tasklet_struct callback_event;
 861	void (*onchannel_callback)(void *context);
 862	void *channel_callback_context;
 863
 864	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
 865			u32 old, u32 new);
 866
 867	/*
 868	 * Synchronize channel scheduling and channel removal; see the inline
  869	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
  870	 */
 871	spinlock_t sched_lock;
 872
 873	/*
 874	 * A channel can be marked for one of three modes of reading:
  875	 *   BATCHED - callback called from tasklet and should read
  876	 *            channel until empty. Interrupts from the host
  877	 *            are masked while read is in progress (default).
 878	 *   DIRECT - callback called from tasklet (softirq).
 879	 *   ISR - callback called in interrupt context and must
 880	 *         invoke its own deferred processing.
 881	 *         Host interrupts are disabled and must be re-enabled
 882	 *         when ring is empty.
 883	 */
 884	enum hv_callback_mode {
 885		HV_CALL_BATCHED,
 886		HV_CALL_DIRECT,
 887		HV_CALL_ISR
 888	} callback_mode;
 889
 890	bool is_dedicated_interrupt;
  891	u64 sig_event;
  892
 893	/*
 894	 * Starting with win8, this field will be used to specify the
 895	 * target CPU on which to deliver the interrupt for the host
 896	 * to guest communication.
 897	 *
 898	 * Prior to win8, incoming channel interrupts would only be
 899	 * delivered on CPU 0. Setting this value to 0 would preserve
 900	 * the earlier behavior.
  901	 */
  902	u32 target_cpu;
  903	/*
  904	 * Support for sub-channels. For high performance devices,
 905	 * it will be useful to have multiple sub-channels to support
 906	 * a scalable communication infrastructure with the host.
 907	 * The support for sub-channels is implemented as an extension
 908	 * to the current infrastructure.
 909	 * The initial offer is considered the primary channel and this
 910	 * offer message will indicate if the host supports sub-channels.
 911	 * The guest is free to ask for sub-channels to be offered and can
 912	 * open these sub-channels as a normal "primary" channel. However,
 913	 * all sub-channels will have the same type and instance guids as the
 914	 * primary channel. Requests sent on a given channel will result in a
 915	 * response on the same channel.
 916	 */
 917
 918	/*
 919	 * Sub-channel creation callback. This callback will be called in
 920	 * process context when a sub-channel offer is received from the host.
 921	 * The guest can open the sub-channel in the context of this callback.
 922	 */
 923	void (*sc_creation_callback)(struct vmbus_channel *new_sc);
 924
 925	/*
  926	 * Channel rescind callback. Some channels (the hvsock ones) need to
 927	 * register a callback which is invoked in vmbus_onoffer_rescind().
 928	 */
 929	void (*chn_rescind_callback)(struct vmbus_channel *channel);
 930
  931	/*
  932	 * All Sub-channels of a primary channel are linked here.
 933	 */
 934	struct list_head sc_list;
  935	/*
  936	 * The primary channel this sub-channel belongs to.
 937	 * This will be NULL for the primary channel.
 938	 */
 939	struct vmbus_channel *primary_channel;
 940	/*
 941	 * Support per-channel state for use by vmbus drivers.
 942	 */
 943	void *per_channel_state;
 944
 945	/*
 946	 * Defer freeing channel until after all cpu's have
 947	 * gone through grace period.
 948	 */
 949	struct rcu_head rcu;
 950
 951	/*
 952	 * For sysfs per-channel properties.
 953	 */
 954	struct kobject			kobj;
 955
 956	/*
 957	 * For performance critical channels (storage, networking
  958	 * etc.), Hyper-V has a mechanism to enhance the throughput
 959	 * at the expense of latency:
 960	 * When the host is to be signaled, we just set a bit in a shared page
 961	 * and this bit will be inspected by the hypervisor within a certain
 962	 * window and if the bit is set, the host will be signaled. The window
 963	 * of time is the monitor latency - currently around 100 usecs. This
 964	 * mechanism improves throughput by:
 965	 *
 966	 * A) Making the host more efficient - each time it wakes up,
  967	 *    it can potentially process a larger number of packets. The
 968	 *    monitor latency allows a batch to build up.
 969	 * B) By deferring the hypercall to signal, we will also minimize
 970	 *    the interrupts.
 971	 *
 972	 * Clearly, these optimizations improve throughput at the expense of
 973	 * latency. Furthermore, since the channel is shared for both
 974	 * control and data messages, control messages currently suffer
 975	 * unnecessary latency adversely impacting performance and boot
 976	 * time. To fix this issue, permit tagging the channel as being
 977	 * in "low latency" mode. In this mode, we will bypass the monitor
 978	 * mechanism.
 979	 */
 980	bool low_latency;
 981
 982	bool probe_done;
 983
 984	/*
 985	 * Cache the device ID here for easy access; this is useful, in
 986	 * particular, in situations where the channel's device_obj has
 987	 * not been allocated/initialized yet.
 988	 */
 989	u16 device_id;
 990
 991	/*
 992	 * We must offload the handling of the primary/sub channels
 993	 * from the single-threaded vmbus_connection.work_queue to
  994	 * two different workqueues, otherwise we can block
 995	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
 996	 */
 997	struct work_struct add_channel_work;
 998
 999	/*
1000	 * Guest to host interrupts caused by the inbound ring buffer changing
1001	 * from full to not full while a packet is waiting.
1002	 */
1003	u64 intr_in_full;
1004
1005	/*
1006	 * The total number of write operations that encountered a full
 1007	 * outbound ring buffer.
 1008	 */
1009	u64 out_full_total;
1010
1011	/*
1012	 * The number of write operations that were the first to encounter a
1013	 * full outbound ring buffer.
1014	 */
1015	u64 out_full_first;
1016
 1017	/* enabling/disabling fuzz testing on the channel (default is false) */
1018	bool fuzz_testing_state;
1019
 1020	/*
 1021	 * Interrupt delay will delay the guest from emptying the ring buffer
 1022	 * for a specific amount of time. The delay is in microseconds and will
 1023	 * be between 1 and a maximum of 1000; the default is 0 (no delay).
 1024	 * The message delay will delay guest reading on a per-message basis
 1025	 * in microseconds, between 1 and 1000, with the default being 0
 1026	 * (no delay).
 1027	 */
1028	u32 fuzz_testing_interrupt_delay;
1029	u32 fuzz_testing_message_delay;
1030
1031	/* callback to generate a request ID from a request address */
1032	u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
1033	/* callback to retrieve a request address from a request ID */
1034	u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);
1035
1036	/* request/transaction ids for VMBus */
1037	struct vmbus_requestor requestor;
1038	u32 rqstor_size;
1039
1040	/* The max size of a packet on this channel */
1041	u32 max_pkt_size;
1042};
1043
1044u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
 1045u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
1046
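/*
 * Illustrative sketch (not part of the upstream header): wiring the
 * generic requestor helpers declared above into a channel before it is
 * opened. The slot count of 64 is an arbitrary example; a real driver
 * sizes it to its maximum number of outstanding requests.
 */
static inline void example_setup_requestor(struct vmbus_channel *channel)
{
	channel->next_request_id_callback = vmbus_next_request_id;
	channel->request_addr_callback = vmbus_request_addr;
	channel->rqstor_size = 64;		/* example queue depth */
	channel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;
}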
1047static inline bool is_hvsock_channel(const struct vmbus_channel *c)
1048{
1049	return !!(c->offermsg.offer.chn_flags &
1050		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
1051}
1052
 1053static inline bool is_sub_channel(const struct vmbus_channel *c)
 1054{
1055	return c->offermsg.offer.sub_channel_index != 0;
1056}
1057
1058static inline void set_channel_read_mode(struct vmbus_channel *c,
1059					enum hv_callback_mode mode)
1060{
1061	c->callback_mode = mode;
1062}
1063
1064static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
1065{
1066	c->per_channel_state = s;
1067}
1068
1069static inline void *get_per_channel_state(struct vmbus_channel *c)
1070{
1071	return c->per_channel_state;
1072}
1073
1074static inline void set_channel_pending_send_size(struct vmbus_channel *c,
1075						 u32 size)
1076{
1077	unsigned long flags;
1078
1079	if (size) {
1080		spin_lock_irqsave(&c->outbound.ring_lock, flags);
1081		++c->out_full_total;
1082
1083		if (!c->out_full_flag) {
1084			++c->out_full_first;
1085			c->out_full_flag = true;
1086		}
1087		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
1088	} else {
1089		c->out_full_flag = false;
1090	}
1091
1092	c->outbound.ring_buffer->pending_send_sz = size;
1093}
1094
1095static inline void set_low_latency_mode(struct vmbus_channel *c)
1096{
1097	c->low_latency = true;
1098}
1099
1100static inline void clear_low_latency_mode(struct vmbus_channel *c)
1101{
1102	c->low_latency = false;
1103}
1104
1105void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
1106
1107int vmbus_request_offers(void);
1108
1109/*
1110 * APIs for managing sub-channels.
1111 */
1112
1113void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
1114			void (*sc_cr_cb)(struct vmbus_channel *new_sc));
1115
1116void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
1117		void (*chn_rescind_cb)(struct vmbus_channel *));
1118
 1119/*
 1120 * Check if sub-channels have already been offered. This API will be useful
 1121 * when the driver is unloaded after establishing sub-channels. In this case,
 1122 * when the driver is re-loaded, the driver would have to check if the
 1123 * sub-channels have already been established before attempting to request
 1124 * the creation of sub-channels.
 1125 * This function returns TRUE to indicate that sub-channels have already been
 1126 * created.
 1127 * This function should be invoked after setting the callback function for
 1128 * sub-channel creation.
 1129 */
1130bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
1131
1132/* The format must be the same as struct vmdata_gpa_direct */
1133struct vmbus_channel_packet_page_buffer {
1134	u16 type;
1135	u16 dataoffset8;
1136	u16 length8;
1137	u16 flags;
1138	u64 transactionid;
1139	u32 reserved;
1140	u32 rangecount;
1141	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
1142} __packed;
1143
1144/* The format must be the same as struct vmdata_gpa_direct */
1145struct vmbus_channel_packet_multipage_buffer {
1146	u16 type;
1147	u16 dataoffset8;
1148	u16 length8;
1149	u16 flags;
1150	u64 transactionid;
1151	u32 reserved;
1152	u32 rangecount;		/* Always 1 in this case */
1153	struct hv_multipage_buffer range;
1154} __packed;
1155
1156/* The format must be the same as struct vmdata_gpa_direct */
1157struct vmbus_packet_mpb_array {
1158	u16 type;
1159	u16 dataoffset8;
1160	u16 length8;
1161	u16 flags;
1162	u64 transactionid;
1163	u32 reserved;
1164	u32 rangecount;         /* Always 1 in this case */
1165	struct hv_mpb_array range;
1166} __packed;
1167
1168int vmbus_alloc_ring(struct vmbus_channel *channel,
1169		     u32 send_size, u32 recv_size);
1170void vmbus_free_ring(struct vmbus_channel *channel);
1171
1172int vmbus_connect_ring(struct vmbus_channel *channel,
1173		       void (*onchannel_callback)(void *context),
1174		       void *context);
1175int vmbus_disconnect_ring(struct vmbus_channel *channel);
1176
1177extern int vmbus_open(struct vmbus_channel *channel,
1178			    u32 send_ringbuffersize,
1179			    u32 recv_ringbuffersize,
1180			    void *userdata,
1181			    u32 userdatalen,
1182			    void (*onchannel_callback)(void *context),
1183			    void *context);
1184
1185extern void vmbus_close(struct vmbus_channel *channel);
1186
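/*
 * Illustrative sketch (not part of the upstream header): a typical
 * vmbus_open() call. The 16K ring payloads and empty userdata are
 * example values; "cb" and "ctx" stand in for the driver's channel
 * callback and its context.
 */
static inline int example_open_channel(struct vmbus_channel *channel,
				       void (*cb)(void *context), void *ctx)
{
	u32 ring_size = VMBUS_RING_SIZE(16 * 1024);

	/* Same size for the send and receive rings, no user data. */
	return vmbus_open(channel, ring_size, ring_size, NULL, 0, cb, ctx);
}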
1187extern int vmbus_sendpacket(struct vmbus_channel *channel,
1188				  void *buffer,
1189				  u32 bufferLen,
1190				  u64 requestid,
1191				  enum vmbus_packet_type type,
1192				  u32 flags);
 1193
 1194extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1195					    struct hv_page_buffer pagebuffers[],
1196					    u32 pagecount,
1197					    void *buffer,
1198					    u32 bufferlen,
1199					    u64 requestid);
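/*
 * Illustrative sketch (not part of the upstream header): describing a
 * single buffer as an hv_page_buffer and sending it GPA-direct. The
 * buffer is assumed to lie within one HV_HYP_PAGE; "hdr"/"hdrlen" stand
 * in for a protocol-specific in-band header.
 */
static inline int example_send_one_page(struct vmbus_channel *channel,
					void *va, u32 len,
					void *hdr, u32 hdrlen, u64 rqst)
{
	struct hv_page_buffer pb;

	pb.pfn = __pa(va) >> HV_HYP_PAGE_SHIFT;		/* Hyper-V page frame */
	pb.offset = (unsigned long)va & ~HV_HYP_PAGE_MASK;	/* offset in page */
	pb.len = len;

	return vmbus_sendpacket_pagebuffer(channel, &pb, 1, hdr, hdrlen, rqst);
}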
 1200
 1201extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
1202				     struct vmbus_packet_mpb_array *mpb,
1203				     u32 desc_size,
1204				     void *buffer,
1205				     u32 bufferlen,
1206				     u64 requestid);
1207
1208extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
1209				      void *kbuffer,
1210				      u32 size,
1211				      u32 *gpadl_handle);
1212
1213extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
1214				     u32 gpadl_handle);
1215
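/*
 * Illustrative sketch (not part of the upstream header): the GPADL
 * lifecycle. A kernel buffer is published to the host under a 32-bit
 * handle and later revoked with that same handle; here the two calls
 * are paired back to back purely for illustration.
 */
static inline int example_gpadl_roundtrip(struct vmbus_channel *channel,
					  void *kbuffer, u32 size)
{
	u32 handle = 0;
	int ret;

	ret = vmbus_establish_gpadl(channel, kbuffer, size, &handle);
	if (ret)
		return ret;

	return vmbus_teardown_gpadl(channel, handle);
}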
1216void vmbus_reset_channel_cb(struct vmbus_channel *channel);
1217
1218extern int vmbus_recvpacket(struct vmbus_channel *channel,
1219				  void *buffer,
1220				  u32 bufferlen,
1221				  u32 *buffer_actual_len,
1222				  u64 *requestid);
1223
1224extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1225				     void *buffer,
1226				     u32 bufferlen,
1227				     u32 *buffer_actual_len,
1228				     u64 *requestid);
1229
1230
1231extern void vmbus_ontimer(unsigned long data);
1232
1233/* Base driver object */
1234struct hv_driver {
1235	const char *name;
1236
1237	/*
1238	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
1239	 * channel flag, actually doesn't mean a synthetic device because the
1240	 * offer's if_type/if_instance can change for every new hvsock
1241	 * connection.
1242	 *
1243	 * However, to facilitate the notification of new-offer/rescind-offer
1244	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
1245	 * a special vmbus device, and hence we need the below flag to
1246	 * indicate if the driver is the hvsock driver or not: we need to
 1247	 * specially treat the hvsock offer & driver in vmbus_match().
1248	 */
1249	bool hvsock;
1250
1251	/* the device type supported by this driver */
1252	guid_t dev_type;
1253	const struct hv_vmbus_device_id *id_table;
1254
1255	struct device_driver driver;
1256
1257	/* dynamic device GUID's */
1258	struct  {
1259		spinlock_t lock;
1260		struct list_head list;
1261	} dynids;
1262
1263	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
1264	int (*remove)(struct hv_device *);
1265	void (*shutdown)(struct hv_device *);
1266
1267	int (*suspend)(struct hv_device *);
1268	int (*resume)(struct hv_device *);
1269
1270};
1271
1272/* Base device object */
1273struct hv_device {
1274	/* the device type id of this device */
1275	guid_t dev_type;
1276
1277	/* the device instance id of this device */
1278	guid_t dev_instance;
1279	u16 vendor_id;
1280	u16 device_id;
1281
1282	struct device device;
1283	char *driver_override; /* Driver name to force a match */
1284
1285	struct vmbus_channel *channel;
1286	struct kset	     *channels_kset;
1287
 1288	/* placeholder to keep track of the dir for hv device in debugfs */
1289	struct dentry *debug_dir;
1290
1291};
1292
1293
1294static inline struct hv_device *device_to_hv_device(struct device *d)
1295{
1296	return container_of(d, struct hv_device, device);
1297}
1298
1299static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
1300{
1301	return container_of(d, struct hv_driver, driver);
1302}
1303
1304static inline void hv_set_drvdata(struct hv_device *dev, void *data)
1305{
1306	dev_set_drvdata(&dev->device, data);
1307}
1308
1309static inline void *hv_get_drvdata(struct hv_device *dev)
1310{
1311	return dev_get_drvdata(&dev->device);
1312}
1313
1314struct hv_ring_buffer_debug_info {
1315	u32 current_interrupt_mask;
1316	u32 current_read_index;
1317	u32 current_write_index;
1318	u32 bytes_avail_toread;
1319	u32 bytes_avail_towrite;
1320};
1321
1322
1323int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
1324				struct hv_ring_buffer_debug_info *debug_info);
1325
1326/* Vmbus interface */
1327#define vmbus_driver_register(driver)	\
1328	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1329int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1330					 struct module *owner,
1331					 const char *mod_name);
1332void vmbus_driver_unregister(struct hv_driver *hv_driver);
1333
1334void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1335
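/*
 * Illustrative sketch (not part of the upstream header): the skeleton of
 * a VMBus driver registration. The GUID, names and empty callbacks are
 * hypothetical; a real driver matches one of the HV_*_GUID definitions
 * below and calls vmbus_driver_register() from its module_init().
 */
static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	return 0;	/* set up the device, typically vmbus_open() here */
}

static int example_remove(struct hv_device *dev)
{
	return 0;	/* tear down, typically vmbus_close() here */
}

static const struct hv_vmbus_device_id example_id_table[] = {
	/* Hypothetical interface type GUID, for illustration only. */
	{ .guid = GUID_INIT(0x00000000, 0x0000, 0x0000, 0x00, 0x00,
			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00) },
	{ },
};

static struct hv_driver example_drv = {
	.name		= "example_hv_drv",
	.id_table	= example_id_table,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* From the driver's module_init(): return vmbus_driver_register(&example_drv); */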
1336int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1337			resource_size_t min, resource_size_t max,
1338			resource_size_t size, resource_size_t align,
1339			bool fb_overlap_ok);
 1340void vmbus_free_mmio(resource_size_t start, resource_size_t size);
 1341
1342/*
1343 * GUID definitions of various offer types - services offered to the guest.
1344 */
1345
1346/*
1347 * Network GUID
1348 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
1349 */
1350#define HV_NIC_GUID \
1351	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1352			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1353
1354/*
1355 * IDE GUID
1356 * {32412632-86cb-44a2-9b5c-50d1417354f5}
1357 */
1358#define HV_IDE_GUID \
1359	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1360			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1361
1362/*
1363 * SCSI GUID
1364 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
1365 */
1366#define HV_SCSI_GUID \
1367	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1368			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1369
1370/*
1371 * Shutdown GUID
1372 * {0e0b6031-5213-4934-818b-38d90ced39db}
1373 */
1374#define HV_SHUTDOWN_GUID \
1375	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1376			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1377
1378/*
1379 * Time Synch GUID
1380 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
1381 */
1382#define HV_TS_GUID \
1383	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1384			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1385
1386/*
1387 * Heartbeat GUID
1388 * {57164f39-9115-4e78-ab55-382f3bd5422d}
1389 */
1390#define HV_HEART_BEAT_GUID \
1391	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1392			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1393
1394/*
1395 * KVP GUID
1396 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
1397 */
1398#define HV_KVP_GUID \
1399	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1400			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1401
1402/*
1403 * Dynamic memory GUID
1404 * {525074dc-8985-46e2-8057-a307dc18a502}
1405 */
1406#define HV_DM_GUID \
1407	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1408			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1409
1410/*
1411 * Mouse GUID
1412 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
1413 */
1414#define HV_MOUSE_GUID \
1415	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1416			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1417
1418/*
1419 * Keyboard GUID
1420 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
1421 */
1422#define HV_KBD_GUID \
1423	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1424			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1425
1426/*
1427 * VSS (Backup/Restore) GUID
1428 */
1429#define HV_VSS_GUID \
1430	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1431			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1432/*
1433 * Synthetic Video GUID
1434 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
1435 */
1436#define HV_SYNTHVID_GUID \
1437	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1438			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1439
1440/*
1441 * Synthetic FC GUID
1442 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
1443 */
1444#define HV_SYNTHFC_GUID \
1445	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1446			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1447
1448/*
1449 * Guest File Copy Service
1450 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
1451 */
1452
1453#define HV_FCOPY_GUID \
1454	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1455			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1456
1457/*
1458 * NetworkDirect. This is the guest RDMA service.
1459 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
1460 */
1461#define HV_ND_GUID \
1462	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1463			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1464
1465/*
1466 * PCI Express Pass Through
1467 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
1468 */
1469
1470#define HV_PCIE_GUID \
1471	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1472			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1473
1474/*
 1475 * Linux doesn't support these 3 devices: the first two are for
1476 * Automatic Virtual Machine Activation, and the third is for
1477 * Remote Desktop Virtualization.
1478 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
1479 * {3375baf4-9e15-4b30-b765-67acb10d607b}
1480 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
1481 */
1482
1483#define HV_AVMA1_GUID \
1484	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
1485			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
1486
1487#define HV_AVMA2_GUID \
1488	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
1489			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
1490
1491#define HV_RDV_GUID \
1492	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
1493			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1494
1495/*
1496 * Common header for Hyper-V ICs
1497 */
1498
1499#define ICMSGTYPE_NEGOTIATE		0
1500#define ICMSGTYPE_HEARTBEAT		1
1501#define ICMSGTYPE_KVPEXCHANGE		2
1502#define ICMSGTYPE_SHUTDOWN		3
1503#define ICMSGTYPE_TIMESYNC		4
1504#define ICMSGTYPE_VSS			5
1505#define ICMSGTYPE_FCOPY			7
1506
1507#define ICMSGHDRFLAG_TRANSACTION	1
1508#define ICMSGHDRFLAG_REQUEST		2
1509#define ICMSGHDRFLAG_RESPONSE		4
1510
1511
1512/*
1513 * While we want to handle util services as regular devices,
1514 * there is only one instance of each of these services; so
1515 * we statically allocate the service specific state.
1516 */
1517
1518struct hv_util_service {
1519	u8 *recv_buffer;
1520	void *channel;
1521	void (*util_cb)(void *);
1522	int (*util_init)(struct hv_util_service *);
1523	void (*util_deinit)(void);
1524	int (*util_pre_suspend)(void);
1525	int (*util_pre_resume)(void);
1526};
1527
1528struct vmbuspipe_hdr {
1529	u32 flags;
1530	u32 msgsize;
1531} __packed;
1532
1533struct ic_version {
1534	u16 major;
1535	u16 minor;
1536} __packed;
1537
1538struct icmsg_hdr {
1539	struct ic_version icverframe;
1540	u16 icmsgtype;
1541	struct ic_version icvermsg;
1542	u16 icmsgsize;
1543	u32 status;
1544	u8 ictransaction_id;
1545	u8 icflags;
1546	u8 reserved[2];
1547} __packed;
1548
1549#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
1550#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
1551#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
1552	(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
1553	 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
1554
1555struct icmsg_negotiate {
1556	u16 icframe_vercnt;
1557	u16 icmsg_vercnt;
1558	u32 reserved;
1559	struct ic_version icversion_data[]; /* any size array */
1560} __packed;
1561
1562struct shutdown_msg_data {
1563	u32 reason_code;
1564	u32 timeout_seconds;
1565	u32 flags;
1566	u8  display_message[2048];
1567} __packed;
1568
1569struct heartbeat_msg_data {
1570	u64 seq_num;
1571	u32 reserved[8];
1572} __packed;
1573
1574/* Time Sync IC defs */
1575#define ICTIMESYNCFLAG_PROBE	0
1576#define ICTIMESYNCFLAG_SYNC	1
1577#define ICTIMESYNCFLAG_SAMPLE	2
1578
1579#ifdef __x86_64__
1580#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
1581#else
1582#define WLTIMEDELTA	116444736000000000LL
1583#endif
1584
1585struct ictimesync_data {
1586	u64 parenttime;
1587	u64 childtime;
1588	u64 roundtriptime;
1589	u8 flags;
1590} __packed;
1591
1592struct ictimesync_ref_data {
1593	u64 parenttime;
1594	u64 vmreferencetime;
1595	u8 flags;
1596	char leapflags;
1597	char stratum;
1598	u8 reserved[3];
1599} __packed;
1600
1601struct hyperv_service_callback {
1602	u8 msg_type;
1603	char *log_msg;
1604	guid_t data;
1605	struct vmbus_channel *channel;
1606	void (*callback)(void *context);
1607};
1608
1609#define MAX_SRV_VER	0x7ffffff
1610extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
1611				const int *fw_version, int fw_vercnt,
1612				const int *srv_version, int srv_vercnt,
1613				int *nego_fw_version, int *nego_srv_version);
1614
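/*
 * Illustrative sketch (not part of the upstream header): responding to
 * an ICMSGTYPE_NEGOTIATE request from the host. The framework 3.0 and
 * service 1.0 version lists are hypothetical single-entry examples;
 * "buf" is the channel's receive buffer positioned at the pipe header.
 */
static inline bool example_handle_negotiate(struct icmsg_hdr *icmsghdrp,
					    u8 *buf, u32 buflen,
					    int *nego_srv_version)
{
	static const int fw_versions[] = { (3 << 16) | 0 };	/* 3.0 */
	static const int srv_versions[] = { (1 << 16) | 0 };	/* 1.0 */

	return vmbus_prep_negotiate_resp(icmsghdrp, buf, buflen,
					 fw_versions, 1, srv_versions, 1,
					 NULL, nego_srv_version);
}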
1615void hv_process_channel_removal(struct vmbus_channel *channel);
1616
1617void vmbus_setevent(struct vmbus_channel *channel);
1618/*
1619 * Negotiated version with the Host.
1620 */
1621
1622extern __u32 vmbus_proto_version;
1623
1624int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
1625				  const guid_t *shv_host_servie_id);
1626int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
1627void vmbus_set_event(struct vmbus_channel *channel);
1628
1629/* Get the start of the ring buffer. */
1630static inline void *
1631hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
1632{
1633	return ring_info->ring_buffer->buffer;
1634}
1635
1636/*
1637 * Mask off host interrupt callback notifications
1638 */
1639static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
1640{
1641	rbi->ring_buffer->interrupt_mask = 1;
1642
1643	/* make sure mask update is not reordered */
1644	virt_mb();
1645}
1646
1647/*
1648 * Re-enable host callback and return number of outstanding bytes
1649 */
1650static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
1651{
1652
1653	rbi->ring_buffer->interrupt_mask = 0;
1654
1655	/* make sure mask update is not reordered */
1656	virt_mb();
1657
1658	/*
1659	 * Now check to see if the ring buffer is still empty.
1660	 * If it is not, we raced and we need to process new
1661	 * incoming messages.
1662	 */
1663	return hv_get_bytes_to_read(rbi);
1664}
1665
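/*
 * Illustrative sketch (not part of the upstream header): the canonical
 * mask/process/unmask read loop built from hv_begin_read()/hv_end_read().
 * "process" stands in for the driver's packet-consuming routine; looping
 * on hv_end_read() closes the race with data that arrived while host
 * notifications were still masked.
 */
static inline void example_drain_ring(struct hv_ring_buffer_info *rbi,
				      void (*process)(struct hv_ring_buffer_info *rbi))
{
	for (;;) {
		hv_begin_read(rbi);	/* mask host interrupt notifications */
		process(rbi);		/* consume everything currently queued */
		if (!hv_end_read(rbi))	/* unmask; 0 means truly empty */
			break;
	}
}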
1666/*
1667 * An API to support in-place processing of incoming VMBUS packets.
1668 */
1669
1670/* Get data payload associated with descriptor */
1671static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
1672{
1673	return (void *)((unsigned long)desc + (desc->offset8 << 3));
1674}
1675
1676/* Get data size associated with descriptor */
1677static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
1678{
1679	return (desc->len8 << 3) - (desc->offset8 << 3);
1680}
1681
1682
1683struct vmpacket_descriptor *
1684hv_pkt_iter_first_raw(struct vmbus_channel *channel);
1685
1686struct vmpacket_descriptor *
1687hv_pkt_iter_first(struct vmbus_channel *channel);
1688
1689struct vmpacket_descriptor *
1690__hv_pkt_iter_next(struct vmbus_channel *channel,
1691		   const struct vmpacket_descriptor *pkt,
1692		   bool copy);
1693
1694void hv_pkt_iter_close(struct vmbus_channel *channel);
1695
1696static inline struct vmpacket_descriptor *
1697hv_pkt_iter_next_pkt(struct vmbus_channel *channel,
1698		     const struct vmpacket_descriptor *pkt,
1699		     bool copy)
1700{
1701	struct vmpacket_descriptor *nxt;
1702
1703	nxt = __hv_pkt_iter_next(channel, pkt, copy);
1704	if (!nxt)
1705		hv_pkt_iter_close(channel);
1706
1707	return nxt;
1708}
1709
1710/*
1711 * Get next packet descriptor without copying it out of the ring buffer
1712 * If at end of list, return NULL and update host.
1713 */
1714static inline struct vmpacket_descriptor *
1715hv_pkt_iter_next_raw(struct vmbus_channel *channel,
1716		     const struct vmpacket_descriptor *pkt)
1717{
1718	return hv_pkt_iter_next_pkt(channel, pkt, false);
1719}
1720
1721/*
1722 * Get next packet descriptor from iterator
1723 * If at end of list, return NULL and update host.
1724 */
1725static inline struct vmpacket_descriptor *
1726hv_pkt_iter_next(struct vmbus_channel *channel,
1727		 const struct vmpacket_descriptor *pkt)
1728{
1729	return hv_pkt_iter_next_pkt(channel, pkt, true);
1730}
1731
1732#define foreach_vmbus_pkt(pkt, channel) \
1733	for (pkt = hv_pkt_iter_first(channel); pkt; \
1734	    pkt = hv_pkt_iter_next(channel, pkt))
1735
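/*
 * Illustrative sketch (not part of the upstream header): in-place packet
 * processing with foreach_vmbus_pkt(). hv_pkt_data()/hv_pkt_datalen()
 * locate each packet's payload; the empty body here stands in for a
 * driver's per-packet handling.
 */
static inline void example_rx_loop(struct vmbus_channel *channel)
{
	const struct vmpacket_descriptor *pkt;

	foreach_vmbus_pkt(pkt, channel) {
		void *data = hv_pkt_data(pkt);
		u32 len = hv_pkt_datalen(pkt);

		(void)data;	/* handle one packet of "len" payload bytes */
		(void)len;
	}
}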
1736/*
1737 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
1738 * sends requests to read and write blocks. Each block must be 128 bytes or
1739 * smaller. Optionally, the VF driver can register a callback function which
1740 * will be invoked when the host says that one or more of the first 64 block
1741 * IDs is "invalid" which means that the VF driver should reread them.
1742 */
1743#define HV_CONFIG_BLOCK_SIZE_MAX 128
1744
1745int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
1746			unsigned int block_id, unsigned int *bytes_returned);
1747int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
1748			 unsigned int block_id);
1749int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
1750				void (*block_invalidate)(void *context,
1751							 u64 block_mask));
1752
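/*
 * Illustrative sketch (not part of the upstream header): reading the
 * backing data of config block 0 through the PF driver. Block IDs and
 * the at-most-128-byte block size come from the interface described
 * above; the caller's buffer here is assumed to be large enough.
 */
static inline int example_read_block0(struct pci_dev *dev,
				      void *buf, unsigned int buf_len)
{
	unsigned int bytes_returned = 0;
	int ret;

	ret = hyperv_read_cfg_blk(dev, buf, buf_len, 0, &bytes_returned);

	return ret ? ret : (int)bytes_returned;
}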
1753struct hyperv_pci_block_ops {
1754	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
1755			  unsigned int block_id, unsigned int *bytes_returned);
1756	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
1757			   unsigned int block_id);
1758	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
1759				  void (*block_invalidate)(void *context,
1760							   u64 block_mask));
1761};
1762
1763extern struct hyperv_pci_block_ops hvpci_block_ops;
1764
1765static inline unsigned long virt_to_hvpfn(void *addr)
1766{
1767	phys_addr_t paddr;
1768
1769	if (is_vmalloc_addr(addr))
1770		paddr = page_to_phys(vmalloc_to_page(addr)) +
1771				     offset_in_page(addr);
1772	else
1773		paddr = __pa(addr);
1774
1775	return  paddr >> HV_HYP_PAGE_SHIFT;
1776}
1777
1778#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
1779#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
1780#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
1781#define HVPFN_DOWN(x)	((x) >> HV_HYP_PAGE_SHIFT)
1782#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
1783
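/*
 * Illustrative sketch (not part of the upstream header): the helpers
 * above convert between guest addresses and Hyper-V (4K) page frames
 * even when the guest PAGE_SIZE is larger. For example, the number of
 * HV_HYP_PAGE-sized pages needed to cover "len" bytes at "buf":
 */
static inline unsigned long example_hvpfn_span(void *buf, size_t len)
{
	/* The first page may be entered mid-page, hence the added offset. */
	return HVPFN_UP(offset_in_hvpage(buf) + len);
}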
1784#endif /* _HYPERV_H */
 121
 
 
 
 
 122struct hv_ring_buffer_info {
 123	struct hv_ring_buffer *ring_buffer;
 124	u32 ring_size;			/* Include the shared header */
 
 125	spinlock_t ring_lock;
 126
 127	u32 ring_datasize;		/* < ring_size */
 128	u32 ring_data_startoffset;
 
 
 
 
 
 
 
 
 
 129};
 130
 131/*
 132 *
 133 * hv_get_ringbuffer_availbytes()
 134 *
 135 * Get number of bytes available to read and to write to
 136 * for the specified ring buffer
 137 */
 138static inline void
 139hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
 140			  u32 *read, u32 *write)
 141{
 142	u32 read_loc, write_loc, dsize;
 143
 144	/* Capture the read/write indices before they changed */
 145	read_loc = rbi->ring_buffer->read_index;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 146	write_loc = rbi->ring_buffer->write_index;
 147	dsize = rbi->ring_datasize;
 148
 149	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
 150		read_loc - write_loc;
 151	*read = dsize - *write;
 
 
 
 
 
 
 
 
 
 
 152}
 153
 154/*
 155 * VMBUS version is 32 bit entity broken up into
 156 * two 16 bit quantities: major_number. minor_number.
 157 *
 158 * 0 . 13 (Windows Server 2008)
 159 * 1 . 1  (Windows 7)
 160 * 2 . 4  (Windows 8)
 161 * 3 . 0  (Windows 8 R2)
 162 * 4 . 0  (Windows 10)
 
 
 
 
 
 163 */
 164
 165#define VERSION_WS2008  ((0 << 16) | (13))
 166#define VERSION_WIN7    ((1 << 16) | (1))
 167#define VERSION_WIN8    ((2 << 16) | (4))
 168#define VERSION_WIN8_1    ((3 << 16) | (0))
 169#define VERSION_WIN10	((4 << 16) | (0))
 170
 171#define VERSION_INVAL -1
 172
 173#define VERSION_CURRENT VERSION_WIN10
 
 174
 175/* Make maximum size of pipe payload of 16K */
 176#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)
 177
 178/* Define PipeMode values. */
 179#define VMBUS_PIPE_TYPE_BYTE		0x00000000
 180#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004
 181
 182/* The size of the user defined data buffer for non-pipe offers. */
 183#define MAX_USER_DEFINED_BYTES		120
 184
 185/* The size of the user defined data buffer for pipe offers. */
 186#define MAX_PIPE_USER_DEFINED_BYTES	116
 187
 188/*
 189 * At the center of the Channel Management library is the Channel Offer. This
 190 * struct contains the fundamental information about an offer.
 191 */
 192struct vmbus_channel_offer {
 193	uuid_le if_type;
 194	uuid_le if_instance;
 195
 196	/*
 197	 * These two fields are not currently used.
 198	 */
 199	u64 reserved1;
 200	u64 reserved2;
 201
 202	u16 chn_flags;
 203	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */
 204
 205	union {
 206		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
 207		struct {
 208			unsigned char user_def[MAX_USER_DEFINED_BYTES];
 209		} std;
 210
 211		/*
 212		 * Pipes:
 213		 * The following sructure is an integrated pipe protocol, which
 214		 * is implemented on top of standard user-defined data. Pipe
 215		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
 216		 * use.
 217		 */
 218		struct {
 219			u32  pipe_mode;
 220			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
 221		} pipe;
 222	} u;
 223	/*
 224	 * The sub_channel_index is defined in win8.
 
 
 
 225	 */
 226	u16 sub_channel_index;
 227	u16 reserved3;
 228} __packed;
 229
 230/* Server Flags */
 231#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
 232#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
 233#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
 234#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
 235#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
 236#define VMBUS_CHANNEL_PARENT_OFFER			0x200
 237#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
 238#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000
 239
 240struct vmpacket_descriptor {
 241	u16 type;
 242	u16 offset8;
 243	u16 len8;
 244	u16 flags;
 245	u64 trans_id;
 246} __packed;
 247
 248struct vmpacket_header {
 249	u32 prev_pkt_start_offset;
 250	struct vmpacket_descriptor descriptor;
 251} __packed;
 252
 253struct vmtransfer_page_range {
 254	u32 byte_count;
 255	u32 byte_offset;
 256} __packed;
 257
 258struct vmtransfer_page_packet_header {
 259	struct vmpacket_descriptor d;
 260	u16 xfer_pageset_id;
 261	u8  sender_owns_set;
 262	u8 reserved;
 263	u32 range_cnt;
 264	struct vmtransfer_page_range ranges[1];
 265} __packed;
 266
 267struct vmgpadl_packet_header {
 268	struct vmpacket_descriptor d;
 269	u32 gpadl;
 270	u32 reserved;
 271} __packed;
 272
 273struct vmadd_remove_transfer_page_set {
 274	struct vmpacket_descriptor d;
 275	u32 gpadl;
 276	u16 xfer_pageset_id;
 277	u16 reserved;
 278} __packed;
 279
 280/*
 281 * This structure defines a range in guest physical space that can be made to
 282 * look virtually contiguous.
 283 */
 284struct gpa_range {
 285	u32 byte_count;
 286	u32 byte_offset;
 287	u64 pfn_array[0];
 288};
 289
 290/*
 291 * This is the format for an Establish Gpadl packet, which contains a handle by
 292 * which this GPADL will be known and a set of GPA ranges associated with it.
 293 * This can be converted to a MDL by the guest OS.  If there are multiple GPA
 294 * ranges, then the resulting MDL will be "chained," representing multiple VA
 295 * ranges.
 296 */
 297struct vmestablish_gpadl {
 298	struct vmpacket_descriptor d;
 299	u32 gpadl;
 300	u32 range_cnt;
 301	struct gpa_range range[1];
 302} __packed;
 303
 304/*
 305 * This is the format for a Teardown Gpadl packet, which indicates that the
 306 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 307 */
 308struct vmteardown_gpadl {
 309	struct vmpacket_descriptor d;
 310	u32 gpadl;
 311	u32 reserved;	/* for alignment to a 8-byte boundary */
 312} __packed;
 313
 314/*
 315 * This is the format for a GPA-Direct packet, which contains a set of GPA
 316 * ranges, in addition to commands and/or data.
 317 */
 318struct vmdata_gpa_direct {
 319	struct vmpacket_descriptor d;
 320	u32 reserved;
 321	u32 range_cnt;
 322	struct gpa_range range[1];
 323} __packed;
 324
 325/* This is the format for a Additional Data Packet. */
 326struct vmadditional_data {
 327	struct vmpacket_descriptor d;
 328	u64 total_bytes;
 329	u32 offset;
 330	u32 byte_cnt;
 331	unsigned char data[1];
 332} __packed;
 333
 334union vmpacket_largest_possible_header {
 335	struct vmpacket_descriptor simple_hdr;
 336	struct vmtransfer_page_packet_header xfer_page_hdr;
 337	struct vmgpadl_packet_header gpadl_hdr;
 338	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
 339	struct vmestablish_gpadl establish_gpadl_hdr;
 340	struct vmteardown_gpadl teardown_gpadl_hdr;
 341	struct vmdata_gpa_direct data_gpa_direct_hdr;
 342};
 343
 344#define VMPACKET_DATA_START_ADDRESS(__packet)	\
 345	(void *)(((unsigned char *)__packet) +	\
 346	 ((struct vmpacket_descriptor)__packet)->offset8 * 8)
 347
 348#define VMPACKET_DATA_LENGTH(__packet)		\
 349	((((struct vmpacket_descriptor)__packet)->len8 -	\
 350	  ((struct vmpacket_descriptor)__packet)->offset8) * 8)
 351
 352#define VMPACKET_TRANSFER_MODE(__packet)	\
 353	(((struct IMPACT)__packet)->type)
 354
 355enum vmbus_packet_type {
 356	VM_PKT_INVALID				= 0x0,
 357	VM_PKT_SYNCH				= 0x1,
 358	VM_PKT_ADD_XFER_PAGESET			= 0x2,
 359	VM_PKT_RM_XFER_PAGESET			= 0x3,
 360	VM_PKT_ESTABLISH_GPADL			= 0x4,
 361	VM_PKT_TEARDOWN_GPADL			= 0x5,
 362	VM_PKT_DATA_INBAND			= 0x6,
 363	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
 364	VM_PKT_DATA_USING_GPADL			= 0x8,
 365	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
 366	VM_PKT_CANCEL_REQUEST			= 0xa,
 367	VM_PKT_COMP				= 0xb,
 368	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
 369	VM_PKT_ADDITIONAL_DATA			= 0xd
 370};
 371
 372#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1
 373
 374
 375/* Version 1 messages */
 376enum vmbus_channel_message_type {
 377	CHANNELMSG_INVALID			=  0,
 378	CHANNELMSG_OFFERCHANNEL		=  1,
 379	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
 380	CHANNELMSG_REQUESTOFFERS		=  3,
 381	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
 382	CHANNELMSG_OPENCHANNEL		=  5,
 383	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
 384	CHANNELMSG_CLOSECHANNEL		=  7,
 385	CHANNELMSG_GPADL_HEADER		=  8,
 386	CHANNELMSG_GPADL_BODY			=  9,
 387	CHANNELMSG_GPADL_CREATED		= 10,
 388	CHANNELMSG_GPADL_TEARDOWN		= 11,
 389	CHANNELMSG_GPADL_TORNDOWN		= 12,
 390	CHANNELMSG_RELID_RELEASED		= 13,
 391	CHANNELMSG_INITIATE_CONTACT		= 14,
 392	CHANNELMSG_VERSION_RESPONSE		= 15,
 393	CHANNELMSG_UNLOAD			= 16,
 394	CHANNELMSG_UNLOAD_RESPONSE		= 17,
 395	CHANNELMSG_18				= 18,
 396	CHANNELMSG_19				= 19,
 397	CHANNELMSG_20				= 20,
 398	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
 
 
 
 399	CHANNELMSG_COUNT
 400};
 401
 
 
 
 402struct vmbus_channel_message_header {
 403	enum vmbus_channel_message_type msgtype;
 404	u32 padding;
 405} __packed;
 406
 407/* Query VMBus Version parameters */
 408struct vmbus_channel_query_vmbus_version {
 409	struct vmbus_channel_message_header header;
 410	u32 version;
 411} __packed;
 412
 413/* VMBus Version Supported parameters */
 414struct vmbus_channel_version_supported {
 415	struct vmbus_channel_message_header header;
 416	u8 version_supported;
 417} __packed;
 418
 419/* Offer Channel parameters */
 420struct vmbus_channel_offer_channel {
 421	struct vmbus_channel_message_header header;
 422	struct vmbus_channel_offer offer;
 423	u32 child_relid;
 424	u8 monitorid;
 425	/*
 426	 * win7 and beyond splits this field into a bit field.
 427	 */
 428	u8 monitor_allocated:1;
 429	u8 reserved:7;
 430	/*
 431	 * These are new fields added in win7 and later.
 432	 * Do not access these fields without checking the
 433	 * negotiated protocol.
 434	 *
 435	 * If "is_dedicated_interrupt" is set, we must not set the
 436	 * associated bit in the channel bitmap while sending the
 437	 * interrupt to the host.
 438	 *
 439	 * connection_id is to be used in signaling the host.
 440	 */
 441	u16 is_dedicated_interrupt:1;
 442	u16 reserved1:15;
 443	u32 connection_id;
 444} __packed;
 445
 446/* Rescind Offer parameters */
 447struct vmbus_channel_rescind_offer {
 448	struct vmbus_channel_message_header header;
 449	u32 child_relid;
 450} __packed;
 451
 
 
 
 
 
 
 452/*
 453 * Request Offer -- no parameters, SynIC message contains the partition ID
 454 * Set Snoop -- no parameters, SynIC message contains the partition ID
 455 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 456 * All Offers Delivered -- no parameters, SynIC message contains the partition
 457 *		           ID
 458 * Flush Client -- no parameters, SynIC message contains the partition ID
 459 */
 460
 461/* Open Channel parameters */
 462struct vmbus_channel_open_channel {
 463	struct vmbus_channel_message_header header;
 464
 465	/* Identifies the specific VMBus channel that is being opened. */
 466	u32 child_relid;
 467
 468	/* ID making a particular open request at a channel offer unique. */
 469	u32 openid;
 470
 471	/* GPADL for the channel's ring buffer. */
 472	u32 ringbuffer_gpadlhandle;
 473
 474	/*
 475	 * Starting with win8, this field will be used to specify
 476	 * the target virtual processor on which to deliver the interrupt for
 477	 * the host to guest communication.
 478	 * Prior to win8, incoming channel interrupts would only
 479	 * be delivered on cpu 0. Setting this value to 0 would
 480	 * preserve the earlier behavior.
 481	 */
 482	u32 target_vp;
 483
 484	/*
 485	* The upstream ring buffer begins at offset zero in the memory
 486	* described by RingBufferGpadlHandle. The downstream ring buffer
 487	* follows it at this offset (in pages).
 488	*/
 489	u32 downstream_ringbuffer_pageoffset;
 490
 491	/* User-specific data to be passed along to the server endpoint. */
 492	unsigned char userdata[MAX_USER_DEFINED_BYTES];
 493} __packed;
 494
 495/* Open Channel Result parameters */
 496struct vmbus_channel_open_result {
 497	struct vmbus_channel_message_header header;
 498	u32 child_relid;
 499	u32 openid;
 500	u32 status;
 501} __packed;
 502
 
 
 
 
 
 
 
 503/* Close channel parameters; */
 504struct vmbus_channel_close_channel {
 505	struct vmbus_channel_message_header header;
 506	u32 child_relid;
 507} __packed;
 508
 509/* Channel Message GPADL */
 510#define GPADL_TYPE_RING_BUFFER		1
 511#define GPADL_TYPE_SERVER_SAVE_AREA	2
 512#define GPADL_TYPE_TRANSACTION		8
 513
 514/*
 515 * The number of PFNs in a GPADL message is defined by the number of
 516 * pages that would be spanned by ByteCount and ByteOffset.  If the
 517 * implied number of PFNs won't fit in this packet, there will be a
 518 * follow-up packet that contains more.
 519 */
 520struct vmbus_channel_gpadl_header {
 521	struct vmbus_channel_message_header header;
 522	u32 child_relid;
 523	u32 gpadl;
 524	u16 range_buflen;
 525	u16 rangecount;
 526	struct gpa_range range[0];
 527} __packed;
 528
 529/* This is the followup packet that contains more PFNs. */
 530struct vmbus_channel_gpadl_body {
 531	struct vmbus_channel_message_header header;
 532	u32 msgnumber;
 533	u32 gpadl;
 534	u64 pfn[0];
 535} __packed;
 536
 537struct vmbus_channel_gpadl_created {
 538	struct vmbus_channel_message_header header;
 539	u32 child_relid;
 540	u32 gpadl;
 541	u32 creation_status;
 542} __packed;
 543
 544struct vmbus_channel_gpadl_teardown {
 545	struct vmbus_channel_message_header header;
 546	u32 child_relid;
 547	u32 gpadl;
 548} __packed;
 549
 550struct vmbus_channel_gpadl_torndown {
 551	struct vmbus_channel_message_header header;
 552	u32 gpadl;
 553} __packed;
 554
 555struct vmbus_channel_relid_released {
 556	struct vmbus_channel_message_header header;
 557	u32 child_relid;
 558} __packed;
 559
 560struct vmbus_channel_initiate_contact {
 561	struct vmbus_channel_message_header header;
 562	u32 vmbus_version_requested;
 563	u32 target_vcpu; /* The VCPU the host should respond to */
 564	u64 interrupt_page;
 
 
 
 
 
 
 
 565	u64 monitor_page1;
 566	u64 monitor_page2;
 567} __packed;
 568
 569/* Hyper-V socket: guest's connect()-ing to host */
 570struct vmbus_channel_tl_connect_request {
 571	struct vmbus_channel_message_header header;
 572	uuid_le guest_endpoint_id;
 573	uuid_le host_service_id;
 
 
 
 
 
 
 
 574} __packed;
 575
 576struct vmbus_channel_version_response {
 577	struct vmbus_channel_message_header header;
 578	u8 version_supported;
 
 
 
 
 
 
 
 
 
 
 
 
 
 579} __packed;
 580
 581enum vmbus_channel_state {
 582	CHANNEL_OFFER_STATE,
 583	CHANNEL_OPENING_STATE,
 584	CHANNEL_OPEN_STATE,
 585	CHANNEL_OPENED_STATE,
 586};
 587
 588/*
 589 * Represents each channel msg on the vmbus connection This is a
 590 * variable-size data structure depending on the msg type itself
 591 */
 592struct vmbus_channel_msginfo {
 593	/* Bookkeeping stuff */
 594	struct list_head msglistentry;
 595
 596	/* So far, this is only used to handle gpadl body message */
 597	struct list_head submsglist;
 598
 599	/* Synchronize the request/response if needed */
 600	struct completion  waitevent;
 
 601	union {
 602		struct vmbus_channel_version_supported version_supported;
 603		struct vmbus_channel_open_result open_result;
 604		struct vmbus_channel_gpadl_torndown gpadl_torndown;
 605		struct vmbus_channel_gpadl_created gpadl_created;
 606		struct vmbus_channel_version_response version_response;
 
 607	} response;
 608
 609	u32 msgsize;
 610	/*
 611	 * The channel message that goes out on the "wire".
 612	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
 613	 */
 614	unsigned char msg[0];
 615};
 616
 617struct vmbus_close_msg {
 618	struct vmbus_channel_msginfo info;
 619	struct vmbus_channel_close_channel msg;
 620};
 621
 622/* Define connection identifier type. */
 623union hv_connection_id {
 624	u32 asu32;
 625	struct {
 626		u32 id:24;
 627		u32 reserved:8;
 628	} u;
 629};
 630
 631/* Definition of the hv_signal_event hypercall input structure. */
 632struct hv_input_signal_event {
 633	union hv_connection_id connectionid;
 634	u16 flag_number;
 635	u16 rsvdz;
 636};
 637
 638struct hv_input_signal_event_buffer {
 639	u64 align8;
 640	struct hv_input_signal_event event;
 641};
 642
 643enum hv_signal_policy {
 644	HV_SIGNAL_POLICY_DEFAULT = 0,
 645	HV_SIGNAL_POLICY_EXPLICIT,
 646};
 647
 648enum vmbus_device_type {
 649	HV_IDE = 0,
 650	HV_SCSI,
 651	HV_FC,
 652	HV_NIC,
 653	HV_ND,
 654	HV_PCIE,
 655	HV_FB,
 656	HV_KBD,
 657	HV_MOUSE,
 658	HV_KVP,
 659	HV_TS,
 660	HV_HB,
 661	HV_SHUTDOWN,
 662	HV_FCOPY,
 663	HV_BACKUP,
 664	HV_DM,
 665	HV_UNKOWN,
 
 
 
 
 
 
 
 
 
 
 
 
 
 666};
 667
 
 
 
 
 
 
 
 
 668struct vmbus_device {
 669	u16  dev_type;
 670	uuid_le guid;
 671	bool perf_device;
 
 672};
 673
 
 
 674struct vmbus_channel {
 675	/* Unique channel id */
 676	int id;
 677
 678	struct list_head listentry;
 679
 680	struct hv_device *device_obj;
 681
 682	enum vmbus_channel_state state;
 683
 684	struct vmbus_channel_offer_channel offermsg;
 685	/*
 686	 * These are based on the OfferMsg.MonitorId.
 687	 * Save it here for easy access.
 688	 */
 689	u8 monitor_grp;
 690	u8 monitor_bit;
 691
 692	bool rescind; /* got rescind msg */
 
 
 693
 694	u32 ringbuffer_gpadlhandle;
 695
 696	/* Allocated memory for ring buffer */
 697	void *ringbuffer_pages;
 698	u32 ringbuffer_pagecount;
 
 699	struct hv_ring_buffer_info outbound;	/* send to parent */
 700	struct hv_ring_buffer_info inbound;	/* receive from parent */
 701	spinlock_t inbound_lock;
 702
 703	struct vmbus_close_msg close_msg;
 704
 705	/* Channel callback are invoked in this workqueue context */
 706	/* HANDLE dataWorkQueue; */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 707
 
 
 708	void (*onchannel_callback)(void *context);
 709	void *channel_callback_context;
 710
 
 
 
 711	/*
 712	 * A channel can be marked for efficient (batched)
 713	 * reading:
 714	 * If batched_reading is set to "true", we read until the
 715	 * channel is empty and hold off interrupts from the host
 716	 * during the entire read process.
 717	 * If batched_reading is set to "false", the client is not
 718	 * going to perform batched reading.
 719	 *
 720	 * By default we will enable batched reading; specific
 721	 * drivers that don't want this behavior can turn it off.
 722	 */
 
 723
 724	bool batched_reading;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 725
 726	bool is_dedicated_interrupt;
 727	struct hv_input_signal_event_buffer sig_buf;
 728	struct hv_input_signal_event *sig_event;
 729
 730	/*
 731	 * Starting with win8, this field will be used to specify
 732	 * the target virtual processor on which to deliver the interrupt for
 733	 * the host to guest communication.
 734	 * Prior to win8, incoming channel interrupts would only
 735	 * be delivered on cpu 0. Setting this value to 0 would
 736	 * preserve the earlier behavior.
 
 737	 */
 738	u32 target_vp;
 739	/* The corresponding CPUID in the guest */
 740	u32 target_cpu;
 741	/*
 742	 * State to manage the CPU affiliation of channels.
 743	 */
 744	struct cpumask alloced_cpus_in_node;
 745	int numa_node;
 746	/*
 747	 * Support for sub-channels. For high performance devices,
 748	 * it will be useful to have multiple sub-channels to support
 749	 * a scalable communication infrastructure with the host.
 750	 * The support for sub-channels is implemented as an extention
 751	 * to the current infrastructure.
 752	 * The initial offer is considered the primary channel and this
 753	 * offer message will indicate if the host supports sub-channels.
 754	 * The guest is free to ask for sub-channels to be offerred and can
 755	 * open these sub-channels as a normal "primary" channel. However,
 756	 * all sub-channels will have the same type and instance guids as the
 757	 * primary channel. Requests sent on a given channel will result in a
 758	 * response on the same channel.
 759	 */
 760
 761	/*
 762	 * Sub-channel creation callback. This callback will be called in
 763	 * process context when a sub-channel offer is received from the host.
 764	 * The guest can open the sub-channel in the context of this callback.
 765	 */
 766	void (*sc_creation_callback)(struct vmbus_channel *new_sc);
 767
 768	/*
 769	 * Channel rescind callback. Some channels (the hvsock ones), need to
 770	 * register a callback which is invoked in vmbus_onoffer_rescind().
 771	 */
 772	void (*chn_rescind_callback)(struct vmbus_channel *channel);
 773
 774	/*
 775	 * The spinlock to protect the structure. It is being used to protect
 776	 * test-and-set access to various attributes of the structure as well
 777	 * as all sc_list operations.
 778	 */
 779	spinlock_t lock;
 780	/*
 781	 * All Sub-channels of a primary channel are linked here.
 782	 */
 783	struct list_head sc_list;
 784	/*
 785	 * Current number of sub-channels.
 786	 */
 787	int num_sc;
 788	/*
 789	 * Number of a sub-channel (position within sc_list) which is supposed
 790	 * to be used as the next outgoing channel.
 791	 */
 792	int next_oc;
 793	/*
 794	 * The primary channel this sub-channel belongs to.
 795	 * This will be NULL for the primary channel.
 796	 */
 797	struct vmbus_channel *primary_channel;
 798	/*
 799	 * Support per-channel state for use by vmbus drivers.
 800	 */
 801	void *per_channel_state;
 
 
 
 
 
 
 
 
 
 
 
 
 802	/*
 803	 * To support per-cpu lookup mapping of relid to channel,
 804	 * link up channels based on their CPU affinity.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 805	 */
 806	struct list_head percpu_list;
 
 807	/*
 808	 * Host signaling policy: The default policy will be
 809	 * based on the ring buffer state. We will also support
 810	 * a policy where the client driver can have explicit
 811	 * signaling control.
 812	 */
 813	enum hv_signal_policy  signal_policy;
 
 
 
 
 
 
 
 
 
 
 814	/*
 815	 * On the channel send side, many of the VMBUS
 816	 * device drivers explicity serialize access to the
 817	 * outgoing ring buffer. Give more control to the
 818	 * VMBUS device drivers in terms how to serialize
 819	 * accesss to the outgoing ring buffer.
 820	 * The default behavior will be to aquire the
 821	 * ring lock to preserve the current behavior.
 822	 */
 823	bool acquire_ring_lock;
 
 
 
 
 
 
 824
 
 
 
 
 
 
 825};
 826
 827static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
 828{
 829	c->acquire_ring_lock = state;
 830}
 831
 832static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 833{
 834	return !!(c->offermsg.offer.chn_flags &
 835		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
 836}
 837
 838static inline void set_channel_signal_state(struct vmbus_channel *c,
 839					    enum hv_signal_policy policy)
 840{
 841	c->signal_policy = policy;
 842}
 843
 844static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
 
 845{
 846	c->batched_reading = state;
 847}
 848
 849static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
 850{
 851	c->per_channel_state = s;
 852}
 853
 854static inline void *get_per_channel_state(struct vmbus_channel *c)
 855{
 856	return c->per_channel_state;
 857}
 858
 859static inline void set_channel_pending_send_size(struct vmbus_channel *c,
 860						 u32 size)
 861{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 862	c->outbound.ring_buffer->pending_send_sz = size;
 863}
 864
 865void vmbus_onmessage(void *context);
 
 
 
 
 
 
 
 
 
 
 866
 867int vmbus_request_offers(void);
 868
 869/*
 870 * APIs for managing sub-channels.
 871 */
 872
 873void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
 874			void (*sc_cr_cb)(struct vmbus_channel *new_sc));
 875
 876void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
 877		void (*chn_rescind_cb)(struct vmbus_channel *));
 878
 879/*
 880 * Retrieve the (sub) channel on which to send an outgoing request.
 881 * When a primary channel has multiple sub-channels, we choose a
 882 * channel whose VCPU binding is closest to the VCPU on which
 883 * this call is being made.
 884 */
 885struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
 886
 887/*
 888 * Check if sub-channels have already been offerred. This API will be useful
 889 * when the driver is unloaded after establishing sub-channels. In this case,
 890 * when the driver is re-loaded, the driver would have to check if the
 891 * subchannels have already been established before attempting to request
 892 * the creation of sub-channels.
 893 * This function returns TRUE to indicate that subchannels have already been
 894 * created.
 895 * This function should be invoked after setting the callback function for
 896 * sub-channel creation.
 897 */
 898bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
 899
 900/* The format must be the same as struct vmdata_gpa_direct */
 901struct vmbus_channel_packet_page_buffer {
 902	u16 type;
 903	u16 dataoffset8;
 904	u16 length8;
 905	u16 flags;
 906	u64 transactionid;
 907	u32 reserved;
 908	u32 rangecount;
 909	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
 910} __packed;
 911
 912/* The format must be the same as struct vmdata_gpa_direct */
 913struct vmbus_channel_packet_multipage_buffer {
 914	u16 type;
 915	u16 dataoffset8;
 916	u16 length8;
 917	u16 flags;
 918	u64 transactionid;
 919	u32 reserved;
 920	u32 rangecount;		/* Always 1 in this case */
 921	struct hv_multipage_buffer range;
 922} __packed;
 923
 924/* The format must be the same as struct vmdata_gpa_direct */
 925struct vmbus_packet_mpb_array {
 926	u16 type;
 927	u16 dataoffset8;
 928	u16 length8;
 929	u16 flags;
 930	u64 transactionid;
 931	u32 reserved;
 932	u32 rangecount;         /* Always 1 in this case */
 933	struct hv_mpb_array range;
 934} __packed;
 935
 
 
 
 
 
 
 
 
 936
 937extern int vmbus_open(struct vmbus_channel *channel,
 938			    u32 send_ringbuffersize,
 939			    u32 recv_ringbuffersize,
 940			    void *userdata,
 941			    u32 userdatalen,
 942			    void(*onchannel_callback)(void *context),
 943			    void *context);
 944
 945extern void vmbus_close(struct vmbus_channel *channel);
 946
 947extern int vmbus_sendpacket(struct vmbus_channel *channel,
 948				  void *buffer,
 949				  u32 bufferLen,
 950				  u64 requestid,
 951				  enum vmbus_packet_type type,
 952				  u32 flags);
 953
 954extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
 955				  void *buffer,
 956				  u32 bufferLen,
 957				  u64 requestid,
 958				  enum vmbus_packet_type type,
 959				  u32 flags,
 960				  bool kick_q);
 961
 962extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 963					    struct hv_page_buffer pagebuffers[],
 964					    u32 pagecount,
 965					    void *buffer,
 966					    u32 bufferlen,
 967					    u64 requestid);
 968
 969extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 970					   struct hv_page_buffer pagebuffers[],
 971					   u32 pagecount,
 972					   void *buffer,
 973					   u32 bufferlen,
 974					   u64 requestid,
 975					   u32 flags,
 976					   bool kick_q);
 977
 978extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 979					struct hv_multipage_buffer *mpb,
 980					void *buffer,
 981					u32 bufferlen,
 982					u64 requestid);
 983
 984extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 985				     struct vmbus_packet_mpb_array *mpb,
 986				     u32 desc_size,
 987				     void *buffer,
 988				     u32 bufferlen,
 989				     u64 requestid);
 990
 991extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
 992				      void *kbuffer,
 993				      u32 size,
 994				      u32 *gpadl_handle);
 995
 996extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
 997				     u32 gpadl_handle);
 998
 
 
 999extern int vmbus_recvpacket(struct vmbus_channel *channel,
1000				  void *buffer,
1001				  u32 bufferlen,
1002				  u32 *buffer_actual_len,
1003				  u64 *requestid);
1004
1005extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1006				     void *buffer,
1007				     u32 bufferlen,
1008				     u32 *buffer_actual_len,
1009				     u64 *requestid);
1010
1011
1012extern void vmbus_ontimer(unsigned long data);
1013
1014/* Base driver object */
1015struct hv_driver {
1016	const char *name;
1017
1018	/*
1019	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
1020	 * channel flag, actually doesn't mean a synthetic device because the
1021	 * offer's if_type/if_instance can change for every new hvsock
1022	 * connection.
1023	 *
1024	 * However, to facilitate the notification of new-offer/rescind-offer
1025	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
1026	 * a special vmbus device, and hence we need the below flag to
1027	 * indicate if the driver is the hvsock driver or not: we need to
1028	 * specially treat the hvosck offer & driver in vmbus_match().
1029	 */
1030	bool hvsock;
1031
1032	/* the device type supported by this driver */
1033	uuid_le dev_type;
1034	const struct hv_vmbus_device_id *id_table;
1035
1036	struct device_driver driver;
1037
 
 
 
 
 
 
1038	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
1039	int (*remove)(struct hv_device *);
1040	void (*shutdown)(struct hv_device *);
1041
 
 
 
1042};
1043
1044/* Base device object */
1045struct hv_device {
1046	/* the device type id of this device */
1047	uuid_le dev_type;
1048
1049	/* the device instance id of this device */
1050	uuid_le dev_instance;
1051	u16 vendor_id;
1052	u16 device_id;
1053
1054	struct device device;
 
1055
1056	struct vmbus_channel *channel;
 
 
 
 
 
1057};
1058
1059
1060static inline struct hv_device *device_to_hv_device(struct device *d)
1061{
1062	return container_of(d, struct hv_device, device);
1063}
1064
1065static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
1066{
1067	return container_of(d, struct hv_driver, driver);
1068}
1069
1070static inline void hv_set_drvdata(struct hv_device *dev, void *data)
1071{
1072	dev_set_drvdata(&dev->device, data);
1073}
1074
1075static inline void *hv_get_drvdata(struct hv_device *dev)
1076{
1077	return dev_get_drvdata(&dev->device);
1078}
1079
 
 
 
 
 
 
 
 
 
 
 
 
1080/* Vmbus interface */
1081#define vmbus_driver_register(driver)	\
1082	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1083int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1084					 struct module *owner,
1085					 const char *mod_name);
1086void vmbus_driver_unregister(struct hv_driver *hv_driver);
1087
1088void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1089
1090int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1091			resource_size_t min, resource_size_t max,
1092			resource_size_t size, resource_size_t align,
1093			bool fb_overlap_ok);
1094
1095int vmbus_cpu_number_to_vp_number(int cpu_number);
1096u64 hv_do_hypercall(u64 control, void *input, void *output);
1097
1098/*
1099 * GUID definitions of various offer types - services offered to the guest.
1100 */
1101
1102/*
1103 * Network GUID
1104 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
1105 */
1106#define HV_NIC_GUID \
1107	.guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1108			0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1109
1110/*
1111 * IDE GUID
1112 * {32412632-86cb-44a2-9b5c-50d1417354f5}
1113 */
1114#define HV_IDE_GUID \
1115	.guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1116			0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1117
1118/*
1119 * SCSI GUID
1120 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
1121 */
1122#define HV_SCSI_GUID \
1123	.guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1124			0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1125
1126/*
1127 * Shutdown GUID
1128 * {0e0b6031-5213-4934-818b-38d90ced39db}
1129 */
1130#define HV_SHUTDOWN_GUID \
1131	.guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1132			0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1133
1134/*
1135 * Time Synch GUID
1136 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
1137 */
1138#define HV_TS_GUID \
1139	.guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1140			0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1141
1142/*
1143 * Heartbeat GUID
1144 * {57164f39-9115-4e78-ab55-382f3bd5422d}
1145 */
1146#define HV_HEART_BEAT_GUID \
1147	.guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1148			0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1149
1150/*
1151 * KVP GUID
1152 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
1153 */
1154#define HV_KVP_GUID \
1155	.guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1156			0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1157
1158/*
1159 * Dynamic memory GUID
1160 * {525074dc-8985-46e2-8057-a307dc18a502}
1161 */
1162#define HV_DM_GUID \
1163	.guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1164			0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1165
1166/*
1167 * Mouse GUID
1168 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
1169 */
1170#define HV_MOUSE_GUID \
1171	.guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1172			0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1173
1174/*
1175 * Keyboard GUID
1176 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
1177 */
1178#define HV_KBD_GUID \
1179	.guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1180			0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1181
1182/*
1183 * VSS (Backup/Restore) GUID
1184 */
1185#define HV_VSS_GUID \
1186	.guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1187			0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1188/*
1189 * Synthetic Video GUID
1190 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
1191 */
1192#define HV_SYNTHVID_GUID \
1193	.guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1194			0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1195
1196/*
1197 * Synthetic FC GUID
1198 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
1199 */
1200#define HV_SYNTHFC_GUID \
1201	.guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1202			0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1203
1204/*
1205 * Guest File Copy Service
1206 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
1207 */
1208
1209#define HV_FCOPY_GUID \
1210	.guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1211			0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1212
1213/*
1214 * NetworkDirect. This is the guest RDMA service.
1215 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
1216 */
1217#define HV_ND_GUID \
1218	.guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1219			0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1220
1221/*
1222 * PCI Express Pass Through
1223 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
1224 */
1225
1226#define HV_PCIE_GUID \
1227	.guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1228			0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1229
1230/*
1231 * Common header for Hyper-V ICs
1232 */
1233
1234#define ICMSGTYPE_NEGOTIATE		0
1235#define ICMSGTYPE_HEARTBEAT		1
1236#define ICMSGTYPE_KVPEXCHANGE		2
1237#define ICMSGTYPE_SHUTDOWN		3
1238#define ICMSGTYPE_TIMESYNC		4
1239#define ICMSGTYPE_VSS			5
 
1240
1241#define ICMSGHDRFLAG_TRANSACTION	1
1242#define ICMSGHDRFLAG_REQUEST		2
1243#define ICMSGHDRFLAG_RESPONSE		4
1244
1245
1246/*
1247 * While we want to handle util services as regular devices,
1248 * there is only one instance of each of these services; so
1249 * we statically allocate the service specific state.
1250 */
1251
1252struct hv_util_service {
1253	u8 *recv_buffer;
1254	void *channel;
1255	void (*util_cb)(void *);
1256	int (*util_init)(struct hv_util_service *);
1257	void (*util_deinit)(void);
 
 
1258};
1259
1260struct vmbuspipe_hdr {
1261	u32 flags;
1262	u32 msgsize;
1263} __packed;
1264
1265struct ic_version {
1266	u16 major;
1267	u16 minor;
1268} __packed;
1269
1270struct icmsg_hdr {
1271	struct ic_version icverframe;
1272	u16 icmsgtype;
1273	struct ic_version icvermsg;
1274	u16 icmsgsize;
1275	u32 status;
1276	u8 ictransaction_id;
1277	u8 icflags;
1278	u8 reserved[2];
1279} __packed;
1280
 
 
 
 
 
 
1281struct icmsg_negotiate {
1282	u16 icframe_vercnt;
1283	u16 icmsg_vercnt;
1284	u32 reserved;
1285	struct ic_version icversion_data[1]; /* any size array */
1286} __packed;
1287
1288struct shutdown_msg_data {
1289	u32 reason_code;
1290	u32 timeout_seconds;
1291	u32 flags;
1292	u8  display_message[2048];
1293} __packed;
1294
1295struct heartbeat_msg_data {
1296	u64 seq_num;
1297	u32 reserved[8];
1298} __packed;
1299
1300/* Time Sync IC defs */
1301#define ICTIMESYNCFLAG_PROBE	0
1302#define ICTIMESYNCFLAG_SYNC	1
1303#define ICTIMESYNCFLAG_SAMPLE	2
1304
1305#ifdef __x86_64__
1306#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
1307#else
1308#define WLTIMEDELTA	116444736000000000LL
1309#endif
1310
1311struct ictimesync_data {
1312	u64 parenttime;
1313	u64 childtime;
1314	u64 roundtriptime;
1315	u8 flags;
1316} __packed;
1317
 
 
 
 
 
 
 
 
 
1318struct hyperv_service_callback {
1319	u8 msg_type;
1320	char *log_msg;
1321	uuid_le data;
1322	struct vmbus_channel *channel;
1323	void (*callback) (void *context);
1324};
1325
1326#define MAX_SRV_VER	0x7ffffff
1327extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
1328					struct icmsg_negotiate *, u8 *, int,
1329					int);
 
1330
1331void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1332
 
1333/*
1334 * Negotiated version with the Host.
1335 */
1336
1337extern __u32 vmbus_proto_version;
1338
1339int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
1340				  const uuid_le *shv_host_servie_id);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1341#endif /* _HYPERV_H */