// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
#include <linux/set_memory.h>
#include <asm/page.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

/*
 * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
 *
 * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
 *
 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
 * (because of the alignment requirement); however, the hypervisor only
 * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
 * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
 * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
 * total size that the guest uses minus twice the gap size.
 */
static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
{
	switch (type) {
	case HV_GPADL_BUFFER:
		return size;
	case HV_GPADL_RING:
		/* The size of a ringbuffer must be page-aligned */
		BUG_ON(size % PAGE_SIZE);
		/*
		 * Two things to notice here:
		 * 1) We're processing two ring buffers as a unit
		 * 2) We're skipping any space larger than HV_HYP_PAGE_SIZE in
		 * the first guest-size page of each of the two ring buffers.
		 * So we effectively subtract out two guest-size pages, and add
		 * back two Hyper-V size pages.
		 */
		return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
	}
	BUG();
	return 0;
}
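
/*
 * Worked example (illustrative only, values assumed): on a guest with
 * PAGE_SIZE = 64 KiB and HV_HYP_PAGE_SIZE = 4 KiB, a 256 KiB RING gpadl
 * is reported to Hyper-V as 256 KiB - 2 * (64 KiB - 4 KiB) = 136 KiB.
 * When PAGE_SIZE == HV_HYP_PAGE_SIZE (e.g. x86), the gap is zero and the
 * guest and hypervisor sizes match.
 */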

/*
 * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
 *                                 HV_HYP_PAGE) in a ring gpadl based on the
 *                                 offset in the guest
 *
 * @offset: the offset (in bytes) where the send ringbuffer starts in the
 *               virtual address space of the guest
 */
static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
{

	/*
	 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
	 * header (because of the alignment requirement); however, the
	 * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
	 * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
	 *
	 * And to calculate the effective send offset in gpadl, we need to
	 * subtract this gap.
	 */
	return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
}
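
/*
 * Worked example (illustrative only, values assumed): with PAGE_SIZE =
 * 64 KiB and HV_HYP_PAGE_SIZE = 4 KiB, a send ring that starts 128 KiB
 * into the guest buffer starts (128 KiB - 60 KiB) >> 12 = 17 Hyper-V
 * pages into the gpadl, because the unused 60 KiB tail of the first
 * guest page is not part of the gpadl.
 */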

/*
 * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
 *                  the gpadl
 *
 * @type: the type of the gpadl
 * @kbuffer: the pointer to the gpadl in the guest
 * @size: the total size (in bytes) of the gpadl
 * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
 *               virtual address space of the guest
 * @i: the index
 */
static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
				 u32 size, u32 send_offset, int i)
{
	int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
	unsigned long delta = 0UL;

	switch (type) {
	case HV_GPADL_BUFFER:
		break;
	case HV_GPADL_RING:
		if (i == 0)
			delta = 0;
		else if (i <= send_idx)
			delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
		else
			delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
		break;
	default:
		BUG();
		break;
	}

	return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
}
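
/*
 * Continuing the example above (values assumed): for a RING gpadl with
 * send_offset = 128 KiB, send_idx = 17. Hyper-V page 0 maps the start of
 * kbuffer; pages 1..17 skip the first ring's 60 KiB header gap; pages
 * beyond 17 skip both ring header gaps, so delta = 120 KiB is added
 * before the linear HV_HYP_PAGE_SIZE * i term.
 */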

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	trace_vmbus_setevent(channel);

	/*
	 * For channels marked as in "low latency" mode,
	 * bypass the monitor page mechanism.
	 */
	if (channel->offermsg.monitor_allocated && !channel->low_latency) {
		vmbus_send_interrupt(channel->offermsg.child_relid);

		/* Get the child to parent monitor page */
		monitorpage = vmbus_connection.monitor_pages[1];

		sync_set_bit(channel->monitor_bit,
			(unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);

	} else {
		vmbus_set_event(channel);
	}
}
EXPORT_SYMBOL_GPL(vmbus_setevent);

/* vmbus_free_ring - drop mapping of ring buffer */
void vmbus_free_ring(struct vmbus_channel *channel)
{
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);

	if (channel->ringbuffer_page) {
		__free_pages(channel->ringbuffer_page,
			     get_order(channel->ringbuffer_pagecount
				       << PAGE_SHIFT));
		channel->ringbuffer_page = NULL;
	}
}
EXPORT_SYMBOL_GPL(vmbus_free_ring);

/* vmbus_alloc_ring - allocate and map pages for ring buffer */
int vmbus_alloc_ring(struct vmbus_channel *newchannel,
		     u32 send_size, u32 recv_size)
{
	struct page *page;
	int order;

	if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
		return -EINVAL;

	/* Allocate the ring buffer */
	order = get_order(send_size + recv_size);
	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
				GFP_KERNEL|__GFP_ZERO, order);

	if (!page)
		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);

	if (!page)
		return -ENOMEM;

	newchannel->ringbuffer_page = page;
	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;

	return 0;
}
EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
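
/*
 * Layout note (worked example, sizes assumed): the send and receive rings
 * share one physically contiguous allocation, send ring first. With 4 KiB
 * pages and send_size = recv_size = 64 KiB, ringbuffer_pagecount = 32 and
 * ringbuffer_send_offset = 16, i.e. pages [0, 16) back the outbound ring
 * and pages [16, 32) the inbound ring.
 */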

/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const guid_t *shv_guest_service_id,
				  const guid_t *shv_host_service_id)
{
	struct vmbus_channel_tl_connect_request conn_msg;
	int ret;

	memset(&conn_msg, 0, sizeof(conn_msg));
	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
	conn_msg.guest_endpoint_id = *shv_guest_service_id;
	conn_msg.host_service_id = *shv_host_service_id;

	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);

	trace_vmbus_send_tl_connect_request(&conn_msg, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);

static int send_modifychannel_without_ack(struct vmbus_channel *channel, u32 target_vp)
{
	struct vmbus_channel_modifychannel msg;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
	msg.child_relid = channel->offermsg.child_relid;
	msg.target_vp = target_vp;

	ret = vmbus_post_msg(&msg, sizeof(msg), true);
	trace_vmbus_send_modifychannel(&msg, ret);

	return ret;
}

static int send_modifychannel_with_ack(struct vmbus_channel *channel, u32 target_vp)
{
	struct vmbus_channel_modifychannel *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	info = kzalloc(sizeof(struct vmbus_channel_msginfo) +
				sizeof(struct vmbus_channel_modifychannel),
		       GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);
	info->waiting_channel = channel;

	msg = (struct vmbus_channel_modifychannel *)info->msg;
	msg->header.msgtype = CHANNELMSG_MODIFYCHANNEL;
	msg->child_relid = channel->offermsg.child_relid;
	msg->target_vp = target_vp;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(msg, sizeof(*msg), true);
	trace_vmbus_send_modifychannel(msg, ret);
	if (ret != 0) {
		spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
		list_del(&info->msglistentry);
		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
		goto free_info;
	}

	/*
	 * Release channel_mutex; otherwise, vmbus_onoffer_rescind() could block on
	 * the mutex and be unable to signal the completion.
	 *
	 * See the caller target_cpu_store() for information about the usage of the
	 * mutex.
	 */
	mutex_unlock(&vmbus_connection.channel_mutex);
	wait_for_completion(&info->waitevent);
	mutex_lock(&vmbus_connection.channel_mutex);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (info->response.modify_response.status)
		ret = -EAGAIN;

free_info:
	kfree(info);
	return ret;
}

/*
 * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
 *
 * CHANNELMSG_MODIFYCHANNEL messages are asynchronous.  When VMbus version 5.3
 * or later is negotiated, Hyper-V always sends an ACK in response to such a
 * message.  For VMbus version 5.2 and earlier, it never sends an ACK.  Without
 * an ACK, we cannot know when the host will stop interrupting the "old"
 * vCPU and start interrupting the "new" vCPU for the given channel.
 *
 * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
 * VERSION_WIN10_V4_1.
 */
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp)
{
	if (vmbus_proto_version >= VERSION_WIN10_V5_3)
		return send_modifychannel_with_ack(channel, target_vp);
	return send_modifychannel_without_ack(channel, target_vp);
}
EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
			       u32 size, u32 send_offset,
			       struct vmbus_channel_msginfo **msginfo)
{
	int i;
	int pagecount;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;

	/* how many pfns can we fit in the header message */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = umin(pagecount, pfnsize / sizeof(u64));

	msgsize = sizeof(struct vmbus_channel_msginfo) +
		  sizeof(struct vmbus_channel_gpadl_header) +
		  sizeof(struct gpa_range) + pfncount * sizeof(u64);
	msgheader = kzalloc(msgsize, GFP_KERNEL);
	if (!msgheader)
		return -ENOMEM;

	INIT_LIST_HEAD(&msgheader->submsglist);
	msgheader->msgsize = msgsize;

	gpadl_header = (struct vmbus_channel_gpadl_header *)
		msgheader->msg;
	gpadl_header->rangecount = 1;
	gpadl_header->range_buflen = sizeof(struct gpa_range) +
				 pagecount * sizeof(u64);
	gpadl_header->range[0].byte_offset = 0;
	gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
	for (i = 0; i < pfncount; i++)
		gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
			type, kbuffer, size, send_offset, i);
	*msginfo = msgheader;

	pfnsum = pfncount;
	pfnleft = pagecount - pfncount;

	/* how many pfns can we fit in a body message */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_body);
	pfncount = pfnsize / sizeof(u64);

	/*
	 * If pfnleft is zero, everything fits in the header and no body
	 * messages are needed
	 */
	while (pfnleft) {
		pfncurr = umin(pfncount, pfnleft);
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_body) +
			  pfncurr * sizeof(u64);
		msgbody = kzalloc(msgsize, GFP_KERNEL);

		if (!msgbody) {
			struct vmbus_channel_msginfo *pos = NULL;
			struct vmbus_channel_msginfo *tmp = NULL;
			/*
			 * Free up all the allocated messages.
			 */
			list_for_each_entry_safe(pos, tmp,
				&msgheader->submsglist,
				msglistentry) {

				list_del(&pos->msglistentry);
				kfree(pos);
			}
			kfree(msgheader);
			return -ENOMEM;
		}

		msgbody->msgsize = msgsize;
		gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg;

		/*
		 * Gpadl is u32, and we are using a pointer which could
		 * be 64-bit. This is governed by the guest/host protocol,
		 * and so the hypervisor guarantees that this is ok.
		 */
		for (i = 0; i < pfncurr; i++)
			gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
				kbuffer, size, send_offset, pfnsum + i);

		/* add to msg header */
		list_add_tail(&msgbody->msglistentry, &msgheader->submsglist);
		pfnsum += pfncurr;
		pfnleft -= pfncurr;
	}

	return 0;
}
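
/*
 * Sizing sketch (numbers approximate and assumed, based on a 240-byte
 * MAX_SIZE_CHANNEL_MESSAGE): the header message has room for roughly 26
 * PFNs and each body message for roughly 28. A 64-page buffer would then
 * be described by the gpadl header plus two body messages
 * (26 + 28 + 10 PFNs).
 */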

/*
 * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
 *
 * @channel: a channel
 * @type: the type of the corresponding GPADL, only meaningful for the guest.
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @send_offset: the offset (in bytes) where the send ring buffer starts,
 *              should be 0 for BUFFER type gpadl
 * @gpadl: the GPADL descriptor to fill in on success
 */
static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
				   enum hv_gpadl_type type, void *kbuffer,
				   u32 size, u32 send_offset,
				   struct vmbus_gpadl *gpadl)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo, *tmp;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;

	next_gpadl_handle =
		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

	ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
	if (ret)
		return ret;

	ret = set_memory_decrypted((unsigned long)kbuffer,
				   PFN_UP(size));
	if (ret) {
		dev_warn(&channel->device_obj->device,
			 "Failed to set host visibility for new GPADL %d.\n",
			 ret);
		/* Don't leak the header and body messages on failure. */
		list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
					 msglistentry)
			kfree(submsginfo);
		kfree(msginfo);
		return ret;
	}

	init_completion(&msginfo->waitevent);
	msginfo->waiting_channel = channel;

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);

	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (channel->rescind) {
		ret = -ENODEV;
		goto cleanup;
	}

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo), true);

	trace_vmbus_establish_gpadl_header(gpadlmsg, ret);

	if (ret != 0)
		goto cleanup;

	list_for_each(curr, &msginfo->submsglist) {
		submsginfo = (struct vmbus_channel_msginfo *)curr;
		gpadl_body =
			(struct vmbus_channel_gpadl_body *)submsginfo->msg;

		gpadl_body->header.msgtype =
			CHANNELMSG_GPADL_BODY;
		gpadl_body->gpadl = next_gpadl_handle;

		ret = vmbus_post_msg(gpadl_body,
				     submsginfo->msgsize - sizeof(*submsginfo),
				     true);

		trace_vmbus_establish_gpadl_body(gpadl_body, ret);

		if (ret != 0)
			goto cleanup;

	}
	wait_for_completion(&msginfo->waitevent);

	if (msginfo->response.gpadl_created.creation_status != 0) {
		pr_err("Failed to establish GPADL: err = 0x%x\n",
		       msginfo->response.gpadl_created.creation_status);

		ret = -EDQUOT;
		goto cleanup;
	}

	if (channel->rescind) {
		ret = -ENODEV;
		goto cleanup;
	}

	/* At this point, we received the gpadl created msg */
	gpadl->gpadl_handle = gpadlmsg->gpadl;
	gpadl->buffer = kbuffer;
	gpadl->size = size;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
				 msglistentry) {
		kfree(submsginfo);
	}

	kfree(msginfo);

	if (ret)
		set_memory_encrypted((unsigned long)kbuffer,
				     PFN_UP(size));

	return ret;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl: the GPADL descriptor to fill in on success
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, struct vmbus_gpadl *gpadl)
{
	return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
				       0U, gpadl);
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
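
/*
 * Usage sketch (hypothetical caller, not part of this file; the "my_"
 * names are assumptions): a driver typically establishes a GPADL over a
 * page-aligned, page-size-multiple buffer and tears it down when done:
 *
 *	struct vmbus_gpadl my_gpadl;
 *	void *my_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	int err;
 *
 *	err = vmbus_establish_gpadl(channel, my_buf, 4 * PAGE_SIZE, &my_gpadl);
 *	if (!err) {
 *		// ... pass my_gpadl.gpadl_handle to the host ...
 *		vmbus_teardown_gpadl(channel, &my_gpadl);
 *	}
 */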

/**
 * request_arr_init - Allocates memory for the requestor array. Each slot
 * keeps track of the next available slot in the array. Initially, each
 * slot points to the next one (as in a linked list). The last slot
 * does not point to anything, so its value is U64_MAX by default.
 * @size: The size of the array
 */
static u64 *request_arr_init(u32 size)
{
	int i;
	u64 *req_arr;

	req_arr = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!req_arr)
		return NULL;

	for (i = 0; i < size - 1; i++)
		req_arr[i] = i + 1;

	/* Last slot (no more available slots) */
	req_arr[i] = U64_MAX;

	return req_arr;
}
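
/*
 * E.g., request_arr_init(4) yields { 1, 2, 3, U64_MAX }: slot 0 points to
 * slot 1, slot 1 to slot 2, and so on; U64_MAX terminates the free list.
 */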

/*
 * vmbus_alloc_requestor - Initializes @rqstor's fields.
 * Index 0 is the first free slot
 * @size: Size of the requestor array
 */
static int vmbus_alloc_requestor(struct vmbus_requestor *rqstor, u32 size)
{
	u64 *rqst_arr;
	unsigned long *bitmap;

	rqst_arr = request_arr_init(size);
	if (!rqst_arr)
		return -ENOMEM;

	bitmap = bitmap_zalloc(size, GFP_KERNEL);
	if (!bitmap) {
		kfree(rqst_arr);
		return -ENOMEM;
	}

	rqstor->req_arr = rqst_arr;
	rqstor->req_bitmap = bitmap;
	rqstor->size = size;
	rqstor->next_request_id = 0;
	spin_lock_init(&rqstor->req_lock);

	return 0;
}

/*
 * vmbus_free_requestor - Frees memory allocated for @rqstor
 * @rqstor: Pointer to the requestor struct
 */
static void vmbus_free_requestor(struct vmbus_requestor *rqstor)
{
	kfree(rqstor->req_arr);
	bitmap_free(rqstor->req_bitmap);
}

static int __vmbus_open(struct vmbus_channel *newchannel,
		       void *userdata, u32 userdatalen,
		       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	struct page *page = newchannel->ringbuffer_page;
	u32 send_pages, recv_pages;
	unsigned long flags;
	int err;

	if (userdatalen > MAX_USER_DEFINED_BYTES)
		return -EINVAL;

	send_pages = newchannel->ringbuffer_send_offset;
	recv_pages = newchannel->ringbuffer_pagecount - send_pages;

	if (newchannel->state != CHANNEL_OPEN_STATE)
		return -EINVAL;

	/* Create and init requestor */
	if (newchannel->rqstor_size) {
		if (vmbus_alloc_requestor(&newchannel->requestor, newchannel->rqstor_size))
			return -ENOMEM;
	}

	newchannel->state = CHANNEL_OPENING_STATE;
	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	if (!newchannel->max_pkt_size)
		newchannel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle.gpadl_handle = 0;

	err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
				      page_address(newchannel->ringbuffer_page),
				      (send_pages + recv_pages) << PAGE_SHIFT,
				      newchannel->ringbuffer_send_offset << PAGE_SHIFT,
				      &newchannel->ringbuffer_gpadlhandle);
	if (err)
		goto error_clean_ring;

	err = hv_ringbuffer_init(&newchannel->outbound,
				 page, send_pages, 0);
	if (err)
		goto error_free_gpadl;

	err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
				 recv_pages, newchannel->max_pkt_size);
	if (err)
		goto error_free_gpadl;

	/* Create and init the channel open message */
	open_info = kzalloc(sizeof(*open_info) +
			   sizeof(struct vmbus_channel_open_channel),
			   GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error_free_gpadl;
	}

	init_completion(&open_info->waitevent);
	open_info->waiting_channel = newchannel;

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle
		= newchannel->ringbuffer_gpadlhandle.gpadl_handle;
	/*
	 * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
	 * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
	 * here we calculate it into HV_HYP_PAGE.
	 */
	open_msg->downstream_ringbuffer_pageoffset =
		hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_clean_msglist;
	}

	err = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel), true);

	trace_vmbus_open(open_msg, err);

	if (err != 0)
		goto error_clean_msglist;

	wait_for_completion(&open_info->waitevent);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_info;
	}

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_free_info;
	}

	newchannel->state = CHANNEL_OPENED_STATE;
	kfree(open_info);
	return 0;

error_clean_msglist:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
error_free_info:
	kfree(open_info);
error_free_gpadl:
	vmbus_teardown_gpadl(newchannel, &newchannel->ringbuffer_gpadlhandle);
error_clean_ring:
	hv_ringbuffer_cleanup(&newchannel->outbound);
	hv_ringbuffer_cleanup(&newchannel->inbound);
	vmbus_free_requestor(&newchannel->requestor);
	newchannel->state = CHANNEL_OPEN_STATE;
	return err;
}

/*
 * vmbus_connect_ring - Open the channel but reuse ring buffer
 */
int vmbus_connect_ring(struct vmbus_channel *newchannel,
		       void (*onchannelcallback)(void *context), void *context)
{
	return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
}
EXPORT_SYMBOL_GPL(vmbus_connect_ring);

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel,
	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
	       void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	int err;

	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
			       recv_ringbuffer_size);
	if (err)
		return err;

	err = __vmbus_open(newchannel, userdata, userdatalen,
			   onchannelcallback, context);
	if (err)
		vmbus_free_ring(newchannel);

	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
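
/*
 * Usage sketch (hypothetical driver probe, not part of this file; the
 * "my_" names and ring sizes are assumptions): a driver usually opens its
 * channel once from probe() with page-size-multiple rings:
 *
 *	err = vmbus_open(dev->channel, 16 * PAGE_SIZE, 16 * PAGE_SIZE,
 *			 NULL, 0, my_onchannelcallback, my_dev);
 *	if (err)
 *		return err;
 *	// ... and on remove(): vmbus_close(dev->channel);
 */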

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpadl)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	info = kzalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);
	info->waiting_channel = channel;

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl->gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (channel->rescind)
		goto post_msg_err;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
			     true);

	trace_vmbus_teardown_gpadl(msg, ret);

	if (ret)
		goto post_msg_err;

	wait_for_completion(&info->waitevent);

	gpadl->gpadl_handle = 0;

post_msg_err:
	/*
	 * If the channel has been rescinded, we will be awakened by the
	 * rescind handler; set the error code to zero so we don't leak memory.
	 */
	if (channel->rescind)
		ret = 0;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);

	ret = set_memory_encrypted((unsigned long)gpadl->buffer,
				   PFN_UP(gpadl->size));
	if (ret)
		pr_warn("Failed to set memory host visibility in GPADL teardown: %d.\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
	unsigned long flags;

	/*
	 * vmbus_on_event(), running in the per-channel tasklet, can race
	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
	 * the former is accessing channel->inbound.ring_buffer, the latter
	 * could be freeing the ring_buffer pages, so here we must stop it
	 * first.
	 *
	 * vmbus_chan_sched() might call the netvsc driver callback function
	 * that ends up scheduling NAPI work that accesses the ring buffer.
	 * At this point, we have to ensure that any such work is completed
	 * and that the channel ring buffer is no longer being accessed, cf.
	 * the calls to napi_disable() in netvsc_device_remove().
	 */
	tasklet_disable(&channel->callback_event);

	/* See the inline comments in vmbus_chan_sched(). */
	spin_lock_irqsave(&channel->sched_lock, flags);
	channel->onchannel_callback = NULL;
	spin_unlock_irqrestore(&channel->sched_lock, flags);

	channel->sc_creation_callback = NULL;

	/* Re-enable tasklet for use on re-open */
	tasklet_enable(&channel->callback_event);
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	int ret;

	vmbus_reset_channel_cb(channel);

	/*
	 * In case a device driver's probe() fails (e.g.,
	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
	 * rescinded later (e.g., we dynamically disable an Integrated Service
	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
	 * here we should skip most of the below cleanup work.
	 */
	if (channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;

	channel->state = CHANNEL_OPEN_STATE;

	/* Send a closing message */

	msg = &channel->close_msg.msg;

	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
			     true);

	trace_vmbus_close_internal(msg, ret);

	if (ret) {
		pr_err("Close failed: close post msg return is %d\n", ret);
		/*
		 * If we failed to post the close msg,
		 * it is perhaps better to leak memory.
		 */
	}

	/* Tear down the gpadl for the channel's ring buffer */
	else if (channel->ringbuffer_gpadlhandle.gpadl_handle) {
		ret = vmbus_teardown_gpadl(channel, &channel->ringbuffer_gpadlhandle);
		if (ret) {
			pr_err("Close failed: teardown gpadl return %d\n", ret);
			/*
			 * If we failed to teardown gpadl,
			 * it is perhaps better to leak memory.
			 */
		}
	}

	if (!ret)
		vmbus_free_requestor(&channel->requestor);

	return ret;
}

/* disconnect ring - close all channels */
int vmbus_disconnect_ring(struct vmbus_channel *channel)
{
	struct vmbus_channel *cur_channel, *tmp;
	int ret;

	if (channel->primary_channel != NULL)
		return -EINVAL;

	list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
		if (cur_channel->rescind)
			wait_for_completion(&cur_channel->rescind_event);

		mutex_lock(&vmbus_connection.channel_mutex);
		if (vmbus_close_internal(cur_channel) == 0) {
			vmbus_free_ring(cur_channel);

			if (cur_channel->rescind)
				hv_process_channel_removal(cur_channel);
		}
		mutex_unlock(&vmbus_connection.channel_mutex);
	}

	/*
	 * Now close the primary.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	ret = vmbus_close_internal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	if (vmbus_disconnect_ring(channel) == 0)
		vmbus_free_ring(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

/**
 * vmbus_sendpacket_getid() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure
 * @buffer: Pointer to the buffer you want to send the data from.
 * @bufferlen: Maximum size of what the buffer holds.
 * @requestid: Identifier of the request
 * @trans_id: Identifier of the transaction associated to this request, if
 *            the send is successful; undefined, otherwise.
 * @type: Type of packet that is being sent e.g. negotiate, time
 *	  packet etc.
 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 *
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket_getid(struct vmbus_channel *channel, void *buffer,
			   u32 bufferlen, u64 requestid, u64 *trans_id,
			   enum vmbus_packet_type type, u32 flags)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	int num_vecs = ((bufferlen != 0) ? 3 : 1);

	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-bytes granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid, trans_id);
}
EXPORT_SYMBOL(vmbus_sendpacket_getid);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure
 * @buffer: Pointer to the buffer you want to send the data from.
 * @bufferlen: Maximum size of what the buffer holds.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 *	  packet etc.
 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 *
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u64 requestid,
		     enum vmbus_packet_type type, u32 flags)
{
	return vmbus_sendpacket_getid(channel, buffer, bufferlen,
				      requestid, NULL, type, flags);
}
EXPORT_SYMBOL(vmbus_sendpacket);
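
/*
 * Usage sketch (hypothetical, not part of this file; "my_req" is an
 * assumed driver-private request struct): sending an in-band packet and
 * asking for a completion, using the request's address as the request id:
 *
 *	ret = vmbus_sendpacket(channel, &my_req, sizeof(my_req),
 *			       (unsigned long)&my_req, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */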

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows the
 * sender to control notifying the host, which is useful for sending
 * batched data. The sender can also control the send flags explicitly.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				struct hv_page_buffer pagebuffers[],
				u32 pagecount, void *buffer, u32 bufferlen,
				u64 requestid)
{
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
			  sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
	desc.reserved = 0;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
			      struct vmbus_packet_mpb_array *desc,
			      u32 desc_size,
			      void *buffer, u32 bufferlen, u64 requestid)
{
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;

	packetlen = desc_size + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
	desc->length8 = (u16)(packetlen_aligned >> 3);
	desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
	desc->reserved = 0;
	desc->rangecount = 1;

	bufferlist[0].iov_base = desc;
	bufferlist[0].iov_len = desc_size;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/**
 * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer can hold.
 * @buffer_actual_len: The actual size of the data after it was received.
 * @requestid: Identifier of the request
 * @raw: true means keep the vmpacket_descriptor header in the received data.
 *
 * Receives directly from the Hyper-V vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
		   bool raw)
{
	return hv_ringbuffer_read(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, raw);
}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u32 *buffer_actual_len,
		     u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			      u32 bufferlen, u32 *buffer_actual_len,
			      u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);

/*
 * vmbus_next_request_id - Returns a new request id. It is also
 * the index at which the guest memory address is stored.
 * Uses a spin lock to avoid race conditions.
 * @channel: Pointer to the VMbus channel struct
 * @rqst_addr: Guest memory address to be stored in the array
 */
u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
{
	struct vmbus_requestor *rqstor = &channel->requestor;
	unsigned long flags;
	u64 current_id;

	/* Check rqstor has been initialized */
	if (!channel->rqstor_size)
		return VMBUS_NO_RQSTOR;

	lock_requestor(channel, flags);
	current_id = rqstor->next_request_id;

	/* Requestor array is full */
	if (current_id >= rqstor->size) {
		unlock_requestor(channel, flags);
		return VMBUS_RQST_ERROR;
	}

	rqstor->next_request_id = rqstor->req_arr[current_id];
	rqstor->req_arr[current_id] = rqst_addr;

	/* The already held spin lock provides atomicity */
	bitmap_set(rqstor->req_bitmap, current_id, 1);

	unlock_requestor(channel, flags);

	/*
	 * Cannot return an ID of 0, which is reserved for an unsolicited
	 * message from Hyper-V; Hyper-V does not acknowledge (respond to)
	 * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED requests with ID of
	 * 0 sent by the guest.
	 */
	return current_id + 1;
}
EXPORT_SYMBOL_GPL(vmbus_next_request_id);
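
/*
 * Worked example: with a freshly initialized requestor of size 4
 * (req_arr = { 1, 2, 3, U64_MAX }, next_request_id = 0), storing an
 * address pops slot 0 off the free list: next_request_id becomes 1 (the
 * old req_arr[0]), req_arr[0] holds the caller's address, and the
 * function returns 0 + 1 = 1 as the on-the-wire transaction id.
 */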

/* As in vmbus_request_addr_match() but without the requestor lock */
u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
			       u64 rqst_addr)
{
	struct vmbus_requestor *rqstor = &channel->requestor;
	u64 req_addr;

	/* Check rqstor has been initialized */
	if (!channel->rqstor_size)
		return VMBUS_NO_RQSTOR;

	/* Hyper-V can send an unsolicited message with ID of 0 */
	if (!trans_id)
		return VMBUS_RQST_ERROR;

	/* Data corresponding to trans_id is stored at trans_id - 1 */
	trans_id--;

	/* Invalid trans_id */
	if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap))
		return VMBUS_RQST_ERROR;

	req_addr = rqstor->req_arr[trans_id];
	if (rqst_addr == VMBUS_RQST_ADDR_ANY || req_addr == rqst_addr) {
		rqstor->req_arr[trans_id] = rqstor->next_request_id;
		rqstor->next_request_id = trans_id;

		/* The already held spin lock provides atomicity */
		bitmap_clear(rqstor->req_bitmap, trans_id, 1);
	}

	return req_addr;
}
EXPORT_SYMBOL_GPL(__vmbus_request_addr_match);
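
/*
 * Worked example, continuing the one above: completing trans_id 1 maps to
 * slot 0; req_arr[0] is returned to the caller, the slot is pushed back on
 * the free list (req_arr[0] = old next_request_id, next_request_id = 0),
 * and its bitmap bit is cleared, so slots are recycled LIFO.
 */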

/*
 * vmbus_request_addr_match - Clears/removes @trans_id from the @channel's
 * requestor, provided the memory address stored at @trans_id equals @rqst_addr
 * (or provided @rqst_addr matches the sentinel value VMBUS_RQST_ADDR_ANY).
 *
 * Returns the memory address stored at @trans_id, or VMBUS_RQST_ERROR if
 * @trans_id is not contained in the requestor.
 *
 * Acquires and releases the requestor spin lock.
 */
u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
			     u64 rqst_addr)
{
	unsigned long flags;
	u64 req_addr;

	lock_requestor(channel, flags);
	req_addr = __vmbus_request_addr_match(channel, trans_id, rqst_addr);
	unlock_requestor(channel, flags);

	return req_addr;
}
EXPORT_SYMBOL_GPL(vmbus_request_addr_match);

/*
 * vmbus_request_addr - Returns the memory address stored at @trans_id
 * in @rqstor. Uses a spin lock to avoid race conditions.
 * @channel: Pointer to the VMbus channel struct
 * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
 * next request id.
 */
u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
{
	return vmbus_request_addr_match(channel, trans_id, VMBUS_RQST_ADDR_ANY);
}
EXPORT_SYMBOL_GPL(vmbus_request_addr);