v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2009, Microsoft Corporation.
   4 *
   5 * Authors:
   6 *   Haiyang Zhang <haiyangz@microsoft.com>
   7 *   Hank Janssen  <hjanssen@microsoft.com>
   8 */
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/kernel.h>
  12#include <linux/sched.h>
  13#include <linux/wait.h>
  14#include <linux/mm.h>
  15#include <linux/slab.h>
  16#include <linux/module.h>
  17#include <linux/hyperv.h>
  18#include <linux/uio.h>
  19#include <linux/interrupt.h>
  20#include <linux/set_memory.h>
  21#include <asm/page.h>
  22#include <asm/mshyperv.h>
  23
  24#include "hyperv_vmbus.h"
  25
  26/*
  27 * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
  28 *
  29 * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
  30 *
  31 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
  32 * (because of the alignment requirement), however, the hypervisor only
  33 * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
  34 * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
  35 * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
  36 * total size that the guest uses minus twice of the gap size.
  37 */
  38static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
  39{
  40	switch (type) {
  41	case HV_GPADL_BUFFER:
  42		return size;
  43	case HV_GPADL_RING:
  44		/* The size of a ringbuffer must be page-aligned */
  45		BUG_ON(size % PAGE_SIZE);
  46		/*
  47		 * Two things to notice here:
  48		 * 1) We're processing two ring buffers as a unit
  49		 * 2) We're skipping any space larger than HV_HYP_PAGE_SIZE in
  50		 * the first guest-size page of each of the two ring buffers.
  51		 * So we effectively subtract out two guest-size pages, and add
  52		 * back two Hyper-V size pages.
  53		 */
  54		return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
  55	}
  56	BUG();
  57	return 0;
  58}
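/*
 * Worked example (illustrative, not from the original source): on an ARM64
 * guest with 64 KiB pages (PAGE_SIZE = 0x10000) and the fixed 4 KiB Hyper-V
 * page (HV_HYP_PAGE_SIZE = 0x1000), a RING gpadl covering two 128 KiB rings
 * works out as:
 *
 *	size                         = 2 * 0x20000          = 0x40000
 *	gap per ring                 = 0x10000 - 0x1000     = 0xf000
 *	hv_gpadl_size(HV_GPADL_RING) = 0x40000 - 2 * 0xf000 = 0x22000
 *
 * On x86, PAGE_SIZE == HV_HYP_PAGE_SIZE, so the gap is zero and both gpadl
 * types are exactly as large as the guest buffer.
 */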
  59
  60/*
  61 * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
  62 *                                 HV_HYP_PAGE) in a ring gpadl based on the
  63 *                                 offset in the guest
  64 *
  65 * @offset: the offset (in bytes) where the send ringbuffer starts in the
  66 *               virtual address space of the guest
  67 */
  68static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
  69{
  70
  71	/*
  72	 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
  73	 * header (because of the alignment requirement), however, the
  74	 * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
  75	 * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
  76	 *
  77	 * To calculate the effective send offset in the gpadl, we need to
  78	 * subtract this gap.
  79	 */
  80	return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
  81}
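/*
 * Continuing the illustrative 64 KiB-page example above: for a send offset
 * of 0x20000 bytes, the send ringbuffer starts at Hyper-V page
 *
 *	(0x20000 - (0x10000 - 0x1000)) >> 12 = 0x11000 >> 12 = 0x11
 *
 * of the gpadl, i.e. 17 Hyper-V pages in rather than 32, because the gap
 * behind the first ring's oversized header page is not part of the gpadl.
 */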
  82
  83/*
  84 * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
  85 *                  the gpadl
  86 *
  87 * @type: the type of the gpadl
  88 * @kbuffer: the pointer to the gpadl in the guest
  89 * @size: the total size (in bytes) of the gpadl
  90 * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
  91 *               virtual address space of the guest
  92 * @i: the index
  93 */
  94static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
  95				 u32 size, u32 send_offset, int i)
  96{
  97	int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
  98	unsigned long delta = 0UL;
  99
 100	switch (type) {
 101	case HV_GPADL_BUFFER:
 102		break;
 103	case HV_GPADL_RING:
 104		if (i == 0)
 105			delta = 0;
 106		else if (i <= send_idx)
 107			delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
 108		else
 109			delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
 110		break;
 111	default:
 112		BUG();
 113		break;
 114	}
 115
 116	return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
 117}
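/*
 * Illustrative index-to-delta mapping for the same example (send_offset =
 * 0x20000, so send_idx = 0x11): Hyper-V page 0 maps the start of the first
 * header page (delta = 0); pages 1..0x11 are shifted by 0xf000 to skip the
 * unused tail of the first ring's header page; pages above 0x11 are shifted
 * by 2 * 0xf000 to also skip the second ring's header tail.
 */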
 118
 119/*
 120 * vmbus_setevent - Trigger an event notification on the specified
 121 * channel.
 122 */
 123void vmbus_setevent(struct vmbus_channel *channel)
 124{
 125	struct hv_monitor_page *monitorpage;
 126
 127	trace_vmbus_setevent(channel);
 128
 129	/*
 130	 * For channels marked as in "low latency" mode
 131	 * bypass the monitor page mechanism.
 132	 */
 133	if (channel->offermsg.monitor_allocated && !channel->low_latency) {
 134		vmbus_send_interrupt(channel->offermsg.child_relid);
 135
 136		/* Get the child to parent monitor page */
 137		monitorpage = vmbus_connection.monitor_pages[1];
 138
 139		sync_set_bit(channel->monitor_bit,
 140			(unsigned long *)&monitorpage->trigger_group
 141					[channel->monitor_grp].pending);
 142
 143	} else {
 144		vmbus_set_event(channel);
 145	}
 146}
 147EXPORT_SYMBOL_GPL(vmbus_setevent);
 148
 149/* vmbus_free_ring - drop mapping of ring buffer */
 150void vmbus_free_ring(struct vmbus_channel *channel)
 151{
 152	hv_ringbuffer_cleanup(&channel->outbound);
 153	hv_ringbuffer_cleanup(&channel->inbound);
 154
 155	if (channel->ringbuffer_page) {
 156		__free_pages(channel->ringbuffer_page,
 157			     get_order(channel->ringbuffer_pagecount
 158				       << PAGE_SHIFT));
 159		channel->ringbuffer_page = NULL;
 160	}
 161}
 162EXPORT_SYMBOL_GPL(vmbus_free_ring);
 163
 164/* vmbus_alloc_ring - allocate and map pages for ring buffer */
 165int vmbus_alloc_ring(struct vmbus_channel *newchannel,
 166		     u32 send_size, u32 recv_size)
 167{
 168	struct page *page;
 169	int order;
 170
 171	if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
 172		return -EINVAL;
 173
 174	/* Allocate the ring buffer */
 175	order = get_order(send_size + recv_size);
 176	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
 177				GFP_KERNEL|__GFP_ZERO, order);
 178
 179	if (!page)
 180		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 181
 182	if (!page)
 183		return -ENOMEM;
 184
 185	newchannel->ringbuffer_page = page;
 186	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
 187	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
 188
 189	return 0;
 190}
 191EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
 192
 193/* Used for Hyper-V Socket: a guest client's connect() to the host */
 194int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
 195				  const guid_t *shv_host_servie_id)
 196{
 197	struct vmbus_channel_tl_connect_request conn_msg;
 198	int ret;
 199
 200	memset(&conn_msg, 0, sizeof(conn_msg));
 201	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
 202	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
 203	conn_msg.host_service_id = *shv_host_servie_id;
 204
 205	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
 206
 207	trace_vmbus_send_tl_connect_request(&conn_msg, ret);
 208
 209	return ret;
 210}
 211EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
 212
 213static int send_modifychannel_without_ack(struct vmbus_channel *channel, u32 target_vp)
 214{
 215	struct vmbus_channel_modifychannel msg;
 216	int ret;
 217
 218	memset(&msg, 0, sizeof(msg));
 219	msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
 220	msg.child_relid = channel->offermsg.child_relid;
 221	msg.target_vp = target_vp;
 222
 223	ret = vmbus_post_msg(&msg, sizeof(msg), true);
 224	trace_vmbus_send_modifychannel(&msg, ret);
 225
 226	return ret;
 227}
 228
 229static int send_modifychannel_with_ack(struct vmbus_channel *channel, u32 target_vp)
 230{
 231	struct vmbus_channel_modifychannel *msg;
 232	struct vmbus_channel_msginfo *info;
 233	unsigned long flags;
 234	int ret;
 235
 236	info = kzalloc(sizeof(struct vmbus_channel_msginfo) +
 237				sizeof(struct vmbus_channel_modifychannel),
 238		       GFP_KERNEL);
 239	if (!info)
 240		return -ENOMEM;
 241
 242	init_completion(&info->waitevent);
 243	info->waiting_channel = channel;
 244
 245	msg = (struct vmbus_channel_modifychannel *)info->msg;
 246	msg->header.msgtype = CHANNELMSG_MODIFYCHANNEL;
 247	msg->child_relid = channel->offermsg.child_relid;
 248	msg->target_vp = target_vp;
 249
 250	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 251	list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
 252	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 253
 254	ret = vmbus_post_msg(msg, sizeof(*msg), true);
 255	trace_vmbus_send_modifychannel(msg, ret);
 256	if (ret != 0) {
 257		spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 258		list_del(&info->msglistentry);
 259		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 260		goto free_info;
 261	}
 262
 263	/*
 264	 * Release channel_mutex; otherwise, vmbus_onoffer_rescind() could block on
 265	 * the mutex and be unable to signal the completion.
 266	 *
 267	 * See the caller target_cpu_store() for information about the usage of the
 268	 * mutex.
 269	 */
 270	mutex_unlock(&vmbus_connection.channel_mutex);
 271	wait_for_completion(&info->waitevent);
 272	mutex_lock(&vmbus_connection.channel_mutex);
 273
 274	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 275	list_del(&info->msglistentry);
 276	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 277
 278	if (info->response.modify_response.status)
 279		ret = -EAGAIN;
 280
 281free_info:
 282	kfree(info);
 283	return ret;
 284}
 285
 286/*
 287 * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
 288 *
 289 * CHANNELMSG_MODIFYCHANNEL messages are asynchronous.  When VMbus version 5.3
 290 * or later is negotiated, Hyper-V always sends an ACK in response to such a
 291 * message.  For VMbus version 5.2 and earlier, it never sends an ACK.
 292 * Without an ACK, we cannot know when the host will stop interrupting the
 293 * "old" vCPU and start interrupting the "new" vCPU for the given channel.
 294 *
 295 * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
 296 * VERSION_WIN10_V4_1.
 297 */
 298int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp)
 299{
 300	if (vmbus_proto_version >= VERSION_WIN10_V5_3)
 301		return send_modifychannel_with_ack(channel, target_vp);
 302	return send_modifychannel_without_ack(channel, target_vp);
 303}
 304EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
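/*
 * Minimal usage sketch (hypothetical caller, modeled on target_cpu_store()):
 * the caller must hold vmbus_connection.channel_mutex, and must tolerate the
 * with-ack path dropping and re-taking it while waiting for the host.
 *
 *	mutex_lock(&vmbus_connection.channel_mutex);
 *	ret = vmbus_send_modifychannel(channel,
 *				       hv_cpu_number_to_vp_number(new_cpu));
 *	mutex_unlock(&vmbus_connection.channel_mutex);
 */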
 305
 306/*
 307 * create_gpadl_header - Create the GPADL message(s) describing the specified buffer
 308 */
 309static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
 310			       u32 size, u32 send_offset,
 311			       struct vmbus_channel_msginfo **msginfo)
 312{
 313	int i;
 314	int pagecount;
 315	struct vmbus_channel_gpadl_header *gpadl_header;
 316	struct vmbus_channel_gpadl_body *gpadl_body;
 317	struct vmbus_channel_msginfo *msgheader;
 318	struct vmbus_channel_msginfo *msgbody = NULL;
 319	u32 msgsize;
 320
 321	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
 322
 323	pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
 324
 325	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
 326		  sizeof(struct vmbus_channel_gpadl_header) -
 327		  sizeof(struct gpa_range);
 328	pfncount = umin(pagecount, pfnsize / sizeof(u64));
 329
 330	msgsize = sizeof(struct vmbus_channel_msginfo) +
 331		  sizeof(struct vmbus_channel_gpadl_header) +
 332		  sizeof(struct gpa_range) + pfncount * sizeof(u64);
 333	msgheader =  kzalloc(msgsize, GFP_KERNEL);
 334	if (!msgheader)
 335		return -ENOMEM;
 336
 337	INIT_LIST_HEAD(&msgheader->submsglist);
 338	msgheader->msgsize = msgsize;
 339
 340	gpadl_header = (struct vmbus_channel_gpadl_header *)
 341		msgheader->msg;
 342	gpadl_header->rangecount = 1;
 343	gpadl_header->range_buflen = sizeof(struct gpa_range) +
 344				 pagecount * sizeof(u64);
 345	gpadl_header->range[0].byte_offset = 0;
 346	gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
 347	for (i = 0; i < pfncount; i++)
 348		gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
 349			type, kbuffer, size, send_offset, i);
 350	*msginfo = msgheader;
 351
 352	pfnsum = pfncount;
 353	pfnleft = pagecount - pfncount;
 354
 355	/* how many pfns can we fit in a body message */
 356	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
 357		  sizeof(struct vmbus_channel_gpadl_body);
 358	pfncount = pfnsize / sizeof(u64);
 359
 360	/*
 361	 * If pfnleft is zero, everything fits in the header and no body
 362	 * messages are needed
 363	 */
 364	while (pfnleft) {
 365		pfncurr = umin(pfncount, pfnleft);
 366		msgsize = sizeof(struct vmbus_channel_msginfo) +
 367			  sizeof(struct vmbus_channel_gpadl_body) +
 368			  pfncurr * sizeof(u64);
 369		msgbody = kzalloc(msgsize, GFP_KERNEL);
 370
 371		if (!msgbody) {
 372			struct vmbus_channel_msginfo *pos = NULL;
 373			struct vmbus_channel_msginfo *tmp = NULL;
 374			/*
 375			 * Free up all the allocated messages.
 376			 */
 377			list_for_each_entry_safe(pos, tmp,
 378				&msgheader->submsglist,
 379				msglistentry) {
 380
 381				list_del(&pos->msglistentry);
 382				kfree(pos);
 383			}
 384			kfree(msgheader);
 385			return -ENOMEM;
 386		}
 387
 388		msgbody->msgsize = msgsize;
 389		gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg;
 390
 391		/*
 392		 * Gpadl is u32 and we are using a pointer which could
 393		 * be 64-bit
 394		 * This is governed by the guest/host protocol and
 395		 * so the hypervisor guarantees that this is ok.
 396		 */
 397		for (i = 0; i < pfncurr; i++)
 398			gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
 399				kbuffer, size, send_offset, pfnsum + i);
 400
 401		/* add to msg header */
 402		list_add_tail(&msgbody->msglistentry, &msgheader->submsglist);
 403		pfnsum += pfncurr;
 404		pfnleft -= pfncurr;
 405	}
 406
 407	return 0;
 408}
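/*
 * Sizing sketch (figures illustrative): a channel message is capped at
 * MAX_SIZE_CHANNEL_MESSAGE bytes, so only the first pfncount PFNs fit in
 * the GPADL_HEADER message and the rest are chained as GPADL_BODY messages
 * on msgheader->submsglist. For example, a 1 MiB buffer described in 4 KiB
 * Hyper-V pages needs 256 PFNs, i.e. 2 KiB of u64 entries, which is far
 * more than one message payload and therefore produces a header message
 * plus several body messages.
 */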
 409
 410/*
 411 * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
 412 *
 413 * @channel: a channel
 414 * @type: the type of the corresponding GPADL, only meaningful for the guest.
 415 * @kbuffer: from kmalloc or vmalloc
 416 * @size: page-size multiple
 417 * @send_offset: the offset (in bytes) where the send ring buffer starts,
 418 *              should be 0 for BUFFER type gpadl
 419 * @gpadl: the vmbus_gpadl to be filled in with the established GPADL handle
 420 */
 421static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
 422				   enum hv_gpadl_type type, void *kbuffer,
 423				   u32 size, u32 send_offset,
 424				   struct vmbus_gpadl *gpadl)
 425{
 426	struct vmbus_channel_gpadl_header *gpadlmsg;
 427	struct vmbus_channel_gpadl_body *gpadl_body;
 428	struct vmbus_channel_msginfo *msginfo = NULL;
 429	struct vmbus_channel_msginfo *submsginfo, *tmp;
 430	struct list_head *curr;
 431	u32 next_gpadl_handle;
 432	unsigned long flags;
 433	int ret = 0;
 434
 435	next_gpadl_handle =
 436		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
 437
 438	ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
 439	if (ret)
 440		return ret;
 441
 442	ret = set_memory_decrypted((unsigned long)kbuffer,
 443				   PFN_UP(size));
 444	if (ret) {
 445		dev_warn(&channel->device_obj->device,
 446			 "Failed to set host visibility for new GPADL %d.\n",
 447			 ret);
 448		return ret;
 449	}
 450
 451	init_completion(&msginfo->waitevent);
 452	msginfo->waiting_channel = channel;
 453
 454	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
 455	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
 456	gpadlmsg->child_relid = channel->offermsg.child_relid;
 457	gpadlmsg->gpadl = next_gpadl_handle;
 458
 459
 460	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 461	list_add_tail(&msginfo->msglistentry,
 462		      &vmbus_connection.chn_msg_list);
 463
 464	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 465
 466	if (channel->rescind) {
 467		ret = -ENODEV;
 468		goto cleanup;
 469	}
 470
 471	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
 472			     sizeof(*msginfo), true);
 473
 474	trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
 475
 476	if (ret != 0)
 477		goto cleanup;
 478
 479	list_for_each(curr, &msginfo->submsglist) {
 480		submsginfo = (struct vmbus_channel_msginfo *)curr;
 481		gpadl_body =
 482			(struct vmbus_channel_gpadl_body *)submsginfo->msg;
 483
 484		gpadl_body->header.msgtype =
 485			CHANNELMSG_GPADL_BODY;
 486		gpadl_body->gpadl = next_gpadl_handle;
 487
 488		ret = vmbus_post_msg(gpadl_body,
 489				     submsginfo->msgsize - sizeof(*submsginfo),
 490				     true);
 491
 492		trace_vmbus_establish_gpadl_body(gpadl_body, ret);
 493
 494		if (ret != 0)
 495			goto cleanup;
 496
 497	}
 498	wait_for_completion(&msginfo->waitevent);
 499
 500	if (msginfo->response.gpadl_created.creation_status != 0) {
 501		pr_err("Failed to establish GPADL: err = 0x%x\n",
 502		       msginfo->response.gpadl_created.creation_status);
 503
 504		ret = -EDQUOT;
 505		goto cleanup;
 506	}
 507
 508	if (channel->rescind) {
 509		ret = -ENODEV;
 510		goto cleanup;
 511	}
 512
 513	/* At this point, we received the gpadl created msg */
 514	gpadl->gpadl_handle = gpadlmsg->gpadl;
 515	gpadl->buffer = kbuffer;
 516	gpadl->size = size;
 517
 518
 519cleanup:
 520	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 521	list_del(&msginfo->msglistentry);
 522	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 523	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
 524				 msglistentry) {
 525		kfree(submsginfo);
 526	}
 527
 528	kfree(msginfo);
 529
 530	if (ret)
 531		set_memory_encrypted((unsigned long)kbuffer,
 532				     PFN_UP(size));
 533
 534	return ret;
 535}
 536
 537/*
 538 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 539 *
 540 * @channel: a channel
 541 * @kbuffer: from kmalloc or vmalloc
 542 * @size: page-size multiple
 543 * @gpadl: the vmbus_gpadl to be filled in with the established GPADL handle
 544 */
 545int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
 546			  u32 size, struct vmbus_gpadl *gpadl)
 547{
 548	return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
 549				       0U, gpadl);
 550}
 551EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
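/*
 * Minimal usage sketch (hypothetical driver code, error handling trimmed):
 * establish a GPADL for one page-aligned buffer, use the returned handle in
 * messages to the host, and tear it down before freeing the memory.
 *
 *	struct vmbus_gpadl gpadl;
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = vmbus_establish_gpadl(channel, buf, PAGE_SIZE, &gpadl);
 *	if (ret) {
 *		free_pages((unsigned long)buf, 0);
 *		return ret;
 *	}
 *	...pass gpadl.gpadl_handle to the host, do I/O through buf...
 *	vmbus_teardown_gpadl(channel, &gpadl);
 *	free_pages((unsigned long)buf, 0);
 */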
 552
 553/**
 554 * request_arr_init - Allocates memory for the requestor array. Each slot
 555 * keeps track of the next available slot in the array. Initially, each
 556 * slot points to the next one (as in a linked list). The last slot
 557 * does not point to anything, so its value is U64_MAX by default.
 558 * @size: The size of the array
 559 */
 560static u64 *request_arr_init(u32 size)
 561{
 562	int i;
 563	u64 *req_arr;
 564
 565	req_arr = kcalloc(size, sizeof(u64), GFP_KERNEL);
 566	if (!req_arr)
 567		return NULL;
 568
 569	for (i = 0; i < size - 1; i++)
 570		req_arr[i] = i + 1;
 571
 572	/* Last slot (no more available slots) */
 573	req_arr[i] = U64_MAX;
 574
 575	return req_arr;
 576}
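/*
 * Worked example (illustrative): request_arr_init(4) produces
 *
 *	req_arr = { 1, 2, 3, U64_MAX }
 *
 * so slot 0's "next free slot" is 1, slot 1's is 2, and so on, with
 * U64_MAX terminating the free list. With next_request_id starting at 0,
 * allocations hand out slots 0, 1, 2, 3 in order until the list is empty.
 */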
 577
 578/*
 579 * vmbus_alloc_requestor - Initializes @rqstor's fields.
 580 * Index 0 is the first free slot
 581 * @size: Size of the requestor array
 582 */
 583static int vmbus_alloc_requestor(struct vmbus_requestor *rqstor, u32 size)
 584{
 585	u64 *rqst_arr;
 586	unsigned long *bitmap;
 587
 588	rqst_arr = request_arr_init(size);
 589	if (!rqst_arr)
 590		return -ENOMEM;
 591
 592	bitmap = bitmap_zalloc(size, GFP_KERNEL);
 593	if (!bitmap) {
 594		kfree(rqst_arr);
 595		return -ENOMEM;
 596	}
 597
 598	rqstor->req_arr = rqst_arr;
 599	rqstor->req_bitmap = bitmap;
 600	rqstor->size = size;
 601	rqstor->next_request_id = 0;
 602	spin_lock_init(&rqstor->req_lock);
 603
 604	return 0;
 605}
 606
 607/*
 608 * vmbus_free_requestor - Frees memory allocated for @rqstor
 609 * @rqstor: Pointer to the requestor struct
 610 */
 611static void vmbus_free_requestor(struct vmbus_requestor *rqstor)
 612{
 613	kfree(rqstor->req_arr);
 614	bitmap_free(rqstor->req_bitmap);
 615}
 616
 617static int __vmbus_open(struct vmbus_channel *newchannel,
 618		       void *userdata, u32 userdatalen,
 619		       void (*onchannelcallback)(void *context), void *context)
 620{
 621	struct vmbus_channel_open_channel *open_msg;
 622	struct vmbus_channel_msginfo *open_info = NULL;
 623	struct page *page = newchannel->ringbuffer_page;
 624	u32 send_pages, recv_pages;
 625	unsigned long flags;
 626	int err;
 627
 628	if (userdatalen > MAX_USER_DEFINED_BYTES)
 629		return -EINVAL;
 630
 631	send_pages = newchannel->ringbuffer_send_offset;
 632	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
 633
 634	if (newchannel->state != CHANNEL_OPEN_STATE)
 635		return -EINVAL;
 636
 637	/* Create and init requestor */
 638	if (newchannel->rqstor_size) {
 639		if (vmbus_alloc_requestor(&newchannel->requestor, newchannel->rqstor_size))
 640			return -ENOMEM;
 641	}
 642
 643	newchannel->state = CHANNEL_OPENING_STATE;
 644	newchannel->onchannel_callback = onchannelcallback;
 645	newchannel->channel_callback_context = context;
 646
 647	if (!newchannel->max_pkt_size)
 648		newchannel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;
 649
 650	/* Establish the gpadl for the ring buffer */
 651	newchannel->ringbuffer_gpadlhandle.gpadl_handle = 0;
 652
 653	err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
 654				      page_address(newchannel->ringbuffer_page),
 655				      (send_pages + recv_pages) << PAGE_SHIFT,
 656				      newchannel->ringbuffer_send_offset << PAGE_SHIFT,
 657				      &newchannel->ringbuffer_gpadlhandle);
 658	if (err)
 659		goto error_clean_ring;
 660
 661	err = hv_ringbuffer_init(&newchannel->outbound,
 662				 page, send_pages, 0);
 663	if (err)
 664		goto error_free_gpadl;
 665
 666	err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
 667				 recv_pages, newchannel->max_pkt_size);
 668	if (err)
 669		goto error_free_gpadl;
 670
 671	/* Create and init the channel open message */
 672	open_info = kzalloc(sizeof(*open_info) +
 673			   sizeof(struct vmbus_channel_open_channel),
 674			   GFP_KERNEL);
 675	if (!open_info) {
 676		err = -ENOMEM;
 677		goto error_free_gpadl;
 678	}
 679
 680	init_completion(&open_info->waitevent);
 681	open_info->waiting_channel = newchannel;
 682
 683	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
 684	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
 685	open_msg->openid = newchannel->offermsg.child_relid;
 686	open_msg->child_relid = newchannel->offermsg.child_relid;
 687	open_msg->ringbuffer_gpadlhandle
 688		= newchannel->ringbuffer_gpadlhandle.gpadl_handle;
 689	/*
 690	 * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
 691	 * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
 692	 * here we calculate it into HV_HYP_PAGE.
 693	 */
 694	open_msg->downstream_ringbuffer_pageoffset =
 695		hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
 696	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
 697
 698	if (userdatalen)
 699		memcpy(open_msg->userdata, userdata, userdatalen);
 700
 701	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 702	list_add_tail(&open_info->msglistentry,
 703		      &vmbus_connection.chn_msg_list);
 704	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 705
 706	if (newchannel->rescind) {
 707		err = -ENODEV;
 708		goto error_clean_msglist;
 709	}
 710
 711	err = vmbus_post_msg(open_msg,
 712			     sizeof(struct vmbus_channel_open_channel), true);
 713
 714	trace_vmbus_open(open_msg, err);
 715
 716	if (err != 0)
 717		goto error_clean_msglist;
 718
 719	wait_for_completion(&open_info->waitevent);
 720
 721	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 722	list_del(&open_info->msglistentry);
 723	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 724
 725	if (newchannel->rescind) {
 726		err = -ENODEV;
 727		goto error_free_info;
 728	}
 729
 730	if (open_info->response.open_result.status) {
 731		err = -EAGAIN;
 732		goto error_free_info;
 733	}
 734
 735	newchannel->state = CHANNEL_OPENED_STATE;
 736	kfree(open_info);
 737	return 0;
 738
 739error_clean_msglist:
 740	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 741	list_del(&open_info->msglistentry);
 742	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 743error_free_info:
 744	kfree(open_info);
 745error_free_gpadl:
 746	vmbus_teardown_gpadl(newchannel, &newchannel->ringbuffer_gpadlhandle);
 747error_clean_ring:
 748	hv_ringbuffer_cleanup(&newchannel->outbound);
 749	hv_ringbuffer_cleanup(&newchannel->inbound);
 750	vmbus_free_requestor(&newchannel->requestor);
 751	newchannel->state = CHANNEL_OPEN_STATE;
 752	return err;
 753}
 754
 755/*
 756 * vmbus_connect_ring - Open the channel but reuse ring buffer
 757 */
 758int vmbus_connect_ring(struct vmbus_channel *newchannel,
 759		       void (*onchannelcallback)(void *context), void *context)
 760{
 761	return  __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
 762}
 763EXPORT_SYMBOL_GPL(vmbus_connect_ring);
 764
 765/*
 766 * vmbus_open - Open the specified channel.
 767 */
 768int vmbus_open(struct vmbus_channel *newchannel,
 769	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
 770	       void *userdata, u32 userdatalen,
 771	       void (*onchannelcallback)(void *context), void *context)
 772{
 773	int err;
 774
 775	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
 776			       recv_ringbuffer_size);
 777	if (err)
 778		return err;
 779
 780	err = __vmbus_open(newchannel, userdata, userdatalen,
 781			   onchannelcallback, context);
 782	if (err)
 783		vmbus_free_ring(newchannel);
 784
 785	return err;
 786}
 787EXPORT_SYMBOL_GPL(vmbus_open);
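/*
 * Minimal usage sketch (hypothetical driver probe; ring sizes illustrative):
 * open the channel with 16-page send and receive rings and a callback that
 * drains inbound packets.
 *
 *	static void my_onchannel(void *ctx)
 *	{
 *		struct my_dev *dev = ctx;
 *
 *		...read packets, see the vmbus_recvpacket() sketch below...
 *	}
 *
 *	ret = vmbus_open(channel, 16 * PAGE_SIZE, 16 * PAGE_SIZE,
 *			 NULL, 0, my_onchannel, dev);
 *	if (ret)
 *		return ret;
 *	...
 *	vmbus_close(channel);
 */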
 788
 789/*
 790 * vmbus_teardown_gpadl - Tear down the specified GPADL handle
 791 */
 792int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpadl)
 793{
 794	struct vmbus_channel_gpadl_teardown *msg;
 795	struct vmbus_channel_msginfo *info;
 796	unsigned long flags;
 797	int ret;
 798
 799	info = kzalloc(sizeof(*info) +
 800		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
 801	if (!info)
 802		return -ENOMEM;
 803
 804	init_completion(&info->waitevent);
 805	info->waiting_channel = channel;
 806
 807	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
 808
 809	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
 810	msg->child_relid = channel->offermsg.child_relid;
 811	msg->gpadl = gpadl->gpadl_handle;
 812
 813	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 814	list_add_tail(&info->msglistentry,
 815		      &vmbus_connection.chn_msg_list);
 816	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 817
 818	if (channel->rescind)
 819		goto post_msg_err;
 820
 821	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
 822			     true);
 823
 824	trace_vmbus_teardown_gpadl(msg, ret);
 825
 826	if (ret)
 827		goto post_msg_err;
 828
 829	wait_for_completion(&info->waitevent);
 830
 831	gpadl->gpadl_handle = 0;
 832
 833post_msg_err:
 834	/*
 835	 * If the channel has been rescinded, we will be awakened by the
 836	 * rescind handler; set the error code to zero so we don't leak
 837	 * memory.
 838	 */
 839	if (channel->rescind)
 840		ret = 0;
 841
 842	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 843	list_del(&info->msglistentry);
 844	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 845
 846	kfree(info);
 847
 848	ret = set_memory_encrypted((unsigned long)gpadl->buffer,
 849				   PFN_UP(gpadl->size));
 850	if (ret)
 851		pr_warn("Failed to set mem host visibility in GPADL teardown %d.\n", ret);
 852
 853	return ret;
 854}
 855EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
 856
 857void vmbus_reset_channel_cb(struct vmbus_channel *channel)
 858{
 859	unsigned long flags;
 860
 861	/*
 862	 * vmbus_on_event(), running in the per-channel tasklet, can race
 863	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
 864	 * the former is accessing channel->inbound.ring_buffer, the latter
 865	 * could be freeing the ring_buffer pages, so here we must stop it
 866	 * first.
 867	 *
 868	 * vmbus_chan_sched() might call the netvsc driver callback function
 869	 * that ends up scheduling NAPI work that accesses the ring buffer.
 870	 * At this point, we have to ensure that any such work is completed
 871	 * and that the channel ring buffer is no longer being accessed, cf.
 872	 * the calls to napi_disable() in netvsc_device_remove().
 873	 */
 874	tasklet_disable(&channel->callback_event);
 875
 876	/* See the inline comments in vmbus_chan_sched(). */
 877	spin_lock_irqsave(&channel->sched_lock, flags);
 878	channel->onchannel_callback = NULL;
 879	spin_unlock_irqrestore(&channel->sched_lock, flags);
 880
 881	channel->sc_creation_callback = NULL;
 882
 883	/* Re-enable tasklet for use on re-open */
 884	tasklet_enable(&channel->callback_event);
 885}
 886
 887static int vmbus_close_internal(struct vmbus_channel *channel)
 888{
 889	struct vmbus_channel_close_channel *msg;
 890	int ret;
 891
 892	vmbus_reset_channel_cb(channel);
 893
 894	/*
 895	 * In case a device driver's probe() fails (e.g.,
 896	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
 897	 * rescinded later (e.g., we dynamically disable an Integrated Service
 898	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
 899	 * here we should skip most of the below cleanup work.
 900	 */
 901	if (channel->state != CHANNEL_OPENED_STATE)
 902		return -EINVAL;
 903
 904	channel->state = CHANNEL_OPEN_STATE;
 905
 906	/* Send a closing message */
 907
 908	msg = &channel->close_msg.msg;
 909
 910	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
 911	msg->child_relid = channel->offermsg.child_relid;
 912
 913	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
 914			     true);
 915
 916	trace_vmbus_close_internal(msg, ret);
 917
 918	if (ret) {
 919		pr_err("Close failed: close post msg return is %d\n", ret);
 920		/*
 921		 * If we failed to post the close msg,
 922		 * it is perhaps better to leak memory.
 923		 */
 924	}
 925
 926	/* Tear down the gpadl for the channel's ring buffer */
 927	else if (channel->ringbuffer_gpadlhandle.gpadl_handle) {
 928		ret = vmbus_teardown_gpadl(channel, &channel->ringbuffer_gpadlhandle);
 929		if (ret) {
 930			pr_err("Close failed: teardown gpadl return %d\n", ret);
 931			/*
 932			 * If we failed to teardown gpadl,
 933			 * it is perhaps better to leak memory.
 934			 */
 935		}
 936	}
 937
 938	if (!ret)
 939		vmbus_free_requestor(&channel->requestor);
 940
 941	return ret;
 942}
 943
 944/* disconnect ring - close all channels */
 945int vmbus_disconnect_ring(struct vmbus_channel *channel)
 946{
 947	struct vmbus_channel *cur_channel, *tmp;
 948	int ret;
 949
 950	if (channel->primary_channel != NULL)
 951		return -EINVAL;
 952
 953	list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
 954		if (cur_channel->rescind)
 955			wait_for_completion(&cur_channel->rescind_event);
 956
 957		mutex_lock(&vmbus_connection.channel_mutex);
 958		if (vmbus_close_internal(cur_channel) == 0) {
 959			vmbus_free_ring(cur_channel);
 960
 961			if (cur_channel->rescind)
 962				hv_process_channel_removal(cur_channel);
 963		}
 964		mutex_unlock(&vmbus_connection.channel_mutex);
 965	}
 966
 967	/*
 968	 * Now close the primary.
 969	 */
 970	mutex_lock(&vmbus_connection.channel_mutex);
 971	ret = vmbus_close_internal(channel);
 972	mutex_unlock(&vmbus_connection.channel_mutex);
 973
 974	return ret;
 975}
 976EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
 977
 978/*
 979 * vmbus_close - Close the specified channel
 980 */
 981void vmbus_close(struct vmbus_channel *channel)
 982{
 983	if (vmbus_disconnect_ring(channel) == 0)
 984		vmbus_free_ring(channel);
 985}
 986EXPORT_SYMBOL_GPL(vmbus_close);
 987
 988/**
 989 * vmbus_sendpacket_getid() - Send the specified buffer on the given channel
 990 * @channel: Pointer to vmbus_channel structure
 991 * @buffer: Pointer to the buffer you want to send the data from.
 992 * @bufferlen: Maximum size of what the buffer holds.
 993 * @requestid: Identifier of the request
 994 * @trans_id: Identifier of the transaction associated with this request, if
 995 *            the send is successful; undefined otherwise.
 996 * @type: Type of packet that is being sent e.g. negotiate, time
 997 *	  packet etc.
 998 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 999 *
1000 * Sends data in @buffer directly to Hyper-V via the vmbus.
1001 * This will send the data unparsed to Hyper-V.
1002 *
1003 * Mainly used by Hyper-V drivers.
1004 */
1005int vmbus_sendpacket_getid(struct vmbus_channel *channel, void *buffer,
1006			   u32 bufferlen, u64 requestid, u64 *trans_id,
1007			   enum vmbus_packet_type type, u32 flags)
1008{
1009	struct vmpacket_descriptor desc;
1010	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
1011	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
1012	struct kvec bufferlist[3];
1013	u64 aligned_data = 0;
1014	int num_vecs = ((bufferlen != 0) ? 3 : 1);
1015
1016
1017	/* Setup the descriptor */
1018	desc.type = type; /* VmbusPacketTypeDataInBand; */
1019	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
1020	/* in 8-bytes granularity */
1021	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
1022	desc.len8 = (u16)(packetlen_aligned >> 3);
1023	desc.trans_id = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
1024
1025	bufferlist[0].iov_base = &desc;
1026	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
1027	bufferlist[1].iov_base = buffer;
1028	bufferlist[1].iov_len = bufferlen;
1029	bufferlist[2].iov_base = &aligned_data;
1030	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
1031
1032	return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid, trans_id);
1033}
1034EXPORT_SYMBOL(vmbus_sendpacket_getid);
1035
1036/**
1037 * vmbus_sendpacket() - Send the specified buffer on the given channel
1038 * @channel: Pointer to vmbus_channel structure
1039 * @buffer: Pointer to the buffer you want to send the data from.
1040 * @bufferlen: Maximum size of what the buffer holds.
1041 * @requestid: Identifier of the request
1042 * @type: Type of packet that is being sent e.g. negotiate, time
1043 *	  packet etc.
1044 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
1045 *
1046 * Sends data in @buffer directly to Hyper-V via the vmbus.
1047 * This will send the data unparsed to Hyper-V.
1048 *
1049 * Mainly used by Hyper-V drivers.
1050 */
1051int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
1052		     u32 bufferlen, u64 requestid,
1053		     enum vmbus_packet_type type, u32 flags)
1054{
1055	return vmbus_sendpacket_getid(channel, buffer, bufferlen,
1056				      requestid, NULL, type, flags);
1057}
1058EXPORT_SYMBOL(vmbus_sendpacket);
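/*
 * Minimal usage sketch (hypothetical request struct 'req'): send an in-band
 * packet and request a completion, using the request's address as the
 * requestid so the completion handler can recover it via the requestor.
 *
 *	ret = vmbus_sendpacket(channel, &req->msg, sizeof(req->msg),
 *			       (unsigned long)req, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */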
1059
1060/*
1061 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
1062 * packets using a GPADL Direct packet type. This interface allows you
1063 * to control notifying the host. This will be useful for sending
1064 * batched data. Also the sender can control the send flags
1065 * explicitly.
1066 */
1067int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1068				struct hv_page_buffer pagebuffers[],
1069				u32 pagecount, void *buffer, u32 bufferlen,
1070				u64 requestid)
1071{
1072	int i;
1073	struct vmbus_channel_packet_page_buffer desc;
1074	u32 descsize;
1075	u32 packetlen;
1076	u32 packetlen_aligned;
1077	struct kvec bufferlist[3];
1078	u64 aligned_data = 0;
1079
1080	if (pagecount > MAX_PAGE_BUFFER_COUNT)
1081		return -EINVAL;
1082
1083	/*
1084	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
1085	 * largest size we support
1086	 */
1087	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
1088			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
1089			  sizeof(struct hv_page_buffer));
1090	packetlen = descsize + bufferlen;
1091	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
1092
1093	/* Setup the descriptor */
1094	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
1095	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
1096	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
1097	desc.length8 = (u16)(packetlen_aligned >> 3);
1098	desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
1099	desc.reserved = 0;
1100	desc.rangecount = pagecount;
1101
1102	for (i = 0; i < pagecount; i++) {
1103		desc.range[i].len = pagebuffers[i].len;
1104		desc.range[i].offset = pagebuffers[i].offset;
1105		desc.range[i].pfn	 = pagebuffers[i].pfn;
1106	}
1107
1108	bufferlist[0].iov_base = &desc;
1109	bufferlist[0].iov_len = descsize;
1110	bufferlist[1].iov_base = buffer;
1111	bufferlist[1].iov_len = bufferlen;
1112	bufferlist[2].iov_base = &aligned_data;
1113	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
1114
1115	return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
1116}
1117EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
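/*
 * Minimal usage sketch (hypothetical single-page payload with a small
 * driver-defined header 'hdr'): describe one guest page by PFN and send it
 * with the header as the in-band portion.
 *
 *	struct hv_page_buffer pb = {
 *		.pfn	= page_to_pfn(page),
 *		.offset	= 0,
 *		.len	= PAGE_SIZE,
 *	};
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, &pb, 1,
 *					  &hdr, sizeof(hdr),
 *					  (unsigned long)req);
 */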
1118
1119/*
1120 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
1121 * using a GPADL Direct packet type.
1122 * The buffer includes the vmbus descriptor.
1123 */
1124int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
1125			      struct vmbus_packet_mpb_array *desc,
1126			      u32 desc_size,
1127			      void *buffer, u32 bufferlen, u64 requestid)
1128{
1129	u32 packetlen;
1130	u32 packetlen_aligned;
1131	struct kvec bufferlist[3];
1132	u64 aligned_data = 0;
1133
1134	packetlen = desc_size + bufferlen;
1135	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
1136
1137	/* Setup the descriptor */
1138	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
1139	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
1140	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
1141	desc->length8 = (u16)(packetlen_aligned >> 3);
1142	desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
1143	desc->reserved = 0;
1144	desc->rangecount = 1;
1145
1146	bufferlist[0].iov_base = desc;
1147	bufferlist[0].iov_len = desc_size;
1148	bufferlist[1].iov_base = buffer;
1149	bufferlist[1].iov_len = bufferlen;
1150	bufferlist[2].iov_base = &aligned_data;
1151	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
1152
1153	return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
1154}
1155EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
1156
1157/**
1158 * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
1159 * @channel: Pointer to vmbus_channel structure
1160 * @buffer: Pointer to the buffer you want to receive the data into.
1161 * @bufferlen: Maximum size of what the buffer can hold.
1162 * @buffer_actual_len: The actual size of the data after it was received.
1163 * @requestid: Identifier of the request
1164 * @raw: true means keep the vmpacket_descriptor header in the received data.
1165 *
1166 * Receives directly from the hyper-v vmbus and puts the data it received
1167 * into Buffer. This will receive the data unparsed from hyper-v.
1168 *
1169 * Mainly used by Hyper-V drivers.
1170 */
1171static inline int
1172__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
1173		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
1174		   bool raw)
1175{
1176	return hv_ringbuffer_read(channel, buffer, bufferlen,
1177				  buffer_actual_len, requestid, raw);
1178
1179}
1180
1181int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
1182		     u32 bufferlen, u32 *buffer_actual_len,
1183		     u64 *requestid)
1184{
1185	return __vmbus_recvpacket(channel, buffer, bufferlen,
1186				  buffer_actual_len, requestid, false);
1187}
1188EXPORT_SYMBOL(vmbus_recvpacket);
1189
1190/*
1191 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
1192 */
1193int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
1194			      u32 bufferlen, u32 *buffer_actual_len,
1195			      u64 *requestid)
1196{
1197	return __vmbus_recvpacket(channel, buffer, bufferlen,
1198				  buffer_actual_len, requestid, true);
1199}
1200EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
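/*
 * Minimal usage sketch (hypothetical channel callback): drain the inbound
 * ring; a successful read with a zero actual length means the ring is empty.
 *
 *	while (true) {
 *		ret = vmbus_recvpacket(channel, buf, buflen, &actual, &rqst);
 *		if (ret || actual == 0)
 *			break;
 *		...handle 'actual' bytes in buf; 'rqst' identifies the
 *		   completed request for completion packets...
 *	}
 */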
1201
1202/*
1203 * vmbus_next_request_id - Returns a new request id; the guest memory
1204 * address is stored in the array at index (id - 1).
1205 * Uses a spin lock to avoid race conditions.
1206 * @channel: Pointer to the VMbus channel struct
1207 * @rqst_addr: Guest memory address to be stored in the array
1208 */
1209u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
1210{
1211	struct vmbus_requestor *rqstor = &channel->requestor;
1212	unsigned long flags;
1213	u64 current_id;
1214
1215	/* Check rqstor has been initialized */
1216	if (!channel->rqstor_size)
1217		return VMBUS_NO_RQSTOR;
1218
1219	lock_requestor(channel, flags);
1220	current_id = rqstor->next_request_id;
1221
1222	/* Requestor array is full */
1223	if (current_id >= rqstor->size) {
1224		unlock_requestor(channel, flags);
1225		return VMBUS_RQST_ERROR;
1226	}
1227
1228	rqstor->next_request_id = rqstor->req_arr[current_id];
1229	rqstor->req_arr[current_id] = rqst_addr;
1230
1231	/* The already held spin lock provides atomicity */
1232	bitmap_set(rqstor->req_bitmap, current_id, 1);
1233
1234	unlock_requestor(channel, flags);
1235
1236	/*
1237	 * Cannot return an ID of 0, which is reserved for an unsolicited
1238	 * message from Hyper-V; Hyper-V does not acknowledge (respond to)
1239	 * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED requests with ID of
1240	 * 0 sent by the guest.
1241	 */
1242	return current_id + 1;
1243}
1244EXPORT_SYMBOL_GPL(vmbus_next_request_id);
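/*
 * Worked example (illustrative): with an empty requestor of size 4, three
 * calls storing addresses A, B and C return IDs 1, 2 and 3 (slot index plus
 * one, since ID 0 is reserved for unsolicited host messages). A later
 * vmbus_request_addr(channel, 2) returns B and pushes slot 1 back onto the
 * free list.
 */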
1245
1246/* As in vmbus_request_addr_match() but without the requestor lock */
1247u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1248			       u64 rqst_addr)
1249{
1250	struct vmbus_requestor *rqstor = &channel->requestor;
1251	u64 req_addr;
1252
1253	/* Check rqstor has been initialized */
1254	if (!channel->rqstor_size)
1255		return VMBUS_NO_RQSTOR;
1256
1257	/* Hyper-V can send an unsolicited message with ID of 0 */
1258	if (!trans_id)
1259		return VMBUS_RQST_ERROR;
1260
1261	/* Data corresponding to trans_id is stored at trans_id - 1 */
1262	trans_id--;
1263
1264	/* Invalid trans_id */
1265	if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap))
1266		return VMBUS_RQST_ERROR;
1267
1268	req_addr = rqstor->req_arr[trans_id];
1269	if (rqst_addr == VMBUS_RQST_ADDR_ANY || req_addr == rqst_addr) {
1270		rqstor->req_arr[trans_id] = rqstor->next_request_id;
1271		rqstor->next_request_id = trans_id;
1272
1273		/* The already held spin lock provides atomicity */
1274		bitmap_clear(rqstor->req_bitmap, trans_id, 1);
1275	}
1276
1277	return req_addr;
1278}
1279EXPORT_SYMBOL_GPL(__vmbus_request_addr_match);
1280
1281/*
1282 * vmbus_request_addr_match - Clears/removes @trans_id from the @channel's
1283 * requestor, provided the memory address stored at @trans_id equals @rqst_addr
1284 * (or provided @rqst_addr matches the sentinel value VMBUS_RQST_ADDR_ANY).
1285 *
1286 * Returns the memory address stored at @trans_id, or VMBUS_RQST_ERROR if
1287 * @trans_id is not contained in the requestor.
1288 *
1289 * Acquires and releases the requestor spin lock.
1290 */
1291u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1292			     u64 rqst_addr)
1293{
1294	unsigned long flags;
1295	u64 req_addr;
1296
1297	lock_requestor(channel, flags);
1298	req_addr = __vmbus_request_addr_match(channel, trans_id, rqst_addr);
1299	unlock_requestor(channel, flags);
1300
1301	return req_addr;
1302}
1303EXPORT_SYMBOL_GPL(vmbus_request_addr_match);
1304
1305/*
1306 * vmbus_request_addr - Returns the memory address stored at @trans_id
1307 * in @rqstor. Uses a spin lock to avoid race conditions.
1308 * @channel: Pointer to the VMbus channel struct
1309 * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
1310 * next request id.
1311 */
1312u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
1313{
1314	return vmbus_request_addr_match(channel, trans_id, VMBUS_RQST_ADDR_ANY);
1315}
1316EXPORT_SYMBOL_GPL(vmbus_request_addr);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2009, Microsoft Corporation.
   4 *
   5 * Authors:
   6 *   Haiyang Zhang <haiyangz@microsoft.com>
   7 *   Hank Janssen  <hjanssen@microsoft.com>
   8 */
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/kernel.h>
  12#include <linux/sched.h>
  13#include <linux/wait.h>
  14#include <linux/mm.h>
  15#include <linux/slab.h>
  16#include <linux/module.h>
  17#include <linux/hyperv.h>
  18#include <linux/uio.h>
  19#include <linux/interrupt.h>
  20#include <linux/set_memory.h>
  21#include <asm/page.h>
  22#include <asm/mshyperv.h>
  23
  24#include "hyperv_vmbus.h"
  25
  26/*
  27 * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
  28 *
  29 * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
  30 *
  31 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
  32 * (because of the alignment requirement), however, the hypervisor only
  33 * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
  34 * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
  35 * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
  36 * total size that the guest uses minus twice of the gap size.
  37 */
  38static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
  39{
  40	switch (type) {
  41	case HV_GPADL_BUFFER:
  42		return size;
  43	case HV_GPADL_RING:
  44		/* The size of a ringbuffer must be page-aligned */
  45		BUG_ON(size % PAGE_SIZE);
  46		/*
  47		 * Two things to notice here:
  48		 * 1) We're processing two ring buffers as a unit
  49		 * 2) We're skipping any space larger than HV_HYP_PAGE_SIZE in
  50		 * the first guest-size page of each of the two ring buffers.
  51		 * So we effectively subtract out two guest-size pages, and add
  52		 * back two Hyper-V size pages.
  53		 */
  54		return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
  55	}
  56	BUG();
  57	return 0;
  58}
  59
  60/*
  61 * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
  62 *                                 HV_HYP_PAGE) in a ring gpadl based on the
  63 *                                 offset in the guest
  64 *
  65 * @offset: the offset (in bytes) where the send ringbuffer starts in the
  66 *               virtual address space of the guest
  67 */
  68static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
  69{
  70
  71	/*
  72	 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
  73	 * header (because of the alignment requirement), however, the
  74	 * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
  75	 * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
  76	 *
  77	 * And to calculate the effective send offset in gpadl, we need to
  78	 * substract this gap.
  79	 */
  80	return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
  81}
  82
  83/*
  84 * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
  85 *                  the gpadl
  86 *
  87 * @type: the type of the gpadl
  88 * @kbuffer: the pointer to the gpadl in the guest
  89 * @size: the total size (in bytes) of the gpadl
  90 * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
  91 *               virtual address space of the guest
  92 * @i: the index
  93 */
  94static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
  95				 u32 size, u32 send_offset, int i)
  96{
  97	int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
  98	unsigned long delta = 0UL;
  99
 100	switch (type) {
 101	case HV_GPADL_BUFFER:
 102		break;
 103	case HV_GPADL_RING:
 104		if (i == 0)
 105			delta = 0;
 106		else if (i <= send_idx)
 107			delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
 108		else
 109			delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
 110		break;
 111	default:
 112		BUG();
 113		break;
 114	}
 115
 116	return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
 117}
 118
 119/*
 120 * vmbus_setevent- Trigger an event notification on the specified
 121 * channel.
 122 */
 123void vmbus_setevent(struct vmbus_channel *channel)
 124{
 125	struct hv_monitor_page *monitorpage;
 126
 127	trace_vmbus_setevent(channel);
 128
 129	/*
 130	 * For channels marked as in "low latency" mode
 131	 * bypass the monitor page mechanism.
 132	 */
 133	if (channel->offermsg.monitor_allocated && !channel->low_latency) {
 134		vmbus_send_interrupt(channel->offermsg.child_relid);
 135
 136		/* Get the child to parent monitor page */
 137		monitorpage = vmbus_connection.monitor_pages[1];
 138
 139		sync_set_bit(channel->monitor_bit,
 140			(unsigned long *)&monitorpage->trigger_group
 141					[channel->monitor_grp].pending);
 142
 143	} else {
 144		vmbus_set_event(channel);
 145	}
 146}
 147EXPORT_SYMBOL_GPL(vmbus_setevent);
 148
 149/* vmbus_free_ring - drop mapping of ring buffer */
 150void vmbus_free_ring(struct vmbus_channel *channel)
 151{
 152	hv_ringbuffer_cleanup(&channel->outbound);
 153	hv_ringbuffer_cleanup(&channel->inbound);
 154
 155	if (channel->ringbuffer_page) {
 156		/* In a CoCo VM leak the memory if it didn't get re-encrypted */
 157		if (!channel->ringbuffer_gpadlhandle.decrypted)
 158			__free_pages(channel->ringbuffer_page,
 159			     get_order(channel->ringbuffer_pagecount
 160				       << PAGE_SHIFT));
 161		channel->ringbuffer_page = NULL;
 162	}
 163}
 164EXPORT_SYMBOL_GPL(vmbus_free_ring);
 165
 166/* vmbus_alloc_ring - allocate and map pages for ring buffer */
 167int vmbus_alloc_ring(struct vmbus_channel *newchannel,
 168		     u32 send_size, u32 recv_size)
 169{
 170	struct page *page;
 171	int order;
 172
 173	if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
 174		return -EINVAL;
 175
 176	/* Allocate the ring buffer */
 177	order = get_order(send_size + recv_size);
 178	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
 179				GFP_KERNEL|__GFP_ZERO, order);
 180
 181	if (!page)
 182		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 183
 184	if (!page)
 185		return -ENOMEM;
 186
 187	newchannel->ringbuffer_page = page;
 188	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
 189	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
 190
 191	return 0;
 192}
 193EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
 194
 195/* Used for Hyper-V Socket: a guest client's connect() to the host */
 196int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
 197				  const guid_t *shv_host_servie_id)
 198{
 199	struct vmbus_channel_tl_connect_request conn_msg;
 200	int ret;
 201
 202	memset(&conn_msg, 0, sizeof(conn_msg));
 203	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
 204	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
 205	conn_msg.host_service_id = *shv_host_servie_id;
 206
 207	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
 208
 209	trace_vmbus_send_tl_connect_request(&conn_msg, ret);
 210
 211	return ret;
 212}
 213EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
 214
 215static int send_modifychannel_without_ack(struct vmbus_channel *channel, u32 target_vp)
 216{
 217	struct vmbus_channel_modifychannel msg;
 218	int ret;
 219
 220	memset(&msg, 0, sizeof(msg));
 221	msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
 222	msg.child_relid = channel->offermsg.child_relid;
 223	msg.target_vp = target_vp;
 224
 225	ret = vmbus_post_msg(&msg, sizeof(msg), true);
 226	trace_vmbus_send_modifychannel(&msg, ret);
 227
 228	return ret;
 229}
 230
 231static int send_modifychannel_with_ack(struct vmbus_channel *channel, u32 target_vp)
 232{
 233	struct vmbus_channel_modifychannel *msg;
 234	struct vmbus_channel_msginfo *info;
 235	unsigned long flags;
 236	int ret;
 237
 238	info = kzalloc(sizeof(struct vmbus_channel_msginfo) +
 239				sizeof(struct vmbus_channel_modifychannel),
 240		       GFP_KERNEL);
 241	if (!info)
 242		return -ENOMEM;
 243
 244	init_completion(&info->waitevent);
 245	info->waiting_channel = channel;
 246
 247	msg = (struct vmbus_channel_modifychannel *)info->msg;
 248	msg->header.msgtype = CHANNELMSG_MODIFYCHANNEL;
 249	msg->child_relid = channel->offermsg.child_relid;
 250	msg->target_vp = target_vp;
 251
 252	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 253	list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
 254	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 255
 256	ret = vmbus_post_msg(msg, sizeof(*msg), true);
 257	trace_vmbus_send_modifychannel(msg, ret);
 258	if (ret != 0) {
 259		spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 260		list_del(&info->msglistentry);
 261		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 262		goto free_info;
 263	}
 264
 265	/*
 266	 * Release channel_mutex; otherwise, vmbus_onoffer_rescind() could block on
 267	 * the mutex and be unable to signal the completion.
 268	 *
 269	 * See the caller target_cpu_store() for information about the usage of the
 270	 * mutex.
 271	 */
 272	mutex_unlock(&vmbus_connection.channel_mutex);
 273	wait_for_completion(&info->waitevent);
 274	mutex_lock(&vmbus_connection.channel_mutex);
 275
 276	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 277	list_del(&info->msglistentry);
 278	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 279
 280	if (info->response.modify_response.status)
 281		ret = -EAGAIN;
 282
 283free_info:
 284	kfree(info);
 285	return ret;
 286}
 287
 288/*
 289 * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
 290 *
 291 * CHANNELMSG_MODIFYCHANNEL messages are aynchronous.  When VMbus version 5.3
 292 * or later is negotiated, Hyper-V always sends an ACK in response to such a
 293 * message.  For VMbus version 5.2 and earlier, it never sends an ACK.  With-
 294 * out an ACK, we can not know when the host will stop interrupting the "old"
 295 * vCPU and start interrupting the "new" vCPU for the given channel.
 296 *
 297 * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
 298 * VERSION_WIN10_V4_1.
 299 */
 300int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp)
 301{
 302	if (vmbus_proto_version >= VERSION_WIN10_V5_3)
 303		return send_modifychannel_with_ack(channel, target_vp);
 304	return send_modifychannel_without_ack(channel, target_vp);
 305}
 306EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
 307
 308/*
 309 * create_gpadl_header - Creates a gpadl for the specified buffer
 310 */
 311static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
 312			       u32 size, u32 send_offset,
 313			       struct vmbus_channel_msginfo **msginfo)
 314{
 315	int i;
 316	int pagecount;
 317	struct vmbus_channel_gpadl_header *gpadl_header;
 318	struct vmbus_channel_gpadl_body *gpadl_body;
 319	struct vmbus_channel_msginfo *msgheader;
 320	struct vmbus_channel_msginfo *msgbody = NULL;
 321	u32 msgsize;
 322
 323	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
 324
 325	pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
 326
 327	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
 328		  sizeof(struct vmbus_channel_gpadl_header) -
 329		  sizeof(struct gpa_range);
 330	pfncount = umin(pagecount, pfnsize / sizeof(u64));
 331
 332	msgsize = sizeof(struct vmbus_channel_msginfo) +
 333		  sizeof(struct vmbus_channel_gpadl_header) +
 334		  sizeof(struct gpa_range) + pfncount * sizeof(u64);
 335	msgheader =  kzalloc(msgsize, GFP_KERNEL);
 336	if (!msgheader)
 337		return -ENOMEM;
 338
 339	INIT_LIST_HEAD(&msgheader->submsglist);
 340	msgheader->msgsize = msgsize;
 341
 342	gpadl_header = (struct vmbus_channel_gpadl_header *)
 343		msgheader->msg;
 344	gpadl_header->rangecount = 1;
 345	gpadl_header->range_buflen = sizeof(struct gpa_range) +
 346				 pagecount * sizeof(u64);
 347	gpadl_header->range[0].byte_offset = 0;
 348	gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
 349	for (i = 0; i < pfncount; i++)
 350		gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
 351			type, kbuffer, size, send_offset, i);
 352	*msginfo = msgheader;
 353
 354	pfnsum = pfncount;
 355	pfnleft = pagecount - pfncount;
 356
 357	/* how many pfns can we fit in a body message */
 358	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
 359		  sizeof(struct vmbus_channel_gpadl_body);
 360	pfncount = pfnsize / sizeof(u64);
 361
 362	/*
 363	 * If pfnleft is zero, everything fits in the header and no body
 364	 * messages are needed
 365	 */
 366	while (pfnleft) {
 367		pfncurr = umin(pfncount, pfnleft);
 368		msgsize = sizeof(struct vmbus_channel_msginfo) +
 369			  sizeof(struct vmbus_channel_gpadl_body) +
 370			  pfncurr * sizeof(u64);
 371		msgbody = kzalloc(msgsize, GFP_KERNEL);
 372
 373		if (!msgbody) {
 374			struct vmbus_channel_msginfo *pos = NULL;
 375			struct vmbus_channel_msginfo *tmp = NULL;
 376			/*
 377			 * Free up all the allocated messages.
 378			 */
 379			list_for_each_entry_safe(pos, tmp,
 380				&msgheader->submsglist,
 381				msglistentry) {
 382
 383				list_del(&pos->msglistentry);
 384				kfree(pos);
 385			}
 386			kfree(msgheader);
 387			return -ENOMEM;
 388		}
 389
 390		msgbody->msgsize = msgsize;
 391		gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg;
 392
 393		/*
 394	 * Gpadl is u32, while we are using a pointer which
 395	 * could be 64-bit.
 396	 * This is governed by the guest/host protocol, so the
 397	 * hypervisor guarantees that this is OK.
 398		 */
 399		for (i = 0; i < pfncurr; i++)
 400			gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
 401				kbuffer, size, send_offset, pfnsum + i);
 402
 403		/* add to msg header */
 404		list_add_tail(&msgbody->msglistentry, &msgheader->submsglist);
 405		pfnsum += pfncurr;
 406		pfnleft -= pfncurr;
 407	}
 408
 409	return 0;
 410}
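
/*
 * Worked example of the sizing logic above (a sketch, not used by this
 * file): the number of GPADL body messages needed is the count of PFNs
 * left over after filling the header message, divided (rounding up) by
 * the number of PFNs that fit in one body message.
 */
#if 0	/* illustrative sketch only */
static u32 example_gpadl_body_msgs(u32 pagecount)
{
	u32 hdr_pfns = (MAX_SIZE_CHANNEL_MESSAGE -
			sizeof(struct vmbus_channel_gpadl_header) -
			sizeof(struct gpa_range)) / sizeof(u64);
	u32 body_pfns = (MAX_SIZE_CHANNEL_MESSAGE -
			 sizeof(struct vmbus_channel_gpadl_body)) / sizeof(u64);

	if (pagecount <= hdr_pfns)
		return 0;	/* everything fits in the header message */
	return DIV_ROUND_UP(pagecount - hdr_pfns, body_pfns);
}
#endif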
 411
 412/*
 413 * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
 414 *
 415 * @channel: a channel
 416 * @type: the type of the corresponding GPADL, only meaningful for the guest.
 417 * @kbuffer: from kmalloc or vmalloc
 418 * @size: page-size multiple
 419 * @send_offset: the offset (in bytes) where the send ring buffer starts,
 420 *              should be 0 for BUFFER type gpadl
 421 * @gpadl: the returned GPADL handle for the buffer
 422 */
 423static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
 424				   enum hv_gpadl_type type, void *kbuffer,
 425				   u32 size, u32 send_offset,
 426				   struct vmbus_gpadl *gpadl)
 427{
 428	struct vmbus_channel_gpadl_header *gpadlmsg;
 429	struct vmbus_channel_gpadl_body *gpadl_body;
 430	struct vmbus_channel_msginfo *msginfo = NULL;
 431	struct vmbus_channel_msginfo *submsginfo, *tmp;
 432	struct list_head *curr;
 433	u32 next_gpadl_handle;
 434	unsigned long flags;
 435	int ret = 0;
 436
 437	next_gpadl_handle =
 438		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
 439
 440	ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
 441	if (ret) {
 442		gpadl->decrypted = false;
 443		return ret;
 444	}
 445
 446	/*
 447	 * Set the "decrypted" flag to true for the set_memory_decrypted()
 448	 * success case. In the failure case, the encryption state of the
 449	 * memory is unknown. Leave "decrypted" as true to ensure the
 450	 * memory will be leaked instead of going back on the free list.
 451	 */
 452	gpadl->decrypted = true;
 453	ret = set_memory_decrypted((unsigned long)kbuffer,
 454				   PFN_UP(size));
 455	if (ret) {
 456		dev_warn(&channel->device_obj->device,
 457			 "Failed to set host visibility for new GPADL %d.\n",
 458			 ret);
 459		return ret;
 460	}
 461
 462	init_completion(&msginfo->waitevent);
 463	msginfo->waiting_channel = channel;
 464
 465	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
 466	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
 467	gpadlmsg->child_relid = channel->offermsg.child_relid;
 468	gpadlmsg->gpadl = next_gpadl_handle;
 469
 470
 471	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 472	list_add_tail(&msginfo->msglistentry,
 473		      &vmbus_connection.chn_msg_list);
 474
 475	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 476
 477	if (channel->rescind) {
 478		ret = -ENODEV;
 479		goto cleanup;
 480	}
 481
 482	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
 483			     sizeof(*msginfo), true);
 484
 485	trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
 486
 487	if (ret != 0)
 488		goto cleanup;
 489
 490	list_for_each(curr, &msginfo->submsglist) {
 491		submsginfo = (struct vmbus_channel_msginfo *)curr;
 492		gpadl_body =
 493			(struct vmbus_channel_gpadl_body *)submsginfo->msg;
 494
 495		gpadl_body->header.msgtype =
 496			CHANNELMSG_GPADL_BODY;
 497		gpadl_body->gpadl = next_gpadl_handle;
 498
 499		ret = vmbus_post_msg(gpadl_body,
 500				     submsginfo->msgsize - sizeof(*submsginfo),
 501				     true);
 502
 503		trace_vmbus_establish_gpadl_body(gpadl_body, ret);
 504
 505		if (ret != 0)
 506			goto cleanup;
 507
 508	}
 509	wait_for_completion(&msginfo->waitevent);
 510
 511	if (msginfo->response.gpadl_created.creation_status != 0) {
 512		pr_err("Failed to establish GPADL: err = 0x%x\n",
 513		       msginfo->response.gpadl_created.creation_status);
 514
 515		ret = -EDQUOT;
 516		goto cleanup;
 517	}
 518
 519	if (channel->rescind) {
 520		ret = -ENODEV;
 521		goto cleanup;
 522	}
 523
 524	/* At this point, we received the gpadl created msg */
 525	gpadl->gpadl_handle = gpadlmsg->gpadl;
 526	gpadl->buffer = kbuffer;
 527	gpadl->size = size;
 528
 529
 530cleanup:
 531	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 532	list_del(&msginfo->msglistentry);
 533	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 534	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
 535				 msglistentry) {
 536		kfree(submsginfo);
 537	}
 538
 539	kfree(msginfo);
 540
 541	if (ret) {
 542		/*
 543		 * If set_memory_encrypted() fails, the decrypted flag is
 544		 * left as true so the memory is leaked instead of being
 545		 * put back on the free list.
 546		 */
 547		if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
 548			gpadl->decrypted = false;
 549	}
 550
 551	return ret;
 552}
 553
 554/*
 555 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 556 *
 557 * @channel: a channel
 558 * @kbuffer: from kmalloc or vmalloc
 559 * @size: page-size multiple
 560 * @gpadl: the returned GPADL handle for the buffer
 561 */
 562int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
 563			  u32 size, struct vmbus_gpadl *gpadl)
 564{
 565	return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
 566				       0U, gpadl);
 567}
 568EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
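
/*
 * Example usage (hypothetical driver code): establish a GPADL for a
 * page-multiple buffer so the host can reference it by handle, then tear
 * it down when finished.
 */
#if 0	/* illustrative sketch only */
static int example_share_buffer(struct vmbus_channel *chan)
{
	struct vmbus_gpadl gpadl;
	void *buf;
	int ret;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = vmbus_establish_gpadl(chan, buf, PAGE_SIZE, &gpadl);
	if (ret) {
		/*
		 * Free the buffer only if its encryption state is known;
		 * otherwise leak it, per the "decrypted" logic above.
		 */
		if (!gpadl.decrypted)
			kfree(buf);
		return ret;
	}

	/* ... pass gpadl.gpadl_handle to the host in a channel packet ... */

	if (!vmbus_teardown_gpadl(chan, &gpadl))
		kfree(buf);
	return 0;
}
#endif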
 569
 570/**
 571 * request_arr_init - Allocates memory for the requestor array. Each slot
 572 * keeps track of the next available slot in the array. Initially, each
 573 * slot points to the next one (as in a Linked List). The last slot
 574 * does not point to anything, so it is set to U64_MAX.
 575 * @size: The size of the array
 576 */
 577static u64 *request_arr_init(u32 size)
 578{
 579	int i;
 580	u64 *req_arr;
 581
 582	req_arr = kcalloc(size, sizeof(u64), GFP_KERNEL);
 583	if (!req_arr)
 584		return NULL;
 585
 586	for (i = 0; i < size - 1; i++)
 587		req_arr[i] = i + 1;
 588
 589	/* Last slot (no more available slots) */
 590	req_arr[i] = U64_MAX;
 591
 592	return req_arr;
 593}
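
/*
 * For example, with size == 4 the array is initialized to { 1, 2, 3,
 * U64_MAX }: slot 0 points to slot 1, slot 1 to slot 2, and so on, with
 * U64_MAX marking the end of the free list.
 */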
 594
 595/*
 596 * vmbus_alloc_requestor - Initializes @rqstor's fields.
 597 * Index 0 is the first free slot
 598 * @size: Size of the requestor array
 599 */
 600static int vmbus_alloc_requestor(struct vmbus_requestor *rqstor, u32 size)
 601{
 602	u64 *rqst_arr;
 603	unsigned long *bitmap;
 604
 605	rqst_arr = request_arr_init(size);
 606	if (!rqst_arr)
 607		return -ENOMEM;
 608
 609	bitmap = bitmap_zalloc(size, GFP_KERNEL);
 610	if (!bitmap) {
 611		kfree(rqst_arr);
 612		return -ENOMEM;
 613	}
 614
 615	rqstor->req_arr = rqst_arr;
 616	rqstor->req_bitmap = bitmap;
 617	rqstor->size = size;
 618	rqstor->next_request_id = 0;
 619	spin_lock_init(&rqstor->req_lock);
 620
 621	return 0;
 622}
 623
 624/*
 625 * vmbus_free_requestor - Frees memory allocated for @rqstor
 626 * @rqstor: Pointer to the requestor struct
 627 */
 628static void vmbus_free_requestor(struct vmbus_requestor *rqstor)
 629{
 630	kfree(rqstor->req_arr);
 631	bitmap_free(rqstor->req_bitmap);
 632}
 633
 634static int __vmbus_open(struct vmbus_channel *newchannel,
 635		       void *userdata, u32 userdatalen,
 636		       void (*onchannelcallback)(void *context), void *context)
 637{
 638	struct vmbus_channel_open_channel *open_msg;
 639	struct vmbus_channel_msginfo *open_info = NULL;
 640	struct page *page = newchannel->ringbuffer_page;
 641	u32 send_pages, recv_pages;
 642	unsigned long flags;
 643	int err;
 644
 645	if (userdatalen > MAX_USER_DEFINED_BYTES)
 646		return -EINVAL;
 647
 648	send_pages = newchannel->ringbuffer_send_offset;
 649	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
 650
 651	if (newchannel->state != CHANNEL_OPEN_STATE)
 652		return -EINVAL;
 653
 654	/* Create and init requestor */
 655	if (newchannel->rqstor_size) {
 656		if (vmbus_alloc_requestor(&newchannel->requestor, newchannel->rqstor_size))
 657			return -ENOMEM;
 658	}
 659
 660	newchannel->state = CHANNEL_OPENING_STATE;
 661	newchannel->onchannel_callback = onchannelcallback;
 662	newchannel->channel_callback_context = context;
 663
 664	if (!newchannel->max_pkt_size)
 665		newchannel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;
 666
 667	/* Establish the gpadl for the ring buffer */
 668	newchannel->ringbuffer_gpadlhandle.gpadl_handle = 0;
 669
 670	err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
 671				      page_address(newchannel->ringbuffer_page),
 672				      (send_pages + recv_pages) << PAGE_SHIFT,
 673				      newchannel->ringbuffer_send_offset << PAGE_SHIFT,
 674				      &newchannel->ringbuffer_gpadlhandle);
 675	if (err)
 676		goto error_clean_ring;
 677
 678	err = hv_ringbuffer_init(&newchannel->outbound,
 679				 page, send_pages, 0);
 680	if (err)
 681		goto error_free_gpadl;
 682
 683	err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
 684				 recv_pages, newchannel->max_pkt_size);
 685	if (err)
 686		goto error_free_gpadl;
 687
 688	/* Create and init the channel open message */
 689	open_info = kzalloc(sizeof(*open_info) +
 690			   sizeof(struct vmbus_channel_open_channel),
 691			   GFP_KERNEL);
 692	if (!open_info) {
 693		err = -ENOMEM;
 694		goto error_free_gpadl;
 695	}
 696
 697	init_completion(&open_info->waitevent);
 698	open_info->waiting_channel = newchannel;
 699
 700	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
 701	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
 702	open_msg->openid = newchannel->offermsg.child_relid;
 703	open_msg->child_relid = newchannel->offermsg.child_relid;
 704	open_msg->ringbuffer_gpadlhandle
 705		= newchannel->ringbuffer_gpadlhandle.gpadl_handle;
 706	/*
 707	 * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
 708	 * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
 709	 * here we calculate it into HV_HYP_PAGE.
 710	 */
 711	open_msg->downstream_ringbuffer_pageoffset =
 712		hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
 713	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
 714
 715	if (userdatalen)
 716		memcpy(open_msg->userdata, userdata, userdatalen);
 717
 718	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 719	list_add_tail(&open_info->msglistentry,
 720		      &vmbus_connection.chn_msg_list);
 721	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 722
 723	if (newchannel->rescind) {
 724		err = -ENODEV;
 725		goto error_clean_msglist;
 726	}
 727
 728	err = vmbus_post_msg(open_msg,
 729			     sizeof(struct vmbus_channel_open_channel), true);
 730
 731	trace_vmbus_open(open_msg, err);
 732
 733	if (err != 0)
 734		goto error_clean_msglist;
 735
 736	wait_for_completion(&open_info->waitevent);
 737
 738	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 739	list_del(&open_info->msglistentry);
 740	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 741
 742	if (newchannel->rescind) {
 743		err = -ENODEV;
 744		goto error_free_info;
 745	}
 746
 747	if (open_info->response.open_result.status) {
 748		err = -EAGAIN;
 749		goto error_free_info;
 750	}
 751
 752	newchannel->state = CHANNEL_OPENED_STATE;
 753	kfree(open_info);
 754	return 0;
 755
 756error_clean_msglist:
 757	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 758	list_del(&open_info->msglistentry);
 759	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 760error_free_info:
 761	kfree(open_info);
 762error_free_gpadl:
 763	vmbus_teardown_gpadl(newchannel, &newchannel->ringbuffer_gpadlhandle);
 764error_clean_ring:
 765	hv_ringbuffer_cleanup(&newchannel->outbound);
 766	hv_ringbuffer_cleanup(&newchannel->inbound);
 767	vmbus_free_requestor(&newchannel->requestor);
 768	newchannel->state = CHANNEL_OPEN_STATE;
 769	return err;
 770}
 771
 772/*
 773 * vmbus_connect_ring - Open the channel, but reuse the ring buffer
 774 */
 775int vmbus_connect_ring(struct vmbus_channel *newchannel,
 776		       void (*onchannelcallback)(void *context), void *context)
 777{
 778	return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
 779}
 780EXPORT_SYMBOL_GPL(vmbus_connect_ring);
 781
 782/*
 783 * vmbus_open - Open the specified channel.
 784 */
 785int vmbus_open(struct vmbus_channel *newchannel,
 786	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
 787	       void *userdata, u32 userdatalen,
 788	       void (*onchannelcallback)(void *context), void *context)
 789{
 790	int err;
 791
 792	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
 793			       recv_ringbuffer_size);
 794	if (err)
 795		return err;
 796
 797	err = __vmbus_open(newchannel, userdata, userdatalen,
 798			   onchannelcallback, context);
 799	if (err)
 800		vmbus_free_ring(newchannel);
 801
 802	return err;
 803}
 804EXPORT_SYMBOL_GPL(vmbus_open);
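
/*
 * Example (hypothetical driver probe path, with placeholder names): open
 * the device's primary channel with four-page send/receive rings and a
 * callback that is invoked when the host signals the channel.
 */
#if 0	/* illustrative sketch only */
static void example_onchannelcallback(void *context)
{
	struct vmbus_channel *chan = context;

	/* drain the inbound ring here, e.g. with vmbus_recvpacket() */
}

static int example_probe(struct hv_device *dev)
{
	return vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
			  NULL, 0, example_onchannelcallback, dev->channel);
}
#endif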
 805
 806/*
 807 * vmbus_teardown_gpadl - Tear down the specified GPADL handle
 808 */
 809int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpadl)
 810{
 811	struct vmbus_channel_gpadl_teardown *msg;
 812	struct vmbus_channel_msginfo *info;
 813	unsigned long flags;
 814	int ret;
 815
 816	info = kzalloc(sizeof(*info) +
 817		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
 818	if (!info)
 819		return -ENOMEM;
 820
 821	init_completion(&info->waitevent);
 822	info->waiting_channel = channel;
 823
 824	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
 825
 826	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
 827	msg->child_relid = channel->offermsg.child_relid;
 828	msg->gpadl = gpadl->gpadl_handle;
 829
 830	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 831	list_add_tail(&info->msglistentry,
 832		      &vmbus_connection.chn_msg_list);
 833	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 834
 835	if (channel->rescind)
 836		goto post_msg_err;
 837
 838	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
 839			     true);
 840
 841	trace_vmbus_teardown_gpadl(msg, ret);
 842
 843	if (ret)
 844		goto post_msg_err;
 845
 846	wait_for_completion(&info->waitevent);
 847
 848	gpadl->gpadl_handle = 0;
 849
 850post_msg_err:
 851	/*
 852	 * If the channel has been rescinded, we will be awakened by the
 853	 * rescind handler; set the error code to zero so we don't leak
 854	 * memory.
 855	 */
 856	if (channel->rescind)
 857		ret = 0;
 858
 859	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 860	list_del(&info->msglistentry);
 861	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 862
 863	kfree(info);
 864
 865	ret = set_memory_encrypted((unsigned long)gpadl->buffer,
 866				   PFN_UP(gpadl->size));
 867	if (ret)
 868		pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
 869
 870	gpadl->decrypted = ret;
 871
 872	return ret;
 873}
 874EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
 875
 876void vmbus_reset_channel_cb(struct vmbus_channel *channel)
 877{
 878	unsigned long flags;
 879
 880	/*
 881	 * vmbus_on_event(), running in the per-channel tasklet, can race
 882	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
 883	 * the former is accessing channel->inbound.ring_buffer, the latter
 884	 * could be freeing the ring_buffer pages, so here we must stop it
 885	 * first.
 886	 *
 887	 * vmbus_chan_sched() might call the netvsc driver callback function
 888	 * that ends up scheduling NAPI work that accesses the ring buffer.
 889	 * At this point, we have to ensure that any such work is completed
 890	 * and that the channel ring buffer is no longer being accessed, cf.
 891	 * the calls to napi_disable() in netvsc_device_remove().
 892	 */
 893	tasklet_disable(&channel->callback_event);
 894
 895	/* See the inline comments in vmbus_chan_sched(). */
 896	spin_lock_irqsave(&channel->sched_lock, flags);
 897	channel->onchannel_callback = NULL;
 898	spin_unlock_irqrestore(&channel->sched_lock, flags);
 899
 900	channel->sc_creation_callback = NULL;
 901
 902	/* Re-enable tasklet for use on re-open */
 903	tasklet_enable(&channel->callback_event);
 904}
 905
 906static int vmbus_close_internal(struct vmbus_channel *channel)
 907{
 908	struct vmbus_channel_close_channel *msg;
 909	int ret;
 910
 911	vmbus_reset_channel_cb(channel);
 912
 913	/*
 914	 * In case a device driver's probe() fails (e.g.,
 915	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
 916	 * rescinded later (e.g., we dynamically disable an Integrated Service
 917	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
 918	 * here we should skip most of the below cleanup work.
 919	 */
 920	if (channel->state != CHANNEL_OPENED_STATE)
 921		return -EINVAL;
 922
 923	channel->state = CHANNEL_OPEN_STATE;
 924
 925	/* Send a closing message */
 926
 927	msg = &channel->close_msg.msg;
 928
 929	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
 930	msg->child_relid = channel->offermsg.child_relid;
 931
 932	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
 933			     true);
 934
 935	trace_vmbus_close_internal(msg, ret);
 936
 937	if (ret) {
 938		pr_err("Close failed: close post msg return is %d\n", ret);
 939		/*
 940		 * If we failed to post the close msg,
 941		 * it is perhaps better to leak memory.
 942		 */
 943	}
 944
 945	/* Tear down the gpadl for the channel's ring buffer */
 946	else if (channel->ringbuffer_gpadlhandle.gpadl_handle) {
 947		ret = vmbus_teardown_gpadl(channel, &channel->ringbuffer_gpadlhandle);
 948		if (ret) {
 949			pr_err("Close failed: teardown gpadl return %d\n", ret);
 950			/*
 951			 * If we failed to teardown gpadl,
 952			 * it is perhaps better to leak memory.
 953			 */
 954		}
 955	}
 956
 957	if (!ret)
 958		vmbus_free_requestor(&channel->requestor);
 959
 960	return ret;
 961}
 962
 963/* disconnect ring - close all channels */
 964int vmbus_disconnect_ring(struct vmbus_channel *channel)
 965{
 966	struct vmbus_channel *cur_channel, *tmp;
 967	int ret;
 968
 969	if (channel->primary_channel != NULL)
 970		return -EINVAL;
 971
 972	list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
 973		if (cur_channel->rescind)
 974			wait_for_completion(&cur_channel->rescind_event);
 975
 976		mutex_lock(&vmbus_connection.channel_mutex);
 977		if (vmbus_close_internal(cur_channel) == 0) {
 978			vmbus_free_ring(cur_channel);
 979
 980			if (cur_channel->rescind)
 981				hv_process_channel_removal(cur_channel);
 982		}
 983		mutex_unlock(&vmbus_connection.channel_mutex);
 984	}
 985
 986	/*
 987	 * Now close the primary.
 988	 */
 989	mutex_lock(&vmbus_connection.channel_mutex);
 990	ret = vmbus_close_internal(channel);
 991	mutex_unlock(&vmbus_connection.channel_mutex);
 992
 993	return ret;
 994}
 995EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
 996
 997/*
 998 * vmbus_close - Close the specified channel
 999 */
1000void vmbus_close(struct vmbus_channel *channel)
1001{
1002	if (vmbus_disconnect_ring(channel) == 0)
1003		vmbus_free_ring(channel);
1004}
1005EXPORT_SYMBOL_GPL(vmbus_close);
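
/*
 * Example (hypothetical driver remove path): vmbus_close() disconnects
 * the channel (and its sub-channels) and frees the ring buffer; a driver
 * that wants to reuse the ring with vmbus_connect_ring() would call
 * vmbus_disconnect_ring() instead and keep the ring allocated.
 */
#if 0	/* illustrative sketch only */
static void example_remove(struct hv_device *dev)
{
	vmbus_close(dev->channel);
}
#endif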
1006
1007/**
1008 * vmbus_sendpacket_getid() - Send the specified buffer on the given channel
1009 * @channel: Pointer to vmbus_channel structure
1010 * @buffer: Pointer to the buffer containing the data to send.
1011 * @bufferlen: Length (in bytes) of the data in the buffer.
1012 * @requestid: Identifier of the request
1013 * @trans_id: Identifier of the transaction associated with this request,
1014 *            if the send is successful; undefined otherwise.
1015 * @type: Type of packet that is being sent e.g. negotiate, time
1016 *	  packet etc.
1017 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
1018 *
1019 * Sends data in @buffer directly to Hyper-V via the vmbus.
1020 * This will send the data unparsed to Hyper-V.
1021 *
1022 * Mainly used by Hyper-V drivers.
1023 */
1024int vmbus_sendpacket_getid(struct vmbus_channel *channel, void *buffer,
1025			   u32 bufferlen, u64 requestid, u64 *trans_id,
1026			   enum vmbus_packet_type type, u32 flags)
1027{
1028	struct vmpacket_descriptor desc;
1029	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
1030	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
1031	struct kvec bufferlist[3];
1032	u64 aligned_data = 0;
1033	int num_vecs = ((bufferlen != 0) ? 3 : 1);
1034
1035
1036	/* Setup the descriptor */
1037	desc.type = type; /* VmbusPacketTypeDataInBand; */
1038	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
1039	/* in 8-bytes granularity */
1040	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
1041	desc.len8 = (u16)(packetlen_aligned >> 3);
1042	desc.trans_id = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
1043
1044	bufferlist[0].iov_base = &desc;
1045	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
1046	bufferlist[1].iov_base = buffer;
1047	bufferlist[1].iov_len = bufferlen;
1048	bufferlist[2].iov_base = &aligned_data;
1049	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
1050
1051	return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid, trans_id);
1052}
1053EXPORT_SYMBOL(vmbus_sendpacket_getid);
1054
1055/**
1056 * vmbus_sendpacket() - Send the specified buffer on the given channel
1057 * @channel: Pointer to vmbus_channel structure
1058 * @buffer: Pointer to the buffer containing the data to send.
1059 * @bufferlen: Length (in bytes) of the data in the buffer.
1060 * @requestid: Identifier of the request
1061 * @type: Type of packet that is being sent e.g. negotiate, time
1062 *	  packet etc.
1063 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
1064 *
1065 * Sends data in @buffer directly to Hyper-V via the vmbus.
1066 * This will send the data unparsed to Hyper-V.
1067 *
1068 * Mainly used by Hyper-V drivers.
1069 */
1070int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
1071		     u32 bufferlen, u64 requestid,
1072		     enum vmbus_packet_type type, u32 flags)
1073{
1074	return vmbus_sendpacket_getid(channel, buffer, bufferlen,
1075				      requestid, NULL, type, flags);
1076}
1077EXPORT_SYMBOL(vmbus_sendpacket);
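
/*
 * Example (hypothetical): send a small in-band request and ask the host
 * to post a completion.  The request id is typically the guest address
 * of a per-request context, which the completion path can recover with
 * vmbus_request_addr() (see below).
 */
#if 0	/* illustrative sketch only */
struct example_request {
	u32 opcode;
	u32 arg;
};

static int example_send(struct vmbus_channel *chan,
			struct example_request *req)
{
	return vmbus_sendpacket(chan, req, sizeof(*req),
				(unsigned long)req, VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
#endif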
1078
1079/*
1080 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffers in
1081 * one packet using the GPADL Direct packet type. This is useful for
1082 * sending batched data. A completion notification is always requested
1083 * from the host for these packets, and @pagebuffers may describe up to
1084 * MAX_PAGE_BUFFER_COUNT pages.
1085 */
1086int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1087				struct hv_page_buffer pagebuffers[],
1088				u32 pagecount, void *buffer, u32 bufferlen,
1089				u64 requestid)
1090{
1091	int i;
1092	struct vmbus_channel_packet_page_buffer desc;
1093	u32 descsize;
1094	u32 packetlen;
1095	u32 packetlen_aligned;
1096	struct kvec bufferlist[3];
1097	u64 aligned_data = 0;
1098
1099	if (pagecount > MAX_PAGE_BUFFER_COUNT)
1100		return -EINVAL;
1101
1102	/*
1103	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
1104	 * largest size we support
1105	 */
1106	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
1107			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
1108			  sizeof(struct hv_page_buffer));
1109	packetlen = descsize + bufferlen;
1110	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
1111
1112	/* Setup the descriptor */
1113	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
1114	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
1115	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
1116	desc.length8 = (u16)(packetlen_aligned >> 3);
1117	desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
1118	desc.reserved = 0;
1119	desc.rangecount = pagecount;
1120
1121	for (i = 0; i < pagecount; i++) {
1122		desc.range[i].len = pagebuffers[i].len;
1123		desc.range[i].offset = pagebuffers[i].offset;
1124		desc.range[i].pfn = pagebuffers[i].pfn;
1125	}
1126
1127	bufferlist[0].iov_base = &desc;
1128	bufferlist[0].iov_len = descsize;
1129	bufferlist[1].iov_base = buffer;
1130	bufferlist[1].iov_len = bufferlen;
1131	bufferlist[2].iov_base = &aligned_data;
1132	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
1133
1134	return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
1135}
1136EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
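
/*
 * Example (hypothetical): describe one Hyper-V page of payload with a
 * GPA-direct packet.  Assumes @data is HV_HYP_PAGE_SIZE-aligned;
 * virt_to_hvpfn() converts a kernel virtual address to a Hyper-V PFN.
 */
#if 0	/* illustrative sketch only */
static int example_send_page(struct vmbus_channel *chan, void *data,
			     void *hdr, u32 hdrlen, u64 requestid)
{
	struct hv_page_buffer pb = {
		.pfn	= virt_to_hvpfn(data),
		.offset	= 0,
		.len	= HV_HYP_PAGE_SIZE,
	};

	return vmbus_sendpacket_pagebuffer(chan, &pb, 1, hdr, hdrlen,
					   requestid);
}
#endif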
1137
1138/*
1139 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
1140 * using a GPADL Direct packet type.
1141 * The supplied @desc buffer includes the vmbus descriptor.
1142 */
1143int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
1144			      struct vmbus_packet_mpb_array *desc,
1145			      u32 desc_size,
1146			      void *buffer, u32 bufferlen, u64 requestid)
1147{
1148	u32 packetlen;
1149	u32 packetlen_aligned;
1150	struct kvec bufferlist[3];
1151	u64 aligned_data = 0;
1152
1153	packetlen = desc_size + bufferlen;
1154	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
1155
1156	/* Setup the descriptor */
1157	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
1158	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
1159	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
1160	desc->length8 = (u16)(packetlen_aligned >> 3);
1161	desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
1162	desc->reserved = 0;
1163	desc->rangecount = 1;
1164
1165	bufferlist[0].iov_base = desc;
1166	bufferlist[0].iov_len = desc_size;
1167	bufferlist[1].iov_base = buffer;
1168	bufferlist[1].iov_len = bufferlen;
1169	bufferlist[2].iov_base = &aligned_data;
1170	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
1171
1172	return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
1173}
1174EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
1175
1176/**
1177 * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
1178 * @channel: Pointer to vmbus_channel structure
1179 * @buffer: Pointer to the buffer you want to receive the data into.
1180 * @bufferlen: Maximum size of what the buffer can hold.
1181 * @buffer_actual_len: The actual size of the data after it was received.
1182 * @requestid: Identifier of the request
1183 * @raw: true means keep the vmpacket_descriptor header in the received data.
1184 *
1185 * Receives directly from the Hyper-V VMbus and puts the data it received
1186 * into @buffer. This will receive the data unparsed from Hyper-V.
1187 *
1188 * Mainly used by Hyper-V drivers.
1189 */
1190static inline int
1191__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
1192		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
1193		   bool raw)
1194{
1195	return hv_ringbuffer_read(channel, buffer, bufferlen,
1196				  buffer_actual_len, requestid, raw);
1197
1198}
1199
1200int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
1201		     u32 bufferlen, u32 *buffer_actual_len,
1202		     u64 *requestid)
1203{
1204	return __vmbus_recvpacket(channel, buffer, bufferlen,
1205				  buffer_actual_len, requestid, false);
1206}
1207EXPORT_SYMBOL(vmbus_recvpacket);
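
/*
 * Example (hypothetical channel callback body): drain inbound packets
 * into a fixed buffer.  A real driver sizes the buffer for its protocol
 * and handles -ENOBUFS by using a larger buffer.
 */
#if 0	/* illustrative sketch only */
static void example_drain(struct vmbus_channel *chan)
{
	u8 buf[256];
	u32 actual = 0;
	u64 req_id;

	while (!vmbus_recvpacket(chan, buf, sizeof(buf), &actual, &req_id) &&
	       actual)
		;	/* process 'actual' bytes of payload in 'buf' */
}
#endif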
1208
1209/*
1210 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
1211 */
1212int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
1213			      u32 bufferlen, u32 *buffer_actual_len,
1214			      u64 *requestid)
1215{
1216	return __vmbus_recvpacket(channel, buffer, bufferlen,
1217				  buffer_actual_len, requestid, true);
1218}
1219EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
1220
1221/*
1222 * vmbus_next_request_id - Returns a new request id; the guest memory
1223 * address is stored in the requestor array at index (id - 1).
1224 * Uses a spin lock to avoid race conditions.
1225 * @channel: Pointer to the VMbus channel struct
1226 * @rqst_addr: Guest memory address to be stored in the array
1227 */
1228u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
1229{
1230	struct vmbus_requestor *rqstor = &channel->requestor;
1231	unsigned long flags;
1232	u64 current_id;
1233
1234	/* Check rqstor has been initialized */
1235	if (!channel->rqstor_size)
1236		return VMBUS_NO_RQSTOR;
1237
1238	lock_requestor(channel, flags);
1239	current_id = rqstor->next_request_id;
1240
1241	/* Requestor array is full */
1242	if (current_id >= rqstor->size) {
1243		unlock_requestor(channel, flags);
1244		return VMBUS_RQST_ERROR;
1245	}
1246
1247	rqstor->next_request_id = rqstor->req_arr[current_id];
1248	rqstor->req_arr[current_id] = rqst_addr;
1249
1250	/* The already held spin lock provides atomicity */
1251	bitmap_set(rqstor->req_bitmap, current_id, 1);
1252
1253	unlock_requestor(channel, flags);
1254
1255	/*
1256	 * Cannot return an ID of 0, which is reserved for an unsolicited
1257	 * message from Hyper-V; Hyper-V does not acknowledge (respond to)
1258	 * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED requests with ID of
1259	 * 0 sent by the guest.
1260	 */
1261	return current_id + 1;
1262}
1263EXPORT_SYMBOL_GPL(vmbus_next_request_id);
1264
1265/* As in vmbus_request_addr_match() but without the requestor lock */
1266u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1267			       u64 rqst_addr)
1268{
1269	struct vmbus_requestor *rqstor = &channel->requestor;
1270	u64 req_addr;
1271
1272	/* Check rqstor has been initialized */
1273	if (!channel->rqstor_size)
1274		return VMBUS_NO_RQSTOR;
1275
1276	/* Hyper-V can send an unsolicited message with ID of 0 */
1277	if (!trans_id)
1278		return VMBUS_RQST_ERROR;
1279
1280	/* Data corresponding to trans_id is stored at trans_id - 1 */
1281	trans_id--;
1282
1283	/* Invalid trans_id */
1284	if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap))
1285		return VMBUS_RQST_ERROR;
1286
1287	req_addr = rqstor->req_arr[trans_id];
1288	if (rqst_addr == VMBUS_RQST_ADDR_ANY || req_addr == rqst_addr) {
1289		rqstor->req_arr[trans_id] = rqstor->next_request_id;
1290		rqstor->next_request_id = trans_id;
1291
1292		/* The already held spin lock provides atomicity */
1293		bitmap_clear(rqstor->req_bitmap, trans_id, 1);
1294	}
1295
1296	return req_addr;
1297}
1298EXPORT_SYMBOL_GPL(__vmbus_request_addr_match);
1299
1300/*
1301 * vmbus_request_addr_match - Clears/removes @trans_id from the @channel's
1302 * requestor, provided the memory address stored at @trans_id equals @rqst_addr
1303 * (or provided @rqst_addr matches the sentinel value VMBUS_RQST_ADDR_ANY).
1304 *
1305 * Returns the memory address stored at @trans_id, or VMBUS_RQST_ERROR if
1306 * @trans_id is not contained in the requestor.
1307 *
1308 * Acquires and releases the requestor spin lock.
1309 */
1310u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1311			     u64 rqst_addr)
1312{
1313	unsigned long flags;
1314	u64 req_addr;
1315
1316	lock_requestor(channel, flags);
1317	req_addr = __vmbus_request_addr_match(channel, trans_id, rqst_addr);
1318	unlock_requestor(channel, flags);
1319
1320	return req_addr;
1321}
1322EXPORT_SYMBOL_GPL(vmbus_request_addr_match);
1323
1324/*
1325 * vmbus_request_addr - Returns the memory address stored at @trans_id
1326 * in @rqstor. Uses a spin lock to avoid race conditions.
1327 * @channel: Pointer to the VMbus channel struct
1328 * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
1329 * next request id.
1330 */
1331u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
1332{
1333	return vmbus_request_addr_match(channel, trans_id, VMBUS_RQST_ADDR_ANY);
1334}
1335EXPORT_SYMBOL_GPL(vmbus_request_addr);
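
/*
 * Example (hypothetical) of the requestor lifecycle implemented above.
 * In practice hv_ringbuffer_write() calls vmbus_next_request_id() when a
 * packet is posted; a driver's completion handler then maps the
 * transaction id back to its context and releases the slot.
 */
#if 0	/* illustrative sketch only */
static int example_submit(struct vmbus_channel *chan, void *ctx)
{
	u64 id = vmbus_next_request_id(chan, (unsigned long)ctx);

	if (id == VMBUS_RQST_ERROR)
		return -EBUSY;	/* requestor array is full */

	/* ... place 'id' in the packet's trans_id field and post it ... */
	return 0;
}

static void example_complete(struct vmbus_channel *chan, u64 trans_id)
{
	u64 addr = vmbus_request_addr(chan, trans_id);

	if (addr != VMBUS_RQST_ERROR && addr != VMBUS_NO_RQSTOR) {
		/* 'addr' is the context passed to example_submit() */
	}
}
#endif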