// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

/* Resource types accepted by IOCTL_VMCI_NOTIFY_RESOURCE. */
enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

/* Actions accepted by IOCTL_VMCI_NOTIFY_RESOURCE. */
enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	  /* User VA. */
	u64 consume_page_file;	  /* User VA. */
	u64 produce_page_file_size;  /* Size of the file name array. */
	u64 consume_page_file_size;  /* Size of the file name array. */
	s32 result;
	u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass the notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};

/*
 * Per-instance host state
 */
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;  /* Mutex lock for vmci context access */
};

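/*
 * vmci_host_active_users counts the VMX'en that have created a VMCI
 * context through IOCTL_VMCI_INIT_CONTEXT; it is decremented when such
 * a file is closed (see vmci_host_close() below).
 */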
static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}

int vmci_host_users(void)
{
	return atomic_read(&vmci_host_active_users);
}

/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented when
		 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
		 * ioctl.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context = vmci_host_dev->context;
	__poll_t mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = EPOLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}

/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles
				       (handle_array), *user_buf_size);

	return VMCI_SUCCESS;
}

/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean in user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we are
	 * explicit about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));

	/*
	 * Lock the physical page backing the given user VA.
	 */
	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
	if (retval != 1) {
		context->notify_page = NULL;
		return VMCI_ERROR_GENERIC;
	}

	/*
	 * Map the locked page and set up the notify pointer.
	 */
	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}
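
/*
 * Note: the notify mapping set up above is torn down through
 * vmci_ctx_unset_notify(), which vmci_host_do_set_notify() below
 * calls when userspace passes a zero notify address.
 */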

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;
		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0, tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in a newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required the VMX and the VMCI kernel module to
	 * be version sync'd.  All new VMX users will be programmed to
	 * handle the VMCI kernel module version.
	 */
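	/*
	 * For example, a VMX reporting a version older than
	 * VMCI_VERSION_HOSTQP is told its own version back, while one
	 * reporting VMCI_VERSION_HOSTQP or newer (including versions
	 * this module does not know about) is told VMCI_VERSION.
	 */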

	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}

#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy the cid back to user level; we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	vmci_call_vsock_callback(true);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
			 send_info.len);
	if (IS_ERR(dg)) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return PTR_ERR(dg);
	}

	if (VMCI_DG_SIZE(dg) != send_info.len) {
		vmci_ioctl_err("datagram size mismatch\n");
		kfree(dg);
		return -EINVAL;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}

static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						alloc_info.peer,
						alloc_info.flags,
						VMCI_NO_PRIVILEGE_FLAGS,
						alloc_info.produce_size,
						alloc_info.consume_size,
						NULL,
						vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						alloc_info.peer,
						alloc_info.flags,
						VMCI_NO_PRIVILEGE_FLAGS,
						alloc_info.produce_size,
						alloc_info.consume_size,
						&page_store,
						vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}

static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					 vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller.  Note that the
	 * basic premise is that it is incumbent upon the caller not to look at
	 * the info.result field until after the ioctl() returns.  And then,
	 * only if the ioctl() result indicates no error.  We send up the
	 * SUCCESS status before calling SetPageStore() because failing
	 * to copy up the result code means unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms.  For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing success
	 * fails, then ... the host has to be stopped from writing (anymore)
	 * data into the queue_pair.  That means an additional test in the
	 * VMCI_Enqueue() code path.  Ugh.
	 */

	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write a result field of the
		 * caller's info block.  So, we don't even try to
		 * SetPageStore().
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
						page_file_info.produce_va,
						page_file_info.consume_va,
						vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * Note that in this case the SetPageStore()
			 * call failed but we were unable to
			 * communicate that to the caller (because the
			 * put_user() call failed).  So, if we
			 * simply return an error (in this case
			 * -EFAULT) then the caller will know that the
			 * SetPageStore failed even though we couldn't
			 * put the result code in the result field and
			 * indicate exactly why it failed.
			 *
			 * That says nothing about the issue where we
			 * were once able to write to the caller's info
			 * memory and now can't.  Something more
			 * serious is probably going on than the fact
			 * that SetPageStore() didn't work.
			 */
			return -EFAULT;
		}
	}

	return 0;
}

static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	u32 cid;
	int result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						&get_info.buf_size, &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
				set_info.buf_size);
	if (IS_ERR(cpt_buf))
		return PTR_ERR(cpt_buf);

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

	kfree(cpt_buf);
	return retval;
}

static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
		-EFAULT : 0;
}

static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}

static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
				&db_handle_array, &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
				db_handle_array, qp_handle_array,
				info.result == VMCI_SUCCESS && !retval);

	return retval;
}

static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
		char *name = "IOCTL_VMCI_" # ioctl_name;		\
		return vmci_host_do_ ## ioctl_fn(			\
			vmci_host_dev, name, uptr);			\
	} while (0)
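	/*
	 * VMCI_DO_IOCTL expands to a direct return of the matching
	 * vmci_host_do_*() handler; the generated "IOCTL_VMCI_..." name
	 * string it passes along is only used by vmci_ioctl_err() for
	 * diagnostics.
	 */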

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
		return -EINVAL;
	}

#undef VMCI_DO_IOCTL
}

static const struct file_operations vmuser_fops = {
	.owner		= THIS_MODULE,
	.open		= vmci_host_open,
	.release	= vmci_host_close,
	.poll		= vmci_host_poll,
	.unlocked_ioctl	= vmci_host_unlocked_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
	.name = "vmci",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &vmuser_fops,
};

int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
					VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
					-1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}

void __exit vmci_host_exit(void)
{
	vmci_host_device_initialized = false;

	misc_deregister(&vmci_host_miscdev);
	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}
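
For orientation, here is a minimal, hypothetical userspace sketch of the character-device interface implemented above. It assumes the IOCTL_VMCI_VERSION and IOCTL_VMCI_GET_CONTEXT_ID request values have been replicated from include/linux/vmw_vmci_defs.h, which is not an exported uapi header. Per vmci_host_get_version() and vmci_host_do_get_context_id() above, the former returns the driver version as the ioctl() result and the latter writes VMCI_HOST_CONTEXT_ID through the pointer argument.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	/* The host personality registers itself as the "vmci" misc device. */
	int fd = open("/dev/vmci", O_RDWR);
	if (fd < 0) {
		perror("open /dev/vmci");
		return 1;
	}

	/* vmci_host_get_version() encodes the version in the return value. */
	int version = ioctl(fd, IOCTL_VMCI_VERSION);
	printf("VMCI driver version: %d\n", version);

	/* vmci_host_do_get_context_id() fills in VMCI_HOST_CONTEXT_ID. */
	uint32_t cid;
	if (ioctl(fd, IOCTL_VMCI_GET_CONTEXT_ID, &cid) == 0)
		printf("host context id: %u\n", (unsigned int)cid);

	close(fd);
	return 0;
}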