   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (C) 2009 Red Hat, Inc.
   3 * Copyright (C) 2006 Rusty Russell IBM Corporation
   4 *
   5 * Author: Michael S. Tsirkin <mst@redhat.com>
   6 *
   7 * Inspiration, some code, and most witty comments come from
   8 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   9 *
  10 * Generic code for virtio server in host kernel.
  11 */
  12
  13#include <linux/eventfd.h>
  14#include <linux/vhost.h>
  15#include <linux/uio.h>
  16#include <linux/mm.h>
  17#include <linux/miscdevice.h>
  18#include <linux/mutex.h>
  19#include <linux/poll.h>
  20#include <linux/file.h>
  21#include <linux/highmem.h>
  22#include <linux/slab.h>
  23#include <linux/vmalloc.h>
  24#include <linux/kthread.h>
  25#include <linux/module.h>
  26#include <linux/sort.h>
  27#include <linux/sched/mm.h>
  28#include <linux/sched/signal.h>
  29#include <linux/sched/vhost_task.h>
  30#include <linux/interval_tree_generic.h>
  31#include <linux/nospec.h>
  32#include <linux/kcov.h>
  33
  34#include "vhost.h"
  35
  36static ushort max_mem_regions = 64;
  37module_param(max_mem_regions, ushort, 0444);
  38MODULE_PARM_DESC(max_mem_regions,
  39	"Maximum number of memory regions in memory map. (default: 64)");
  40static int max_iotlb_entries = 2048;
  41module_param(max_iotlb_entries, int, 0444);
  42MODULE_PARM_DESC(max_iotlb_entries,
  43	"Maximum number of iotlb entries. (default: 2048)");
  44
  45enum {
  46	VHOST_MEMORY_F_LOG = 0x1,
  47};
  48
  49#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
  50#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
  51
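/*
 * Note: with VIRTIO_RING_F_EVENT_IDX negotiated, used_event lives in the
 * spare slot at the tail of the avail ring and avail_event in the tail of
 * the used ring, which is why each macro above indexes the *other* ring at
 * vq->num.
 */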
  52#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
  53static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
  54{
  55	vq->user_be = !virtio_legacy_is_little_endian();
  56}
  57
  58static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
  59{
  60	vq->user_be = true;
  61}
  62
  63static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
  64{
  65	vq->user_be = false;
  66}
  67
  68static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
  69{
  70	struct vhost_vring_state s;
  71
  72	if (vq->private_data)
  73		return -EBUSY;
  74
  75	if (copy_from_user(&s, argp, sizeof(s)))
  76		return -EFAULT;
  77
  78	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
  79	    s.num != VHOST_VRING_BIG_ENDIAN)
  80		return -EINVAL;
  81
  82	if (s.num == VHOST_VRING_BIG_ENDIAN)
  83		vhost_enable_cross_endian_big(vq);
  84	else
  85		vhost_enable_cross_endian_little(vq);
  86
  87	return 0;
  88}
  89
  90static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
  91				   int __user *argp)
  92{
  93	struct vhost_vring_state s = {
  94		.index = idx,
  95		.num = vq->user_be
  96	};
  97
  98	if (copy_to_user(argp, &s, sizeof(s)))
  99		return -EFAULT;
 100
 101	return 0;
 102}
 103
 104static void vhost_init_is_le(struct vhost_virtqueue *vq)
 105{
 106	/* Note for legacy virtio: user_be is initialized at reset time
 107	 * according to the host endianness. If userspace does not set an
 108	 * explicit endianness, the default behavior is native endian, as
 109	 * expected by legacy virtio.
 110	 */
 111	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
 112}
 113#else
 114static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
 115{
 116}
 117
 118static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
 119{
 120	return -ENOIOCTLCMD;
 121}
 122
 123static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 124				   int __user *argp)
 125{
 126	return -ENOIOCTLCMD;
 127}
 128
 129static void vhost_init_is_le(struct vhost_virtqueue *vq)
 130{
 131	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
 132		|| virtio_legacy_is_little_endian();
 133}
 134#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
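/*
 * Note: CONFIG_VHOST_CROSS_ENDIAN_LEGACY only matters for legacy (pre
 * VIRTIO_F_VERSION_1) devices whose guest ring endianness may differ from
 * the host's; userspace declares it with VHOST_SET_VRING_ENDIAN. Modern
 * (VERSION_1) rings are always little endian, so is_le short-circuits on
 * the feature bit.
 */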
 135
 136static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 137{
 138	vhost_init_is_le(vq);
 139}
 140
 141struct vhost_flush_struct {
 142	struct vhost_work work;
 143	struct completion wait_event;
 144};
 145
 146static void vhost_flush_work(struct vhost_work *work)
 147{
 148	struct vhost_flush_struct *s;
 149
 150	s = container_of(work, struct vhost_flush_struct, work);
 151	complete(&s->wait_event);
 152}
 153
 154static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 155			    poll_table *pt)
 156{
 157	struct vhost_poll *poll;
 158
 159	poll = container_of(pt, struct vhost_poll, table);
 160	poll->wqh = wqh;
 161	add_wait_queue(wqh, &poll->wait);
 162}
 163
 164static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
 165			     void *key)
 166{
 167	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
 168	struct vhost_work *work = &poll->work;
 169
 170	if (!(key_to_poll(key) & poll->mask))
 171		return 0;
 172
 173	if (!poll->dev->use_worker)
 174		work->fn(work);
 175	else
 176		vhost_poll_queue(poll);
 177
 178	return 0;
 179}
 180
 181void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 182{
 183	clear_bit(VHOST_WORK_QUEUED, &work->flags);
 184	work->fn = fn;
 185}
 186EXPORT_SYMBOL_GPL(vhost_work_init);
 187
 188/* Init poll structure */
 189void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 190		     __poll_t mask, struct vhost_dev *dev,
 191		     struct vhost_virtqueue *vq)
 192{
 193	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 194	init_poll_funcptr(&poll->table, vhost_poll_func);
 195	poll->mask = mask;
 196	poll->dev = dev;
 197	poll->wqh = NULL;
 198	poll->vq = vq;
 199
 200	vhost_work_init(&poll->work, fn);
 201}
 202EXPORT_SYMBOL_GPL(vhost_poll_init);
 203
  204/* Start polling a file. We add ourselves to the file's wait queue. The caller
  205 * must keep a reference to the file until after vhost_poll_stop is called. */
 206int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 207{
 208	__poll_t mask;
 209
 210	if (poll->wqh)
 211		return 0;
 212
 213	mask = vfs_poll(file, &poll->table);
 214	if (mask)
 215		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
 216	if (mask & EPOLLERR) {
 217		vhost_poll_stop(poll);
 218		return -EINVAL;
 219	}
 220
 221	return 0;
 222}
 223EXPORT_SYMBOL_GPL(vhost_poll_start);
 224
 225/* Stop polling a file. After this function returns, it becomes safe to drop the
 226 * file reference. You must also flush afterwards. */
 227void vhost_poll_stop(struct vhost_poll *poll)
 228{
 229	if (poll->wqh) {
 230		remove_wait_queue(poll->wqh, &poll->wait);
 231		poll->wqh = NULL;
 232	}
 233}
 234EXPORT_SYMBOL_GPL(vhost_poll_stop);
 235
 236static void vhost_worker_queue(struct vhost_worker *worker,
 237			       struct vhost_work *work)
 238{
 239	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
 240		/* We can only add the work to the list after we're
 241		 * sure it was not in the list.
 242		 * test_and_set_bit() implies a memory barrier.
 243		 */
 244		llist_add(&work->node, &worker->work_list);
 245		vhost_task_wake(worker->vtsk);
 246	}
 247}
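/*
 * Note: queueing is lock-free - VHOST_WORK_QUEUED guards against double
 * insertion, the llist accepts concurrent producers, and the vhost_task is
 * woken to drain the list in vhost_worker().
 */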
 248
 249bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
 250{
 251	struct vhost_worker *worker;
 252	bool queued = false;
 253
 254	rcu_read_lock();
 255	worker = rcu_dereference(vq->worker);
 256	if (worker) {
 257		queued = true;
 258		vhost_worker_queue(worker, work);
 259	}
 260	rcu_read_unlock();
 261
 262	return queued;
 263}
 264EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
 265
 266void vhost_vq_flush(struct vhost_virtqueue *vq)
 267{
 268	struct vhost_flush_struct flush;
 269
 270	init_completion(&flush.wait_event);
 271	vhost_work_init(&flush.work, vhost_flush_work);
 272
 273	if (vhost_vq_work_queue(vq, &flush.work))
 274		wait_for_completion(&flush.wait_event);
 275}
 276EXPORT_SYMBOL_GPL(vhost_vq_flush);
 277
 278/**
 279 * vhost_worker_flush - flush a worker
 280 * @worker: worker to flush
 281 *
 282 * This does not use RCU to protect the worker, so the device or worker
 283 * mutex must be held.
 284 */
 285static void vhost_worker_flush(struct vhost_worker *worker)
 286{
 287	struct vhost_flush_struct flush;
 288
 289	init_completion(&flush.wait_event);
 290	vhost_work_init(&flush.work, vhost_flush_work);
 291
 292	vhost_worker_queue(worker, &flush.work);
 293	wait_for_completion(&flush.wait_event);
 294}
 295
 296void vhost_dev_flush(struct vhost_dev *dev)
 297{
 298	struct vhost_worker *worker;
 299	unsigned long i;
 300
 301	xa_for_each(&dev->worker_xa, i, worker) {
 302		mutex_lock(&worker->mutex);
 303		if (!worker->attachment_cnt) {
 304			mutex_unlock(&worker->mutex);
 305			continue;
 306		}
 307		vhost_worker_flush(worker);
 308		mutex_unlock(&worker->mutex);
 309	}
 310}
 311EXPORT_SYMBOL_GPL(vhost_dev_flush);
 312
 313/* A lockless hint for busy polling code to exit the loop */
 314bool vhost_vq_has_work(struct vhost_virtqueue *vq)
 315{
 316	struct vhost_worker *worker;
 317	bool has_work = false;
 318
 319	rcu_read_lock();
 320	worker = rcu_dereference(vq->worker);
 321	if (worker && !llist_empty(&worker->work_list))
 322		has_work = true;
 323	rcu_read_unlock();
 324
 325	return has_work;
 326}
 327EXPORT_SYMBOL_GPL(vhost_vq_has_work);
 328
 329void vhost_poll_queue(struct vhost_poll *poll)
 330{
 331	vhost_vq_work_queue(poll->vq, &poll->work);
 332}
 333EXPORT_SYMBOL_GPL(vhost_poll_queue);
 334
 335static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
 336{
 337	int j;
 338
 339	for (j = 0; j < VHOST_NUM_ADDRS; j++)
 340		vq->meta_iotlb[j] = NULL;
 341}
 342
 343static void vhost_vq_meta_reset(struct vhost_dev *d)
 344{
 345	int i;
 346
 347	for (i = 0; i < d->nvqs; ++i)
 348		__vhost_vq_meta_reset(d->vqs[i]);
 349}
 350
 351static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
 352{
 353	call_ctx->ctx = NULL;
 354	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
 355}
 356
 357bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
 358{
 359	return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
 360}
 361EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
 362
 363static void vhost_vq_reset(struct vhost_dev *dev,
 364			   struct vhost_virtqueue *vq)
 365{
 366	vq->num = 1;
 367	vq->desc = NULL;
 368	vq->avail = NULL;
 369	vq->used = NULL;
 370	vq->last_avail_idx = 0;
 371	vq->avail_idx = 0;
 372	vq->last_used_idx = 0;
 373	vq->signalled_used = 0;
 374	vq->signalled_used_valid = false;
 375	vq->used_flags = 0;
 376	vq->log_used = false;
 377	vq->log_addr = -1ull;
 378	vq->private_data = NULL;
 379	vq->acked_features = 0;
 380	vq->acked_backend_features = 0;
 381	vq->log_base = NULL;
 382	vq->error_ctx = NULL;
 383	vq->kick = NULL;
 384	vq->log_ctx = NULL;
 385	vhost_disable_cross_endian(vq);
 386	vhost_reset_is_le(vq);
 387	vq->busyloop_timeout = 0;
 388	vq->umem = NULL;
 389	vq->iotlb = NULL;
 390	rcu_assign_pointer(vq->worker, NULL);
 391	vhost_vring_call_reset(&vq->call_ctx);
 392	__vhost_vq_meta_reset(vq);
 393}
 394
 395static bool vhost_worker(void *data)
 396{
 397	struct vhost_worker *worker = data;
 398	struct vhost_work *work, *work_next;
 399	struct llist_node *node;
 400
 401	node = llist_del_all(&worker->work_list);
 402	if (node) {
 403		__set_current_state(TASK_RUNNING);
 404
 405		node = llist_reverse_order(node);
 406		/* make sure flag is seen after deletion */
 407		smp_wmb();
 408		llist_for_each_entry_safe(work, work_next, node, node) {
 409			clear_bit(VHOST_WORK_QUEUED, &work->flags);
 410			kcov_remote_start_common(worker->kcov_handle);
 411			work->fn(work);
 412			kcov_remote_stop();
 413			cond_resched();
 414		}
 415	}
 416
 417	return !!node;
 418}
 419
 420static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 421{
 422	kfree(vq->indirect);
 423	vq->indirect = NULL;
 424	kfree(vq->log);
 425	vq->log = NULL;
 426	kfree(vq->heads);
 427	vq->heads = NULL;
 428}
 429
 430/* Helper to allocate iovec buffers for all vqs. */
 431static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 432{
 433	struct vhost_virtqueue *vq;
 434	int i;
 435
 436	for (i = 0; i < dev->nvqs; ++i) {
 437		vq = dev->vqs[i];
 438		vq->indirect = kmalloc_array(UIO_MAXIOV,
 439					     sizeof(*vq->indirect),
 440					     GFP_KERNEL);
 441		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
 442					GFP_KERNEL);
 443		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
 444					  GFP_KERNEL);
 445		if (!vq->indirect || !vq->log || !vq->heads)
 446			goto err_nomem;
 447	}
 448	return 0;
 449
 450err_nomem:
 451	for (; i >= 0; --i)
 452		vhost_vq_free_iovecs(dev->vqs[i]);
 453	return -ENOMEM;
 454}
 455
 456static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 457{
 458	int i;
 459
 460	for (i = 0; i < dev->nvqs; ++i)
 461		vhost_vq_free_iovecs(dev->vqs[i]);
 462}
 463
 464bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
 465			  int pkts, int total_len)
 466{
 467	struct vhost_dev *dev = vq->dev;
 468
 469	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
 470	    pkts >= dev->weight) {
 471		vhost_poll_queue(&vq->poll);
 472		return true;
 473	}
 474
 475	return false;
 476}
 477EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
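/*
 * Note: drivers call vhost_exceeds_weight() from their handle_kick loops;
 * once the packet or byte budget is spent the vq is re-queued via
 * vhost_poll_queue() so other virtqueues sharing the worker get a turn.
 */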
 478
 479static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
 480				   unsigned int num)
 481{
 482	size_t event __maybe_unused =
 483	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 484
 485	return size_add(struct_size(vq->avail, ring, num), event);
 486}
 487
 488static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
 489				  unsigned int num)
 490{
 491	size_t event __maybe_unused =
 492	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 493
 494	return size_add(struct_size(vq->used, ring, num), event);
 495}
 496
 497static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
 498				  unsigned int num)
 499{
 500	return sizeof(*vq->desc) * num;
 501}
 502
 503void vhost_dev_init(struct vhost_dev *dev,
 504		    struct vhost_virtqueue **vqs, int nvqs,
 505		    int iov_limit, int weight, int byte_weight,
 506		    bool use_worker,
 507		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
 508				       struct vhost_iotlb_msg *msg))
 509{
 510	struct vhost_virtqueue *vq;
 511	int i;
 512
 513	dev->vqs = vqs;
 514	dev->nvqs = nvqs;
 515	mutex_init(&dev->mutex);
 516	dev->log_ctx = NULL;
 517	dev->umem = NULL;
 518	dev->iotlb = NULL;
 519	dev->mm = NULL;
 520	dev->iov_limit = iov_limit;
 521	dev->weight = weight;
 522	dev->byte_weight = byte_weight;
 523	dev->use_worker = use_worker;
 524	dev->msg_handler = msg_handler;
 525	init_waitqueue_head(&dev->wait);
 526	INIT_LIST_HEAD(&dev->read_list);
 527	INIT_LIST_HEAD(&dev->pending_list);
 528	spin_lock_init(&dev->iotlb_lock);
 529	xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC);
 530
 531	for (i = 0; i < dev->nvqs; ++i) {
 532		vq = dev->vqs[i];
 533		vq->log = NULL;
 534		vq->indirect = NULL;
 535		vq->heads = NULL;
 536		vq->dev = dev;
 537		mutex_init(&vq->mutex);
 538		vhost_vq_reset(dev, vq);
 539		if (vq->handle_kick)
 540			vhost_poll_init(&vq->poll, vq->handle_kick,
 541					EPOLLIN, dev, vq);
 542	}
 543}
 544EXPORT_SYMBOL_GPL(vhost_dev_init);
 545
 546/* Caller should have device mutex */
 547long vhost_dev_check_owner(struct vhost_dev *dev)
 548{
 549	/* Are you the owner? If not, I don't think you mean to do that */
 550	return dev->mm == current->mm ? 0 : -EPERM;
 551}
 552EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 553
 554/* Caller should have device mutex */
 555bool vhost_dev_has_owner(struct vhost_dev *dev)
 556{
 557	return dev->mm;
 558}
 559EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 560
 561static void vhost_attach_mm(struct vhost_dev *dev)
 562{
 563	/* No owner, become one */
 564	if (dev->use_worker) {
 565		dev->mm = get_task_mm(current);
 566	} else {
  567	/* vDPA devices do not use a worker thread, so there's
  568	 * no need to hold the address space for mm. This helps
  569	 * to avoid a deadlock in the case of mmap(), which may
  570	 * hold a refcnt on the file and depend on the release
  571	 * method to remove the vma.
 572		 */
 573		dev->mm = current->mm;
 574		mmgrab(dev->mm);
 575	}
 576}
 577
 578static void vhost_detach_mm(struct vhost_dev *dev)
 579{
 580	if (!dev->mm)
 581		return;
 582
 583	if (dev->use_worker)
 584		mmput(dev->mm);
 585	else
 586		mmdrop(dev->mm);
 587
 588	dev->mm = NULL;
 589}
 590
 591static void vhost_worker_destroy(struct vhost_dev *dev,
 592				 struct vhost_worker *worker)
 593{
 594	if (!worker)
 595		return;
 596
 597	WARN_ON(!llist_empty(&worker->work_list));
 598	xa_erase(&dev->worker_xa, worker->id);
 599	vhost_task_stop(worker->vtsk);
 600	kfree(worker);
 601}
 602
 603static void vhost_workers_free(struct vhost_dev *dev)
 604{
 605	struct vhost_worker *worker;
 606	unsigned long i;
 607
 608	if (!dev->use_worker)
 609		return;
 610
 611	for (i = 0; i < dev->nvqs; i++)
 612		rcu_assign_pointer(dev->vqs[i]->worker, NULL);
 613	/*
 614	 * Free the default worker we created and cleanup workers userspace
 615	 * created but couldn't clean up (it forgot or crashed).
 616	 */
 617	xa_for_each(&dev->worker_xa, i, worker)
 618		vhost_worker_destroy(dev, worker);
 619	xa_destroy(&dev->worker_xa);
 620}
 621
 622static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
 623{
 624	struct vhost_worker *worker;
 625	struct vhost_task *vtsk;
 626	char name[TASK_COMM_LEN];
 627	int ret;
 628	u32 id;
 629
 630	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
 631	if (!worker)
 632		return NULL;
 633
 634	snprintf(name, sizeof(name), "vhost-%d", current->pid);
 635
 636	vtsk = vhost_task_create(vhost_worker, worker, name);
 637	if (!vtsk)
 638		goto free_worker;
 639
 640	mutex_init(&worker->mutex);
 641	init_llist_head(&worker->work_list);
 642	worker->kcov_handle = kcov_common_handle();
 643	worker->vtsk = vtsk;
 644
 645	vhost_task_start(vtsk);
 646
 647	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
 648	if (ret < 0)
 649		goto stop_worker;
 650	worker->id = id;
 651
 652	return worker;
 653
 654stop_worker:
 655	vhost_task_stop(vtsk);
 656free_worker:
 657	kfree(worker);
 658	return NULL;
 659}
 660
 661/* Caller must have device mutex */
 662static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
 663				     struct vhost_worker *worker)
 664{
 665	struct vhost_worker *old_worker;
 666
 667	old_worker = rcu_dereference_check(vq->worker,
 668					   lockdep_is_held(&vq->dev->mutex));
 669
 670	mutex_lock(&worker->mutex);
 671	worker->attachment_cnt++;
 672	mutex_unlock(&worker->mutex);
 673	rcu_assign_pointer(vq->worker, worker);
 674
 675	if (!old_worker)
 676		return;
 677	/*
 678	 * Take the worker mutex to make sure we see the work queued from
 679	 * device wide flushes which doesn't use RCU for execution.
 680	 */
 681	mutex_lock(&old_worker->mutex);
 682	old_worker->attachment_cnt--;
 683	/*
 684	 * We don't want to call synchronize_rcu for every vq during setup
  685	 * because it will slow down VM startup. If we haven't done
  686	 * VHOST_SET_VRING_KICK and haven't done the driver specific
  687	 * SET_ENDPOINT/RUNNING then we can skip the sync since there will
  688	 * not be any work queued for scsi and net.
 689	 */
 690	mutex_lock(&vq->mutex);
 691	if (!vhost_vq_get_backend(vq) && !vq->kick) {
 692		mutex_unlock(&vq->mutex);
 693		mutex_unlock(&old_worker->mutex);
 694		/*
 695		 * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
 696		 * Warn if it adds support for multiple workers but forgets to
 697		 * handle the early queueing case.
 698		 */
 699		WARN_ON(!old_worker->attachment_cnt &&
 700			!llist_empty(&old_worker->work_list));
 701		return;
 702	}
 703	mutex_unlock(&vq->mutex);
 704
 705	/* Make sure new vq queue/flush/poll calls see the new worker */
 706	synchronize_rcu();
 707	/* Make sure whatever was queued gets run */
 708	vhost_worker_flush(old_worker);
 709	mutex_unlock(&old_worker->mutex);
 710}
 711
 712 /* Caller must have device mutex */
 713static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
 714				  struct vhost_vring_worker *info)
 715{
 716	unsigned long index = info->worker_id;
 717	struct vhost_dev *dev = vq->dev;
 718	struct vhost_worker *worker;
 719
 720	if (!dev->use_worker)
 721		return -EINVAL;
 722
 723	worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
 724	if (!worker || worker->id != info->worker_id)
 725		return -ENODEV;
 726
 727	__vhost_vq_attach_worker(vq, worker);
 728	return 0;
 729}
 730
 731/* Caller must have device mutex */
 732static int vhost_new_worker(struct vhost_dev *dev,
 733			    struct vhost_worker_state *info)
 734{
 735	struct vhost_worker *worker;
 736
 737	worker = vhost_worker_create(dev);
 738	if (!worker)
 739		return -ENOMEM;
 740
 741	info->worker_id = worker->id;
 742	return 0;
 743}
 744
 745/* Caller must have device mutex */
 746static int vhost_free_worker(struct vhost_dev *dev,
 747			     struct vhost_worker_state *info)
 748{
 749	unsigned long index = info->worker_id;
 750	struct vhost_worker *worker;
 751
 752	worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
 753	if (!worker || worker->id != info->worker_id)
 754		return -ENODEV;
 755
 756	mutex_lock(&worker->mutex);
 757	if (worker->attachment_cnt) {
 758		mutex_unlock(&worker->mutex);
 759		return -EBUSY;
 760	}
 761	mutex_unlock(&worker->mutex);
 762
 763	vhost_worker_destroy(dev, worker);
 764	return 0;
 765}
 766
 767static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
 768				  struct vhost_virtqueue **vq, u32 *id)
 769{
 770	u32 __user *idxp = argp;
 771	u32 idx;
 772	long r;
 773
 774	r = get_user(idx, idxp);
 775	if (r < 0)
 776		return r;
 777
 778	if (idx >= dev->nvqs)
 779		return -ENOBUFS;
 780
 781	idx = array_index_nospec(idx, dev->nvqs);
 782
 783	*vq = dev->vqs[idx];
 784	*id = idx;
 785	return 0;
 786}
 787
 788/* Caller must have device mutex */
 789long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
 790			void __user *argp)
 791{
 792	struct vhost_vring_worker ring_worker;
 793	struct vhost_worker_state state;
 794	struct vhost_worker *worker;
 795	struct vhost_virtqueue *vq;
 796	long ret;
 797	u32 idx;
 798
 799	if (!dev->use_worker)
 800		return -EINVAL;
 801
 802	if (!vhost_dev_has_owner(dev))
 803		return -EINVAL;
 804
 805	ret = vhost_dev_check_owner(dev);
 806	if (ret)
 807		return ret;
 808
 809	switch (ioctl) {
 810	/* dev worker ioctls */
 811	case VHOST_NEW_WORKER:
 812		ret = vhost_new_worker(dev, &state);
 813		if (!ret && copy_to_user(argp, &state, sizeof(state)))
 814			ret = -EFAULT;
 815		return ret;
 816	case VHOST_FREE_WORKER:
 817		if (copy_from_user(&state, argp, sizeof(state)))
 818			return -EFAULT;
 819		return vhost_free_worker(dev, &state);
 820	/* vring worker ioctls */
 821	case VHOST_ATTACH_VRING_WORKER:
 822	case VHOST_GET_VRING_WORKER:
 823		break;
 824	default:
 825		return -ENOIOCTLCMD;
 826	}
 827
 828	ret = vhost_get_vq_from_user(dev, argp, &vq, &idx);
 829	if (ret)
 830		return ret;
 831
 832	switch (ioctl) {
 833	case VHOST_ATTACH_VRING_WORKER:
 834		if (copy_from_user(&ring_worker, argp, sizeof(ring_worker))) {
 835			ret = -EFAULT;
 836			break;
 837		}
 838
 839		ret = vhost_vq_attach_worker(vq, &ring_worker);
 840		break;
 841	case VHOST_GET_VRING_WORKER:
 842		worker = rcu_dereference_check(vq->worker,
 843					       lockdep_is_held(&dev->mutex));
 844		if (!worker) {
 845			ret = -EINVAL;
 846			break;
 847		}
 848
 849		ring_worker.index = idx;
 850		ring_worker.worker_id = worker->id;
 851
 852		if (copy_to_user(argp, &ring_worker, sizeof(ring_worker)))
 853			ret = -EFAULT;
 854		break;
 855	default:
 856		ret = -ENOIOCTLCMD;
 857		break;
 858	}
 859
 860	return ret;
 861}
 862EXPORT_SYMBOL_GPL(vhost_worker_ioctl);
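/*
 * Note: VHOST_NEW_WORKER/VHOST_FREE_WORKER manage worker threads at the
 * device level, while VHOST_ATTACH_VRING_WORKER/VHOST_GET_VRING_WORKER bind
 * or report a specific virtqueue's worker by worker_id.
 */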
 863
 864/* Caller should have device mutex */
 865long vhost_dev_set_owner(struct vhost_dev *dev)
 866{
 867	struct vhost_worker *worker;
 868	int err, i;
 869
 870	/* Is there an owner already? */
 871	if (vhost_dev_has_owner(dev)) {
 872		err = -EBUSY;
 873		goto err_mm;
 874	}
 875
 876	vhost_attach_mm(dev);
 877
 878	err = vhost_dev_alloc_iovecs(dev);
 879	if (err)
 880		goto err_iovecs;
 881
 882	if (dev->use_worker) {
 883		/*
  884		 * This should be done last, because vsock can queue work
  885		 * before VHOST_SET_OWNER. Doing it last simplifies the failure
  886		 * path below since we don't have to worry about vsock queueing
  887		 * work while we free the worker.
 888		 */
 889		worker = vhost_worker_create(dev);
 890		if (!worker) {
 891			err = -ENOMEM;
 892			goto err_worker;
 893		}
 894
 895		for (i = 0; i < dev->nvqs; i++)
 896			__vhost_vq_attach_worker(dev->vqs[i], worker);
 897	}
 898
 899	return 0;
 900
 901err_worker:
 902	vhost_dev_free_iovecs(dev);
 903err_iovecs:
 904	vhost_detach_mm(dev);
 905err_mm:
 906	return err;
 907}
 908EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 909
 910static struct vhost_iotlb *iotlb_alloc(void)
 911{
 912	return vhost_iotlb_alloc(max_iotlb_entries,
 913				 VHOST_IOTLB_FLAG_RETIRE);
 914}
 915
 916struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
 917{
 918	return iotlb_alloc();
 919}
 920EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 921
 922/* Caller should have device mutex */
 923void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
 924{
 925	int i;
 926
 927	vhost_dev_cleanup(dev);
 928
 929	dev->umem = umem;
 930	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
 931	 * VQs aren't running.
 932	 */
 933	for (i = 0; i < dev->nvqs; ++i)
 934		dev->vqs[i]->umem = umem;
 935}
 936EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 937
 938void vhost_dev_stop(struct vhost_dev *dev)
 939{
 940	int i;
 941
 942	for (i = 0; i < dev->nvqs; ++i) {
 943		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
 944			vhost_poll_stop(&dev->vqs[i]->poll);
 945	}
 946
 947	vhost_dev_flush(dev);
 948}
 949EXPORT_SYMBOL_GPL(vhost_dev_stop);
 950
 951void vhost_clear_msg(struct vhost_dev *dev)
 952{
 953	struct vhost_msg_node *node, *n;
 954
 955	spin_lock(&dev->iotlb_lock);
 956
 957	list_for_each_entry_safe(node, n, &dev->read_list, node) {
 958		list_del(&node->node);
 959		kfree(node);
 960	}
 961
 962	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
 963		list_del(&node->node);
 964		kfree(node);
 965	}
 966
 967	spin_unlock(&dev->iotlb_lock);
 968}
 969EXPORT_SYMBOL_GPL(vhost_clear_msg);
 970
 971void vhost_dev_cleanup(struct vhost_dev *dev)
 972{
 973	int i;
 974
 975	for (i = 0; i < dev->nvqs; ++i) {
 976		if (dev->vqs[i]->error_ctx)
 977			eventfd_ctx_put(dev->vqs[i]->error_ctx);
 978		if (dev->vqs[i]->kick)
 979			fput(dev->vqs[i]->kick);
 980		if (dev->vqs[i]->call_ctx.ctx)
 981			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
 982		vhost_vq_reset(dev, dev->vqs[i]);
 983	}
 984	vhost_dev_free_iovecs(dev);
 985	if (dev->log_ctx)
 986		eventfd_ctx_put(dev->log_ctx);
 987	dev->log_ctx = NULL;
 988	/* No one will access memory at this point */
 989	vhost_iotlb_free(dev->umem);
 990	dev->umem = NULL;
 991	vhost_iotlb_free(dev->iotlb);
 992	dev->iotlb = NULL;
 993	vhost_clear_msg(dev);
 994	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
 995	vhost_workers_free(dev);
 996	vhost_detach_mm(dev);
 997}
 998EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 999
1000static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
1001{
1002	u64 a = addr / VHOST_PAGE_SIZE / 8;
1003
1004	/* Make sure 64 bit math will not overflow. */
1005	if (a > ULONG_MAX - (unsigned long)log_base ||
1006	    a + (unsigned long)log_base > ULONG_MAX)
1007		return false;
1008
1009	return access_ok(log_base + a,
1010			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
1011}
1012
1013/* Make sure 64 bit math will not overflow. */
1014static bool vhost_overflow(u64 uaddr, u64 size)
1015{
1016	if (uaddr > ULONG_MAX || size > ULONG_MAX)
1017		return true;
1018
1019	if (!size)
1020		return false;
1021
1022	return uaddr > ULONG_MAX - size + 1;
1023}
1024
1025/* Caller should have vq mutex and device mutex. */
1026static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
1027				int log_all)
1028{
1029	struct vhost_iotlb_map *map;
1030
1031	if (!umem)
1032		return false;
1033
1034	list_for_each_entry(map, &umem->list, link) {
1035		unsigned long a = map->addr;
1036
1037		if (vhost_overflow(map->addr, map->size))
1038			return false;
1039
1040
1041		if (!access_ok((void __user *)a, map->size))
1042			return false;
1043		else if (log_all && !log_access_ok(log_base,
1044						   map->start,
1045						   map->size))
1046			return false;
1047	}
1048	return true;
1049}
1050
1051static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
1052					       u64 addr, unsigned int size,
1053					       int type)
1054{
1055	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
1056
1057	if (!map)
1058		return NULL;
1059
1060	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
1061}
1062
1063/* Can we switch to this memory table? */
1064/* Caller should have device mutex but not vq mutex */
1065static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
1066			     int log_all)
1067{
1068	int i;
1069
1070	for (i = 0; i < d->nvqs; ++i) {
1071		bool ok;
1072		bool log;
1073
1074		mutex_lock(&d->vqs[i]->mutex);
1075		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
1076		/* If ring is inactive, will check when it's enabled. */
1077		if (d->vqs[i]->private_data)
1078			ok = vq_memory_access_ok(d->vqs[i]->log_base,
1079						 umem, log);
1080		else
1081			ok = true;
1082		mutex_unlock(&d->vqs[i]->mutex);
1083		if (!ok)
1084			return false;
1085	}
1086	return true;
1087}
1088
1089static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
1090			  struct iovec iov[], int iov_size, int access);
1091
1092static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
1093			      const void *from, unsigned size)
1094{
1095	int ret;
1096
1097	if (!vq->iotlb)
1098		return __copy_to_user(to, from, size);
1099	else {
 1100		/* This function should be called after iotlb
 1101		 * prefetch, which means we're sure that the whole vq
 1102		 * can be accessed through the iotlb. So -EAGAIN should
 1103		 * not happen in this case.
1104		 */
1105		struct iov_iter t;
1106		void __user *uaddr = vhost_vq_meta_fetch(vq,
1107				     (u64)(uintptr_t)to, size,
1108				     VHOST_ADDR_USED);
1109
1110		if (uaddr)
1111			return __copy_to_user(uaddr, from, size);
1112
1113		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
1114				     ARRAY_SIZE(vq->iotlb_iov),
1115				     VHOST_ACCESS_WO);
1116		if (ret < 0)
1117			goto out;
1118		iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size);
1119		ret = copy_to_iter(from, size, &t);
1120		if (ret == size)
1121			ret = 0;
1122	}
1123out:
1124	return ret;
1125}
1126
1127static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
1128				void __user *from, unsigned size)
1129{
1130	int ret;
1131
1132	if (!vq->iotlb)
1133		return __copy_from_user(to, from, size);
1134	else {
1135		/* This function should be called after iotlb
 1136		 * prefetch, which means we're sure that the vq
 1137		 * can be accessed through the iotlb. So -EAGAIN should
1138		 * not happen in this case.
1139		 */
1140		void __user *uaddr = vhost_vq_meta_fetch(vq,
1141				     (u64)(uintptr_t)from, size,
1142				     VHOST_ADDR_DESC);
1143		struct iov_iter f;
1144
1145		if (uaddr)
1146			return __copy_from_user(to, uaddr, size);
1147
1148		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
1149				     ARRAY_SIZE(vq->iotlb_iov),
1150				     VHOST_ACCESS_RO);
1151		if (ret < 0) {
1152			vq_err(vq, "IOTLB translation failure: uaddr "
1153			       "%p size 0x%llx\n", from,
1154			       (unsigned long long) size);
1155			goto out;
1156		}
1157		iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size);
1158		ret = copy_from_iter(to, size, &f);
1159		if (ret == size)
1160			ret = 0;
1161	}
1162
1163out:
1164	return ret;
1165}
1166
1167static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
1168					  void __user *addr, unsigned int size,
1169					  int type)
1170{
1171	int ret;
1172
1173	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
1174			     ARRAY_SIZE(vq->iotlb_iov),
1175			     VHOST_ACCESS_RO);
1176	if (ret < 0) {
1177		vq_err(vq, "IOTLB translation failure: uaddr "
1178			"%p size 0x%llx\n", addr,
1179			(unsigned long long) size);
1180		return NULL;
1181	}
1182
1183	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
1184		vq_err(vq, "Non atomic userspace memory access: uaddr "
1185			"%p size 0x%llx\n", addr,
1186			(unsigned long long) size);
1187		return NULL;
1188	}
1189
1190	return vq->iotlb_iov[0].iov_base;
1191}
1192
1193/* This function should be called after iotlb
 1194 * prefetch, which means we're sure that the vq
 1195 * can be accessed through the iotlb. So -EAGAIN should
1196 * not happen in this case.
1197 */
1198static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
1199					    void __user *addr, unsigned int size,
1200					    int type)
1201{
1202	void __user *uaddr = vhost_vq_meta_fetch(vq,
1203			     (u64)(uintptr_t)addr, size, type);
1204	if (uaddr)
1205		return uaddr;
1206
1207	return __vhost_get_user_slow(vq, addr, size, type);
1208}
1209
1210#define vhost_put_user(vq, x, ptr)		\
1211({ \
1212	int ret; \
1213	if (!vq->iotlb) { \
1214		ret = __put_user(x, ptr); \
1215	} else { \
1216		__typeof__(ptr) to = \
1217			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
1218					  sizeof(*ptr), VHOST_ADDR_USED); \
1219		if (to != NULL) \
1220			ret = __put_user(x, to); \
1221		else \
1222			ret = -EFAULT;	\
1223	} \
1224	ret; \
1225})
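/*
 * Note: with an IOTLB in use, vhost_put_user()/vhost_get_user() first try
 * the per-vq meta_iotlb cache via __vhost_get_user(); a cache miss falls
 * back to a full translate_desc() walk in __vhost_get_user_slow().
 */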
1226
1227static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
1228{
1229	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
1230			      vhost_avail_event(vq));
1231}
1232
1233static inline int vhost_put_used(struct vhost_virtqueue *vq,
1234				 struct vring_used_elem *head, int idx,
1235				 int count)
1236{
1237	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
1238				  count * sizeof(*head));
1239}
1240
1241static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
1242
1243{
1244	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
1245			      &vq->used->flags);
1246}
1247
1248static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
1249
1250{
1251	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
1252			      &vq->used->idx);
1253}
1254
1255#define vhost_get_user(vq, x, ptr, type)		\
1256({ \
1257	int ret; \
1258	if (!vq->iotlb) { \
1259		ret = __get_user(x, ptr); \
1260	} else { \
1261		__typeof__(ptr) from = \
1262			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
1263							   sizeof(*ptr), \
1264							   type); \
1265		if (from != NULL) \
1266			ret = __get_user(x, from); \
1267		else \
1268			ret = -EFAULT; \
1269	} \
1270	ret; \
1271})
1272
1273#define vhost_get_avail(vq, x, ptr) \
1274	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
1275
1276#define vhost_get_used(vq, x, ptr) \
1277	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
1278
1279static void vhost_dev_lock_vqs(struct vhost_dev *d)
1280{
1281	int i = 0;
1282	for (i = 0; i < d->nvqs; ++i)
1283		mutex_lock_nested(&d->vqs[i]->mutex, i);
1284}
1285
1286static void vhost_dev_unlock_vqs(struct vhost_dev *d)
1287{
1288	int i = 0;
1289	for (i = 0; i < d->nvqs; ++i)
1290		mutex_unlock(&d->vqs[i]->mutex);
1291}
1292
1293static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
1294				      __virtio16 *idx)
1295{
1296	return vhost_get_avail(vq, *idx, &vq->avail->idx);
1297}
1298
1299static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1300				       __virtio16 *head, int idx)
1301{
1302	return vhost_get_avail(vq, *head,
1303			       &vq->avail->ring[idx & (vq->num - 1)]);
1304}
1305
1306static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
1307					__virtio16 *flags)
1308{
1309	return vhost_get_avail(vq, *flags, &vq->avail->flags);
1310}
1311
1312static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
1313				       __virtio16 *event)
1314{
1315	return vhost_get_avail(vq, *event, vhost_used_event(vq));
1316}
1317
1318static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
1319				     __virtio16 *idx)
1320{
1321	return vhost_get_used(vq, *idx, &vq->used->idx);
1322}
1323
1324static inline int vhost_get_desc(struct vhost_virtqueue *vq,
1325				 struct vring_desc *desc, int idx)
1326{
1327	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
1328}
1329
1330static void vhost_iotlb_notify_vq(struct vhost_dev *d,
1331				  struct vhost_iotlb_msg *msg)
1332{
1333	struct vhost_msg_node *node, *n;
1334
1335	spin_lock(&d->iotlb_lock);
1336
1337	list_for_each_entry_safe(node, n, &d->pending_list, node) {
1338		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
1339		if (msg->iova <= vq_msg->iova &&
1340		    msg->iova + msg->size - 1 >= vq_msg->iova &&
1341		    vq_msg->type == VHOST_IOTLB_MISS) {
1342			vhost_poll_queue(&node->vq->poll);
1343			list_del(&node->node);
1344			kfree(node);
1345		}
1346	}
1347
1348	spin_unlock(&d->iotlb_lock);
1349}
1350
1351static bool umem_access_ok(u64 uaddr, u64 size, int access)
1352{
1353	unsigned long a = uaddr;
1354
1355	/* Make sure 64 bit math will not overflow. */
1356	if (vhost_overflow(uaddr, size))
1357		return false;
1358
1359	if ((access & VHOST_ACCESS_RO) &&
1360	    !access_ok((void __user *)a, size))
1361		return false;
1362	if ((access & VHOST_ACCESS_WO) &&
1363	    !access_ok((void __user *)a, size))
1364		return false;
1365	return true;
1366}
1367
1368static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
1369				   struct vhost_iotlb_msg *msg)
1370{
1371	int ret = 0;
1372
1373	if (asid != 0)
1374		return -EINVAL;
1375
1376	mutex_lock(&dev->mutex);
1377	vhost_dev_lock_vqs(dev);
1378	switch (msg->type) {
1379	case VHOST_IOTLB_UPDATE:
1380		if (!dev->iotlb) {
1381			ret = -EFAULT;
1382			break;
1383		}
1384		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
1385			ret = -EFAULT;
1386			break;
1387		}
1388		vhost_vq_meta_reset(dev);
1389		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
1390					  msg->iova + msg->size - 1,
1391					  msg->uaddr, msg->perm)) {
1392			ret = -ENOMEM;
1393			break;
1394		}
1395		vhost_iotlb_notify_vq(dev, msg);
1396		break;
1397	case VHOST_IOTLB_INVALIDATE:
1398		if (!dev->iotlb) {
1399			ret = -EFAULT;
1400			break;
1401		}
1402		vhost_vq_meta_reset(dev);
1403		vhost_iotlb_del_range(dev->iotlb, msg->iova,
1404				      msg->iova + msg->size - 1);
1405		break;
1406	default:
1407		ret = -EINVAL;
1408		break;
1409	}
1410
1411	vhost_dev_unlock_vqs(dev);
1412	mutex_unlock(&dev->mutex);
1413
1414	return ret;
1415}
1416ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1417			     struct iov_iter *from)
1418{
1419	struct vhost_iotlb_msg msg;
1420	size_t offset;
1421	int type, ret;
1422	u32 asid = 0;
1423
1424	ret = copy_from_iter(&type, sizeof(type), from);
1425	if (ret != sizeof(type)) {
1426		ret = -EINVAL;
1427		goto done;
1428	}
1429
1430	switch (type) {
1431	case VHOST_IOTLB_MSG:
 1432		/* There may be a hole after the type field for the V1 message,
1433		 * so skip it here.
1434		 */
1435		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
1436		break;
1437	case VHOST_IOTLB_MSG_V2:
1438		if (vhost_backend_has_feature(dev->vqs[0],
1439					      VHOST_BACKEND_F_IOTLB_ASID)) {
1440			ret = copy_from_iter(&asid, sizeof(asid), from);
1441			if (ret != sizeof(asid)) {
1442				ret = -EINVAL;
1443				goto done;
1444			}
1445			offset = 0;
1446		} else
1447			offset = sizeof(__u32);
1448		break;
1449	default:
1450		ret = -EINVAL;
1451		goto done;
1452	}
1453
1454	iov_iter_advance(from, offset);
1455	ret = copy_from_iter(&msg, sizeof(msg), from);
1456	if (ret != sizeof(msg)) {
1457		ret = -EINVAL;
1458		goto done;
1459	}
1460
1461	if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
1462		ret = -EINVAL;
1463		goto done;
1464	}
1465
1466	if (dev->msg_handler)
1467		ret = dev->msg_handler(dev, asid, &msg);
1468	else
1469		ret = vhost_process_iotlb_msg(dev, asid, &msg);
1470	if (ret) {
1471		ret = -EFAULT;
1472		goto done;
1473	}
1474
1475	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
1476	      sizeof(struct vhost_msg_v2);
1477done:
1478	return ret;
1479}
1480EXPORT_SYMBOL(vhost_chr_write_iter);
1481
1482__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1483			    poll_table *wait)
1484{
1485	__poll_t mask = 0;
1486
1487	poll_wait(file, &dev->wait, wait);
1488
1489	if (!list_empty(&dev->read_list))
1490		mask |= EPOLLIN | EPOLLRDNORM;
1491
1492	return mask;
1493}
1494EXPORT_SYMBOL(vhost_chr_poll);
1495
1496ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1497			    int noblock)
1498{
1499	DEFINE_WAIT(wait);
1500	struct vhost_msg_node *node;
1501	ssize_t ret = 0;
1502	unsigned size = sizeof(struct vhost_msg);
1503
1504	if (iov_iter_count(to) < size)
1505		return 0;
1506
1507	while (1) {
1508		if (!noblock)
1509			prepare_to_wait(&dev->wait, &wait,
1510					TASK_INTERRUPTIBLE);
1511
1512		node = vhost_dequeue_msg(dev, &dev->read_list);
1513		if (node)
1514			break;
1515		if (noblock) {
1516			ret = -EAGAIN;
1517			break;
1518		}
1519		if (signal_pending(current)) {
1520			ret = -ERESTARTSYS;
1521			break;
1522		}
1523		if (!dev->iotlb) {
1524			ret = -EBADFD;
1525			break;
1526		}
1527
1528		schedule();
1529	}
1530
1531	if (!noblock)
1532		finish_wait(&dev->wait, &wait);
1533
1534	if (node) {
1535		struct vhost_iotlb_msg *msg;
1536		void *start = &node->msg;
1537
1538		switch (node->msg.type) {
1539		case VHOST_IOTLB_MSG:
1540			size = sizeof(node->msg);
1541			msg = &node->msg.iotlb;
1542			break;
1543		case VHOST_IOTLB_MSG_V2:
1544			size = sizeof(node->msg_v2);
1545			msg = &node->msg_v2.iotlb;
1546			break;
1547		default:
1548			BUG();
1549			break;
1550		}
1551
1552		ret = copy_to_iter(start, size, to);
1553		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
1554			kfree(node);
1555			return ret;
1556		}
1557		vhost_enqueue_msg(dev, &dev->pending_list, node);
1558	}
1559
1560	return ret;
1561}
1562EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
1563
1564static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1565{
1566	struct vhost_dev *dev = vq->dev;
1567	struct vhost_msg_node *node;
1568	struct vhost_iotlb_msg *msg;
1569	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
1570
1571	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
1572	if (!node)
1573		return -ENOMEM;
1574
1575	if (v2) {
1576		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
1577		msg = &node->msg_v2.iotlb;
1578	} else {
1579		msg = &node->msg.iotlb;
1580	}
1581
1582	msg->type = VHOST_IOTLB_MISS;
1583	msg->iova = iova;
1584	msg->perm = access;
1585
1586	vhost_enqueue_msg(dev, &dev->read_list, node);
1587
1588	return 0;
1589}
1590
1591static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1592			 vring_desc_t __user *desc,
1593			 vring_avail_t __user *avail,
1594			 vring_used_t __user *used)
1595
1596{
1597	/* If an IOTLB device is present, the vring addresses are
1598	 * GIOVAs. Access validation occurs at prefetch time. */
1599	if (vq->iotlb)
1600		return true;
1601
1602	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
1603	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
1604	       access_ok(used, vhost_get_used_size(vq, num));
1605}
1606
1607static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1608				 const struct vhost_iotlb_map *map,
1609				 int type)
1610{
1611	int access = (type == VHOST_ADDR_USED) ?
1612		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;
1613
1614	if (likely(map->perm & access))
1615		vq->meta_iotlb[type] = map;
1616}
1617
1618static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1619			    int access, u64 addr, u64 len, int type)
1620{
1621	const struct vhost_iotlb_map *map;
1622	struct vhost_iotlb *umem = vq->iotlb;
1623	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
1624
1625	if (vhost_vq_meta_fetch(vq, addr, len, type))
1626		return true;
1627
1628	while (len > s) {
1629		map = vhost_iotlb_itree_first(umem, addr, last);
1630		if (map == NULL || map->start > addr) {
1631			vhost_iotlb_miss(vq, addr, access);
1632			return false;
1633		} else if (!(map->perm & access)) {
 1634			/* Report the possible access violation by
 1635			 * requesting another translation from userspace.
1636			 */
1637			return false;
1638		}
1639
1640		size = map->size - addr + map->start;
1641
1642		if (orig_addr == addr && size >= len)
1643			vhost_vq_meta_update(vq, map, type);
1644
1645		s += size;
1646		addr += size;
1647	}
1648
1649	return true;
1650}
1651
1652int vq_meta_prefetch(struct vhost_virtqueue *vq)
1653{
1654	unsigned int num = vq->num;
1655
1656	if (!vq->iotlb)
1657		return 1;
1658
1659	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
1660			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
1661	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
1662			       vhost_get_avail_size(vq, num),
1663			       VHOST_ADDR_AVAIL) &&
1664	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
1665			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
1666}
1667EXPORT_SYMBOL_GPL(vq_meta_prefetch);
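/*
 * Note: vq_meta_prefetch() warms the meta_iotlb cache for the desc, avail
 * and used rings. On a missing translation iotlb_access_ok() posts a
 * VHOST_IOTLB_MISS message for userspace; once the matching
 * VHOST_IOTLB_UPDATE arrives, vhost_iotlb_notify_vq() re-queues the vq.
 */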
1668
1669/* Can we log writes? */
1670/* Caller should have device mutex but not vq mutex */
1671bool vhost_log_access_ok(struct vhost_dev *dev)
1672{
1673	return memory_access_ok(dev, dev->umem, 1);
1674}
1675EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1676
1677static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
1678				  void __user *log_base,
1679				  bool log_used,
1680				  u64 log_addr)
1681{
1682	/* If an IOTLB device is present, log_addr is a GIOVA that
1683	 * will never be logged by log_used(). */
1684	if (vq->iotlb)
1685		return true;
1686
1687	return !log_used || log_access_ok(log_base, log_addr,
1688					  vhost_get_used_size(vq, vq->num));
1689}
1690
1691/* Verify access for write logging. */
1692/* Caller should have vq mutex and device mutex */
1693static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1694			     void __user *log_base)
1695{
1696	return vq_memory_access_ok(log_base, vq->umem,
1697				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1698		vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
1699}
1700
1701/* Can we start vq? */
1702/* Caller should have vq mutex and device mutex */
1703bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
1704{
1705	if (!vq_log_access_ok(vq, vq->log_base))
1706		return false;
1707
1708	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1709}
1710EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1711
1712static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1713{
1714	struct vhost_memory mem, *newmem;
1715	struct vhost_memory_region *region;
1716	struct vhost_iotlb *newumem, *oldumem;
1717	unsigned long size = offsetof(struct vhost_memory, regions);
1718	int i;
1719
1720	if (copy_from_user(&mem, m, size))
1721		return -EFAULT;
1722	if (mem.padding)
1723		return -EOPNOTSUPP;
1724	if (mem.nregions > max_mem_regions)
1725		return -E2BIG;
1726	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1727			GFP_KERNEL);
1728	if (!newmem)
1729		return -ENOMEM;
1730
1731	memcpy(newmem, &mem, size);
1732	if (copy_from_user(newmem->regions, m->regions,
1733			   flex_array_size(newmem, regions, mem.nregions))) {
1734		kvfree(newmem);
1735		return -EFAULT;
1736	}
1737
1738	newumem = iotlb_alloc();
1739	if (!newumem) {
1740		kvfree(newmem);
1741		return -ENOMEM;
1742	}
1743
1744	for (region = newmem->regions;
1745	     region < newmem->regions + mem.nregions;
1746	     region++) {
1747		if (vhost_iotlb_add_range(newumem,
1748					  region->guest_phys_addr,
1749					  region->guest_phys_addr +
1750					  region->memory_size - 1,
1751					  region->userspace_addr,
1752					  VHOST_MAP_RW))
1753			goto err;
1754	}
1755
1756	if (!memory_access_ok(d, newumem, 0))
1757		goto err;
1758
1759	oldumem = d->umem;
1760	d->umem = newumem;
1761
1762	/* All memory accesses are done under some VQ mutex. */
1763	for (i = 0; i < d->nvqs; ++i) {
1764		mutex_lock(&d->vqs[i]->mutex);
1765		d->vqs[i]->umem = newumem;
1766		mutex_unlock(&d->vqs[i]->mutex);
1767	}
1768
1769	kvfree(newmem);
1770	vhost_iotlb_free(oldumem);
1771	return 0;
1772
1773err:
1774	vhost_iotlb_free(newumem);
1775	kvfree(newmem);
1776	return -EFAULT;
1777}
1778
1779static long vhost_vring_set_num(struct vhost_dev *d,
1780				struct vhost_virtqueue *vq,
1781				void __user *argp)
1782{
1783	struct vhost_vring_state s;
1784
1785	/* Resizing ring with an active backend?
1786	 * You don't want to do that. */
1787	if (vq->private_data)
1788		return -EBUSY;
1789
1790	if (copy_from_user(&s, argp, sizeof s))
1791		return -EFAULT;
1792
1793	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
1794		return -EINVAL;
1795	vq->num = s.num;
1796
1797	return 0;
1798}
1799
1800static long vhost_vring_set_addr(struct vhost_dev *d,
1801				 struct vhost_virtqueue *vq,
1802				 void __user *argp)
1803{
1804	struct vhost_vring_addr a;
1805
1806	if (copy_from_user(&a, argp, sizeof a))
1807		return -EFAULT;
1808	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
1809		return -EOPNOTSUPP;
1810
1811	/* For 32bit, verify that the top 32bits of the user
1812	   data are set to zero. */
1813	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1814	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1815	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
1816		return -EFAULT;
1817
1818	/* Make sure it's safe to cast pointers to vring types. */
1819	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1820	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1821	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1822	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1823	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
1824		return -EINVAL;
1825
1826	/* We only verify access here if backend is configured.
 1827	 * If it is not, we don't, as the size might not have been set up yet.
1828	 * We will verify when backend is configured. */
1829	if (vq->private_data) {
1830		if (!vq_access_ok(vq, vq->num,
1831			(void __user *)(unsigned long)a.desc_user_addr,
1832			(void __user *)(unsigned long)a.avail_user_addr,
1833			(void __user *)(unsigned long)a.used_user_addr))
1834			return -EINVAL;
1835
1836		/* Also validate log access for used ring if enabled. */
1837		if (!vq_log_used_access_ok(vq, vq->log_base,
1838				a.flags & (0x1 << VHOST_VRING_F_LOG),
1839				a.log_guest_addr))
1840			return -EINVAL;
1841	}
1842
1843	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1844	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1845	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1846	vq->log_addr = a.log_guest_addr;
1847	vq->used = (void __user *)(unsigned long)a.used_user_addr;
1848
1849	return 0;
1850}
1851
1852static long vhost_vring_set_num_addr(struct vhost_dev *d,
1853				     struct vhost_virtqueue *vq,
1854				     unsigned int ioctl,
1855				     void __user *argp)
1856{
1857	long r;
1858
1859	mutex_lock(&vq->mutex);
1860
1861	switch (ioctl) {
1862	case VHOST_SET_VRING_NUM:
1863		r = vhost_vring_set_num(d, vq, argp);
1864		break;
1865	case VHOST_SET_VRING_ADDR:
1866		r = vhost_vring_set_addr(d, vq, argp);
1867		break;
1868	default:
1869		BUG();
1870	}
1871
1872	mutex_unlock(&vq->mutex);
1873
1874	return r;
1875}
1876long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1877{
1878	struct file *eventfp, *filep = NULL;
1879	bool pollstart = false, pollstop = false;
1880	struct eventfd_ctx *ctx = NULL;
1881	struct vhost_virtqueue *vq;
1882	struct vhost_vring_state s;
1883	struct vhost_vring_file f;
1884	u32 idx;
1885	long r;
1886
1887	r = vhost_get_vq_from_user(d, argp, &vq, &idx);
1888	if (r < 0)
1889		return r;
1890
1891	if (ioctl == VHOST_SET_VRING_NUM ||
1892	    ioctl == VHOST_SET_VRING_ADDR) {
1893		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
1894	}
1895
1896	mutex_lock(&vq->mutex);
1897
1898	switch (ioctl) {
1899	case VHOST_SET_VRING_BASE:
1900		/* Moving base with an active backend?
1901		 * You don't want to do that. */
1902		if (vq->private_data) {
1903			r = -EBUSY;
1904			break;
1905		}
1906		if (copy_from_user(&s, argp, sizeof s)) {
1907			r = -EFAULT;
1908			break;
1909		}
1910		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
1911			vq->last_avail_idx = s.num & 0xffff;
1912			vq->last_used_idx = (s.num >> 16) & 0xffff;
1913		} else {
1914			if (s.num > 0xffff) {
1915				r = -EINVAL;
1916				break;
1917			}
1918			vq->last_avail_idx = s.num;
1919		}
1920		/* Forget the cached index value. */
1921		vq->avail_idx = vq->last_avail_idx;
1922		break;
1923	case VHOST_GET_VRING_BASE:
1924		s.index = idx;
1925		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
1926			s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
1927		else
1928			s.num = vq->last_avail_idx;
1929		if (copy_to_user(argp, &s, sizeof s))
1930			r = -EFAULT;
1931		break;
1932	case VHOST_SET_VRING_KICK:
1933		if (copy_from_user(&f, argp, sizeof f)) {
1934			r = -EFAULT;
1935			break;
1936		}
1937		eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
1938		if (IS_ERR(eventfp)) {
1939			r = PTR_ERR(eventfp);
1940			break;
1941		}
1942		if (eventfp != vq->kick) {
1943			pollstop = (filep = vq->kick) != NULL;
1944			pollstart = (vq->kick = eventfp) != NULL;
1945		} else
1946			filep = eventfp;
1947		break;
1948	case VHOST_SET_VRING_CALL:
1949		if (copy_from_user(&f, argp, sizeof f)) {
1950			r = -EFAULT;
1951			break;
1952		}
1953		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
1954		if (IS_ERR(ctx)) {
1955			r = PTR_ERR(ctx);
1956			break;
1957		}
1958
1959		swap(ctx, vq->call_ctx.ctx);
1960		break;
1961	case VHOST_SET_VRING_ERR:
1962		if (copy_from_user(&f, argp, sizeof f)) {
1963			r = -EFAULT;
1964			break;
1965		}
1966		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
1967		if (IS_ERR(ctx)) {
1968			r = PTR_ERR(ctx);
1969			break;
1970		}
1971		swap(ctx, vq->error_ctx);
1972		break;
1973	case VHOST_SET_VRING_ENDIAN:
1974		r = vhost_set_vring_endian(vq, argp);
1975		break;
1976	case VHOST_GET_VRING_ENDIAN:
1977		r = vhost_get_vring_endian(vq, idx, argp);
1978		break;
1979	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1980		if (copy_from_user(&s, argp, sizeof(s))) {
1981			r = -EFAULT;
1982			break;
1983		}
1984		vq->busyloop_timeout = s.num;
1985		break;
1986	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1987		s.index = idx;
1988		s.num = vq->busyloop_timeout;
1989		if (copy_to_user(argp, &s, sizeof(s)))
1990			r = -EFAULT;
1991		break;
1992	default:
1993		r = -ENOIOCTLCMD;
1994	}
1995
1996	if (pollstop && vq->handle_kick)
1997		vhost_poll_stop(&vq->poll);
1998
1999	if (!IS_ERR_OR_NULL(ctx))
2000		eventfd_ctx_put(ctx);
2001	if (filep)
2002		fput(filep);
2003
2004	if (pollstart && vq->handle_kick)
2005		r = vhost_poll_start(&vq->poll, vq->kick);
2006
2007	mutex_unlock(&vq->mutex);
2008
2009	if (pollstop && vq->handle_kick)
2010		vhost_dev_flush(vq->poll.dev);
2011	return r;
2012}
2013EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
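/*
 * Note: for VHOST_SET_VRING_KICK the old poll is stopped before the old
 * file is released and the new poll is started afterwards; the final
 * vhost_dev_flush() ensures any kick work already queued has finished.
 */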
2014
2015int vhost_init_device_iotlb(struct vhost_dev *d)
2016{
2017	struct vhost_iotlb *niotlb, *oiotlb;
2018	int i;
2019
2020	niotlb = iotlb_alloc();
2021	if (!niotlb)
2022		return -ENOMEM;
2023
2024	oiotlb = d->iotlb;
2025	d->iotlb = niotlb;
2026
2027	for (i = 0; i < d->nvqs; ++i) {
2028		struct vhost_virtqueue *vq = d->vqs[i];
2029
2030		mutex_lock(&vq->mutex);
2031		vq->iotlb = niotlb;
2032		__vhost_vq_meta_reset(vq);
2033		mutex_unlock(&vq->mutex);
2034	}
2035
2036	vhost_iotlb_free(oiotlb);
2037
2038	return 0;
2039}
2040EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
2041
2042/* Caller must have device mutex */
2043long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
2044{
2045	struct eventfd_ctx *ctx;
2046	u64 p;
2047	long r;
2048	int i, fd;
2049
2050	/* If you are not the owner, you can become one */
2051	if (ioctl == VHOST_SET_OWNER) {
2052		r = vhost_dev_set_owner(d);
2053		goto done;
2054	}
2055
2056	/* You must be the owner to do anything else */
2057	r = vhost_dev_check_owner(d);
2058	if (r)
2059		goto done;
2060
2061	switch (ioctl) {
2062	case VHOST_SET_MEM_TABLE:
2063		r = vhost_set_memory(d, argp);
2064		break;
2065	case VHOST_SET_LOG_BASE:
2066		if (copy_from_user(&p, argp, sizeof p)) {
2067			r = -EFAULT;
2068			break;
2069		}
2070		if ((u64)(unsigned long)p != p) {
2071			r = -EFAULT;
2072			break;
2073		}
2074		for (i = 0; i < d->nvqs; ++i) {
2075			struct vhost_virtqueue *vq;
2076			void __user *base = (void __user *)(unsigned long)p;
2077			vq = d->vqs[i];
2078			mutex_lock(&vq->mutex);
2079			/* If ring is inactive, will check when it's enabled. */
2080			if (vq->private_data && !vq_log_access_ok(vq, base))
2081				r = -EFAULT;
2082			else
2083				vq->log_base = base;
2084			mutex_unlock(&vq->mutex);
2085		}
2086		break;
2087	case VHOST_SET_LOG_FD:
2088		r = get_user(fd, (int __user *)argp);
2089		if (r < 0)
2090			break;
2091		ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
2092		if (IS_ERR(ctx)) {
2093			r = PTR_ERR(ctx);
2094			break;
2095		}
2096		swap(ctx, d->log_ctx);
2097		for (i = 0; i < d->nvqs; ++i) {
2098			mutex_lock(&d->vqs[i]->mutex);
2099			d->vqs[i]->log_ctx = d->log_ctx;
2100			mutex_unlock(&d->vqs[i]->mutex);
2101		}
2102		if (ctx)
2103			eventfd_ctx_put(ctx);
2104		break;
2105	default:
2106		r = -ENOIOCTLCMD;
2107		break;
2108	}
2109done:
2110	return r;
2111}
2112EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
2113
2114/* TODO: This is really inefficient.  We need something like get_user()
2115 * (instruction directly accesses the data, with an exception table entry
2116 * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst.
2117 */
2118static int set_bit_to_user(int nr, void __user *addr)
2119{
2120	unsigned long log = (unsigned long)addr;
2121	struct page *page;
2122	void *base;
2123	int bit = nr + (log % PAGE_SIZE) * 8;
2124	int r;
2125
2126	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
2127	if (r < 0)
2128		return r;
2129	BUG_ON(r != 1);
2130	base = kmap_atomic(page);
2131	set_bit(bit, base);
2132	kunmap_atomic(base);
2133	unpin_user_pages_dirty_lock(&page, 1, true);
2134	return 0;
2135}
2136
2137static int log_write(void __user *log_base,
2138		     u64 write_address, u64 write_length)
2139{
2140	u64 write_page = write_address / VHOST_PAGE_SIZE;
2141	int r;
2142
2143	if (!write_length)
2144		return 0;
2145	write_length += write_address % VHOST_PAGE_SIZE;
2146	for (;;) {
2147		u64 base = (u64)(unsigned long)log_base;
2148		u64 log = base + write_page / 8;
2149		int bit = write_page % 8;
2150		if ((u64)(unsigned long)log != log)
2151			return -EFAULT;
2152		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
2153		if (r < 0)
2154			return r;
2155		if (write_length <= VHOST_PAGE_SIZE)
2156			break;
2157		write_length -= VHOST_PAGE_SIZE;
2158		write_page += 1;
2159	}
2160	return r;
2161}
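/*
 * Illustrative sketch, not part of the original file: the dirty log that
 * log_write() fills is a plain bitmap in userspace memory with one bit per
 * VHOST_PAGE_SIZE page of guest memory, so a write at guest address gpa
 * dirties byte (gpa / VHOST_PAGE_SIZE) / 8, bit (gpa / VHOST_PAGE_SIZE) % 8,
 * exactly the log/bit values computed in the loop above.
 */
#if 0
static void example_log_bit_location(u64 gpa, u64 *byte_off, unsigned int *bit)
{
	u64 page = gpa / VHOST_PAGE_SIZE;	/* dirty page number */

	*byte_off = page / 8;			/* offset into the log bitmap */
	*bit = page % 8;			/* bit within that byte */
}
#endif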
2162
2163static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
2164{
2165	struct vhost_iotlb *umem = vq->umem;
2166	struct vhost_iotlb_map *u;
2167	u64 start, end, l, min;
2168	int r;
2169	bool hit = false;
2170
2171	while (len) {
2172		min = len;
2173		/* More than one GPA can be mapped into a single HVA, so
2174		 * iterate all possible umems here to be safe.
2175		 */
2176		list_for_each_entry(u, &umem->list, link) {
2177			if (u->addr > hva - 1 + len ||
2178			    u->addr - 1 + u->size < hva)
2179				continue;
2180			start = max(u->addr, hva);
2181			end = min(u->addr - 1 + u->size, hva - 1 + len);
2182			l = end - start + 1;
2183			r = log_write(vq->log_base,
2184				      u->start + start - u->addr,
2185				      l);
2186			if (r < 0)
2187				return r;
2188			hit = true;
2189			min = min(l, min);
2190		}
2191
2192		if (!hit)
2193			return -EFAULT;
2194
2195		len -= min;
2196		hva += min;
2197	}
2198
2199	return 0;
2200}
2201
2202static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
2203{
2204	struct iovec *iov = vq->log_iov;
2205	int i, ret;
2206
2207	if (!vq->iotlb)
2208		return log_write(vq->log_base, vq->log_addr + used_offset, len);
2209
2210	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
2211			     len, iov, 64, VHOST_ACCESS_WO);
2212	if (ret < 0)
2213		return ret;
2214
2215	for (i = 0; i < ret; i++) {
2216		ret = log_write_hva(vq,	(uintptr_t)iov[i].iov_base,
2217				    iov[i].iov_len);
2218		if (ret)
2219			return ret;
2220	}
2221
2222	return 0;
2223}
2224
2225int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2226		    unsigned int log_num, u64 len, struct iovec *iov, int count)
2227{
2228	int i, r;
2229
2230	/* Make sure data written is seen before log. */
2231	smp_wmb();
2232
2233	if (vq->iotlb) {
2234		for (i = 0; i < count; i++) {
2235			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2236					  iov[i].iov_len);
2237			if (r < 0)
2238				return r;
2239		}
2240		return 0;
2241	}
2242
2243	for (i = 0; i < log_num; ++i) {
2244		u64 l = min(log[i].len, len);
2245		r = log_write(vq->log_base, log[i].addr, l);
2246		if (r < 0)
2247			return r;
2248		len -= l;
2249		if (!len) {
2250			if (vq->log_ctx)
2251				eventfd_signal(vq->log_ctx);
2252			return 0;
2253		}
2254	}
2255	/* Length written exceeds what we have stored. This is a bug. */
2256	BUG();
2257	return 0;
2258}
2259EXPORT_SYMBOL_GPL(vhost_log_write);
2260
2261static int vhost_update_used_flags(struct vhost_virtqueue *vq)
2262{
2263	void __user *used;
2264	if (vhost_put_used_flags(vq))
2265		return -EFAULT;
2266	if (unlikely(vq->log_used)) {
2267		/* Make sure the flag is seen before log. */
2268		smp_wmb();
2269		/* Log used flag write. */
2270		used = &vq->used->flags;
2271		log_used(vq, (used - (void __user *)vq->used),
2272			 sizeof vq->used->flags);
2273		if (vq->log_ctx)
2274			eventfd_signal(vq->log_ctx);
2275	}
2276	return 0;
2277}
2278
2279static int vhost_update_avail_event(struct vhost_virtqueue *vq)
2280{
2281	if (vhost_put_avail_event(vq))
2282		return -EFAULT;
2283	if (unlikely(vq->log_used)) {
2284		void __user *used;
2285		/* Make sure the event is seen before log. */
2286		smp_wmb();
2287		/* Log avail event write */
2288		used = vhost_avail_event(vq);
2289		log_used(vq, (used - (void __user *)vq->used),
2290			 sizeof *vhost_avail_event(vq));
2291		if (vq->log_ctx)
2292			eventfd_signal(vq->log_ctx);
2293	}
2294	return 0;
2295}
2296
2297int vhost_vq_init_access(struct vhost_virtqueue *vq)
2298{
2299	__virtio16 last_used_idx;
2300	int r;
2301	bool is_le = vq->is_le;
2302
2303	if (!vq->private_data)
2304		return 0;
2305
2306	vhost_init_is_le(vq);
2307
2308	r = vhost_update_used_flags(vq);
2309	if (r)
2310		goto err;
2311	vq->signalled_used_valid = false;
2312	if (!vq->iotlb &&
2313	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
2314		r = -EFAULT;
2315		goto err;
2316	}
2317	r = vhost_get_used_idx(vq, &last_used_idx);
2318	if (r) {
2319		vq_err(vq, "Can't access used idx at %p\n",
2320		       &vq->used->idx);
2321		goto err;
2322	}
2323	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
2324	return 0;
2325
2326err:
2327	vq->is_le = is_le;
2328	return r;
2329}
2330EXPORT_SYMBOL_GPL(vhost_vq_init_access);
2331
2332static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
2333			  struct iovec iov[], int iov_size, int access)
2334{
2335	const struct vhost_iotlb_map *map;
2336	struct vhost_dev *dev = vq->dev;
2337	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
2338	struct iovec *_iov;
2339	u64 s = 0, last = addr + len - 1;
2340	int ret = 0;
2341
2342	while ((u64)len > s) {
2343		u64 size;
2344		if (unlikely(ret >= iov_size)) {
2345			ret = -ENOBUFS;
2346			break;
2347		}
2348
2349		map = vhost_iotlb_itree_first(umem, addr, last);
2350		if (map == NULL || map->start > addr) {
2351			if (umem != dev->iotlb) {
2352				ret = -EFAULT;
2353				break;
2354			}
2355			ret = -EAGAIN;
2356			break;
2357		} else if (!(map->perm & access)) {
2358			ret = -EPERM;
2359			break;
2360		}
2361
2362		_iov = iov + ret;
2363		size = map->size - addr + map->start;
2364		_iov->iov_len = min((u64)len - s, size);
2365		_iov->iov_base = (void __user *)(unsigned long)
2366				 (map->addr + addr - map->start);
2367		s += size;
2368		addr += size;
2369		++ret;
2370	}
2371
2372	if (ret == -EAGAIN)
2373		vhost_iotlb_miss(vq, addr, access);
2374	return ret;
2375}
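/*
 * Illustrative sketch, not part of the original file: how translate_desc()
 * sizes each iovec chunk. For a guest address gpa inside a mapping that
 * covers [map->start, map->start + map->size), the bytes usable from that
 * mapping are map->size - (gpa - map->start) and the matching userspace
 * address is map->addr + (gpa - map->start); any remainder spills into the
 * next iovec on the following loop iteration.
 */
#if 0
static void example_translate_chunk(const struct vhost_iotlb_map *map,
				    u64 gpa, u64 remaining, struct iovec *iov)
{
	u64 chunk = map->size - (gpa - map->start);	/* bytes left in this map */

	iov->iov_len = min(remaining, chunk);
	iov->iov_base = (void __user *)(unsigned long)
			(map->addr + gpa - map->start);
}
#endif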
2376
2377/* Each buffer in the virtqueues is actually a chain of descriptors.  This
2378 * function returns the next descriptor in the chain,
2379 * or -1U if we're at the end. */
2380static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
2381{
2382	unsigned int next;
2383
2384	/* If this descriptor says it doesn't chain, we're done. */
2385	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
2386		return -1U;
2387
2388	/* Check they're not leading us off end of descriptors. */
2389	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
2390	return next;
2391}
2392
2393static int get_indirect(struct vhost_virtqueue *vq,
2394			struct iovec iov[], unsigned int iov_size,
2395			unsigned int *out_num, unsigned int *in_num,
2396			struct vhost_log *log, unsigned int *log_num,
2397			struct vring_desc *indirect)
2398{
2399	struct vring_desc desc;
2400	unsigned int i = 0, count, found = 0;
2401	u32 len = vhost32_to_cpu(vq, indirect->len);
2402	struct iov_iter from;
2403	int ret, access;
2404
2405	/* Sanity check */
2406	if (unlikely(len % sizeof desc)) {
2407		vq_err(vq, "Invalid length in indirect descriptor: "
2408		       "len 0x%llx not multiple of 0x%zx\n",
2409		       (unsigned long long)len,
2410		       sizeof desc);
2411		return -EINVAL;
2412	}
2413
2414	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
2415			     UIO_MAXIOV, VHOST_ACCESS_RO);
2416	if (unlikely(ret < 0)) {
2417		if (ret != -EAGAIN)
2418			vq_err(vq, "Translation failure %d in indirect.\n", ret);
2419		return ret;
2420	}
2421	iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len);
2422	count = len / sizeof desc;
2423	/* Buffers are chained via a 16 bit next field, so
2424	 * we can have at most 2^16 of these. */
2425	if (unlikely(count > USHRT_MAX + 1)) {
2426		vq_err(vq, "Indirect buffer length too big: %d\n",
2427		       indirect->len);
2428		return -E2BIG;
2429	}
2430
2431	do {
2432		unsigned iov_count = *in_num + *out_num;
2433		if (unlikely(++found > count)) {
2434			vq_err(vq, "Loop detected: last one at %u "
2435			       "indirect size %u\n",
2436			       i, count);
2437			return -EINVAL;
2438		}
2439		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
2440			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
2441			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2442			return -EINVAL;
2443		}
2444		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
2445			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
2446			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2447			return -EINVAL;
2448		}
2449
2450		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2451			access = VHOST_ACCESS_WO;
2452		else
2453			access = VHOST_ACCESS_RO;
2454
2455		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2456				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2457				     iov_size - iov_count, access);
2458		if (unlikely(ret < 0)) {
2459			if (ret != -EAGAIN)
2460				vq_err(vq, "Translation failure %d indirect idx %d\n",
2461					ret, i);
2462			return ret;
2463		}
2464		/* If this is an input descriptor, increment that count. */
2465		if (access == VHOST_ACCESS_WO) {
2466			*in_num += ret;
2467			if (unlikely(log && ret)) {
2468				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2469				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2470				++*log_num;
2471			}
2472		} else {
2473			/* If it's an output descriptor, they're all supposed
2474			 * to come before any input descriptors. */
2475			if (unlikely(*in_num)) {
2476				vq_err(vq, "Indirect descriptor "
2477				       "has out after in: idx %d\n", i);
2478				return -EINVAL;
2479			}
2480			*out_num += ret;
2481		}
2482	} while ((i = next_desc(vq, &desc)) != -1);
2483	return 0;
2484}
2485
2486/* This looks in the virtqueue for the first available buffer, and converts
2487 * it to an iovec for convenient access.  Since descriptors consist of some
2488 * number of output then some number of input descriptors, it's actually two
2489 * iovecs, but we pack them into one and note how many of each there were.
2490 *
2491 * This function returns the descriptor number found, or vq->num (which is
2492 * never a valid descriptor number) if none was found.  A negative code is
2493 * returned on error. */
2494int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2495		      struct iovec iov[], unsigned int iov_size,
2496		      unsigned int *out_num, unsigned int *in_num,
2497		      struct vhost_log *log, unsigned int *log_num)
2498{
2499	struct vring_desc desc;
2500	unsigned int i, head, found = 0;
2501	u16 last_avail_idx;
2502	__virtio16 avail_idx;
2503	__virtio16 ring_head;
2504	int ret, access;
2505
2506	/* Check it isn't doing very strange things with descriptor numbers. */
2507	last_avail_idx = vq->last_avail_idx;
2508
2509	if (vq->avail_idx == vq->last_avail_idx) {
2510		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
2511			vq_err(vq, "Failed to access avail idx at %p\n",
2512				&vq->avail->idx);
2513			return -EFAULT;
2514		}
2515		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2516
2517		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2518			vq_err(vq, "Guest moved avail index from %u to %u",
2519				last_avail_idx, vq->avail_idx);
2520			return -EFAULT;
2521		}
2522
2523		/* If there's nothing new since last we looked, return
2524		 * invalid.
2525		 */
2526		if (vq->avail_idx == last_avail_idx)
2527			return vq->num;
2528
2529		/* Only get avail ring entries after they have been
2530		 * exposed by guest.
2531		 */
2532		smp_rmb();
2533	}
2534
2535	/* Grab the next descriptor number they're advertising, and increment
2536	 * the index we've seen. */
2537	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
2538		vq_err(vq, "Failed to read head: idx %d address %p\n",
2539		       last_avail_idx,
2540		       &vq->avail->ring[last_avail_idx % vq->num]);
2541		return -EFAULT;
2542	}
2543
2544	head = vhost16_to_cpu(vq, ring_head);
2545
2546	/* If their number is silly, that's an error. */
2547	if (unlikely(head >= vq->num)) {
2548		vq_err(vq, "Guest says index %u > %u is available",
2549		       head, vq->num);
2550		return -EINVAL;
2551	}
2552
2553	/* When we start there are none of either input nor output. */
2554	*out_num = *in_num = 0;
2555	if (unlikely(log))
2556		*log_num = 0;
2557
2558	i = head;
2559	do {
2560		unsigned iov_count = *in_num + *out_num;
2561		if (unlikely(i >= vq->num)) {
2562			vq_err(vq, "Desc index is %u > %u, head = %u",
2563			       i, vq->num, head);
2564			return -EINVAL;
2565		}
2566		if (unlikely(++found > vq->num)) {
2567			vq_err(vq, "Loop detected: last one at %u "
2568			       "vq size %u head %u\n",
2569			       i, vq->num, head);
2570			return -EINVAL;
2571		}
2572		ret = vhost_get_desc(vq, &desc, i);
2573		if (unlikely(ret)) {
2574			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2575			       i, vq->desc + i);
2576			return -EFAULT;
2577		}
2578		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
2579			ret = get_indirect(vq, iov, iov_size,
2580					   out_num, in_num,
2581					   log, log_num, &desc);
2582			if (unlikely(ret < 0)) {
2583				if (ret != -EAGAIN)
2584					vq_err(vq, "Failure detected "
2585						"in indirect descriptor at idx %d\n", i);
2586				return ret;
2587			}
2588			continue;
2589		}
2590
2591		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2592			access = VHOST_ACCESS_WO;
2593		else
2594			access = VHOST_ACCESS_RO;
2595		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2596				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2597				     iov_size - iov_count, access);
2598		if (unlikely(ret < 0)) {
2599			if (ret != -EAGAIN)
2600				vq_err(vq, "Translation failure %d descriptor idx %d\n",
2601					ret, i);
2602			return ret;
2603		}
2604		if (access == VHOST_ACCESS_WO) {
2605			/* If this is an input descriptor,
2606			 * increment that count. */
2607			*in_num += ret;
2608			if (unlikely(log && ret)) {
2609				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2610				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2611				++*log_num;
2612			}
2613		} else {
2614			/* If it's an output descriptor, they're all supposed
2615			 * to come before any input descriptors. */
2616			if (unlikely(*in_num)) {
2617				vq_err(vq, "Descriptor has out after in: "
2618				       "idx %d\n", i);
2619				return -EINVAL;
2620			}
2621			*out_num += ret;
2622		}
2623	} while ((i = next_desc(vq, &desc)) != -1);
2624
2625	/* On success, increment avail index. */
2626	vq->last_avail_idx++;
2627
2628	/* Assume notifications from guest are disabled at this point,
2629	 * if they aren't we would need to update avail_event index. */
2630	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2631	return head;
2632}
2633EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
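/*
 * Illustrative sketch, not part of the original file: the usual shape of a
 * backend's handle_kick work function built on vhost_get_vq_desc(). The
 * consume_buf() helper is hypothetical; real users (net, vsock, scsi) follow
 * broadly the same disable-notify / drain / re-enable-and-recheck pattern.
 */
#if 0
static void example_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq =
		container_of(work, struct vhost_virtqueue, poll.work);
	unsigned int out, in;
	int head;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (unlikely(head < 0))
			break;			/* translation or access error */
		if (head == vq->num) {
			/* Ring looked empty: re-enable notify and re-check. */
			if (unlikely(vhost_enable_notify(vq->dev, vq))) {
				vhost_disable_notify(vq->dev, vq);
				continue;
			}
			break;
		}
		consume_buf(vq, out, in);	/* hypothetical backend work */
		vhost_add_used_and_signal(vq->dev, vq, head, 0);
	}

	mutex_unlock(&vq->mutex);
}
#endif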
2634
2635/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2636void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2637{
2638	vq->last_avail_idx -= n;
2639}
2640EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2641
2642/* After we've used one of their buffers, we tell them about it.  We'll then
2643 * want to notify the guest, using eventfd. */
2644int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2645{
2646	struct vring_used_elem heads = {
2647		cpu_to_vhost32(vq, head),
2648		cpu_to_vhost32(vq, len)
2649	};
2650
2651	return vhost_add_used_n(vq, &heads, 1);
2652}
2653EXPORT_SYMBOL_GPL(vhost_add_used);
2654
2655static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2656			    struct vring_used_elem *heads,
2657			    unsigned count)
2658{
2659	vring_used_elem_t __user *used;
2660	u16 old, new;
2661	int start;
2662
2663	start = vq->last_used_idx & (vq->num - 1);
2664	used = vq->used->ring + start;
2665	if (vhost_put_used(vq, heads, start, count)) {
2666		vq_err(vq, "Failed to write used");
2667		return -EFAULT;
2668	}
2669	if (unlikely(vq->log_used)) {
2670		/* Make sure data is seen before log. */
2671		smp_wmb();
2672		/* Log used ring entry write. */
2673		log_used(vq, ((void __user *)used - (void __user *)vq->used),
2674			 count * sizeof *used);
2675	}
2676	old = vq->last_used_idx;
2677	new = (vq->last_used_idx += count);
2678	/* If the driver never bothers to signal in a very long while,
2679	 * used index might wrap around. If that happens, invalidate
2680	 * signalled_used index we stored. TODO: make sure driver
2681	 * signals at least once in 2^16 and remove this. */
2682	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2683		vq->signalled_used_valid = false;
2684	return 0;
2685}
2686
2687/* After we've used one of their buffers, we tell them about it.  We'll then
2688 * want to notify the guest, using eventfd. */
2689int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2690		     unsigned count)
2691{
2692	int start, n, r;
2693
2694	start = vq->last_used_idx & (vq->num - 1);
2695	n = vq->num - start;
2696	if (n < count) {
2697		r = __vhost_add_used_n(vq, heads, n);
2698		if (r < 0)
2699			return r;
2700		heads += n;
2701		count -= n;
2702	}
2703	r = __vhost_add_used_n(vq, heads, count);
2704
2705	/* Make sure buffer is written before we update index. */
2706	smp_wmb();
2707	if (vhost_put_used_idx(vq)) {
2708		vq_err(vq, "Failed to increment used idx");
2709		return -EFAULT;
2710	}
2711	if (unlikely(vq->log_used)) {
2712		/* Make sure used idx is seen before log. */
2713		smp_wmb();
2714		/* Log used index update. */
2715		log_used(vq, offsetof(struct vring_used, idx),
2716			 sizeof vq->used->idx);
2717		if (vq->log_ctx)
2718			eventfd_signal(vq->log_ctx);
2719	}
2720	return r;
2721}
2722EXPORT_SYMBOL_GPL(vhost_add_used_n);
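/*
 * Illustrative worked example, not part of the original file: how
 * vhost_add_used_n() splits a batch at the end of the used ring. With
 * vq->num = 256, last_used_idx = 254 and count = 5, the first
 * __vhost_add_used_n() fills slots 254 and 255, the second fills slots
 * 0..2, and only then is used->idx advanced by 5.
 */
#if 0
static void example_used_ring_wrap(void)
{
	u16 num = 256, last_used_idx = 254, count = 5;
	u16 start = last_used_idx & (num - 1);	/* slot 254 */
	u16 before_wrap = num - start;		/* 2 entries: slots 254, 255 */
	u16 after_wrap = count - before_wrap;	/* 3 entries: slots 0, 1, 2 */
}
#endif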
2723
2724static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2725{
2726	__u16 old, new;
2727	__virtio16 event;
2728	bool v;
2729	/* Flush out used index updates. This is paired
2730	 * with the barrier that the Guest executes when enabling
2731	 * interrupts. */
2732	smp_mb();
2733
2734	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2735	    unlikely(vq->avail_idx == vq->last_avail_idx))
2736		return true;
2737
2738	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2739		__virtio16 flags;
2740		if (vhost_get_avail_flags(vq, &flags)) {
2741			vq_err(vq, "Failed to get flags");
2742			return true;
2743		}
2744		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2745	}
2746	old = vq->signalled_used;
2747	v = vq->signalled_used_valid;
2748	new = vq->signalled_used = vq->last_used_idx;
2749	vq->signalled_used_valid = true;
2750
2751	if (unlikely(!v))
2752		return true;
2753
2754	if (vhost_get_used_event(vq, &event)) {
2755		vq_err(vq, "Failed to get used event idx");
2756		return true;
2757	}
2758	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
2759}
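/*
 * For reference (reproduced from include/uapi/linux/virtio_ring.h, not part
 * of this file): the EVENT_IDX branch above ultimately reduces to this
 * helper. With old = 10, new = 12 and event_idx = 11, (12 - 11 - 1) = 0 is
 * less than (12 - 10) = 2, so the guest gets signalled; once event_idx
 * reaches 12 the test fails and the interrupt is suppressed.
 */
#if 0
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	/* Signal iff new_idx has moved past event_idx since old. */
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
#endif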
2760
2761/* This actually signals the guest, using eventfd. */
2762void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2763{
2764	/* Signal the Guest to tell them we used something up. */
2765	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
2766		eventfd_signal(vq->call_ctx.ctx);
2767}
2768EXPORT_SYMBOL_GPL(vhost_signal);
2769
2770/* And here's the combo meal deal.  Supersize me! */
2771void vhost_add_used_and_signal(struct vhost_dev *dev,
2772			       struct vhost_virtqueue *vq,
2773			       unsigned int head, int len)
2774{
2775	vhost_add_used(vq, head, len);
2776	vhost_signal(dev, vq);
2777}
2778EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2779
2780/* multi-buffer version of vhost_add_used_and_signal */
2781void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2782				 struct vhost_virtqueue *vq,
2783				 struct vring_used_elem *heads, unsigned count)
2784{
2785	vhost_add_used_n(vq, heads, count);
2786	vhost_signal(dev, vq);
2787}
2788EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2789
2790/* return true if we're sure that available ring is empty */
2791bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2792{
2793	__virtio16 avail_idx;
2794	int r;
2795
2796	if (vq->avail_idx != vq->last_avail_idx)
2797		return false;
2798
2799	r = vhost_get_avail_idx(vq, &avail_idx);
2800	if (unlikely(r))
2801		return false;
2802	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2803
2804	return vq->avail_idx == vq->last_avail_idx;
2805}
2806EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2807
2808/* OK, now we need to know about added descriptors. */
2809bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2810{
2811	__virtio16 avail_idx;
2812	int r;
2813
2814	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2815		return false;
2816	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2817	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2818		r = vhost_update_used_flags(vq);
2819		if (r) {
2820			vq_err(vq, "Failed to enable notification at %p: %d\n",
2821			       &vq->used->flags, r);
2822			return false;
2823		}
2824	} else {
2825		r = vhost_update_avail_event(vq);
2826		if (r) {
2827			vq_err(vq, "Failed to update avail event index at %p: %d\n",
2828			       vhost_avail_event(vq), r);
2829			return false;
2830		}
2831	}
2832	/* They could have slipped one in as we were doing that: make
2833	 * sure it's written, then check again. */
2834	smp_mb();
2835	r = vhost_get_avail_idx(vq, &avail_idx);
2836	if (r) {
2837		vq_err(vq, "Failed to check avail idx at %p: %d\n",
2838		       &vq->avail->idx, r);
2839		return false;
2840	}
2841	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2842
2843	return vq->avail_idx != vq->last_avail_idx;
2844}
2845EXPORT_SYMBOL_GPL(vhost_enable_notify);
2846
2847/* We don't need to be notified again. */
2848void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2849{
2850	int r;
2851
2852	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2853		return;
2854	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2855	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2856		r = vhost_update_used_flags(vq);
2857		if (r)
2858			vq_err(vq, "Failed to disable notification at %p: %d\n",
2859			       &vq->used->flags, r);
2860	}
2861}
2862EXPORT_SYMBOL_GPL(vhost_disable_notify);
2863
2864/* Create a new message. */
2865struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2866{
2867	/* Make sure all padding within the structure is initialized. */
2868	struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
2869	if (!node)
2870		return NULL;
2871
2872	node->vq = vq;
2873	node->msg.type = type;
2874	return node;
2875}
2876EXPORT_SYMBOL_GPL(vhost_new_msg);
2877
2878void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2879		       struct vhost_msg_node *node)
2880{
2881	spin_lock(&dev->iotlb_lock);
2882	list_add_tail(&node->node, head);
2883	spin_unlock(&dev->iotlb_lock);
2884
2885	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
2886}
2887EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2888
2889struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2890					 struct list_head *head)
2891{
2892	struct vhost_msg_node *node = NULL;
2893
2894	spin_lock(&dev->iotlb_lock);
2895	if (!list_empty(head)) {
2896		node = list_first_entry(head, struct vhost_msg_node,
2897					node);
2898		list_del(&node->node);
2899	}
2900	spin_unlock(&dev->iotlb_lock);
2901
2902	return node;
2903}
2904EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
2905
2906void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
2907{
2908	struct vhost_virtqueue *vq;
2909	int i;
2910
2911	mutex_lock(&dev->mutex);
2912	for (i = 0; i < dev->nvqs; ++i) {
2913		vq = dev->vqs[i];
2914		mutex_lock(&vq->mutex);
2915		vq->acked_backend_features = features;
2916		mutex_unlock(&vq->mutex);
2917	}
2918	mutex_unlock(&dev->mutex);
2919}
2920EXPORT_SYMBOL_GPL(vhost_set_backend_features);
2921
2922static int __init vhost_init(void)
2923{
2924	return 0;
2925}
2926
2927static void __exit vhost_exit(void)
2928{
2929}
2930
2931module_init(vhost_init);
2932module_exit(vhost_exit);
2933
2934MODULE_VERSION("0.0.1");
2935MODULE_LICENSE("GPL v2");
2936MODULE_AUTHOR("Michael S. Tsirkin");
2937MODULE_DESCRIPTION("Host kernel accelerator for virtio");
v4.10.11
 
   1/* Copyright (C) 2009 Red Hat, Inc.
   2 * Copyright (C) 2006 Rusty Russell IBM Corporation
   3 *
   4 * Author: Michael S. Tsirkin <mst@redhat.com>
   5 *
   6 * Inspiration, some code, and most witty comments come from
   7 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.
  10 *
  11 * Generic code for virtio server in host kernel.
  12 */
  13
  14#include <linux/eventfd.h>
  15#include <linux/vhost.h>
  16#include <linux/uio.h>
  17#include <linux/mm.h>
  18#include <linux/mmu_context.h>
  19#include <linux/miscdevice.h>
  20#include <linux/mutex.h>
  21#include <linux/poll.h>
  22#include <linux/file.h>
  23#include <linux/highmem.h>
  24#include <linux/slab.h>
  25#include <linux/vmalloc.h>
  26#include <linux/kthread.h>
  27#include <linux/cgroup.h>
  28#include <linux/module.h>
  29#include <linux/sort.h>
 
 
 
  30#include <linux/interval_tree_generic.h>
 
 
  31
  32#include "vhost.h"
  33
  34static ushort max_mem_regions = 64;
  35module_param(max_mem_regions, ushort, 0444);
  36MODULE_PARM_DESC(max_mem_regions,
  37	"Maximum number of memory regions in memory map. (default: 64)");
  38static int max_iotlb_entries = 2048;
  39module_param(max_iotlb_entries, int, 0444);
  40MODULE_PARM_DESC(max_iotlb_entries,
  41	"Maximum number of iotlb entries. (default: 2048)");
  42
  43enum {
  44	VHOST_MEMORY_F_LOG = 0x1,
  45};
  46
  47#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
  48#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
  49
  50INTERVAL_TREE_DEFINE(struct vhost_umem_node,
  51		     rb, __u64, __subtree_last,
  52		     START, LAST, static inline, vhost_umem_interval_tree);
  53
  54#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
  55static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
  56{
  57	vq->user_be = !virtio_legacy_is_little_endian();
  58}
  59
  60static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
  61{
  62	vq->user_be = true;
  63}
  64
  65static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
  66{
  67	vq->user_be = false;
  68}
  69
  70static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
  71{
  72	struct vhost_vring_state s;
  73
  74	if (vq->private_data)
  75		return -EBUSY;
  76
  77	if (copy_from_user(&s, argp, sizeof(s)))
  78		return -EFAULT;
  79
  80	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
  81	    s.num != VHOST_VRING_BIG_ENDIAN)
  82		return -EINVAL;
  83
  84	if (s.num == VHOST_VRING_BIG_ENDIAN)
  85		vhost_enable_cross_endian_big(vq);
  86	else
  87		vhost_enable_cross_endian_little(vq);
  88
  89	return 0;
  90}
  91
  92static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
  93				   int __user *argp)
  94{
  95	struct vhost_vring_state s = {
  96		.index = idx,
  97		.num = vq->user_be
  98	};
  99
 100	if (copy_to_user(argp, &s, sizeof(s)))
 101		return -EFAULT;
 102
 103	return 0;
 104}
 105
 106static void vhost_init_is_le(struct vhost_virtqueue *vq)
 107{
 108	/* Note for legacy virtio: user_be is initialized at reset time
 109	 * according to the host endianness. If userspace does not set an
 110	 * explicit endianness, the default behavior is native endian, as
 111	 * expected by legacy virtio.
 112	 */
 113	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
 114}
 115#else
 116static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
 117{
 118}
 119
 120static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
 121{
 122	return -ENOIOCTLCMD;
 123}
 124
 125static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 126				   int __user *argp)
 127{
 128	return -ENOIOCTLCMD;
 129}
 130
 131static void vhost_init_is_le(struct vhost_virtqueue *vq)
 132{
 133	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
 134		|| virtio_legacy_is_little_endian();
 135}
 136#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
 137
 138static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 139{
 140	vhost_init_is_le(vq);
 141}
 142
 143struct vhost_flush_struct {
 144	struct vhost_work work;
 145	struct completion wait_event;
 146};
 147
 148static void vhost_flush_work(struct vhost_work *work)
 149{
 150	struct vhost_flush_struct *s;
 151
 152	s = container_of(work, struct vhost_flush_struct, work);
 153	complete(&s->wait_event);
 154}
 155
 156static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 157			    poll_table *pt)
 158{
 159	struct vhost_poll *poll;
 160
 161	poll = container_of(pt, struct vhost_poll, table);
 162	poll->wqh = wqh;
 163	add_wait_queue(wqh, &poll->wait);
 164}
 165
 166static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 167			     void *key)
 168{
 169	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
 
 170
 171	if (!((unsigned long)key & poll->mask))
 172		return 0;
 173
 174	vhost_poll_queue(poll);
 
 
 
 
 175	return 0;
 176}
 177
 178void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 179{
 180	clear_bit(VHOST_WORK_QUEUED, &work->flags);
 181	work->fn = fn;
 182	init_waitqueue_head(&work->done);
 183}
 184EXPORT_SYMBOL_GPL(vhost_work_init);
 185
 186/* Init poll structure */
 187void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 188		     unsigned long mask, struct vhost_dev *dev)
 
 189{
 190	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 191	init_poll_funcptr(&poll->table, vhost_poll_func);
 192	poll->mask = mask;
 193	poll->dev = dev;
 194	poll->wqh = NULL;
 
 195
 196	vhost_work_init(&poll->work, fn);
 197}
 198EXPORT_SYMBOL_GPL(vhost_poll_init);
 199
 200/* Start polling a file. We add ourselves to file's wait queue. The caller must
 201 * keep a reference to a file until after vhost_poll_stop is called. */
 202int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 203{
 204	unsigned long mask;
 205	int ret = 0;
 206
 207	if (poll->wqh)
 208		return 0;
 209
 210	mask = file->f_op->poll(file, &poll->table);
 211	if (mask)
 212		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
 213	if (mask & POLLERR) {
 214		if (poll->wqh)
 215			remove_wait_queue(poll->wqh, &poll->wait);
 216		ret = -EINVAL;
 217	}
 218
 219	return ret;
 220}
 221EXPORT_SYMBOL_GPL(vhost_poll_start);
 222
 223/* Stop polling a file. After this function returns, it becomes safe to drop the
 224 * file reference. You must also flush afterwards. */
 225void vhost_poll_stop(struct vhost_poll *poll)
 226{
 227	if (poll->wqh) {
 228		remove_wait_queue(poll->wqh, &poll->wait);
 229		poll->wqh = NULL;
 230	}
 231}
 232EXPORT_SYMBOL_GPL(vhost_poll_stop);
 233
 234void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 235{
 236	struct vhost_flush_struct flush;
 237
 238	if (dev->worker) {
 239		init_completion(&flush.wait_event);
 240		vhost_work_init(&flush.work, vhost_flush_work);
 241
 242		vhost_work_queue(dev, &flush.work);
 243		wait_for_completion(&flush.wait_event);
 244	}
 245}
 246EXPORT_SYMBOL_GPL(vhost_work_flush);
 247
 248/* Flush any work that has been scheduled. When calling this, don't hold any
 249 * locks that are also used by the callback. */
 250void vhost_poll_flush(struct vhost_poll *poll)
 
 
 
 
 
 251{
 252	vhost_work_flush(poll->dev, &poll->work);
 
 
 
 
 
 
 253}
 254EXPORT_SYMBOL_GPL(vhost_poll_flush);
 255
 256void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 257{
 258	if (!dev->worker)
 259		return;
 260
 261	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
 262		/* We can only add the work to the list after we're
 263		 * sure it was not in the list.
 264		 * test_and_set_bit() implies a memory barrier.
 265		 */
 266		llist_add(&work->node, &dev->work_list);
 267		wake_up_process(dev->worker);
 
 268	}
 269}
 270EXPORT_SYMBOL_GPL(vhost_work_queue);
 271
 272/* A lockless hint for busy polling code to exit the loop */
 273bool vhost_has_work(struct vhost_dev *dev)
 274{
 275	return !llist_empty(&dev->work_list);
 
 
 
 
 
 
 
 
 
 276}
 277EXPORT_SYMBOL_GPL(vhost_has_work);
 278
 279void vhost_poll_queue(struct vhost_poll *poll)
 280{
 281	vhost_work_queue(poll->dev, &poll->work);
 282}
 283EXPORT_SYMBOL_GPL(vhost_poll_queue);
 284
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 285static void vhost_vq_reset(struct vhost_dev *dev,
 286			   struct vhost_virtqueue *vq)
 287{
 288	vq->num = 1;
 289	vq->desc = NULL;
 290	vq->avail = NULL;
 291	vq->used = NULL;
 292	vq->last_avail_idx = 0;
 293	vq->last_used_event = 0;
 294	vq->avail_idx = 0;
 295	vq->last_used_idx = 0;
 296	vq->signalled_used = 0;
 297	vq->signalled_used_valid = false;
 298	vq->used_flags = 0;
 299	vq->log_used = false;
 300	vq->log_addr = -1ull;
 301	vq->private_data = NULL;
 302	vq->acked_features = 0;
 
 303	vq->log_base = NULL;
 304	vq->error_ctx = NULL;
 305	vq->error = NULL;
 306	vq->kick = NULL;
 307	vq->call_ctx = NULL;
 308	vq->call = NULL;
 309	vq->log_ctx = NULL;
 
 310	vhost_reset_is_le(vq);
 311	vhost_disable_cross_endian(vq);
 312	vq->busyloop_timeout = 0;
 313	vq->umem = NULL;
 314	vq->iotlb = NULL;
 
 
 
 315}
 316
 317static int vhost_worker(void *data)
 318{
 319	struct vhost_dev *dev = data;
 320	struct vhost_work *work, *work_next;
 321	struct llist_node *node;
 322	mm_segment_t oldfs = get_fs();
 323
 324	set_fs(USER_DS);
 325	use_mm(dev->mm);
 326
 327	for (;;) {
 328		/* mb paired w/ kthread_stop */
 329		set_current_state(TASK_INTERRUPTIBLE);
 330
 331		if (kthread_should_stop()) {
 332			__set_current_state(TASK_RUNNING);
 333			break;
 334		}
 335
 336		node = llist_del_all(&dev->work_list);
 337		if (!node)
 338			schedule();
 339
 340		node = llist_reverse_order(node);
 341		/* make sure flag is seen after deletion */
 342		smp_wmb();
 343		llist_for_each_entry_safe(work, work_next, node, node) {
 344			clear_bit(VHOST_WORK_QUEUED, &work->flags);
 345			__set_current_state(TASK_RUNNING);
 346			work->fn(work);
 347			if (need_resched())
 348				schedule();
 349		}
 350	}
 351	unuse_mm(dev->mm);
 352	set_fs(oldfs);
 353	return 0;
 354}
 355
 356static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 357{
 358	kfree(vq->indirect);
 359	vq->indirect = NULL;
 360	kfree(vq->log);
 361	vq->log = NULL;
 362	kfree(vq->heads);
 363	vq->heads = NULL;
 364}
 365
 366/* Helper to allocate iovec buffers for all vqs. */
 367static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 368{
 369	struct vhost_virtqueue *vq;
 370	int i;
 371
 372	for (i = 0; i < dev->nvqs; ++i) {
 373		vq = dev->vqs[i];
 374		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
 375				       GFP_KERNEL);
 376		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
 377		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
 
 
 
 378		if (!vq->indirect || !vq->log || !vq->heads)
 379			goto err_nomem;
 380	}
 381	return 0;
 382
 383err_nomem:
 384	for (; i >= 0; --i)
 385		vhost_vq_free_iovecs(dev->vqs[i]);
 386	return -ENOMEM;
 387}
 388
 389static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 390{
 391	int i;
 392
 393	for (i = 0; i < dev->nvqs; ++i)
 394		vhost_vq_free_iovecs(dev->vqs[i]);
 395}
 396
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 397void vhost_dev_init(struct vhost_dev *dev,
 398		    struct vhost_virtqueue **vqs, int nvqs)
 
 
 
 
 399{
 400	struct vhost_virtqueue *vq;
 401	int i;
 402
 403	dev->vqs = vqs;
 404	dev->nvqs = nvqs;
 405	mutex_init(&dev->mutex);
 406	dev->log_ctx = NULL;
 407	dev->log_file = NULL;
 408	dev->umem = NULL;
 409	dev->iotlb = NULL;
 410	dev->mm = NULL;
 411	dev->worker = NULL;
 412	init_llist_head(&dev->work_list);
 
 
 
 413	init_waitqueue_head(&dev->wait);
 414	INIT_LIST_HEAD(&dev->read_list);
 415	INIT_LIST_HEAD(&dev->pending_list);
 416	spin_lock_init(&dev->iotlb_lock);
 417
 418
 419	for (i = 0; i < dev->nvqs; ++i) {
 420		vq = dev->vqs[i];
 421		vq->log = NULL;
 422		vq->indirect = NULL;
 423		vq->heads = NULL;
 424		vq->dev = dev;
 425		mutex_init(&vq->mutex);
 426		vhost_vq_reset(dev, vq);
 427		if (vq->handle_kick)
 428			vhost_poll_init(&vq->poll, vq->handle_kick,
 429					POLLIN, dev);
 430	}
 431}
 432EXPORT_SYMBOL_GPL(vhost_dev_init);
 433
 434/* Caller should have device mutex */
 435long vhost_dev_check_owner(struct vhost_dev *dev)
 436{
 437	/* Are you the owner? If not, I don't think you mean to do that */
 438	return dev->mm == current->mm ? 0 : -EPERM;
 439}
 440EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 441
 442struct vhost_attach_cgroups_struct {
 443	struct vhost_work work;
 444	struct task_struct *owner;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 445	int ret;
 446};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 447
 448static void vhost_attach_cgroups_work(struct vhost_work *work)
 
 
 449{
 450	struct vhost_attach_cgroups_struct *s;
 451
 452	s = container_of(work, struct vhost_attach_cgroups_struct, work);
 453	s->ret = cgroup_attach_task_all(s->owner, current);
 
 
 
 
 454}
 455
 456static int vhost_attach_cgroups(struct vhost_dev *dev)
 
 
 457{
 458	struct vhost_attach_cgroups_struct attach;
 
 459
 460	attach.owner = current;
 461	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 462	vhost_work_queue(dev, &attach.work);
 463	vhost_work_flush(dev, &attach.work);
 464	return attach.ret;
 
 
 
 
 
 
 
 
 465}
 466
 467/* Caller should have device mutex */
 468bool vhost_dev_has_owner(struct vhost_dev *dev)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 469{
 470	return dev->mm;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 471}
 472EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 473
 474/* Caller should have device mutex */
 475long vhost_dev_set_owner(struct vhost_dev *dev)
 476{
 477	struct task_struct *worker;
 478	int err;
 479
 480	/* Is there an owner already? */
 481	if (vhost_dev_has_owner(dev)) {
 482		err = -EBUSY;
 483		goto err_mm;
 484	}
 485
 486	/* No owner, become one */
 487	dev->mm = get_task_mm(current);
 488	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 489	if (IS_ERR(worker)) {
 490		err = PTR_ERR(worker);
 491		goto err_worker;
 492	}
 493
 494	dev->worker = worker;
 495	wake_up_process(worker);	/* avoid contributing to loadavg */
 
 496
 497	err = vhost_attach_cgroups(dev);
 498	if (err)
 499		goto err_cgroup;
 
 
 
 
 
 
 
 
 
 500
 501	err = vhost_dev_alloc_iovecs(dev);
 502	if (err)
 503		goto err_cgroup;
 504
 505	return 0;
 506err_cgroup:
 507	kthread_stop(worker);
 508	dev->worker = NULL;
 509err_worker:
 510	if (dev->mm)
 511		mmput(dev->mm);
 512	dev->mm = NULL;
 513err_mm:
 514	return err;
 515}
 516EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 517
 518static void *vhost_kvzalloc(unsigned long size)
 519{
 520	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 521
 522	if (!n)
 523		n = vzalloc(size);
 524	return n;
 525}
 526
 527struct vhost_umem *vhost_dev_reset_owner_prepare(void)
 528{
 529	return vhost_kvzalloc(sizeof(struct vhost_umem));
 530}
 531EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 532
 533/* Caller should have device mutex */
 534void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
 535{
 536	int i;
 537
 538	vhost_dev_cleanup(dev, true);
 539
 540	/* Restore memory to default empty mapping. */
 541	INIT_LIST_HEAD(&umem->umem_list);
 542	dev->umem = umem;
 543	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
 544	 * VQs aren't running.
 545	 */
 546	for (i = 0; i < dev->nvqs; ++i)
 547		dev->vqs[i]->umem = umem;
 548}
 549EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 550
 551void vhost_dev_stop(struct vhost_dev *dev)
 552{
 553	int i;
 554
 555	for (i = 0; i < dev->nvqs; ++i) {
 556		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 557			vhost_poll_stop(&dev->vqs[i]->poll);
 558			vhost_poll_flush(&dev->vqs[i]->poll);
 559		}
 560	}
 
 
 561}
 562EXPORT_SYMBOL_GPL(vhost_dev_stop);
 563
 564static void vhost_umem_free(struct vhost_umem *umem,
 565			    struct vhost_umem_node *node)
 566{
 567	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
 568	list_del(&node->link);
 569	kfree(node);
 570	umem->numem--;
 571}
 572
 573static void vhost_umem_clean(struct vhost_umem *umem)
 574{
 575	struct vhost_umem_node *node, *tmp;
 576
 577	if (!umem)
 578		return;
 579
 580	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
 581		vhost_umem_free(umem, node);
 582
 583	kvfree(umem);
 584}
 585
 586static void vhost_clear_msg(struct vhost_dev *dev)
 587{
 588	struct vhost_msg_node *node, *n;
 589
 590	spin_lock(&dev->iotlb_lock);
 591
 592	list_for_each_entry_safe(node, n, &dev->read_list, node) {
 593		list_del(&node->node);
 594		kfree(node);
 595	}
 596
 597	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
 598		list_del(&node->node);
 599		kfree(node);
 600	}
 601
 602	spin_unlock(&dev->iotlb_lock);
 603}
 
 604
 605/* Caller should have device mutex if and only if locked is set */
 606void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 607{
 608	int i;
 609
 610	for (i = 0; i < dev->nvqs; ++i) {
 611		if (dev->vqs[i]->error_ctx)
 612			eventfd_ctx_put(dev->vqs[i]->error_ctx);
 613		if (dev->vqs[i]->error)
 614			fput(dev->vqs[i]->error);
 615		if (dev->vqs[i]->kick)
 616			fput(dev->vqs[i]->kick);
 617		if (dev->vqs[i]->call_ctx)
 618			eventfd_ctx_put(dev->vqs[i]->call_ctx);
 619		if (dev->vqs[i]->call)
 620			fput(dev->vqs[i]->call);
 621		vhost_vq_reset(dev, dev->vqs[i]);
 622	}
 623	vhost_dev_free_iovecs(dev);
 624	if (dev->log_ctx)
 625		eventfd_ctx_put(dev->log_ctx);
 626	dev->log_ctx = NULL;
 627	if (dev->log_file)
 628		fput(dev->log_file);
 629	dev->log_file = NULL;
 630	/* No one will access memory at this point */
 631	vhost_umem_clean(dev->umem);
 632	dev->umem = NULL;
 633	vhost_umem_clean(dev->iotlb);
 634	dev->iotlb = NULL;
 635	vhost_clear_msg(dev);
 636	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
 637	WARN_ON(!llist_empty(&dev->work_list));
 638	if (dev->worker) {
 639		kthread_stop(dev->worker);
 640		dev->worker = NULL;
 641	}
 642	if (dev->mm)
 643		mmput(dev->mm);
 644	dev->mm = NULL;
 645}
 646EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 647
 648static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 649{
 650	u64 a = addr / VHOST_PAGE_SIZE / 8;
 651
 652	/* Make sure 64 bit math will not overflow. */
 653	if (a > ULONG_MAX - (unsigned long)log_base ||
 654	    a + (unsigned long)log_base > ULONG_MAX)
 655		return 0;
 656
 657	return access_ok(VERIFY_WRITE, log_base + a,
 658			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 659}
 660
 
 661static bool vhost_overflow(u64 uaddr, u64 size)
 662{
 663	/* Make sure 64 bit math will not overflow. */
 664	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
 
 
 
 
 
 665}
 666
 667/* Caller should have vq mutex and device mutex. */
 668static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
 669			       int log_all)
 670{
 671	struct vhost_umem_node *node;
 672
 673	if (!umem)
 674		return 0;
 675
 676	list_for_each_entry(node, &umem->umem_list, link) {
 677		unsigned long a = node->userspace_addr;
 678
 679		if (vhost_overflow(node->userspace_addr, node->size))
 680			return 0;
 681
 682
 683		if (!access_ok(VERIFY_WRITE, (void __user *)a,
 684				    node->size))
 685			return 0;
 686		else if (log_all && !log_access_ok(log_base,
 687						   node->start,
 688						   node->size))
 689			return 0;
 690	}
 691	return 1;
 
 
 
 
 
 
 
 
 
 
 
 
 692}
 693
 694/* Can we switch to this memory table? */
 695/* Caller should have device mutex but not vq mutex */
 696static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
 697			    int log_all)
 698{
 699	int i;
 700
 701	for (i = 0; i < d->nvqs; ++i) {
 702		int ok;
 703		bool log;
 704
 705		mutex_lock(&d->vqs[i]->mutex);
 706		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
 707		/* If ring is inactive, will check when it's enabled. */
 708		if (d->vqs[i]->private_data)
 709			ok = vq_memory_access_ok(d->vqs[i]->log_base,
 710						 umem, log);
 711		else
 712			ok = 1;
 713		mutex_unlock(&d->vqs[i]->mutex);
 714		if (!ok)
 715			return 0;
 716	}
 717	return 1;
 718}
 719
 720static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 721			  struct iovec iov[], int iov_size, int access);
 722
 723static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
 724			      const void *from, unsigned size)
 725{
 726	int ret;
 727
 728	if (!vq->iotlb)
 729		return __copy_to_user(to, from, size);
 730	else {
 731		/* This function should be called after iotlb
 732		 * prefetch, which means we're sure that all vq
 733		 * could be access through iotlb. So -EAGAIN should
 734		 * not happen in this case.
 735		 */
 736		/* TODO: more fast path */
 737		struct iov_iter t;
 
 
 
 
 
 
 
 738		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
 739				     ARRAY_SIZE(vq->iotlb_iov),
 740				     VHOST_ACCESS_WO);
 741		if (ret < 0)
 742			goto out;
 743		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
 744		ret = copy_to_iter(from, size, &t);
 745		if (ret == size)
 746			ret = 0;
 747	}
 748out:
 749	return ret;
 750}
 751
 752static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
 753				void __user *from, unsigned size)
 754{
 755	int ret;
 756
 757	if (!vq->iotlb)
 758		return __copy_from_user(to, from, size);
 759	else {
 760		/* This function should be called after iotlb
 761		 * prefetch, which means we're sure that vq
 762		 * could be access through iotlb. So -EAGAIN should
 763		 * not happen in this case.
 764		 */
 765		/* TODO: more fast path */
 
 
 766		struct iov_iter f;
 
 
 
 
 767		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
 768				     ARRAY_SIZE(vq->iotlb_iov),
 769				     VHOST_ACCESS_RO);
 770		if (ret < 0) {
 771			vq_err(vq, "IOTLB translation failure: uaddr "
 772			       "%p size 0x%llx\n", from,
 773			       (unsigned long long) size);
 774			goto out;
 775		}
 776		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
 777		ret = copy_from_iter(to, size, &f);
 778		if (ret == size)
 779			ret = 0;
 780	}
 781
 782out:
 783	return ret;
 784}
 785
 786static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 787				     void __user *addr, unsigned size)
 
 788{
 789	int ret;
 790
 791	/* This function should be called after iotlb
 792	 * prefetch, which means we're sure that vq
 793	 * could be access through iotlb. So -EAGAIN should
 794	 * not happen in this case.
 795	 */
 796	/* TODO: more fast path */
 797	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
 798			     ARRAY_SIZE(vq->iotlb_iov),
 799			     VHOST_ACCESS_RO);
 800	if (ret < 0) {
 801		vq_err(vq, "IOTLB translation failure: uaddr "
 802			"%p size 0x%llx\n", addr,
 803			(unsigned long long) size);
 804		return NULL;
 805	}
 806
 807	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
 808		vq_err(vq, "Non atomic userspace memory access: uaddr "
 809			"%p size 0x%llx\n", addr,
 810			(unsigned long long) size);
 811		return NULL;
 812	}
 813
 814	return vq->iotlb_iov[0].iov_base;
 815}
 816
 817#define vhost_put_user(vq, x, ptr) \
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 818({ \
 819	int ret = -EFAULT; \
 820	if (!vq->iotlb) { \
 821		ret = __put_user(x, ptr); \
 822	} else { \
 823		__typeof__(ptr) to = \
 824			(__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
 
 825		if (to != NULL) \
 826			ret = __put_user(x, to); \
 827		else \
 828			ret = -EFAULT;	\
 829	} \
 830	ret; \
 831})
 832
 833#define vhost_get_user(vq, x, ptr) \
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 834({ \
 835	int ret; \
 836	if (!vq->iotlb) { \
 837		ret = __get_user(x, ptr); \
 838	} else { \
 839		__typeof__(ptr) from = \
 840			(__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
 
 
 841		if (from != NULL) \
 842			ret = __get_user(x, from); \
 843		else \
 844			ret = -EFAULT; \
 845	} \
 846	ret; \
 847})
 848
 
 
 
 
 
 
 849static void vhost_dev_lock_vqs(struct vhost_dev *d)
 850{
 851	int i = 0;
 852	for (i = 0; i < d->nvqs; ++i)
 853		mutex_lock(&d->vqs[i]->mutex);
 854}
 855
 856static void vhost_dev_unlock_vqs(struct vhost_dev *d)
 857{
 858	int i = 0;
 859	for (i = 0; i < d->nvqs; ++i)
 860		mutex_unlock(&d->vqs[i]->mutex);
 861}
 862
 863static int vhost_new_umem_range(struct vhost_umem *umem,
 864				u64 start, u64 size, u64 end,
 865				u64 userspace_addr, int perm)
 866{
 867	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
 
 868
 869	if (!node)
 870		return -ENOMEM;
 
 
 
 
 871
 872	if (umem->numem == max_iotlb_entries) {
 873		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
 874		vhost_umem_free(umem, tmp);
 875	}
 
 876
 877	node->start = start;
 878	node->size = size;
 879	node->last = end;
 880	node->userspace_addr = userspace_addr;
 881	node->perm = perm;
 882	INIT_LIST_HEAD(&node->link);
 883	list_add_tail(&node->link, &umem->umem_list);
 884	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
 885	umem->numem++;
 886
 887	return 0;
 
 
 
 888}
 889
 890static void vhost_del_umem_range(struct vhost_umem *umem,
 891				 u64 start, u64 end)
 892{
 893	struct vhost_umem_node *node;
 894
 895	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
 896							   start, end)))
 897		vhost_umem_free(umem, node);
 898}
 899
 900static void vhost_iotlb_notify_vq(struct vhost_dev *d,
 901				  struct vhost_iotlb_msg *msg)
 902{
 903	struct vhost_msg_node *node, *n;
 904
 905	spin_lock(&d->iotlb_lock);
 906
 907	list_for_each_entry_safe(node, n, &d->pending_list, node) {
 908		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
 909		if (msg->iova <= vq_msg->iova &&
 910		    msg->iova + msg->size - 1 > vq_msg->iova &&
 911		    vq_msg->type == VHOST_IOTLB_MISS) {
 912			vhost_poll_queue(&node->vq->poll);
 913			list_del(&node->node);
 914			kfree(node);
 915		}
 916	}
 917
 918	spin_unlock(&d->iotlb_lock);
 919}
 920
 921static int umem_access_ok(u64 uaddr, u64 size, int access)
 922{
 923	unsigned long a = uaddr;
 924
 925	/* Make sure 64 bit math will not overflow. */
 926	if (vhost_overflow(uaddr, size))
 927		return -EFAULT;
 928
 929	if ((access & VHOST_ACCESS_RO) &&
 930	    !access_ok(VERIFY_READ, (void __user *)a, size))
 931		return -EFAULT;
 932	if ((access & VHOST_ACCESS_WO) &&
 933	    !access_ok(VERIFY_WRITE, (void __user *)a, size))
 934		return -EFAULT;
 935	return 0;
 936}
 937
 938static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 939				   struct vhost_iotlb_msg *msg)
 940{
 941	int ret = 0;
 942
 
 
 
 
 943	vhost_dev_lock_vqs(dev);
 944	switch (msg->type) {
 945	case VHOST_IOTLB_UPDATE:
 946		if (!dev->iotlb) {
 947			ret = -EFAULT;
 948			break;
 949		}
 950		if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
 951			ret = -EFAULT;
 952			break;
 953		}
 954		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
 955					 msg->iova + msg->size - 1,
 956					 msg->uaddr, msg->perm)) {
 
 957			ret = -ENOMEM;
 958			break;
 959		}
 960		vhost_iotlb_notify_vq(dev, msg);
 961		break;
 962	case VHOST_IOTLB_INVALIDATE:
 963		vhost_del_umem_range(dev->iotlb, msg->iova,
 964				     msg->iova + msg->size - 1);
 
 
 
 
 
 965		break;
 966	default:
 967		ret = -EINVAL;
 968		break;
 969	}
 970
 971	vhost_dev_unlock_vqs(dev);
 
 
 972	return ret;
 973}
 974ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
 975			     struct iov_iter *from)
 976{
 977	struct vhost_msg_node node;
 978	unsigned size = sizeof(struct vhost_msg);
 979	size_t ret;
 980	int err;
 981
 982	if (iov_iter_count(from) < size)
 983		return 0;
 984	ret = copy_from_iter(&node.msg, size, from);
 985	if (ret != size)
 986		goto done;
 
 987
 988	switch (node.msg.type) {
 989	case VHOST_IOTLB_MSG:
 990		err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
 991		if (err)
 992			ret = err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 993		break;
 994	default:
 995		ret = -EINVAL;
 996		break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 997	}
 998
 
 
 999done:
1000	return ret;
1001}
1002EXPORT_SYMBOL(vhost_chr_write_iter);
1003
1004unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1005			    poll_table *wait)
1006{
1007	unsigned int mask = 0;
1008
1009	poll_wait(file, &dev->wait, wait);
1010
1011	if (!list_empty(&dev->read_list))
1012		mask |= POLLIN | POLLRDNORM;
1013
1014	return mask;
1015}
1016EXPORT_SYMBOL(vhost_chr_poll);
1017
1018ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1019			    int noblock)
1020{
1021	DEFINE_WAIT(wait);
1022	struct vhost_msg_node *node;
1023	ssize_t ret = 0;
1024	unsigned size = sizeof(struct vhost_msg);
1025
1026	if (iov_iter_count(to) < size)
1027		return 0;
1028
1029	while (1) {
1030		if (!noblock)
1031			prepare_to_wait(&dev->wait, &wait,
1032					TASK_INTERRUPTIBLE);
1033
1034		node = vhost_dequeue_msg(dev, &dev->read_list);
1035		if (node)
1036			break;
1037		if (noblock) {
1038			ret = -EAGAIN;
1039			break;
1040		}
1041		if (signal_pending(current)) {
1042			ret = -ERESTARTSYS;
1043			break;
1044		}
1045		if (!dev->iotlb) {
1046			ret = -EBADFD;
1047			break;
1048		}
1049
1050		schedule();
1051	}
1052
1053	if (!noblock)
1054		finish_wait(&dev->wait, &wait);
1055
1056	if (node) {
1057		ret = copy_to_iter(&node->msg, size, to);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1058
1059		if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
 
1060			kfree(node);
1061			return ret;
1062		}
1063
1064		vhost_enqueue_msg(dev, &dev->pending_list, node);
1065	}
1066
1067	return ret;
1068}
1069EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
1070
1071static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1072{
1073	struct vhost_dev *dev = vq->dev;
1074	struct vhost_msg_node *node;
1075	struct vhost_iotlb_msg *msg;
1076
1077	node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
1078	if (!node)
1079		return -ENOMEM;
1080
1081	msg = &node->msg.iotlb;
1082	msg->type = VHOST_IOTLB_MISS;
1083	msg->iova = iova;
1084	msg->perm = access;
1085
1086	vhost_enqueue_msg(dev, &dev->read_list, node);
1087
1088	return 0;
1089}
1090
1091static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1092			struct vring_desc __user *desc,
1093			struct vring_avail __user *avail,
1094			struct vring_used __user *used)
1095
1096{
1097	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1098
1099	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
1100	       access_ok(VERIFY_READ, avail,
1101			 sizeof *avail + num * sizeof *avail->ring + s) &&
1102	       access_ok(VERIFY_WRITE, used,
1103			sizeof *used + num * sizeof *used->ring + s);
1104}
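/*
 * Worked example for the size checks above (legacy split ring layout, with
 * VIRTIO_RING_F_EVENT_IDX negotiated so s == 2): for num == 256,
 *   desc  needs 256 * sizeof(struct vring_desc) = 256 * 16 = 4096 bytes (read),
 *   avail needs 4 + 256 * 2 + 2 =  518 bytes (read),
 *   used  needs 4 + 256 * 8 + 2 = 2054 bytes (write),
 * which is exactly what access_ok() is asked to validate.
 */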
1105
1106static int iotlb_access_ok(struct vhost_virtqueue *vq,
1107			   int access, u64 addr, u64 len)
1108{
1109	const struct vhost_umem_node *node;
1110	struct vhost_umem *umem = vq->iotlb;
1111	u64 s = 0, size;
1112
1113	while (len > s) {
1114		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1115							   addr,
1116							   addr + len - 1);
1117		if (node == NULL || node->start > addr) {
1118			vhost_iotlb_miss(vq, addr, access);
1119			return false;
1120		} else if (!(node->perm & access)) {
 1121			/* Report the possible access violation by
 1122			 * requesting another translation from userspace.
 1123			 */
1124			return false;
1125		}
1126
1127		size = node->size - addr + node->start;
1128		s += size;
1129		addr += size;
1130	}
1131
1132	return true;
1133}
1134
1135int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
1136{
1137	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1138	unsigned int num = vq->num;
1139
1140	if (!vq->iotlb)
1141		return 1;
1142
1143	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
1144			       num * sizeof *vq->desc) &&
1145	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
1146			       sizeof *vq->avail +
1147			       num * sizeof *vq->avail->ring + s) &&
1148	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
1149			       sizeof *vq->used +
1150			       num * sizeof *vq->used->ring + s);
1151}
1152EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
1153
1154/* Can we log writes? */
1155/* Caller should have device mutex but not vq mutex */
1156int vhost_log_access_ok(struct vhost_dev *dev)
1157{
1158	return memory_access_ok(dev, dev->umem, 1);
1159}
1160EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1161
1162/* Verify access for write logging. */
1163/* Caller should have vq mutex and device mutex */
1164static int vq_log_access_ok(struct vhost_virtqueue *vq,
1165			    void __user *log_base)
1166{
1167	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1168
1169	return vq_memory_access_ok(log_base, vq->umem,
1170				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1171		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
1172					sizeof *vq->used +
1173					vq->num * sizeof *vq->used->ring + s));
1174}
1175
1176/* Can we start vq? */
1177/* Caller should have vq mutex and device mutex */
1178int vhost_vq_access_ok(struct vhost_virtqueue *vq)
1179{
1180	if (vq->iotlb) {
 1181		/* When a device IOTLB is in use, the accesses will
 1182		 * be validated during prefetching instead.
 1183		 */
1184		return 1;
1185	}
1186	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
1187		vq_log_access_ok(vq, vq->log_base);
1188}
1189EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1190
1191static struct vhost_umem *vhost_umem_alloc(void)
1192{
1193	struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem));
1194
1195	if (!umem)
1196		return NULL;
1197
1198	umem->umem_tree = RB_ROOT;
1199	umem->numem = 0;
1200	INIT_LIST_HEAD(&umem->umem_list);
1201
1202	return umem;
1203}
1204
1205static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1206{
1207	struct vhost_memory mem, *newmem;
1208	struct vhost_memory_region *region;
1209	struct vhost_umem *newumem, *oldumem;
1210	unsigned long size = offsetof(struct vhost_memory, regions);
1211	int i;
1212
1213	if (copy_from_user(&mem, m, size))
1214		return -EFAULT;
1215	if (mem.padding)
1216		return -EOPNOTSUPP;
1217	if (mem.nregions > max_mem_regions)
1218		return -E2BIG;
1219	newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
1220	if (!newmem)
1221		return -ENOMEM;
1222
1223	memcpy(newmem, &mem, size);
1224	if (copy_from_user(newmem->regions, m->regions,
1225			   mem.nregions * sizeof *m->regions)) {
1226		kvfree(newmem);
1227		return -EFAULT;
1228	}
1229
1230	newumem = vhost_umem_alloc();
1231	if (!newumem) {
1232		kvfree(newmem);
1233		return -ENOMEM;
1234	}
1235
1236	for (region = newmem->regions;
1237	     region < newmem->regions + mem.nregions;
1238	     region++) {
1239		if (vhost_new_umem_range(newumem,
1240					 region->guest_phys_addr,
1241					 region->memory_size,
1242					 region->guest_phys_addr +
1243					 region->memory_size - 1,
1244					 region->userspace_addr,
1245					 VHOST_ACCESS_RW))
1246			goto err;
1247	}
1248
1249	if (!memory_access_ok(d, newumem, 0))
1250		goto err;
1251
1252	oldumem = d->umem;
1253	d->umem = newumem;
1254
1255	/* All memory accesses are done under some VQ mutex. */
1256	for (i = 0; i < d->nvqs; ++i) {
1257		mutex_lock(&d->vqs[i]->mutex);
1258		d->vqs[i]->umem = newumem;
1259		mutex_unlock(&d->vqs[i]->mutex);
1260	}
1261
1262	kvfree(newmem);
1263	vhost_umem_clean(oldumem);
1264	return 0;
1265
1266err:
1267	vhost_umem_clean(newumem);
1268	kvfree(newmem);
1269	return -EFAULT;
1270}
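/*
 * Illustrative sketch (not part of this driver): a minimal userspace
 * VHOST_SET_MEM_TABLE call describing a single guest RAM region, as consumed
 * by vhost_set_memory() above.  guest_base, ram_size and ram_hva are
 * assumptions for the example.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_one_region(int vhost_fd, __u64 guest_base, __u64 ram_size,
			  void *ram_hva)
{
	struct vhost_memory *mem;
	int r;

	mem = calloc(1, sizeof(*mem) + sizeof(mem->regions[0]));
	if (!mem)
		return -1;
	mem->nregions = 1;
	mem->regions[0].guest_phys_addr = guest_base;
	mem->regions[0].memory_size = ram_size;
	mem->regions[0].userspace_addr = (__u64)(unsigned long)ram_hva;

	r = ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
	free(mem);
	return r;
}
#endif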
1271
1272long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
1273{
1274	struct file *eventfp, *filep = NULL;
1275	bool pollstart = false, pollstop = false;
1276	struct eventfd_ctx *ctx = NULL;
1277	u32 __user *idxp = argp;
1278	struct vhost_virtqueue *vq;
1279	struct vhost_vring_state s;
1280	struct vhost_vring_file f;
1281	struct vhost_vring_addr a;
1282	u32 idx;
1283	long r;
1284
1285	r = get_user(idx, idxp);
1286	if (r < 0)
1287		return r;
1288	if (idx >= d->nvqs)
1289		return -ENOBUFS;
1290
1291	vq = d->vqs[idx];
1292
1293	mutex_lock(&vq->mutex);
1294
1295	switch (ioctl) {
1296	case VHOST_SET_VRING_NUM:
1297		/* Resizing ring with an active backend?
1298		 * You don't want to do that. */
1299		if (vq->private_data) {
1300			r = -EBUSY;
1301			break;
1302		}
1303		if (copy_from_user(&s, argp, sizeof s)) {
1304			r = -EFAULT;
1305			break;
1306		}
1307		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
1308			r = -EINVAL;
1309			break;
1310		}
1311		vq->num = s.num;
1312		break;
1313	case VHOST_SET_VRING_BASE:
1314		/* Moving base with an active backend?
1315		 * You don't want to do that. */
1316		if (vq->private_data) {
1317			r = -EBUSY;
1318			break;
1319		}
1320		if (copy_from_user(&s, argp, sizeof s)) {
1321			r = -EFAULT;
1322			break;
1323		}
1324		if (s.num > 0xffff) {
1325			r = -EINVAL;
1326			break;
1327		}
1328		vq->last_avail_idx = vq->last_used_event = s.num;
1329		/* Forget the cached index value. */
1330		vq->avail_idx = vq->last_avail_idx;
1331		break;
1332	case VHOST_GET_VRING_BASE:
1333		s.index = idx;
1334		s.num = vq->last_avail_idx;
1335		if (copy_to_user(argp, &s, sizeof s))
1336			r = -EFAULT;
1337		break;
1338	case VHOST_SET_VRING_ADDR:
1339		if (copy_from_user(&a, argp, sizeof a)) {
1340			r = -EFAULT;
1341			break;
1342		}
1343		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
1344			r = -EOPNOTSUPP;
1345			break;
1346		}
1347		/* For 32bit, verify that the top 32bits of the user
1348		   data are set to zero. */
1349		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1350		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1351		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
1352			r = -EFAULT;
1353			break;
1354		}
1355
1356		/* Make sure it's safe to cast pointers to vring types. */
1357		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1358		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1359		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1360		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1361		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
1362			r = -EINVAL;
1363			break;
1364		}
1365
 1366		/* We only verify access here if backend is configured.
 1367		 * If it is not, we skip the check since the ring size might
 1368		 * not have been set up yet; we will verify when backend is configured. */
1369		if (vq->private_data) {
1370			if (!vq_access_ok(vq, vq->num,
1371				(void __user *)(unsigned long)a.desc_user_addr,
1372				(void __user *)(unsigned long)a.avail_user_addr,
1373				(void __user *)(unsigned long)a.used_user_addr)) {
1374				r = -EINVAL;
1375				break;
1376			}
1377
1378			/* Also validate log access for used ring if enabled. */
1379			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
1380			    !log_access_ok(vq->log_base, a.log_guest_addr,
1381					   sizeof *vq->used +
1382					   vq->num * sizeof *vq->used->ring)) {
1383				r = -EINVAL;
1384				break;
1385			}
1386		}
1387
1388		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1389		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1390		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1391		vq->log_addr = a.log_guest_addr;
1392		vq->used = (void __user *)(unsigned long)a.used_user_addr;
1393		break;
1394	case VHOST_SET_VRING_KICK:
1395		if (copy_from_user(&f, argp, sizeof f)) {
1396			r = -EFAULT;
1397			break;
1398		}
1399		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
1400		if (IS_ERR(eventfp)) {
1401			r = PTR_ERR(eventfp);
1402			break;
1403		}
1404		if (eventfp != vq->kick) {
1405			pollstop = (filep = vq->kick) != NULL;
1406			pollstart = (vq->kick = eventfp) != NULL;
1407		} else
1408			filep = eventfp;
1409		break;
1410	case VHOST_SET_VRING_CALL:
1411		if (copy_from_user(&f, argp, sizeof f)) {
1412			r = -EFAULT;
1413			break;
1414		}
1415		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
1416		if (IS_ERR(eventfp)) {
1417			r = PTR_ERR(eventfp);
1418			break;
1419		}
1420		if (eventfp != vq->call) {
1421			filep = vq->call;
1422			ctx = vq->call_ctx;
1423			vq->call = eventfp;
1424			vq->call_ctx = eventfp ?
1425				eventfd_ctx_fileget(eventfp) : NULL;
1426		} else
1427			filep = eventfp;
1428		break;
1429	case VHOST_SET_VRING_ERR:
1430		if (copy_from_user(&f, argp, sizeof f)) {
1431			r = -EFAULT;
1432			break;
1433		}
1434		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
1435		if (IS_ERR(eventfp)) {
1436			r = PTR_ERR(eventfp);
1437			break;
1438		}
1439		if (eventfp != vq->error) {
1440			filep = vq->error;
1441			vq->error = eventfp;
1442			ctx = vq->error_ctx;
1443			vq->error_ctx = eventfp ?
1444				eventfd_ctx_fileget(eventfp) : NULL;
1445		} else
1446			filep = eventfp;
1447		break;
1448	case VHOST_SET_VRING_ENDIAN:
1449		r = vhost_set_vring_endian(vq, argp);
1450		break;
1451	case VHOST_GET_VRING_ENDIAN:
1452		r = vhost_get_vring_endian(vq, idx, argp);
1453		break;
1454	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1455		if (copy_from_user(&s, argp, sizeof(s))) {
1456			r = -EFAULT;
1457			break;
1458		}
1459		vq->busyloop_timeout = s.num;
1460		break;
1461	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1462		s.index = idx;
1463		s.num = vq->busyloop_timeout;
1464		if (copy_to_user(argp, &s, sizeof(s)))
1465			r = -EFAULT;
1466		break;
1467	default:
1468		r = -ENOIOCTLCMD;
1469	}
1470
1471	if (pollstop && vq->handle_kick)
1472		vhost_poll_stop(&vq->poll);
1473
1474	if (ctx)
1475		eventfd_ctx_put(ctx);
1476	if (filep)
1477		fput(filep);
1478
1479	if (pollstart && vq->handle_kick)
1480		r = vhost_poll_start(&vq->poll, vq->kick);
1481
1482	mutex_unlock(&vq->mutex);
1483
1484	if (pollstop && vq->handle_kick)
1485		vhost_poll_flush(&vq->poll);
1486	return r;
1487}
1488EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
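/*
 * Illustrative sketch (not part of this driver): the usual userspace call
 * sequence into vhost_vring_ioctl() above when bringing up queue 0.  The
 * struct vring pointers are assumptions for the example (already mapped in
 * the caller's address space); kick_fd and call_fd are pre-created eventfds.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/virtio_ring.h>

static int setup_vring0(int vhost_fd, struct vring *vr, int kick_fd, int call_fd)
{
	struct vhost_vring_state state = { .index = 0, .num = vr->num };
	struct vhost_vring_addr addr = {
		.index = 0,
		.desc_user_addr  = (__u64)(unsigned long)vr->desc,
		.avail_user_addr = (__u64)(unsigned long)vr->avail,
		.used_user_addr  = (__u64)(unsigned long)vr->used,
	};
	struct vhost_vring_file kick = { .index = 0, .fd = kick_fd };
	struct vhost_vring_file call = { .index = 0, .fd = call_fd };

	if (ioctl(vhost_fd, VHOST_SET_VRING_NUM, &state))
		return -1;
	state.num = 0;				/* start from ring index 0 */
	if (ioctl(vhost_fd, VHOST_SET_VRING_BASE, &state))
		return -1;
	if (ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr))
		return -1;
	if (ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick))
		return -1;
	return ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);
}
#endif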
1489
1490int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1491{
1492	struct vhost_umem *niotlb, *oiotlb;
1493	int i;
1494
1495	niotlb = vhost_umem_alloc();
1496	if (!niotlb)
1497		return -ENOMEM;
1498
1499	oiotlb = d->iotlb;
1500	d->iotlb = niotlb;
1501
1502	for (i = 0; i < d->nvqs; ++i) {
1503		mutex_lock(&d->vqs[i]->mutex);
1504		d->vqs[i]->iotlb = niotlb;
1505		mutex_unlock(&d->vqs[i]->mutex);
1506	}
1507
1508	vhost_umem_clean(oiotlb);
1509
1510	return 0;
1511}
1512EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1513
1514/* Caller must have device mutex */
1515long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1516{
1517	struct file *eventfp, *filep = NULL;
1518	struct eventfd_ctx *ctx = NULL;
1519	u64 p;
1520	long r;
1521	int i, fd;
1522
1523	/* If you are not the owner, you can become one */
1524	if (ioctl == VHOST_SET_OWNER) {
1525		r = vhost_dev_set_owner(d);
1526		goto done;
1527	}
1528
1529	/* You must be the owner to do anything else */
1530	r = vhost_dev_check_owner(d);
1531	if (r)
1532		goto done;
1533
1534	switch (ioctl) {
1535	case VHOST_SET_MEM_TABLE:
1536		r = vhost_set_memory(d, argp);
1537		break;
1538	case VHOST_SET_LOG_BASE:
1539		if (copy_from_user(&p, argp, sizeof p)) {
1540			r = -EFAULT;
1541			break;
1542		}
1543		if ((u64)(unsigned long)p != p) {
1544			r = -EFAULT;
1545			break;
1546		}
1547		for (i = 0; i < d->nvqs; ++i) {
1548			struct vhost_virtqueue *vq;
1549			void __user *base = (void __user *)(unsigned long)p;
1550			vq = d->vqs[i];
1551			mutex_lock(&vq->mutex);
1552			/* If ring is inactive, will check when it's enabled. */
1553			if (vq->private_data && !vq_log_access_ok(vq, base))
1554				r = -EFAULT;
1555			else
1556				vq->log_base = base;
1557			mutex_unlock(&vq->mutex);
1558		}
1559		break;
1560	case VHOST_SET_LOG_FD:
1561		r = get_user(fd, (int __user *)argp);
1562		if (r < 0)
1563			break;
1564		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
1565		if (IS_ERR(eventfp)) {
1566			r = PTR_ERR(eventfp);
1567			break;
1568		}
1569		if (eventfp != d->log_file) {
1570			filep = d->log_file;
1571			d->log_file = eventfp;
1572			ctx = d->log_ctx;
1573			d->log_ctx = eventfp ?
1574				eventfd_ctx_fileget(eventfp) : NULL;
1575		} else
1576			filep = eventfp;
1577		for (i = 0; i < d->nvqs; ++i) {
1578			mutex_lock(&d->vqs[i]->mutex);
1579			d->vqs[i]->log_ctx = d->log_ctx;
1580			mutex_unlock(&d->vqs[i]->mutex);
1581		}
1582		if (ctx)
1583			eventfd_ctx_put(ctx);
1584		if (filep)
1585			fput(filep);
1586		break;
1587	default:
1588		r = -ENOIOCTLCMD;
1589		break;
1590	}
1591done:
1592	return r;
1593}
1594EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
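/*
 * Illustrative sketch (not part of this driver): enabling dirty logging from
 * userspace for migration.  VHOST_SET_LOG_BASE is handled by vhost_dev_ioctl()
 * above; VHOST_SET_FEATURES is handled by the device-specific ioctl handler
 * (e.g. vhost-net).  The log_bytes sizing (one bit per VHOST_PAGE_SIZE page of
 * guest memory) is an assumption the caller is responsible for.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static void *enable_dirty_log(int vhost_fd, __u64 features, size_t log_bytes)
{
	void *log = calloc(1, log_bytes);	/* bitmap, 1 bit per page */
	__u64 log_base = (__u64)(unsigned long)log;

	if (!log)
		return NULL;
	features |= 1ULL << VHOST_F_LOG_ALL;
	if (ioctl(vhost_fd, VHOST_SET_FEATURES, &features) ||
	    ioctl(vhost_fd, VHOST_SET_LOG_BASE, &log_base)) {
		free(log);
		return NULL;
	}
	return log;
}
#endif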
1595
1596/* TODO: This is really inefficient.  We need something like get_user()
1597 * (instruction directly accesses the data, with an exception table entry
1598 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
1599 */
1600static int set_bit_to_user(int nr, void __user *addr)
1601{
1602	unsigned long log = (unsigned long)addr;
1603	struct page *page;
1604	void *base;
1605	int bit = nr + (log % PAGE_SIZE) * 8;
1606	int r;
1607
1608	r = get_user_pages_fast(log, 1, 1, &page);
1609	if (r < 0)
1610		return r;
1611	BUG_ON(r != 1);
1612	base = kmap_atomic(page);
1613	set_bit(bit, base);
1614	kunmap_atomic(base);
1615	set_page_dirty_lock(page);
1616	put_page(page);
1617	return 0;
1618}
1619
1620static int log_write(void __user *log_base,
1621		     u64 write_address, u64 write_length)
1622{
1623	u64 write_page = write_address / VHOST_PAGE_SIZE;
1624	int r;
1625
1626	if (!write_length)
1627		return 0;
1628	write_length += write_address % VHOST_PAGE_SIZE;
1629	for (;;) {
1630		u64 base = (u64)(unsigned long)log_base;
1631		u64 log = base + write_page / 8;
1632		int bit = write_page % 8;
1633		if ((u64)(unsigned long)log != log)
1634			return -EFAULT;
1635		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1636		if (r < 0)
1637			return r;
1638		if (write_length <= VHOST_PAGE_SIZE)
1639			break;
1640		write_length -= VHOST_PAGE_SIZE;
1641		write_page += 1;
1642	}
1643	return r;
1644}
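/*
 * Worked example for log_write(): with VHOST_PAGE_SIZE == 4096, a write to
 * guest address 0x12345 of length 0x10 dirties guest page 0x12, so we set
 * bit (0x12 % 8) == 2 in the byte at log_base + 0x12 / 8 == log_base + 2,
 * via set_bit_to_user() above.  A write spanning a page boundary loops and
 * marks each touched page in turn.
 */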
1645
1646int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1647		    unsigned int log_num, u64 len)
1648{
1649	int i, r;
1650
1651	/* Make sure data written is seen before log. */
1652	smp_wmb();
1653	for (i = 0; i < log_num; ++i) {
1654		u64 l = min(log[i].len, len);
1655		r = log_write(vq->log_base, log[i].addr, l);
1656		if (r < 0)
1657			return r;
1658		len -= l;
1659		if (!len) {
1660			if (vq->log_ctx)
1661				eventfd_signal(vq->log_ctx, 1);
1662			return 0;
1663		}
1664	}
1665	/* Length written exceeds what we have stored. This is a bug. */
1666	BUG();
1667	return 0;
1668}
1669EXPORT_SYMBOL_GPL(vhost_log_write);
1670
1671static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1672{
1673	void __user *used;
1674	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
1675			   &vq->used->flags) < 0)
1676		return -EFAULT;
1677	if (unlikely(vq->log_used)) {
1678		/* Make sure the flag is seen before log. */
1679		smp_wmb();
1680		/* Log used flag write. */
1681		used = &vq->used->flags;
1682		log_write(vq->log_base, vq->log_addr +
1683			  (used - (void __user *)vq->used),
1684			  sizeof vq->used->flags);
1685		if (vq->log_ctx)
1686			eventfd_signal(vq->log_ctx, 1);
1687	}
1688	return 0;
1689}
1690
1691static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1692{
1693	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
1694			   vhost_avail_event(vq)))
1695		return -EFAULT;
1696	if (unlikely(vq->log_used)) {
1697		void __user *used;
1698		/* Make sure the event is seen before log. */
1699		smp_wmb();
1700		/* Log avail event write */
1701		used = vhost_avail_event(vq);
1702		log_write(vq->log_base, vq->log_addr +
1703			  (used - (void __user *)vq->used),
1704			  sizeof *vhost_avail_event(vq));
1705		if (vq->log_ctx)
1706			eventfd_signal(vq->log_ctx, 1);
1707	}
1708	return 0;
1709}
1710
1711int vhost_vq_init_access(struct vhost_virtqueue *vq)
1712{
1713	__virtio16 last_used_idx;
1714	int r;
1715	bool is_le = vq->is_le;
1716
1717	if (!vq->private_data)
1718		return 0;
1719
1720	vhost_init_is_le(vq);
1721
1722	r = vhost_update_used_flags(vq);
1723	if (r)
1724		goto err;
1725	vq->signalled_used_valid = false;
1726	if (!vq->iotlb &&
1727	    !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
1728		r = -EFAULT;
1729		goto err;
1730	}
1731	r = vhost_get_user(vq, last_used_idx, &vq->used->idx);
1732	if (r) {
1733		vq_err(vq, "Can't access used idx at %p\n",
1734		       &vq->used->idx);
1735		goto err;
1736	}
1737	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
1738	return 0;
1739
1740err:
1741	vq->is_le = is_le;
1742	return r;
1743}
1744EXPORT_SYMBOL_GPL(vhost_vq_init_access);
1745
1746static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
1747			  struct iovec iov[], int iov_size, int access)
1748{
1749	const struct vhost_umem_node *node;
1750	struct vhost_dev *dev = vq->dev;
1751	struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
1752	struct iovec *_iov;
1753	u64 s = 0;
1754	int ret = 0;
1755
1756	while ((u64)len > s) {
1757		u64 size;
1758		if (unlikely(ret >= iov_size)) {
1759			ret = -ENOBUFS;
1760			break;
1761		}
1762
1763		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1764							addr, addr + len - 1);
1765		if (node == NULL || node->start > addr) {
1766			if (umem != dev->iotlb) {
1767				ret = -EFAULT;
1768				break;
1769			}
1770			ret = -EAGAIN;
1771			break;
1772		} else if (!(node->perm & access)) {
1773			ret = -EPERM;
1774			break;
1775		}
1776
1777		_iov = iov + ret;
1778		size = node->size - addr + node->start;
1779		_iov->iov_len = min((u64)len - s, size);
1780		_iov->iov_base = (void __user *)(unsigned long)
1781			(node->userspace_addr + addr - node->start);
1782		s += size;
1783		addr += size;
1784		++ret;
1785	}
1786
1787	if (ret == -EAGAIN)
1788		vhost_iotlb_miss(vq, addr, access);
1789	return ret;
1790}
1791
1792/* Each buffer in the virtqueues is actually a chain of descriptors.  This
1793 * function returns the next descriptor in the chain,
1794 * or -1U if we're at the end. */
1795static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
1796{
1797	unsigned int next;
1798
1799	/* If this descriptor says it doesn't chain, we're done. */
1800	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
1801		return -1U;
1802
1803	/* Check they're not leading us off end of descriptors. */
1804	next = vhost16_to_cpu(vq, desc->next);
1805	/* Make sure compiler knows to grab that: we don't want it changing! */
1806	/* We will use the result as an index in an array, so most
1807	 * architectures only need a compiler barrier here. */
1808	read_barrier_depends();
1809
1810	return next;
1811}
1812
1813static int get_indirect(struct vhost_virtqueue *vq,
1814			struct iovec iov[], unsigned int iov_size,
1815			unsigned int *out_num, unsigned int *in_num,
1816			struct vhost_log *log, unsigned int *log_num,
1817			struct vring_desc *indirect)
1818{
1819	struct vring_desc desc;
1820	unsigned int i = 0, count, found = 0;
1821	u32 len = vhost32_to_cpu(vq, indirect->len);
1822	struct iov_iter from;
1823	int ret, access;
1824
1825	/* Sanity check */
1826	if (unlikely(len % sizeof desc)) {
1827		vq_err(vq, "Invalid length in indirect descriptor: "
1828		       "len 0x%llx not multiple of 0x%zx\n",
1829		       (unsigned long long)len,
1830		       sizeof desc);
1831		return -EINVAL;
1832	}
1833
1834	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
1835			     UIO_MAXIOV, VHOST_ACCESS_RO);
1836	if (unlikely(ret < 0)) {
1837		if (ret != -EAGAIN)
1838			vq_err(vq, "Translation failure %d in indirect.\n", ret);
1839		return ret;
1840	}
1841	iov_iter_init(&from, READ, vq->indirect, ret, len);
1842
1843	/* We will use the result as an address to read from, so most
1844	 * architectures only need a compiler barrier here. */
1845	read_barrier_depends();
1846
1847	count = len / sizeof desc;
1848	/* Buffers are chained via a 16 bit next field, so
1849	 * we can have at most 2^16 of these. */
1850	if (unlikely(count > USHRT_MAX + 1)) {
1851		vq_err(vq, "Indirect buffer length too big: %d\n",
1852		       indirect->len);
1853		return -E2BIG;
1854	}
1855
1856	do {
1857		unsigned iov_count = *in_num + *out_num;
1858		if (unlikely(++found > count)) {
1859			vq_err(vq, "Loop detected: last one at %u "
1860			       "indirect size %u\n",
1861			       i, count);
1862			return -EINVAL;
1863		}
1864		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
1865			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1866			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1867			return -EINVAL;
1868		}
1869		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
1870			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1871			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1872			return -EINVAL;
1873		}
1874
1875		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
1876			access = VHOST_ACCESS_WO;
1877		else
1878			access = VHOST_ACCESS_RO;
1879
1880		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
1881				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
1882				     iov_size - iov_count, access);
1883		if (unlikely(ret < 0)) {
1884			if (ret != -EAGAIN)
1885				vq_err(vq, "Translation failure %d indirect idx %d\n",
1886					ret, i);
1887			return ret;
1888		}
1889		/* If this is an input descriptor, increment that count. */
1890		if (access == VHOST_ACCESS_WO) {
1891			*in_num += ret;
1892			if (unlikely(log)) {
1893				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
1894				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
1895				++*log_num;
1896			}
1897		} else {
1898			/* If it's an output descriptor, they're all supposed
1899			 * to come before any input descriptors. */
1900			if (unlikely(*in_num)) {
1901				vq_err(vq, "Indirect descriptor "
1902				       "has out after in: idx %d\n", i);
1903				return -EINVAL;
1904			}
1905			*out_num += ret;
1906		}
1907	} while ((i = next_desc(vq, &desc)) != -1);
1908	return 0;
1909}
1910
 1911/* This looks in the virtqueue for the first available buffer, and converts
1912 * it to an iovec for convenient access.  Since descriptors consist of some
1913 * number of output then some number of input descriptors, it's actually two
1914 * iovecs, but we pack them into one and note how many of each there were.
1915 *
1916 * This function returns the descriptor number found, or vq->num (which is
1917 * never a valid descriptor number) if none was found.  A negative code is
1918 * returned on error. */
1919int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1920		      struct iovec iov[], unsigned int iov_size,
1921		      unsigned int *out_num, unsigned int *in_num,
1922		      struct vhost_log *log, unsigned int *log_num)
1923{
1924	struct vring_desc desc;
1925	unsigned int i, head, found = 0;
1926	u16 last_avail_idx;
1927	__virtio16 avail_idx;
1928	__virtio16 ring_head;
1929	int ret, access;
1930
1931	/* Check it isn't doing very strange things with descriptor numbers. */
1932	last_avail_idx = vq->last_avail_idx;
1933	if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) {
1934		vq_err(vq, "Failed to access avail idx at %p\n",
1935		       &vq->avail->idx);
1936		return -EFAULT;
1937	}
1938	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
1939
1940	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1941		vq_err(vq, "Guest moved used index from %u to %u",
1942		       last_avail_idx, vq->avail_idx);
1943		return -EFAULT;
1944	}
1945
1946	/* If there's nothing new since last we looked, return invalid. */
1947	if (vq->avail_idx == last_avail_idx)
1948		return vq->num;
1949
1950	/* Only get avail ring entries after they have been exposed by guest. */
1951	smp_rmb();
1952
1953	/* Grab the next descriptor number they're advertising, and increment
1954	 * the index we've seen. */
1955	if (unlikely(vhost_get_user(vq, ring_head,
1956		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
1957		vq_err(vq, "Failed to read head: idx %d address %p\n",
1958		       last_avail_idx,
1959		       &vq->avail->ring[last_avail_idx % vq->num]);
1960		return -EFAULT;
1961	}
1962
1963	head = vhost16_to_cpu(vq, ring_head);
1964
1965	/* If their number is silly, that's an error. */
1966	if (unlikely(head >= vq->num)) {
1967		vq_err(vq, "Guest says index %u > %u is available",
1968		       head, vq->num);
1969		return -EINVAL;
1970	}
1971
 1972	/* When we start there are neither input nor output descriptors. */
1973	*out_num = *in_num = 0;
1974	if (unlikely(log))
1975		*log_num = 0;
1976
1977	i = head;
1978	do {
1979		unsigned iov_count = *in_num + *out_num;
1980		if (unlikely(i >= vq->num)) {
1981			vq_err(vq, "Desc index is %u > %u, head = %u",
1982			       i, vq->num, head);
1983			return -EINVAL;
1984		}
1985		if (unlikely(++found > vq->num)) {
1986			vq_err(vq, "Loop detected: last one at %u "
1987			       "vq size %u head %u\n",
1988			       i, vq->num, head);
1989			return -EINVAL;
1990		}
1991		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
1992					   sizeof desc);
1993		if (unlikely(ret)) {
1994			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1995			       i, vq->desc + i);
1996			return -EFAULT;
1997		}
1998		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
1999			ret = get_indirect(vq, iov, iov_size,
2000					   out_num, in_num,
2001					   log, log_num, &desc);
2002			if (unlikely(ret < 0)) {
2003				if (ret != -EAGAIN)
2004					vq_err(vq, "Failure detected "
2005						"in indirect descriptor at idx %d\n", i);
2006				return ret;
2007			}
2008			continue;
2009		}
2010
2011		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2012			access = VHOST_ACCESS_WO;
2013		else
2014			access = VHOST_ACCESS_RO;
2015		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2016				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2017				     iov_size - iov_count, access);
2018		if (unlikely(ret < 0)) {
2019			if (ret != -EAGAIN)
2020				vq_err(vq, "Translation failure %d descriptor idx %d\n",
2021					ret, i);
2022			return ret;
2023		}
2024		if (access == VHOST_ACCESS_WO) {
2025			/* If this is an input descriptor,
2026			 * increment that count. */
2027			*in_num += ret;
2028			if (unlikely(log)) {
2029				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2030				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2031				++*log_num;
2032			}
2033		} else {
2034			/* If it's an output descriptor, they're all supposed
2035			 * to come before any input descriptors. */
2036			if (unlikely(*in_num)) {
2037				vq_err(vq, "Descriptor has out after in: "
2038				       "idx %d\n", i);
2039				return -EINVAL;
2040			}
2041			*out_num += ret;
2042		}
2043	} while ((i = next_desc(vq, &desc)) != -1);
2044
2045	/* On success, increment avail index. */
2046	vq->last_avail_idx++;
2047
2048	/* Assume notifications from guest are disabled at this point,
2049	 * if they aren't we would need to update avail_event index. */
2050	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2051	return head;
2052}
2053EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
2054
2055/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2056void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2057{
2058	vq->last_avail_idx -= n;
2059}
2060EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2061
2062/* After we've used one of their buffers, we tell them about it.  We'll then
2063 * want to notify the guest, using eventfd. */
2064int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2065{
2066	struct vring_used_elem heads = {
2067		cpu_to_vhost32(vq, head),
2068		cpu_to_vhost32(vq, len)
2069	};
2070
2071	return vhost_add_used_n(vq, &heads, 1);
2072}
2073EXPORT_SYMBOL_GPL(vhost_add_used);
2074
2075static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2076			    struct vring_used_elem *heads,
2077			    unsigned count)
2078{
2079	struct vring_used_elem __user *used;
2080	u16 old, new;
2081	int start;
2082
2083	start = vq->last_used_idx & (vq->num - 1);
2084	used = vq->used->ring + start;
2085	if (count == 1) {
2086		if (vhost_put_user(vq, heads[0].id, &used->id)) {
2087			vq_err(vq, "Failed to write used id");
2088			return -EFAULT;
2089		}
2090		if (vhost_put_user(vq, heads[0].len, &used->len)) {
2091			vq_err(vq, "Failed to write used len");
2092			return -EFAULT;
2093		}
2094	} else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
2095		vq_err(vq, "Failed to write used");
2096		return -EFAULT;
2097	}
2098	if (unlikely(vq->log_used)) {
2099		/* Make sure data is seen before log. */
2100		smp_wmb();
2101		/* Log used ring entry write. */
2102		log_write(vq->log_base,
2103			  vq->log_addr +
2104			   ((void __user *)used - (void __user *)vq->used),
2105			  count * sizeof *used);
2106	}
2107	old = vq->last_used_idx;
2108	new = (vq->last_used_idx += count);
2109	/* If the driver never bothers to signal in a very long while,
2110	 * used index might wrap around. If that happens, invalidate
2111	 * signalled_used index we stored. TODO: make sure driver
2112	 * signals at least once in 2^16 and remove this. */
2113	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2114		vq->signalled_used_valid = false;
2115	return 0;
2116}
2117
2118/* After we've used one of their buffers, we tell them about it.  We'll then
2119 * want to notify the guest, using eventfd. */
2120int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2121		     unsigned count)
2122{
2123	int start, n, r;
2124
2125	start = vq->last_used_idx & (vq->num - 1);
2126	n = vq->num - start;
2127	if (n < count) {
2128		r = __vhost_add_used_n(vq, heads, n);
2129		if (r < 0)
2130			return r;
2131		heads += n;
2132		count -= n;
2133	}
2134	r = __vhost_add_used_n(vq, heads, count);
2135
2136	/* Make sure buffer is written before we update index. */
2137	smp_wmb();
2138	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
2139			   &vq->used->idx)) {
2140		vq_err(vq, "Failed to increment used idx");
2141		return -EFAULT;
2142	}
2143	if (unlikely(vq->log_used)) {
2144		/* Log used index update. */
2145		log_write(vq->log_base,
2146			  vq->log_addr + offsetof(struct vring_used, idx),
2147			  sizeof vq->used->idx);
2148		if (vq->log_ctx)
2149			eventfd_signal(vq->log_ctx, 1);
2150	}
2151	return r;
2152}
2153EXPORT_SYMBOL_GPL(vhost_add_used_n);
2154
2155static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2156{
2157	__u16 old, new;
2158	__virtio16 event;
2159	bool v;
2160
2161	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2162	    unlikely(vq->avail_idx == vq->last_avail_idx))
2163		return true;
2164
2165	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2166		__virtio16 flags;
2167		/* Flush out used index updates. This is paired
2168		 * with the barrier that the Guest executes when enabling
2169		 * interrupts. */
2170		smp_mb();
2171		if (vhost_get_user(vq, flags, &vq->avail->flags)) {
2172			vq_err(vq, "Failed to get flags");
2173			return true;
2174		}
2175		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2176	}
2177	old = vq->signalled_used;
2178	v = vq->signalled_used_valid;
2179	new = vq->signalled_used = vq->last_used_idx;
2180	vq->signalled_used_valid = true;
2181
2182	if (unlikely(!v))
2183		return true;
2184
 2185	/* If both of the following conditions are met, we're sure there's
 2186	 * no need to notify the guest:
 2187	 * 1) the cached used event is ahead of new
 2188	 * 2) the old-to-new update does not cross the cached used event. */
2189	if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
2190	    !vring_need_event(vq->last_used_event, new, old))
2191		return false;
2192
2193	/* Flush out used index updates. This is paired
2194	 * with the barrier that the Guest executes when enabling
2195	 * interrupts. */
2196	smp_mb();
2197
2198	if (vhost_get_user(vq, event, vhost_used_event(vq))) {
2199		vq_err(vq, "Failed to get used event idx");
2200		return true;
2201	}
2202	vq->last_used_event = vhost16_to_cpu(vq, event);
2203
2204	return vring_need_event(vq->last_used_event, new, old);
2205}
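/*
 * For reference, the vring_need_event() helper used above (from
 * include/uapi/linux/virtio_ring.h) asks whether event_idx falls in the
 * half-open window [old, new_idx), modulo 2^16:
 *
 *	vring_need_event(event_idx, new_idx, old)
 *		== (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old)
 *
 * E.g. old = 10, new_idx = 12, event_idx = 11: (12 - 11 - 1) = 0 < (12 - 10)
 * = 2, so the guest asked to be signalled once entry 11 was used and we
 * notify; with event_idx = 12 the comparison fails and we stay quiet.
 */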
2206
2207/* This actually signals the guest, using eventfd. */
2208void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2209{
 2210	/* Signal the Guest to tell them we used something up. */
2211	if (vq->call_ctx && vhost_notify(dev, vq))
2212		eventfd_signal(vq->call_ctx, 1);
2213}
2214EXPORT_SYMBOL_GPL(vhost_signal);
2215
2216/* And here's the combo meal deal.  Supersize me! */
2217void vhost_add_used_and_signal(struct vhost_dev *dev,
2218			       struct vhost_virtqueue *vq,
2219			       unsigned int head, int len)
2220{
2221	vhost_add_used(vq, head, len);
2222	vhost_signal(dev, vq);
2223}
2224EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2225
2226/* multi-buffer version of vhost_add_used_and_signal */
2227void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2228				 struct vhost_virtqueue *vq,
2229				 struct vring_used_elem *heads, unsigned count)
2230{
2231	vhost_add_used_n(vq, heads, count);
2232	vhost_signal(dev, vq);
2233}
2234EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2235
 2236/* Return true if we're sure the available ring is empty. */
2237bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2238{
2239	__virtio16 avail_idx;
2240	int r;
2241
2242	r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
2243	if (r)
2244		return false;
2245
2246	return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
2247}
2248EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
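/*
 * Illustrative sketch (not part of this driver): how a device backend such as
 * drivers/vhost/net.c can combine vhost_vq_avail_empty() with the
 * VHOST_SET_VRING_BUSYLOOP_TIMEOUT value to spin briefly before sleeping.
 * busy_poll_done() is a hypothetical helper standing in for the endtime
 * bookkeeping the real backends do.
 */
#if 0
static bool busy_poll_done(struct vhost_virtqueue *vq);	/* hypothetical */

static void example_busy_poll(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	if (!vq->busyloop_timeout)
		return;

	preempt_disable();
	while (vhost_vq_avail_empty(dev, vq) && !busy_poll_done(vq))
		cpu_relax();
	preempt_enable();
}
#endif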
2249
2250/* OK, now we need to know about added descriptors. */
2251bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2252{
2253	__virtio16 avail_idx;
2254	int r;
2255
2256	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2257		return false;
2258	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2259	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2260		r = vhost_update_used_flags(vq);
2261		if (r) {
2262			vq_err(vq, "Failed to enable notification at %p: %d\n",
2263			       &vq->used->flags, r);
2264			return false;
2265		}
2266	} else {
2267		r = vhost_update_avail_event(vq, vq->avail_idx);
2268		if (r) {
2269			vq_err(vq, "Failed to update avail event index at %p: %d\n",
2270			       vhost_avail_event(vq), r);
2271			return false;
2272		}
2273	}
2274	/* They could have slipped one in as we were doing that: make
2275	 * sure it's written, then check again. */
2276	smp_mb();
2277	r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
2278	if (r) {
2279		vq_err(vq, "Failed to check avail idx at %p: %d\n",
2280		       &vq->avail->idx, r);
2281		return false;
2282	}
2283
2284	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
2285}
2286EXPORT_SYMBOL_GPL(vhost_enable_notify);
2287
2288/* We don't need to be notified again. */
2289void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2290{
2291	int r;
2292
2293	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2294		return;
2295	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2296	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2297		r = vhost_update_used_flags(vq);
2298		if (r)
 2299			vq_err(vq, "Failed to disable notification at %p: %d\n",
2300			       &vq->used->flags, r);
2301	}
2302}
2303EXPORT_SYMBOL_GPL(vhost_disable_notify);
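/*
 * Illustrative sketch (not part of this driver): the canonical consume loop a
 * device backend runs from its handle_kick work, built only from helpers in
 * this file.  process_buf() is a hypothetical per-device step that would
 * return the number of bytes written into the in-buffers.
 */
#if 0
static int process_buf(struct vhost_virtqueue *vq, int head,
		       unsigned int out, unsigned int in);	/* hypothetical */

static void example_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned int out, in;
	int head, len;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;			/* error (or IOTLB miss queued) */
		if (head == vq->num) {		/* ring empty */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				/* Raced with the guest: recheck the ring. */
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		len = process_buf(vq, head, out, in);
		vhost_add_used_and_signal(dev, vq, head, len);
	}

	mutex_unlock(&vq->mutex);
}
#endif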
2304
2305/* Create a new message. */
2306struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2307{
2308	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2309	if (!node)
2310		return NULL;
2311	node->vq = vq;
2312	node->msg.type = type;
2313	return node;
2314}
2315EXPORT_SYMBOL_GPL(vhost_new_msg);
2316
2317void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2318		       struct vhost_msg_node *node)
2319{
2320	spin_lock(&dev->iotlb_lock);
2321	list_add_tail(&node->node, head);
2322	spin_unlock(&dev->iotlb_lock);
2323
2324	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
2325}
2326EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2327
2328struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2329					 struct list_head *head)
2330{
2331	struct vhost_msg_node *node = NULL;
2332
2333	spin_lock(&dev->iotlb_lock);
2334	if (!list_empty(head)) {
2335		node = list_first_entry(head, struct vhost_msg_node,
2336					node);
2337		list_del(&node->node);
2338	}
2339	spin_unlock(&dev->iotlb_lock);
2340
2341	return node;
2342}
2343EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
2344
2345
2346static int __init vhost_init(void)
2347{
2348	return 0;
2349}
2350
2351static void __exit vhost_exit(void)
2352{
2353}
2354
2355module_init(vhost_init);
2356module_exit(vhost_exit);
2357
2358MODULE_VERSION("0.0.1");
2359MODULE_LICENSE("GPL v2");
2360MODULE_AUTHOR("Michael S. Tsirkin");
2361MODULE_DESCRIPTION("Host kernel accelerator for virtio");