drivers/vhost/vhost.c (Linux v5.9)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (C) 2009 Red Hat, Inc.
   3 * Copyright (C) 2006 Rusty Russell IBM Corporation
   4 *
   5 * Author: Michael S. Tsirkin <mst@redhat.com>
   6 *
   7 * Inspiration, some code, and most witty comments come from
   8 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   9 *
  10 * Generic code for virtio server in host kernel.
  11 */
  12
  13#include <linux/eventfd.h>
  14#include <linux/vhost.h>
  15#include <linux/uio.h>
  16#include <linux/mm.h>
  17#include <linux/miscdevice.h>
  18#include <linux/mutex.h>
  19#include <linux/poll.h>
  20#include <linux/file.h>
  21#include <linux/highmem.h>
  22#include <linux/slab.h>
  23#include <linux/vmalloc.h>
  24#include <linux/kthread.h>
  25#include <linux/cgroup.h>
  26#include <linux/module.h>
  27#include <linux/sort.h>
  28#include <linux/sched/mm.h>
  29#include <linux/sched/signal.h>
  30#include <linux/interval_tree_generic.h>
  31#include <linux/nospec.h>
  32#include <linux/kcov.h>
  33
  34#include "vhost.h"
  35
  36static ushort max_mem_regions = 64;
  37module_param(max_mem_regions, ushort, 0444);
  38MODULE_PARM_DESC(max_mem_regions,
  39	"Maximum number of memory regions in memory map. (default: 64)");
  40static int max_iotlb_entries = 2048;
  41module_param(max_iotlb_entries, int, 0444);
  42MODULE_PARM_DESC(max_iotlb_entries,
  43	"Maximum number of iotlb entries. (default: 2048)");
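/* Both parameters are read-only once loaded (permission 0444), so they can
 * only be changed at module load time. A hypothetical override (values are
 * illustrative, assuming the vhost core is built as the "vhost" module):
 *
 *	modprobe vhost max_mem_regions=128 max_iotlb_entries=4096
 */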
  44
  45enum {
  46	VHOST_MEMORY_F_LOG = 0x1,
  47};
  48
  49#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
  50#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
  51
  52#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
  53static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
  54{
  55	vq->user_be = !virtio_legacy_is_little_endian();
  56}
  57
  58static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
  59{
  60	vq->user_be = true;
  61}
  62
  63static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
  64{
  65	vq->user_be = false;
  66}
  67
  68static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
  69{
  70	struct vhost_vring_state s;
  71
  72	if (vq->private_data)
  73		return -EBUSY;
  74
  75	if (copy_from_user(&s, argp, sizeof(s)))
  76		return -EFAULT;
  77
  78	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
  79	    s.num != VHOST_VRING_BIG_ENDIAN)
  80		return -EINVAL;
  81
  82	if (s.num == VHOST_VRING_BIG_ENDIAN)
  83		vhost_enable_cross_endian_big(vq);
  84	else
  85		vhost_enable_cross_endian_little(vq);
  86
  87	return 0;
  88}
  89
  90static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
  91				   int __user *argp)
  92{
  93	struct vhost_vring_state s = {
  94		.index = idx,
  95		.num = vq->user_be
  96	};
  97
  98	if (copy_to_user(argp, &s, sizeof(s)))
  99		return -EFAULT;
 100
 101	return 0;
 102}
 103
 104static void vhost_init_is_le(struct vhost_virtqueue *vq)
 105{
 106	/* Note for legacy virtio: user_be is initialized at reset time
 107	 * according to the host endianness. If userspace does not set an
 108	 * explicit endianness, the default behavior is native endian, as
 109	 * expected by legacy virtio.
 110	 */
 111	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
 112}
 113#else
 114static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
 115{
 116}
 117
 118static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
 119{
 120	return -ENOIOCTLCMD;
 121}
 122
 123static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 124				   int __user *argp)
 125{
 126	return -ENOIOCTLCMD;
 127}
 128
 129static void vhost_init_is_le(struct vhost_virtqueue *vq)
 130{
 131	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
 132		|| virtio_legacy_is_little_endian();
 133}
 134#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
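/* Summary of the two configurations above: with CONFIG_VHOST_CROSS_ENDIAN_LEGACY,
 * userspace may declare a legacy (pre-VIRTIO 1.0) guest's ring endianness per
 * virtqueue through the VHOST_SET_VRING_ENDIAN ioctl, as long as no backend is
 * attached yet; without it, legacy rings are assumed to be native-endian and
 * vq->is_le simply follows the host byte order (or VIRTIO_F_VERSION_1).
 */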
 135
 136static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 137{
 138	vhost_init_is_le(vq);
 139}
 140
 141struct vhost_flush_struct {
 142	struct vhost_work work;
 143	struct completion wait_event;
 144};
 145
 146static void vhost_flush_work(struct vhost_work *work)
 147{
 148	struct vhost_flush_struct *s;
 149
 150	s = container_of(work, struct vhost_flush_struct, work);
 151	complete(&s->wait_event);
 152}
 153
 154static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 155			    poll_table *pt)
 156{
 157	struct vhost_poll *poll;
 158
 159	poll = container_of(pt, struct vhost_poll, table);
 160	poll->wqh = wqh;
 161	add_wait_queue(wqh, &poll->wait);
 162}
 163
 164static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
 165			     void *key)
 166{
 167	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
 168	struct vhost_work *work = &poll->work;
 169
 170	if (!(key_to_poll(key) & poll->mask))
 171		return 0;
 172
 173	if (!poll->dev->use_worker)
 174		work->fn(work);
 175	else
 176		vhost_poll_queue(poll);
 177
 178	return 0;
 179}
 180
 181void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 182{
 183	clear_bit(VHOST_WORK_QUEUED, &work->flags);
 184	work->fn = fn;
 185}
 186EXPORT_SYMBOL_GPL(vhost_work_init);
 187
 188/* Init poll structure */
 189void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 190		     __poll_t mask, struct vhost_dev *dev)
 191{
 192	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 193	init_poll_funcptr(&poll->table, vhost_poll_func);
 194	poll->mask = mask;
 195	poll->dev = dev;
 196	poll->wqh = NULL;
 197
 198	vhost_work_init(&poll->work, fn);
 199}
 200EXPORT_SYMBOL_GPL(vhost_poll_init);
 201
  202/* Start polling a file. We add ourselves to the file's wait queue. The caller
  203 * must keep a reference to the file until after vhost_poll_stop is called. */
 204int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 205{
 206	__poll_t mask;
 207
 208	if (poll->wqh)
 209		return 0;
 210
 211	mask = vfs_poll(file, &poll->table);
 212	if (mask)
 213		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
 214	if (mask & EPOLLERR) {
 215		vhost_poll_stop(poll);
 216		return -EINVAL;
 217	}
 218
 219	return 0;
 220}
 221EXPORT_SYMBOL_GPL(vhost_poll_start);
 222
 223/* Stop polling a file. After this function returns, it becomes safe to drop the
 224 * file reference. You must also flush afterwards. */
 225void vhost_poll_stop(struct vhost_poll *poll)
 226{
 227	if (poll->wqh) {
 228		remove_wait_queue(poll->wqh, &poll->wait);
 229		poll->wqh = NULL;
 230	}
 231}
 232EXPORT_SYMBOL_GPL(vhost_poll_stop);
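/* Typical poll lifecycle in this file: vhost_poll_init() is called once per
 * virtqueue from vhost_dev_init() (for vqs that have a handle_kick callback),
 * vhost_poll_start() when userspace installs a kick eventfd via
 * VHOST_SET_VRING_KICK, and vhost_poll_stop() followed by vhost_poll_flush()
 * on teardown (see vhost_dev_stop() and vhost_vring_ioctl() below).
 */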
 233
 234void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 235{
 236	struct vhost_flush_struct flush;
 237
 238	if (dev->worker) {
 239		init_completion(&flush.wait_event);
 240		vhost_work_init(&flush.work, vhost_flush_work);
 241
 242		vhost_work_queue(dev, &flush.work);
 243		wait_for_completion(&flush.wait_event);
 244	}
 245}
 246EXPORT_SYMBOL_GPL(vhost_work_flush);
 247
 248/* Flush any work that has been scheduled. When calling this, don't hold any
 249 * locks that are also used by the callback. */
 250void vhost_poll_flush(struct vhost_poll *poll)
 251{
 252	vhost_work_flush(poll->dev, &poll->work);
 253}
 254EXPORT_SYMBOL_GPL(vhost_poll_flush);
 255
 256void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 257{
 258	if (!dev->worker)
 259		return;
 260
 261	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
 262		/* We can only add the work to the list after we're
 263		 * sure it was not in the list.
 264		 * test_and_set_bit() implies a memory barrier.
 265		 */
 266		llist_add(&work->node, &dev->work_list);
 267		wake_up_process(dev->worker);
 268	}
 269}
 270EXPORT_SYMBOL_GPL(vhost_work_queue);
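/* Usage sketch for the work API (mirrors vhost_attach_cgroups() below).
 * "my_fn" is a caller-supplied vhost_work_fn_t, shown only for illustration:
 *
 *	struct vhost_work w;
 *
 *	vhost_work_init(&w, my_fn);
 *	vhost_work_queue(dev, &w);	// my_fn will run on dev->worker
 *	vhost_work_flush(dev, &w);	// returns once queued work has completed
 */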
 271
 272/* A lockless hint for busy polling code to exit the loop */
 273bool vhost_has_work(struct vhost_dev *dev)
 274{
 275	return !llist_empty(&dev->work_list);
 276}
 277EXPORT_SYMBOL_GPL(vhost_has_work);
 278
 279void vhost_poll_queue(struct vhost_poll *poll)
 280{
 281	vhost_work_queue(poll->dev, &poll->work);
 282}
 283EXPORT_SYMBOL_GPL(vhost_poll_queue);
 284
 285static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
 286{
 287	int j;
 288
 289	for (j = 0; j < VHOST_NUM_ADDRS; j++)
 290		vq->meta_iotlb[j] = NULL;
 291}
 292
 293static void vhost_vq_meta_reset(struct vhost_dev *d)
 294{
 295	int i;
 296
 297	for (i = 0; i < d->nvqs; ++i)
 298		__vhost_vq_meta_reset(d->vqs[i]);
 299}
 300
 301static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
 302{
 303	call_ctx->ctx = NULL;
 304	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
 305	spin_lock_init(&call_ctx->ctx_lock);
 306}
 307
 308static void vhost_vq_reset(struct vhost_dev *dev,
 309			   struct vhost_virtqueue *vq)
 310{
 311	vq->num = 1;
 312	vq->desc = NULL;
 313	vq->avail = NULL;
 314	vq->used = NULL;
 315	vq->last_avail_idx = 0;
 316	vq->avail_idx = 0;
 317	vq->last_used_idx = 0;
 318	vq->signalled_used = 0;
 319	vq->signalled_used_valid = false;
 320	vq->used_flags = 0;
 321	vq->log_used = false;
 322	vq->log_addr = -1ull;
 323	vq->private_data = NULL;
 324	vq->acked_features = 0;
 325	vq->acked_backend_features = 0;
 326	vq->log_base = NULL;
 327	vq->error_ctx = NULL;
 328	vq->kick = NULL;
 329	vq->log_ctx = NULL;
 330	vhost_reset_is_le(vq);
 331	vhost_disable_cross_endian(vq);
 332	vq->busyloop_timeout = 0;
 333	vq->umem = NULL;
 334	vq->iotlb = NULL;
 335	vhost_vring_call_reset(&vq->call_ctx);
 336	__vhost_vq_meta_reset(vq);
 337}
 338
 339static int vhost_worker(void *data)
 340{
 341	struct vhost_dev *dev = data;
 342	struct vhost_work *work, *work_next;
 343	struct llist_node *node;
 344
 345	kthread_use_mm(dev->mm);
 346
 347	for (;;) {
 348		/* mb paired w/ kthread_stop */
 349		set_current_state(TASK_INTERRUPTIBLE);
 350
 351		if (kthread_should_stop()) {
 352			__set_current_state(TASK_RUNNING);
 353			break;
 354		}
 355
 356		node = llist_del_all(&dev->work_list);
 357		if (!node)
 358			schedule();
 359
 360		node = llist_reverse_order(node);
 361		/* make sure flag is seen after deletion */
 362		smp_wmb();
 363		llist_for_each_entry_safe(work, work_next, node, node) {
 364			clear_bit(VHOST_WORK_QUEUED, &work->flags);
 365			__set_current_state(TASK_RUNNING);
 366			kcov_remote_start_common(dev->kcov_handle);
 367			work->fn(work);
 368			kcov_remote_stop();
 369			if (need_resched())
 370				schedule();
 371		}
 372	}
 373	kthread_unuse_mm(dev->mm);
 374	return 0;
 375}
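/* Note on ordering: vhost_work_queue() pushes new entries onto the head of
 * dev->work_list with llist_add(), so vhost_worker() reverses the detached
 * list with llist_reverse_order() before running it, preserving FIFO
 * execution of queued work items.
 */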
 376
 377static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 378{
 379	kfree(vq->indirect);
 380	vq->indirect = NULL;
 381	kfree(vq->log);
 382	vq->log = NULL;
 383	kfree(vq->heads);
 384	vq->heads = NULL;
 385}
 386
 387/* Helper to allocate iovec buffers for all vqs. */
 388static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 389{
 390	struct vhost_virtqueue *vq;
 391	int i;
 392
 393	for (i = 0; i < dev->nvqs; ++i) {
 394		vq = dev->vqs[i];
 395		vq->indirect = kmalloc_array(UIO_MAXIOV,
 396					     sizeof(*vq->indirect),
 397					     GFP_KERNEL);
 398		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
 399					GFP_KERNEL);
 400		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
 401					  GFP_KERNEL);
 402		if (!vq->indirect || !vq->log || !vq->heads)
 403			goto err_nomem;
 404	}
 405	return 0;
 406
 407err_nomem:
 408	for (; i >= 0; --i)
 409		vhost_vq_free_iovecs(dev->vqs[i]);
 410	return -ENOMEM;
 411}
 412
 413static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 414{
 415	int i;
 416
 417	for (i = 0; i < dev->nvqs; ++i)
 418		vhost_vq_free_iovecs(dev->vqs[i]);
 419}
 420
 421bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
 422			  int pkts, int total_len)
 423{
 424	struct vhost_dev *dev = vq->dev;
 425
 426	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
 427	    pkts >= dev->weight) {
 428		vhost_poll_queue(&vq->poll);
 429		return true;
 430	}
 431
 432	return false;
 433}
 434EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
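/* Backends call vhost_exceeds_weight() from their request-handling loops.
 * Once the per-kick packet or byte budget is spent, the virtqueue is simply
 * re-queued on the worker (vhost_poll_queue()) and the handler returns, so
 * one busy ring cannot starve the other virtqueues served by the same
 * worker thread.
 */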
 435
 436static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
 437				   unsigned int num)
 438{
 439	size_t event __maybe_unused =
 440	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 441
 442	return sizeof(*vq->avail) +
 443	       sizeof(*vq->avail->ring) * num + event;
 444}
 445
 446static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
 447				  unsigned int num)
 448{
 449	size_t event __maybe_unused =
 450	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 451
 452	return sizeof(*vq->used) +
 453	       sizeof(*vq->used->ring) * num + event;
 454}
 455
 456static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
 457				  unsigned int num)
 458{
 459	return sizeof(*vq->desc) * num;
 460}
 461
 462void vhost_dev_init(struct vhost_dev *dev,
 463		    struct vhost_virtqueue **vqs, int nvqs,
 464		    int iov_limit, int weight, int byte_weight,
 465		    bool use_worker,
 466		    int (*msg_handler)(struct vhost_dev *dev,
 467				       struct vhost_iotlb_msg *msg))
 468{
 469	struct vhost_virtqueue *vq;
 470	int i;
 471
 472	dev->vqs = vqs;
 473	dev->nvqs = nvqs;
 474	mutex_init(&dev->mutex);
 475	dev->log_ctx = NULL;
 476	dev->umem = NULL;
 477	dev->iotlb = NULL;
 478	dev->mm = NULL;
 479	dev->worker = NULL;
 480	dev->iov_limit = iov_limit;
 481	dev->weight = weight;
 482	dev->byte_weight = byte_weight;
 483	dev->use_worker = use_worker;
 484	dev->msg_handler = msg_handler;
 485	init_llist_head(&dev->work_list);
 486	init_waitqueue_head(&dev->wait);
 487	INIT_LIST_HEAD(&dev->read_list);
 488	INIT_LIST_HEAD(&dev->pending_list);
 489	spin_lock_init(&dev->iotlb_lock);
 490
 491
 492	for (i = 0; i < dev->nvqs; ++i) {
 493		vq = dev->vqs[i];
 494		vq->log = NULL;
 495		vq->indirect = NULL;
 496		vq->heads = NULL;
 497		vq->dev = dev;
 498		mutex_init(&vq->mutex);
 499		vhost_vq_reset(dev, vq);
 500		if (vq->handle_kick)
 501			vhost_poll_init(&vq->poll, vq->handle_kick,
 502					EPOLLIN, dev);
 503	}
 504}
 505EXPORT_SYMBOL_GPL(vhost_dev_init);
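/* A backend (e.g. vhost-net, vhost-scsi, vhost-vsock) typically calls
 * vhost_dev_init() from its open() handler. No worker thread or iovec arrays
 * exist yet at this point; both are set up later, when userspace issues
 * VHOST_SET_OWNER and vhost_dev_set_owner() below runs.
 */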
 506
 507/* Caller should have device mutex */
 508long vhost_dev_check_owner(struct vhost_dev *dev)
 509{
 510	/* Are you the owner? If not, I don't think you mean to do that */
 511	return dev->mm == current->mm ? 0 : -EPERM;
 512}
 513EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 514
 515struct vhost_attach_cgroups_struct {
 516	struct vhost_work work;
 517	struct task_struct *owner;
 518	int ret;
 519};
 520
 521static void vhost_attach_cgroups_work(struct vhost_work *work)
 522{
 523	struct vhost_attach_cgroups_struct *s;
 524
 525	s = container_of(work, struct vhost_attach_cgroups_struct, work);
 526	s->ret = cgroup_attach_task_all(s->owner, current);
 527}
 528
 529static int vhost_attach_cgroups(struct vhost_dev *dev)
 530{
 531	struct vhost_attach_cgroups_struct attach;
 532
 533	attach.owner = current;
 534	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 535	vhost_work_queue(dev, &attach.work);
 536	vhost_work_flush(dev, &attach.work);
 537	return attach.ret;
 538}
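/* vhost_attach_cgroups() queues the work above and waits for it, so
 * cgroup_attach_task_all() executes in the worker thread's own context and
 * moves the freshly created kthread into the cgroups of the process that
 * issued VHOST_SET_OWNER.
 */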
 539
 540/* Caller should have device mutex */
 541bool vhost_dev_has_owner(struct vhost_dev *dev)
 542{
 543	return dev->mm;
 544}
 545EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 546
 547static void vhost_attach_mm(struct vhost_dev *dev)
 548{
 549	/* No owner, become one */
 550	if (dev->use_worker) {
 551		dev->mm = get_task_mm(current);
 552	} else {
  553		/* vDPA devices do not use a worker thread, so there is
  554		 * no need to hold the address space for the mm. This helps
  555		 * to avoid a deadlock in the case of mmap(), which may
  556		 * hold a refcount on the file and depend on the release
  557		 * method to remove the vma.
  558		 */
 559		dev->mm = current->mm;
 560		mmgrab(dev->mm);
 561	}
 562}
 563
 564static void vhost_detach_mm(struct vhost_dev *dev)
 565{
 566	if (!dev->mm)
 567		return;
 568
 569	if (dev->use_worker)
 570		mmput(dev->mm);
 571	else
 572		mmdrop(dev->mm);
 573
 574	dev->mm = NULL;
 575}
 576
 577/* Caller should have device mutex */
 578long vhost_dev_set_owner(struct vhost_dev *dev)
 579{
 580	struct task_struct *worker;
 581	int err;
 582
 583	/* Is there an owner already? */
 584	if (vhost_dev_has_owner(dev)) {
 585		err = -EBUSY;
 586		goto err_mm;
 587	}
 588
 589	vhost_attach_mm(dev);
 590
 591	dev->kcov_handle = kcov_common_handle();
 592	if (dev->use_worker) {
 593		worker = kthread_create(vhost_worker, dev,
 594					"vhost-%d", current->pid);
 595		if (IS_ERR(worker)) {
 596			err = PTR_ERR(worker);
 597			goto err_worker;
 598		}
 599
 600		dev->worker = worker;
 601		wake_up_process(worker); /* avoid contributing to loadavg */
 602
 603		err = vhost_attach_cgroups(dev);
 604		if (err)
 605			goto err_cgroup;
 606	}
 607
 608	err = vhost_dev_alloc_iovecs(dev);
 609	if (err)
 610		goto err_cgroup;
 611
 612	return 0;
 613err_cgroup:
 614	if (dev->worker) {
 615		kthread_stop(dev->worker);
 616		dev->worker = NULL;
 617	}
 618err_worker:
 619	vhost_detach_mm(dev);
 620	dev->kcov_handle = 0;
 621err_mm:
 622	return err;
 623}
 624EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 625
 626static struct vhost_iotlb *iotlb_alloc(void)
 627{
 628	return vhost_iotlb_alloc(max_iotlb_entries,
 629				 VHOST_IOTLB_FLAG_RETIRE);
 630}
 631
 632struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
 633{
 634	return iotlb_alloc();
 635}
 636EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 637
 638/* Caller should have device mutex */
 639void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
 640{
 641	int i;
 642
 643	vhost_dev_cleanup(dev);
 644
 645	dev->umem = umem;
 646	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
 647	 * VQs aren't running.
 648	 */
 649	for (i = 0; i < dev->nvqs; ++i)
 650		dev->vqs[i]->umem = umem;
 651}
 652EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 653
 654void vhost_dev_stop(struct vhost_dev *dev)
 655{
 656	int i;
 657
 658	for (i = 0; i < dev->nvqs; ++i) {
 659		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 660			vhost_poll_stop(&dev->vqs[i]->poll);
 661			vhost_poll_flush(&dev->vqs[i]->poll);
 662		}
 663	}
 664}
 665EXPORT_SYMBOL_GPL(vhost_dev_stop);
 666
 667static void vhost_clear_msg(struct vhost_dev *dev)
 668{
 669	struct vhost_msg_node *node, *n;
 670
 671	spin_lock(&dev->iotlb_lock);
 672
 673	list_for_each_entry_safe(node, n, &dev->read_list, node) {
 674		list_del(&node->node);
 675		kfree(node);
 676	}
 677
 678	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
 679		list_del(&node->node);
 680		kfree(node);
 681	}
 682
 683	spin_unlock(&dev->iotlb_lock);
 684}
 685
 686void vhost_dev_cleanup(struct vhost_dev *dev)
 687{
 688	int i;
 689
 690	for (i = 0; i < dev->nvqs; ++i) {
 691		if (dev->vqs[i]->error_ctx)
 692			eventfd_ctx_put(dev->vqs[i]->error_ctx);
 693		if (dev->vqs[i]->kick)
 694			fput(dev->vqs[i]->kick);
 695		if (dev->vqs[i]->call_ctx.ctx)
 696			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
 697		vhost_vq_reset(dev, dev->vqs[i]);
 698	}
 699	vhost_dev_free_iovecs(dev);
 700	if (dev->log_ctx)
 701		eventfd_ctx_put(dev->log_ctx);
 702	dev->log_ctx = NULL;
 703	/* No one will access memory at this point */
 704	vhost_iotlb_free(dev->umem);
 705	dev->umem = NULL;
 706	vhost_iotlb_free(dev->iotlb);
 707	dev->iotlb = NULL;
 708	vhost_clear_msg(dev);
 709	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
 710	WARN_ON(!llist_empty(&dev->work_list));
 711	if (dev->worker) {
 712		kthread_stop(dev->worker);
 713		dev->worker = NULL;
 714		dev->kcov_handle = 0;
 715	}
 716	vhost_detach_mm(dev);
 717}
 718EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 719
 720static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 721{
 722	u64 a = addr / VHOST_PAGE_SIZE / 8;
 723
 724	/* Make sure 64 bit math will not overflow. */
 725	if (a > ULONG_MAX - (unsigned long)log_base ||
 726	    a + (unsigned long)log_base > ULONG_MAX)
 727		return false;
 728
 729	return access_ok(log_base + a,
 730			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 731}
 732
 733static bool vhost_overflow(u64 uaddr, u64 size)
 734{
 735	/* Make sure 64 bit math will not overflow. */
 736	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
 737}
 738
 739/* Caller should have vq mutex and device mutex. */
 740static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
 741				int log_all)
 742{
 743	struct vhost_iotlb_map *map;
 744
 745	if (!umem)
 746		return false;
 747
 748	list_for_each_entry(map, &umem->list, link) {
 749		unsigned long a = map->addr;
 750
 751		if (vhost_overflow(map->addr, map->size))
 752			return false;
 753
 754
 755		if (!access_ok((void __user *)a, map->size))
 756			return false;
 757		else if (log_all && !log_access_ok(log_base,
 758						   map->start,
 759						   map->size))
 760			return false;
 761	}
 762	return true;
 763}
 764
 765static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
 766					       u64 addr, unsigned int size,
 767					       int type)
 768{
 769	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
 770
 771	if (!map)
 772		return NULL;
 773
 774	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
 775}
 776
 777/* Can we switch to this memory table? */
 778/* Caller should have device mutex but not vq mutex */
 779static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
 780			     int log_all)
 781{
 782	int i;
 783
 784	for (i = 0; i < d->nvqs; ++i) {
 785		bool ok;
 786		bool log;
 787
 788		mutex_lock(&d->vqs[i]->mutex);
 789		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
 790		/* If ring is inactive, will check when it's enabled. */
 791		if (d->vqs[i]->private_data)
 792			ok = vq_memory_access_ok(d->vqs[i]->log_base,
 793						 umem, log);
 794		else
 795			ok = true;
 796		mutex_unlock(&d->vqs[i]->mutex);
 797		if (!ok)
 798			return false;
 799	}
 800	return true;
 801}
 802
 803static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 804			  struct iovec iov[], int iov_size, int access);
 805
 806static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
 807			      const void *from, unsigned size)
 808{
 809	int ret;
 810
 811	if (!vq->iotlb)
 812		return __copy_to_user(to, from, size);
 813	else {
  814		/* This function should be called after iotlb
  815		 * prefetch, which means we're sure that all of the
  816		 * vq metadata can be accessed through the iotlb, so
  817		 * -EAGAIN should not happen in this case.
  818		 */
 819		struct iov_iter t;
 820		void __user *uaddr = vhost_vq_meta_fetch(vq,
 821				     (u64)(uintptr_t)to, size,
 822				     VHOST_ADDR_USED);
 823
 824		if (uaddr)
 825			return __copy_to_user(uaddr, from, size);
 826
 827		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
 828				     ARRAY_SIZE(vq->iotlb_iov),
 829				     VHOST_ACCESS_WO);
 830		if (ret < 0)
 831			goto out;
 832		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
 833		ret = copy_to_iter(from, size, &t);
 834		if (ret == size)
 835			ret = 0;
 836	}
 837out:
 838	return ret;
 839}
 840
 841static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
 842				void __user *from, unsigned size)
 843{
 844	int ret;
 845
 846	if (!vq->iotlb)
 847		return __copy_from_user(to, from, size);
 848	else {
  849		/* This function should be called after iotlb
  850		 * prefetch, which means we're sure that the vq
  851		 * metadata can be accessed through the iotlb, so
  852		 * -EAGAIN should not happen in this case.
  853		 */
 854		void __user *uaddr = vhost_vq_meta_fetch(vq,
 855				     (u64)(uintptr_t)from, size,
 856				     VHOST_ADDR_DESC);
 857		struct iov_iter f;
 858
 859		if (uaddr)
 860			return __copy_from_user(to, uaddr, size);
 861
 862		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
 863				     ARRAY_SIZE(vq->iotlb_iov),
 864				     VHOST_ACCESS_RO);
 865		if (ret < 0) {
 866			vq_err(vq, "IOTLB translation failure: uaddr "
 867			       "%p size 0x%llx\n", from,
 868			       (unsigned long long) size);
 869			goto out;
 870		}
 871		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
 872		ret = copy_from_iter(to, size, &f);
 873		if (ret == size)
 874			ret = 0;
 875	}
 876
 877out:
 878	return ret;
 879}
 880
 881static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
 882					  void __user *addr, unsigned int size,
 883					  int type)
 884{
 885	int ret;
 886
 887	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
 888			     ARRAY_SIZE(vq->iotlb_iov),
 889			     VHOST_ACCESS_RO);
 890	if (ret < 0) {
 891		vq_err(vq, "IOTLB translation failure: uaddr "
 892			"%p size 0x%llx\n", addr,
 893			(unsigned long long) size);
 894		return NULL;
 895	}
 896
 897	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
 898		vq_err(vq, "Non atomic userspace memory access: uaddr "
 899			"%p size 0x%llx\n", addr,
 900			(unsigned long long) size);
 901		return NULL;
 902	}
 903
 904	return vq->iotlb_iov[0].iov_base;
 905}
 906
  907/* This function should be called after iotlb
  908 * prefetch, which means we're sure that the vq
  909 * metadata can be accessed through the iotlb, so
  910 * -EAGAIN should not happen in this case.
  911 */
 912static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 913					    void __user *addr, unsigned int size,
 914					    int type)
 915{
 916	void __user *uaddr = vhost_vq_meta_fetch(vq,
 917			     (u64)(uintptr_t)addr, size, type);
 918	if (uaddr)
 919		return uaddr;
 920
 921	return __vhost_get_user_slow(vq, addr, size, type);
 922}
 923
 924#define vhost_put_user(vq, x, ptr)		\
 925({ \
 926	int ret; \
 927	if (!vq->iotlb) { \
 928		ret = __put_user(x, ptr); \
 929	} else { \
 930		__typeof__(ptr) to = \
 931			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
 932					  sizeof(*ptr), VHOST_ADDR_USED); \
 933		if (to != NULL) \
 934			ret = __put_user(x, to); \
 935		else \
 936			ret = -EFAULT;	\
 937	} \
 938	ret; \
 939})
 940
 941static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
 942{
 943	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
 944			      vhost_avail_event(vq));
 945}
 946
 947static inline int vhost_put_used(struct vhost_virtqueue *vq,
 948				 struct vring_used_elem *head, int idx,
 949				 int count)
 950{
 951	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
 952				  count * sizeof(*head));
 953}
 954
 955static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
 956
 957{
 958	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
 959			      &vq->used->flags);
 960}
 961
 962static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
 963
 964{
 965	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
 966			      &vq->used->idx);
 967}
 968
 969#define vhost_get_user(vq, x, ptr, type)		\
 970({ \
 971	int ret; \
 972	if (!vq->iotlb) { \
 973		ret = __get_user(x, ptr); \
 974	} else { \
 975		__typeof__(ptr) from = \
 976			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
 977							   sizeof(*ptr), \
 978							   type); \
 979		if (from != NULL) \
 980			ret = __get_user(x, from); \
 981		else \
 982			ret = -EFAULT; \
 983	} \
 984	ret; \
 985})
 986
 987#define vhost_get_avail(vq, x, ptr) \
 988	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
 989
 990#define vhost_get_used(vq, x, ptr) \
 991	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
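/* Fast and slow paths for the accessors above: without an IOTLB they are
 * plain __get_user()/__put_user() on the user mapping. With an IOTLB,
 * __vhost_get_user() first consults the per-vq meta_iotlb cache filled by
 * vq_meta_prefetch(); only on a cache miss does it fall back to a full
 * translate_desc() walk in __vhost_get_user_slow().
 */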
 992
 993static void vhost_dev_lock_vqs(struct vhost_dev *d)
 994{
 995	int i = 0;
 996	for (i = 0; i < d->nvqs; ++i)
 997		mutex_lock_nested(&d->vqs[i]->mutex, i);
 998}
 999
1000static void vhost_dev_unlock_vqs(struct vhost_dev *d)
1001{
1002	int i = 0;
1003	for (i = 0; i < d->nvqs; ++i)
1004		mutex_unlock(&d->vqs[i]->mutex);
1005}
1006
1007static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
1008				      __virtio16 *idx)
1009{
1010	return vhost_get_avail(vq, *idx, &vq->avail->idx);
1011}
1012
1013static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1014				       __virtio16 *head, int idx)
1015{
1016	return vhost_get_avail(vq, *head,
1017			       &vq->avail->ring[idx & (vq->num - 1)]);
1018}
1019
1020static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
1021					__virtio16 *flags)
1022{
1023	return vhost_get_avail(vq, *flags, &vq->avail->flags);
1024}
1025
1026static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
1027				       __virtio16 *event)
1028{
1029	return vhost_get_avail(vq, *event, vhost_used_event(vq));
1030}
1031
1032static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
1033				     __virtio16 *idx)
1034{
1035	return vhost_get_used(vq, *idx, &vq->used->idx);
1036}
1037
1038static inline int vhost_get_desc(struct vhost_virtqueue *vq,
1039				 struct vring_desc *desc, int idx)
1040{
1041	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
1042}
1043
1044static void vhost_iotlb_notify_vq(struct vhost_dev *d,
1045				  struct vhost_iotlb_msg *msg)
1046{
1047	struct vhost_msg_node *node, *n;
1048
1049	spin_lock(&d->iotlb_lock);
1050
1051	list_for_each_entry_safe(node, n, &d->pending_list, node) {
1052		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
1053		if (msg->iova <= vq_msg->iova &&
1054		    msg->iova + msg->size - 1 >= vq_msg->iova &&
1055		    vq_msg->type == VHOST_IOTLB_MISS) {
1056			vhost_poll_queue(&node->vq->poll);
1057			list_del(&node->node);
1058			kfree(node);
1059		}
1060	}
1061
1062	spin_unlock(&d->iotlb_lock);
1063}
1064
1065static bool umem_access_ok(u64 uaddr, u64 size, int access)
1066{
1067	unsigned long a = uaddr;
1068
1069	/* Make sure 64 bit math will not overflow. */
1070	if (vhost_overflow(uaddr, size))
1071		return false;
1072
1073	if ((access & VHOST_ACCESS_RO) &&
1074	    !access_ok((void __user *)a, size))
1075		return false;
1076	if ((access & VHOST_ACCESS_WO) &&
1077	    !access_ok((void __user *)a, size))
1078		return false;
1079	return true;
1080}
1081
1082static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1083				   struct vhost_iotlb_msg *msg)
1084{
1085	int ret = 0;
1086
1087	mutex_lock(&dev->mutex);
1088	vhost_dev_lock_vqs(dev);
1089	switch (msg->type) {
1090	case VHOST_IOTLB_UPDATE:
1091		if (!dev->iotlb) {
1092			ret = -EFAULT;
1093			break;
1094		}
1095		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
1096			ret = -EFAULT;
1097			break;
1098		}
1099		vhost_vq_meta_reset(dev);
1100		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
1101					  msg->iova + msg->size - 1,
1102					  msg->uaddr, msg->perm)) {
1103			ret = -ENOMEM;
1104			break;
1105		}
1106		vhost_iotlb_notify_vq(dev, msg);
1107		break;
1108	case VHOST_IOTLB_INVALIDATE:
1109		if (!dev->iotlb) {
1110			ret = -EFAULT;
1111			break;
1112		}
1113		vhost_vq_meta_reset(dev);
1114		vhost_iotlb_del_range(dev->iotlb, msg->iova,
1115				      msg->iova + msg->size - 1);
1116		break;
1117	default:
1118		ret = -EINVAL;
1119		break;
1120	}
1121
1122	vhost_dev_unlock_vqs(dev);
1123	mutex_unlock(&dev->mutex);
1124
1125	return ret;
1126}
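/* Protocol summary: VHOST_IOTLB_UPDATE installs an IOVA -> userspace-VA
 * mapping and, via vhost_iotlb_notify_vq(), re-queues any virtqueue that had
 * reported a miss inside that range; VHOST_IOTLB_INVALIDATE removes mappings.
 * Both paths reset the per-vq meta_iotlb caches so stale translations are
 * never reused.
 */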
1127ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1128			     struct iov_iter *from)
1129{
1130	struct vhost_iotlb_msg msg;
1131	size_t offset;
1132	int type, ret;
1133
1134	ret = copy_from_iter(&type, sizeof(type), from);
1135	if (ret != sizeof(type)) {
1136		ret = -EINVAL;
1137		goto done;
1138	}
1139
1140	switch (type) {
1141	case VHOST_IOTLB_MSG:
 1142		/* There may be a hole after the type field for the V1
 1143		 * message format, so skip it here.
 1144		 */
1145		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
1146		break;
1147	case VHOST_IOTLB_MSG_V2:
1148		offset = sizeof(__u32);
1149		break;
1150	default:
1151		ret = -EINVAL;
1152		goto done;
1153	}
1154
1155	iov_iter_advance(from, offset);
1156	ret = copy_from_iter(&msg, sizeof(msg), from);
1157	if (ret != sizeof(msg)) {
1158		ret = -EINVAL;
1159		goto done;
1160	}
1161
1162	if (dev->msg_handler)
1163		ret = dev->msg_handler(dev, &msg);
1164	else
1165		ret = vhost_process_iotlb_msg(dev, &msg);
1166	if (ret) {
1167		ret = -EFAULT;
1168		goto done;
1169	}
1170
1171	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
1172	      sizeof(struct vhost_msg_v2);
1173done:
1174	return ret;
1175}
1176EXPORT_SYMBOL(vhost_chr_write_iter);
1177
1178__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1179			    poll_table *wait)
1180{
1181	__poll_t mask = 0;
1182
1183	poll_wait(file, &dev->wait, wait);
1184
1185	if (!list_empty(&dev->read_list))
1186		mask |= EPOLLIN | EPOLLRDNORM;
1187
1188	return mask;
1189}
1190EXPORT_SYMBOL(vhost_chr_poll);
1191
1192ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1193			    int noblock)
1194{
1195	DEFINE_WAIT(wait);
1196	struct vhost_msg_node *node;
1197	ssize_t ret = 0;
1198	unsigned size = sizeof(struct vhost_msg);
1199
1200	if (iov_iter_count(to) < size)
1201		return 0;
1202
1203	while (1) {
1204		if (!noblock)
1205			prepare_to_wait(&dev->wait, &wait,
1206					TASK_INTERRUPTIBLE);
1207
1208		node = vhost_dequeue_msg(dev, &dev->read_list);
1209		if (node)
1210			break;
1211		if (noblock) {
1212			ret = -EAGAIN;
1213			break;
1214		}
1215		if (signal_pending(current)) {
1216			ret = -ERESTARTSYS;
1217			break;
1218		}
1219		if (!dev->iotlb) {
1220			ret = -EBADFD;
1221			break;
1222		}
1223
1224		schedule();
1225	}
1226
1227	if (!noblock)
1228		finish_wait(&dev->wait, &wait);
1229
1230	if (node) {
1231		struct vhost_iotlb_msg *msg;
1232		void *start = &node->msg;
1233
1234		switch (node->msg.type) {
1235		case VHOST_IOTLB_MSG:
1236			size = sizeof(node->msg);
1237			msg = &node->msg.iotlb;
1238			break;
1239		case VHOST_IOTLB_MSG_V2:
1240			size = sizeof(node->msg_v2);
1241			msg = &node->msg_v2.iotlb;
1242			break;
1243		default:
1244			BUG();
1245			break;
1246		}
1247
1248		ret = copy_to_iter(start, size, to);
1249		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
1250			kfree(node);
1251			return ret;
1252		}
1253		vhost_enqueue_msg(dev, &dev->pending_list, node);
1254	}
1255
1256	return ret;
1257}
1258EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
1259
1260static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1261{
1262	struct vhost_dev *dev = vq->dev;
1263	struct vhost_msg_node *node;
1264	struct vhost_iotlb_msg *msg;
1265	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
1266
1267	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
1268	if (!node)
1269		return -ENOMEM;
1270
1271	if (v2) {
1272		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
1273		msg = &node->msg_v2.iotlb;
1274	} else {
1275		msg = &node->msg.iotlb;
1276	}
1277
1278	msg->type = VHOST_IOTLB_MISS;
1279	msg->iova = iova;
1280	msg->perm = access;
1281
1282	vhost_enqueue_msg(dev, &dev->read_list, node);
1283
1284	return 0;
1285}
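/* Miss handshake sketch: when translate_desc() returns -EAGAIN, the vq queues
 * a VHOST_IOTLB_MISS message here; userspace reads it through
 * vhost_chr_read_iter(), resolves the address, and writes a
 * VHOST_IOTLB_UPDATE back through vhost_chr_write_iter(), which finally
 * re-queues the stalled virtqueue from vhost_iotlb_notify_vq().
 */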
1286
1287static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1288			 vring_desc_t __user *desc,
1289			 vring_avail_t __user *avail,
1290			 vring_used_t __user *used)
1291
1292{
1293	/* If an IOTLB device is present, the vring addresses are
1294	 * GIOVAs. Access validation occurs at prefetch time. */
1295	if (vq->iotlb)
1296		return true;
1297
1298	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
1299	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
1300	       access_ok(used, vhost_get_used_size(vq, num));
1301}
1302
1303static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1304				 const struct vhost_iotlb_map *map,
1305				 int type)
1306{
1307	int access = (type == VHOST_ADDR_USED) ?
1308		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;
1309
1310	if (likely(map->perm & access))
1311		vq->meta_iotlb[type] = map;
1312}
1313
1314static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1315			    int access, u64 addr, u64 len, int type)
1316{
1317	const struct vhost_iotlb_map *map;
1318	struct vhost_iotlb *umem = vq->iotlb;
1319	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
1320
1321	if (vhost_vq_meta_fetch(vq, addr, len, type))
1322		return true;
1323
1324	while (len > s) {
1325		map = vhost_iotlb_itree_first(umem, addr, last);
1326		if (map == NULL || map->start > addr) {
1327			vhost_iotlb_miss(vq, addr, access);
1328			return false;
1329		} else if (!(map->perm & access)) {
 1330			/* Report the possible access violation by
 1331			 * requesting another translation from userspace.
 1332			 */
1333			return false;
1334		}
1335
1336		size = map->size - addr + map->start;
1337
1338		if (orig_addr == addr && size >= len)
1339			vhost_vq_meta_update(vq, map, type);
1340
1341		s += size;
1342		addr += size;
1343	}
1344
1345	return true;
1346}
1347
1348int vq_meta_prefetch(struct vhost_virtqueue *vq)
1349{
1350	unsigned int num = vq->num;
1351
1352	if (!vq->iotlb)
1353		return 1;
1354
1355	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
1356			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
1357	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
1358			       vhost_get_avail_size(vq, num),
1359			       VHOST_ADDR_AVAIL) &&
1360	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
1361			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
1362}
1363EXPORT_SYMBOL_GPL(vq_meta_prefetch);
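/* Backends are expected to call vq_meta_prefetch() with the vq mutex held
 * before processing a batch of requests: it validates that the descriptor,
 * avail and used rings are fully covered by the IOTLB and warms the
 * meta_iotlb cache used by the fast-path accessors. A return of 0 means the
 * rings are not (yet) translatable; in the common case a miss message has
 * been queued and the handler should back off until userspace replies.
 */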
1364
1365/* Can we log writes? */
1366/* Caller should have device mutex but not vq mutex */
1367bool vhost_log_access_ok(struct vhost_dev *dev)
1368{
1369	return memory_access_ok(dev, dev->umem, 1);
1370}
1371EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1372
1373static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
1374				  void __user *log_base,
1375				  bool log_used,
1376				  u64 log_addr)
1377{
1378	/* If an IOTLB device is present, log_addr is a GIOVA that
1379	 * will never be logged by log_used(). */
1380	if (vq->iotlb)
1381		return true;
1382
1383	return !log_used || log_access_ok(log_base, log_addr,
1384					  vhost_get_used_size(vq, vq->num));
1385}
1386
1387/* Verify access for write logging. */
1388/* Caller should have vq mutex and device mutex */
1389static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1390			     void __user *log_base)
1391{
1392	return vq_memory_access_ok(log_base, vq->umem,
1393				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1394		vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
1395}
1396
1397/* Can we start vq? */
1398/* Caller should have vq mutex and device mutex */
1399bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
1400{
1401	if (!vq_log_access_ok(vq, vq->log_base))
1402		return false;
1403
1404	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1405}
1406EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1407
1408static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1409{
1410	struct vhost_memory mem, *newmem;
1411	struct vhost_memory_region *region;
1412	struct vhost_iotlb *newumem, *oldumem;
1413	unsigned long size = offsetof(struct vhost_memory, regions);
1414	int i;
1415
1416	if (copy_from_user(&mem, m, size))
1417		return -EFAULT;
1418	if (mem.padding)
1419		return -EOPNOTSUPP;
1420	if (mem.nregions > max_mem_regions)
1421		return -E2BIG;
1422	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1423			GFP_KERNEL);
1424	if (!newmem)
1425		return -ENOMEM;
1426
1427	memcpy(newmem, &mem, size);
1428	if (copy_from_user(newmem->regions, m->regions,
1429			   flex_array_size(newmem, regions, mem.nregions))) {
1430		kvfree(newmem);
1431		return -EFAULT;
1432	}
1433
1434	newumem = iotlb_alloc();
1435	if (!newumem) {
1436		kvfree(newmem);
1437		return -ENOMEM;
1438	}
1439
1440	for (region = newmem->regions;
1441	     region < newmem->regions + mem.nregions;
1442	     region++) {
1443		if (vhost_iotlb_add_range(newumem,
1444					  region->guest_phys_addr,
1445					  region->guest_phys_addr +
1446					  region->memory_size - 1,
1447					  region->userspace_addr,
1448					  VHOST_MAP_RW))
1449			goto err;
1450	}
1451
1452	if (!memory_access_ok(d, newumem, 0))
1453		goto err;
1454
1455	oldumem = d->umem;
1456	d->umem = newumem;
1457
1458	/* All memory accesses are done under some VQ mutex. */
1459	for (i = 0; i < d->nvqs; ++i) {
1460		mutex_lock(&d->vqs[i]->mutex);
1461		d->vqs[i]->umem = newumem;
1462		mutex_unlock(&d->vqs[i]->mutex);
1463	}
1464
1465	kvfree(newmem);
1466	vhost_iotlb_free(oldumem);
1467	return 0;
1468
1469err:
1470	vhost_iotlb_free(newumem);
1471	kvfree(newmem);
1472	return -EFAULT;
1473}
1474
1475static long vhost_vring_set_num(struct vhost_dev *d,
1476				struct vhost_virtqueue *vq,
1477				void __user *argp)
1478{
1479	struct vhost_vring_state s;
1480
1481	/* Resizing ring with an active backend?
1482	 * You don't want to do that. */
1483	if (vq->private_data)
1484		return -EBUSY;
1485
1486	if (copy_from_user(&s, argp, sizeof s))
1487		return -EFAULT;
1488
1489	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
1490		return -EINVAL;
1491	vq->num = s.num;
1492
1493	return 0;
1494}
1495
1496static long vhost_vring_set_addr(struct vhost_dev *d,
1497				 struct vhost_virtqueue *vq,
1498				 void __user *argp)
1499{
1500	struct vhost_vring_addr a;
1501
1502	if (copy_from_user(&a, argp, sizeof a))
1503		return -EFAULT;
1504	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
1505		return -EOPNOTSUPP;
1506
1507	/* For 32bit, verify that the top 32bits of the user
1508	   data are set to zero. */
1509	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1510	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1511	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
1512		return -EFAULT;
1513
1514	/* Make sure it's safe to cast pointers to vring types. */
1515	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1516	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1517	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1518	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1519	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
1520		return -EINVAL;
1521
1522	/* We only verify access here if backend is configured.
1523	 * If it is not, we don't as size might not have been setup.
1524	 * We will verify when backend is configured. */
1525	if (vq->private_data) {
1526		if (!vq_access_ok(vq, vq->num,
1527			(void __user *)(unsigned long)a.desc_user_addr,
1528			(void __user *)(unsigned long)a.avail_user_addr,
1529			(void __user *)(unsigned long)a.used_user_addr))
1530			return -EINVAL;
1531
1532		/* Also validate log access for used ring if enabled. */
1533		if (!vq_log_used_access_ok(vq, vq->log_base,
1534				a.flags & (0x1 << VHOST_VRING_F_LOG),
1535				a.log_guest_addr))
1536			return -EINVAL;
1537	}
1538
1539	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1540	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1541	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1542	vq->log_addr = a.log_guest_addr;
1543	vq->used = (void __user *)(unsigned long)a.used_user_addr;
1544
1545	return 0;
1546}
1547
1548static long vhost_vring_set_num_addr(struct vhost_dev *d,
1549				     struct vhost_virtqueue *vq,
1550				     unsigned int ioctl,
1551				     void __user *argp)
1552{
1553	long r;
1554
1555	mutex_lock(&vq->mutex);
1556
1557	switch (ioctl) {
1558	case VHOST_SET_VRING_NUM:
1559		r = vhost_vring_set_num(d, vq, argp);
1560		break;
1561	case VHOST_SET_VRING_ADDR:
1562		r = vhost_vring_set_addr(d, vq, argp);
1563		break;
1564	default:
1565		BUG();
1566	}
1567
1568	mutex_unlock(&vq->mutex);
1569
1570	return r;
1571}
1572long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1573{
1574	struct file *eventfp, *filep = NULL;
1575	bool pollstart = false, pollstop = false;
1576	struct eventfd_ctx *ctx = NULL;
1577	u32 __user *idxp = argp;
1578	struct vhost_virtqueue *vq;
1579	struct vhost_vring_state s;
1580	struct vhost_vring_file f;
1581	u32 idx;
1582	long r;
1583
1584	r = get_user(idx, idxp);
1585	if (r < 0)
1586		return r;
1587	if (idx >= d->nvqs)
1588		return -ENOBUFS;
1589
1590	idx = array_index_nospec(idx, d->nvqs);
1591	vq = d->vqs[idx];
1592
1593	if (ioctl == VHOST_SET_VRING_NUM ||
1594	    ioctl == VHOST_SET_VRING_ADDR) {
1595		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
1596	}
1597
1598	mutex_lock(&vq->mutex);
1599
1600	switch (ioctl) {
1601	case VHOST_SET_VRING_BASE:
1602		/* Moving base with an active backend?
1603		 * You don't want to do that. */
1604		if (vq->private_data) {
1605			r = -EBUSY;
1606			break;
1607		}
1608		if (copy_from_user(&s, argp, sizeof s)) {
1609			r = -EFAULT;
1610			break;
1611		}
1612		if (s.num > 0xffff) {
1613			r = -EINVAL;
1614			break;
1615		}
1616		vq->last_avail_idx = s.num;
1617		/* Forget the cached index value. */
1618		vq->avail_idx = vq->last_avail_idx;
1619		break;
1620	case VHOST_GET_VRING_BASE:
1621		s.index = idx;
1622		s.num = vq->last_avail_idx;
1623		if (copy_to_user(argp, &s, sizeof s))
1624			r = -EFAULT;
1625		break;
1626	case VHOST_SET_VRING_KICK:
1627		if (copy_from_user(&f, argp, sizeof f)) {
1628			r = -EFAULT;
1629			break;
1630		}
1631		eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
1632		if (IS_ERR(eventfp)) {
1633			r = PTR_ERR(eventfp);
1634			break;
1635		}
1636		if (eventfp != vq->kick) {
1637			pollstop = (filep = vq->kick) != NULL;
1638			pollstart = (vq->kick = eventfp) != NULL;
1639		} else
1640			filep = eventfp;
1641		break;
1642	case VHOST_SET_VRING_CALL:
1643		if (copy_from_user(&f, argp, sizeof f)) {
1644			r = -EFAULT;
1645			break;
1646		}
1647		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
1648		if (IS_ERR(ctx)) {
1649			r = PTR_ERR(ctx);
1650			break;
1651		}
1652
1653		spin_lock(&vq->call_ctx.ctx_lock);
1654		swap(ctx, vq->call_ctx.ctx);
1655		spin_unlock(&vq->call_ctx.ctx_lock);
1656		break;
1657	case VHOST_SET_VRING_ERR:
1658		if (copy_from_user(&f, argp, sizeof f)) {
1659			r = -EFAULT;
1660			break;
1661		}
1662		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
1663		if (IS_ERR(ctx)) {
1664			r = PTR_ERR(ctx);
1665			break;
1666		}
1667		swap(ctx, vq->error_ctx);
1668		break;
1669	case VHOST_SET_VRING_ENDIAN:
1670		r = vhost_set_vring_endian(vq, argp);
1671		break;
1672	case VHOST_GET_VRING_ENDIAN:
1673		r = vhost_get_vring_endian(vq, idx, argp);
1674		break;
1675	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1676		if (copy_from_user(&s, argp, sizeof(s))) {
1677			r = -EFAULT;
1678			break;
1679		}
1680		vq->busyloop_timeout = s.num;
1681		break;
1682	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1683		s.index = idx;
1684		s.num = vq->busyloop_timeout;
1685		if (copy_to_user(argp, &s, sizeof(s)))
1686			r = -EFAULT;
1687		break;
1688	default:
1689		r = -ENOIOCTLCMD;
1690	}
1691
1692	if (pollstop && vq->handle_kick)
1693		vhost_poll_stop(&vq->poll);
1694
1695	if (!IS_ERR_OR_NULL(ctx))
1696		eventfd_ctx_put(ctx);
1697	if (filep)
1698		fput(filep);
1699
1700	if (pollstart && vq->handle_kick)
1701		r = vhost_poll_start(&vq->poll, vq->kick);
1702
1703	mutex_unlock(&vq->mutex);
1704
1705	if (pollstop && vq->handle_kick)
1706		vhost_poll_flush(&vq->poll);
1707	return r;
1708}
1709EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
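/* vhost_vring_ioctl() is normally reached from a backend's ->unlocked_ioctl
 * handler with the device mutex already held. VHOST_SET_VRING_NUM and
 * VHOST_SET_VRING_ADDR are dispatched through vhost_vring_set_num_addr()
 * above; everything else runs under the vq mutex taken here.
 */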
1710
1711int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1712{
1713	struct vhost_iotlb *niotlb, *oiotlb;
1714	int i;
1715
1716	niotlb = iotlb_alloc();
1717	if (!niotlb)
1718		return -ENOMEM;
1719
1720	oiotlb = d->iotlb;
1721	d->iotlb = niotlb;
1722
1723	for (i = 0; i < d->nvqs; ++i) {
1724		struct vhost_virtqueue *vq = d->vqs[i];
1725
1726		mutex_lock(&vq->mutex);
1727		vq->iotlb = niotlb;
1728		__vhost_vq_meta_reset(vq);
1729		mutex_unlock(&vq->mutex);
1730	}
1731
1732	vhost_iotlb_free(oiotlb);
1733
1734	return 0;
1735}
1736EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1737
1738/* Caller must have device mutex */
1739long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1740{
1741	struct eventfd_ctx *ctx;
1742	u64 p;
1743	long r;
1744	int i, fd;
1745
1746	/* If you are not the owner, you can become one */
1747	if (ioctl == VHOST_SET_OWNER) {
1748		r = vhost_dev_set_owner(d);
1749		goto done;
1750	}
1751
1752	/* You must be the owner to do anything else */
1753	r = vhost_dev_check_owner(d);
1754	if (r)
1755		goto done;
1756
1757	switch (ioctl) {
1758	case VHOST_SET_MEM_TABLE:
1759		r = vhost_set_memory(d, argp);
1760		break;
1761	case VHOST_SET_LOG_BASE:
1762		if (copy_from_user(&p, argp, sizeof p)) {
1763			r = -EFAULT;
1764			break;
1765		}
1766		if ((u64)(unsigned long)p != p) {
1767			r = -EFAULT;
1768			break;
1769		}
1770		for (i = 0; i < d->nvqs; ++i) {
1771			struct vhost_virtqueue *vq;
1772			void __user *base = (void __user *)(unsigned long)p;
1773			vq = d->vqs[i];
1774			mutex_lock(&vq->mutex);
1775			/* If ring is inactive, will check when it's enabled. */
1776			if (vq->private_data && !vq_log_access_ok(vq, base))
1777				r = -EFAULT;
1778			else
1779				vq->log_base = base;
1780			mutex_unlock(&vq->mutex);
1781		}
1782		break;
1783	case VHOST_SET_LOG_FD:
1784		r = get_user(fd, (int __user *)argp);
1785		if (r < 0)
1786			break;
1787		ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
1788		if (IS_ERR(ctx)) {
1789			r = PTR_ERR(ctx);
1790			break;
1791		}
1792		swap(ctx, d->log_ctx);
1793		for (i = 0; i < d->nvqs; ++i) {
1794			mutex_lock(&d->vqs[i]->mutex);
1795			d->vqs[i]->log_ctx = d->log_ctx;
1796			mutex_unlock(&d->vqs[i]->mutex);
1797		}
1798		if (ctx)
1799			eventfd_ctx_put(ctx);
1800		break;
1801	default:
1802		r = -ENOIOCTLCMD;
1803		break;
1804	}
1805done:
1806	return r;
1807}
1808EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
1809
1810/* TODO: This is really inefficient.  We need something like get_user()
1811 * (instruction directly accesses the data, with an exception table entry
1812 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
1813 */
1814static int set_bit_to_user(int nr, void __user *addr)
1815{
1816	unsigned long log = (unsigned long)addr;
1817	struct page *page;
1818	void *base;
1819	int bit = nr + (log % PAGE_SIZE) * 8;
1820	int r;
1821
1822	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
1823	if (r < 0)
1824		return r;
1825	BUG_ON(r != 1);
1826	base = kmap_atomic(page);
1827	set_bit(bit, base);
1828	kunmap_atomic(base);
1829	unpin_user_pages_dirty_lock(&page, 1, true);
1830	return 0;
1831}
1832
1833static int log_write(void __user *log_base,
1834		     u64 write_address, u64 write_length)
1835{
1836	u64 write_page = write_address / VHOST_PAGE_SIZE;
1837	int r;
1838
1839	if (!write_length)
1840		return 0;
1841	write_length += write_address % VHOST_PAGE_SIZE;
1842	for (;;) {
1843		u64 base = (u64)(unsigned long)log_base;
1844		u64 log = base + write_page / 8;
1845		int bit = write_page % 8;
1846		if ((u64)(unsigned long)log != log)
1847			return -EFAULT;
1848		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1849		if (r < 0)
1850			return r;
1851		if (write_length <= VHOST_PAGE_SIZE)
1852			break;
1853		write_length -= VHOST_PAGE_SIZE;
1854		write_page += 1;
1855	}
1856	return r;
1857}
1858
1859static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1860{
1861	struct vhost_iotlb *umem = vq->umem;
1862	struct vhost_iotlb_map *u;
1863	u64 start, end, l, min;
1864	int r;
1865	bool hit = false;
1866
1867	while (len) {
1868		min = len;
 1869		/* More than one GPA can be mapped into a single HVA, so
 1870		 * iterate over all possible umem entries here to be safe.
 1871		 */
1872		list_for_each_entry(u, &umem->list, link) {
1873			if (u->addr > hva - 1 + len ||
1874			    u->addr - 1 + u->size < hva)
1875				continue;
1876			start = max(u->addr, hva);
1877			end = min(u->addr - 1 + u->size, hva - 1 + len);
1878			l = end - start + 1;
1879			r = log_write(vq->log_base,
1880				      u->start + start - u->addr,
1881				      l);
1882			if (r < 0)
1883				return r;
1884			hit = true;
1885			min = min(l, min);
1886		}
1887
1888		if (!hit)
1889			return -EFAULT;
1890
1891		len -= min;
1892		hva += min;
1893	}
1894
1895	return 0;
1896}
1897
1898static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1899{
1900	struct iovec iov[64];
1901	int i, ret;
1902
1903	if (!vq->iotlb)
1904		return log_write(vq->log_base, vq->log_addr + used_offset, len);
1905
1906	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1907			     len, iov, 64, VHOST_ACCESS_WO);
1908	if (ret < 0)
1909		return ret;
1910
1911	for (i = 0; i < ret; i++) {
1912		ret = log_write_hva(vq,	(uintptr_t)iov[i].iov_base,
1913				    iov[i].iov_len);
1914		if (ret)
1915			return ret;
1916	}
1917
1918	return 0;
1919}
1920
1921int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1922		    unsigned int log_num, u64 len, struct iovec *iov, int count)
1923{
1924	int i, r;
1925
1926	/* Make sure data written is seen before log. */
1927	smp_wmb();
1928
1929	if (vq->iotlb) {
1930		for (i = 0; i < count; i++) {
1931			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1932					  iov[i].iov_len);
1933			if (r < 0)
1934				return r;
1935		}
1936		return 0;
1937	}
1938
1939	for (i = 0; i < log_num; ++i) {
1940		u64 l = min(log[i].len, len);
1941		r = log_write(vq->log_base, log[i].addr, l);
1942		if (r < 0)
1943			return r;
1944		len -= l;
1945		if (!len) {
1946			if (vq->log_ctx)
1947				eventfd_signal(vq->log_ctx, 1);
1948			return 0;
1949		}
1950	}
1951	/* Length written exceeds what we have stored. This is a bug. */
1952	BUG();
1953	return 0;
1954}
1955EXPORT_SYMBOL_GPL(vhost_log_write);
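/* The dirty log consumed by these helpers is a userspace bitmap with one bit
 * per VHOST_PAGE_SIZE page of guest memory; log_write() sets bits through
 * set_bit_to_user(), while log_write_hva() handles the IOTLB case by first
 * mapping the written host-virtual range back to guest addresses via the
 * umem regions.
 */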
1956
1957static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1958{
1959	void __user *used;
1960	if (vhost_put_used_flags(vq))
1961		return -EFAULT;
1962	if (unlikely(vq->log_used)) {
1963		/* Make sure the flag is seen before log. */
1964		smp_wmb();
1965		/* Log used flag write. */
1966		used = &vq->used->flags;
1967		log_used(vq, (used - (void __user *)vq->used),
1968			 sizeof vq->used->flags);
1969		if (vq->log_ctx)
1970			eventfd_signal(vq->log_ctx, 1);
1971	}
1972	return 0;
1973}
1974
1975static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1976{
1977	if (vhost_put_avail_event(vq))
1978		return -EFAULT;
1979	if (unlikely(vq->log_used)) {
1980		void __user *used;
1981		/* Make sure the event is seen before log. */
1982		smp_wmb();
1983		/* Log avail event write */
1984		used = vhost_avail_event(vq);
1985		log_used(vq, (used - (void __user *)vq->used),
1986			 sizeof *vhost_avail_event(vq));
1987		if (vq->log_ctx)
1988			eventfd_signal(vq->log_ctx, 1);
1989	}
1990	return 0;
1991}
1992
1993int vhost_vq_init_access(struct vhost_virtqueue *vq)
1994{
1995	__virtio16 last_used_idx;
1996	int r;
1997	bool is_le = vq->is_le;
1998
1999	if (!vq->private_data)
2000		return 0;
2001
2002	vhost_init_is_le(vq);
2003
2004	r = vhost_update_used_flags(vq);
2005	if (r)
2006		goto err;
2007	vq->signalled_used_valid = false;
2008	if (!vq->iotlb &&
2009	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
2010		r = -EFAULT;
2011		goto err;
2012	}
2013	r = vhost_get_used_idx(vq, &last_used_idx);
2014	if (r) {
2015		vq_err(vq, "Can't access used idx at %p\n",
2016		       &vq->used->idx);
2017		goto err;
2018	}
2019	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
2020	return 0;
2021
2022err:
2023	vq->is_le = is_le;
2024	return r;
2025}
2026EXPORT_SYMBOL_GPL(vhost_vq_init_access);
2027
2028static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
2029			  struct iovec iov[], int iov_size, int access)
2030{
2031	const struct vhost_iotlb_map *map;
2032	struct vhost_dev *dev = vq->dev;
2033	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
2034	struct iovec *_iov;
2035	u64 s = 0;
2036	int ret = 0;
2037
2038	while ((u64)len > s) {
2039		u64 size;
2040		if (unlikely(ret >= iov_size)) {
2041			ret = -ENOBUFS;
2042			break;
2043		}
2044
2045		map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
2046		if (map == NULL || map->start > addr) {
2047			if (umem != dev->iotlb) {
2048				ret = -EFAULT;
2049				break;
2050			}
2051			ret = -EAGAIN;
2052			break;
2053		} else if (!(map->perm & access)) {
2054			ret = -EPERM;
2055			break;
2056		}
2057
2058		_iov = iov + ret;
2059		size = map->size - addr + map->start;
2060		_iov->iov_len = min((u64)len - s, size);
2061		_iov->iov_base = (void __user *)(unsigned long)
2062				 (map->addr + addr - map->start);
2063		s += size;
2064		addr += size;
2065		++ret;
2066	}
2067
2068	if (ret == -EAGAIN)
2069		vhost_iotlb_miss(vq, addr, access);
2070	return ret;
2071}
2072
2073/* Each buffer in the virtqueues is actually a chain of descriptors.  This
2074 * function returns the next descriptor in the chain,
2075 * or -1U if we're at the end. */
2076static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
2077{
2078	unsigned int next;
2079
2080	/* If this descriptor says it doesn't chain, we're done. */
2081	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
2082		return -1U;
2083
2084	/* Check they're not leading us off end of descriptors. */
2085	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
2086	return next;
2087}
2088
2089static int get_indirect(struct vhost_virtqueue *vq,
2090			struct iovec iov[], unsigned int iov_size,
2091			unsigned int *out_num, unsigned int *in_num,
2092			struct vhost_log *log, unsigned int *log_num,
2093			struct vring_desc *indirect)
2094{
2095	struct vring_desc desc;
2096	unsigned int i = 0, count, found = 0;
2097	u32 len = vhost32_to_cpu(vq, indirect->len);
2098	struct iov_iter from;
2099	int ret, access;
2100
2101	/* Sanity check */
2102	if (unlikely(len % sizeof desc)) {
2103		vq_err(vq, "Invalid length in indirect descriptor: "
2104		       "len 0x%llx not multiple of 0x%zx\n",
2105		       (unsigned long long)len,
2106		       sizeof desc);
2107		return -EINVAL;
2108	}
2109
2110	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
2111			     UIO_MAXIOV, VHOST_ACCESS_RO);
2112	if (unlikely(ret < 0)) {
2113		if (ret != -EAGAIN)
2114			vq_err(vq, "Translation failure %d in indirect.\n", ret);
2115		return ret;
2116	}
2117	iov_iter_init(&from, READ, vq->indirect, ret, len);
2118	count = len / sizeof desc;
2119	/* Buffers are chained via a 16 bit next field, so
2120	 * we can have at most 2^16 of these. */
2121	if (unlikely(count > USHRT_MAX + 1)) {
2122		vq_err(vq, "Indirect buffer length too big: %d\n",
2123		       indirect->len);
2124		return -E2BIG;
2125	}
2126
2127	do {
2128		unsigned iov_count = *in_num + *out_num;
2129		if (unlikely(++found > count)) {
2130			vq_err(vq, "Loop detected: last one at %u "
2131			       "indirect size %u\n",
2132			       i, count);
2133			return -EINVAL;
2134		}
2135		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
 
2136			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
2137			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2138			return -EINVAL;
2139		}
2140		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
2141			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
2142			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2143			return -EINVAL;
2144		}
2145
2146		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2147			access = VHOST_ACCESS_WO;
2148		else
2149			access = VHOST_ACCESS_RO;
2150
2151		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2152				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2153				     iov_size - iov_count, access);
2154		if (unlikely(ret < 0)) {
2155			if (ret != -EAGAIN)
2156				vq_err(vq, "Translation failure %d indirect idx %d\n",
2157					ret, i);
2158			return ret;
2159		}
2160		/* If this is an input descriptor, increment that count. */
2161		if (access == VHOST_ACCESS_WO) {
2162			*in_num += ret;
2163			if (unlikely(log && ret)) {
2164				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2165				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2166				++*log_num;
2167			}
2168		} else {
2169			/* If it's an output descriptor, they're all supposed
2170			 * to come before any input descriptors. */
2171			if (unlikely(*in_num)) {
2172				vq_err(vq, "Indirect descriptor "
2173				       "has out after in: idx %d\n", i);
2174				return -EINVAL;
2175			}
2176			*out_num += ret;
2177		}
2178	} while ((i = next_desc(vq, &desc)) != -1);
2179	return 0;
2180}
2181
2182/* This looks in the virtqueue for the first available buffer, and converts
2183 * it to an iovec for convenient access.  Since descriptors consist of some
2184 * number of output then some number of input descriptors, it's actually two
2185 * iovecs, but we pack them into one and note how many of each there were.
2186 *
2187 * This function returns the descriptor number found, or vq->num (which is
2188 * never a valid descriptor number) if none was found.  A negative code is
2189 * returned on error. */
2190int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2191		      struct iovec iov[], unsigned int iov_size,
2192		      unsigned int *out_num, unsigned int *in_num,
2193		      struct vhost_log *log, unsigned int *log_num)
2194{
2195	struct vring_desc desc;
2196	unsigned int i, head, found = 0;
2197	u16 last_avail_idx;
2198	__virtio16 avail_idx;
2199	__virtio16 ring_head;
2200	int ret, access;
2201
2202	/* Check it isn't doing very strange things with descriptor numbers. */
2203	last_avail_idx = vq->last_avail_idx;
2204
2205	if (vq->avail_idx == vq->last_avail_idx) {
2206		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
2207			vq_err(vq, "Failed to access avail idx at %p\n",
2208				&vq->avail->idx);
2209			return -EFAULT;
2210		}
2211		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2212
2213		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2214			vq_err(vq, "Guest moved avail index from %u to %u",
2215				last_avail_idx, vq->avail_idx);
2216			return -EFAULT;
2217		}
2218
2219		/* If there's nothing new since last we looked, return
2220		 * invalid.
2221		 */
2222		if (vq->avail_idx == last_avail_idx)
2223			return vq->num;
2224
2225		/* Only get avail ring entries after they have been
2226		 * exposed by guest.
2227		 */
2228		smp_rmb();
2229	}
2230
2231	/* Grab the next descriptor number they're advertising, and increment
2232	 * the index we've seen. */
2233	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
 
2234		vq_err(vq, "Failed to read head: idx %d address %p\n",
2235		       last_avail_idx,
2236		       &vq->avail->ring[last_avail_idx % vq->num]);
2237		return -EFAULT;
2238	}
2239
2240	head = vhost16_to_cpu(vq, ring_head);
2241
2242	/* If their number is silly, that's an error. */
2243	if (unlikely(head >= vq->num)) {
2244		vq_err(vq, "Guest says index %u > %u is available",
2245		       head, vq->num);
2246		return -EINVAL;
2247	}
2248
2249	/* When we start there are no input or output descriptors yet. */
2250	*out_num = *in_num = 0;
2251	if (unlikely(log))
2252		*log_num = 0;
2253
2254	i = head;
2255	do {
2256		unsigned iov_count = *in_num + *out_num;
2257		if (unlikely(i >= vq->num)) {
2258			vq_err(vq, "Desc index is %u > %u, head = %u",
2259			       i, vq->num, head);
2260			return -EINVAL;
2261		}
2262		if (unlikely(++found > vq->num)) {
2263			vq_err(vq, "Loop detected: last one at %u "
2264			       "vq size %u head %u\n",
2265			       i, vq->num, head);
2266			return -EINVAL;
2267		}
2268		ret = vhost_get_desc(vq, &desc, i);
2269		if (unlikely(ret)) {
2270			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2271			       i, vq->desc + i);
2272			return -EFAULT;
2273		}
2274		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
2275			ret = get_indirect(vq, iov, iov_size,
2276					   out_num, in_num,
2277					   log, log_num, &desc);
2278			if (unlikely(ret < 0)) {
2279				if (ret != -EAGAIN)
2280					vq_err(vq, "Failure detected "
2281						"in indirect descriptor at idx %d\n", i);
2282				return ret;
2283			}
2284			continue;
2285		}
2286
2287		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2288			access = VHOST_ACCESS_WO;
2289		else
2290			access = VHOST_ACCESS_RO;
2291		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2292				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2293				     iov_size - iov_count, access);
2294		if (unlikely(ret < 0)) {
2295			if (ret != -EAGAIN)
2296				vq_err(vq, "Translation failure %d descriptor idx %d\n",
2297					ret, i);
2298			return ret;
2299		}
2300		if (access == VHOST_ACCESS_WO) {
2301			/* If this is an input descriptor,
2302			 * increment that count. */
2303			*in_num += ret;
2304			if (unlikely(log && ret)) {
2305				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2306				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2307				++*log_num;
2308			}
2309		} else {
2310			/* If it's an output descriptor, they're all supposed
2311			 * to come before any input descriptors. */
2312			if (unlikely(*in_num)) {
2313				vq_err(vq, "Descriptor has out after in: "
2314				       "idx %d\n", i);
2315				return -EINVAL;
2316			}
2317			*out_num += ret;
2318		}
2319	} while ((i = next_desc(vq, &desc)) != -1);
2320
2321	/* On success, increment avail index. */
2322	vq->last_avail_idx++;
2323
2324	/* Assume notifications from guest are disabled at this point,
2325	 * if they aren't, we would need to update the avail_event index. */
2326	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2327	return head;
2328}
2329EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
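A backend typically drives this from its kick handler: pull descriptors with vhost_get_vq_desc(), do the actual I/O on the iovecs, then complete them with vhost_add_used_and_signal(). The fragment below is only an illustrative sketch of that shape, loosely modeled on how the vhost-net handlers are structured; handle_guest_kick() and process_iov() are hypothetical names, and IOTLB retry and error handling are omitted.

/* Illustrative only: a minimal kick-handler loop built on the API above.
 * process_iov() is a hypothetical stand-in for the backend's real work. */
static int process_iov(struct iovec *iov, unsigned int out, unsigned int in);

static void handle_guest_kick(struct vhost_virtqueue *vq)
{
	unsigned int out, in;
	int head;

	mutex_lock(&vq->mutex);
	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0 || head == vq->num)
			break;	/* error, IOTLB miss (-EAGAIN), or ring empty */
		if (process_iov(vq->iov, out, in) < 0) {
			/* Hand the descriptor back so it is retried later. */
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		vhost_add_used_and_signal(vq->dev, vq, head, 0);
	}
	mutex_unlock(&vq->mutex);
}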
2330
2331/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2332void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2333{
2334	vq->last_avail_idx -= n;
2335}
2336EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2337
2338/* After we've used one of their buffers, we tell them about it.  We'll then
2339 * want to notify the guest, using eventfd. */
2340int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2341{
2342	struct vring_used_elem heads = {
2343		cpu_to_vhost32(vq, head),
2344		cpu_to_vhost32(vq, len)
2345	};
2346
2347	return vhost_add_used_n(vq, &heads, 1);
2348}
2349EXPORT_SYMBOL_GPL(vhost_add_used);
2350
2351static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2352			    struct vring_used_elem *heads,
2353			    unsigned count)
2354{
2355	vring_used_elem_t __user *used;
2356	u16 old, new;
2357	int start;
2358
2359	start = vq->last_used_idx & (vq->num - 1);
2360	used = vq->used->ring + start;
2361	if (vhost_put_used(vq, heads, start, count)) {
2362		vq_err(vq, "Failed to write used");
2363		return -EFAULT;
2364	}
2365	if (unlikely(vq->log_used)) {
2366		/* Make sure data is seen before log. */
2367		smp_wmb();
2368		/* Log used ring entry write. */
2369		log_used(vq, ((void __user *)used - (void __user *)vq->used),
2370			 count * sizeof *used);
2371	}
2372	old = vq->last_used_idx;
2373	new = (vq->last_used_idx += count);
2374	/* If the driver never bothers to signal in a very long while,
2375	 * used index might wrap around. If that happens, invalidate
2376	 * signalled_used index we stored. TODO: make sure driver
2377	 * signals at least once in 2^16 and remove this. */
2378	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2379		vq->signalled_used_valid = false;
2380	return 0;
2381}
2382
2383/* After we've used one of their buffers, we tell them about it.  We'll then
2384 * want to notify the guest, using eventfd. */
2385int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2386		     unsigned count)
2387{
2388	int start, n, r;
2389
2390	start = vq->last_used_idx & (vq->num - 1);
2391	n = vq->num - start;
2392	if (n < count) {
2393		r = __vhost_add_used_n(vq, heads, n);
2394		if (r < 0)
2395			return r;
2396		heads += n;
2397		count -= n;
2398	}
2399	r = __vhost_add_used_n(vq, heads, count);
2400
2401	/* Make sure buffer is written before we update index. */
2402	smp_wmb();
2403	if (vhost_put_used_idx(vq)) {
2404		vq_err(vq, "Failed to increment used idx");
2405		return -EFAULT;
2406	}
2407	if (unlikely(vq->log_used)) {
2408		/* Make sure used idx is seen before log. */
2409		smp_wmb();
2410		/* Log used index update. */
2411		log_used(vq, offsetof(struct vring_used, idx),
2412			 sizeof vq->used->idx);
 
2413		if (vq->log_ctx)
2414			eventfd_signal(vq->log_ctx, 1);
2415	}
2416	return r;
2417}
2418EXPORT_SYMBOL_GPL(vhost_add_used_n);
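The split above is plain modular arithmetic on the used ring: a single call never writes past the end of the ring array, so a batch that would wrap is broken into two chunks. A small worked example of the index math, purely for illustration:

/* Worked example (illustration only): with a ring of 8 entries,
 * last_used_idx = 6 and count = 5, the batch is written in two chunks. */
static void used_ring_split_example(void)
{
	unsigned int num = 8, last_used_idx = 6, count = 5;
	unsigned int start = last_used_idx & (num - 1);	/* slot 6 */
	unsigned int n = num - start;			/* 2 slots until wrap */

	/* First chunk fills slots 6..7, second fills slots 0..2. */
	pr_info("chunk 1: %u entries at slot %u, chunk 2: %u entries at slot 0\n",
		n, start, count - n);
}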
2419
2420static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2421{
2422	__u16 old, new;
2423	__virtio16 event;
2424	bool v;
2425	/* Flush out used index updates. This is paired
2426	 * with the barrier that the Guest executes when enabling
2427	 * interrupts. */
2428	smp_mb();
2429
2430	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2431	    unlikely(vq->avail_idx == vq->last_avail_idx))
2432		return true;
2433
2434	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2435		__virtio16 flags;
2436		if (vhost_get_avail_flags(vq, &flags)) {
2437			vq_err(vq, "Failed to get flags");
2438			return true;
2439		}
2440		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2441	}
2442	old = vq->signalled_used;
2443	v = vq->signalled_used_valid;
2444	new = vq->signalled_used = vq->last_used_idx;
2445	vq->signalled_used_valid = true;
2446
2447	if (unlikely(!v))
2448		return true;
2449
2450	if (vhost_get_used_event(vq, &event)) {
2451		vq_err(vq, "Failed to get used event idx");
2452		return true;
2453	}
2454	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
2455}
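The final check defers to vring_need_event() from include/uapi/linux/virtio_ring.h; its definition (reproduced here for reference, as found in this kernel series) makes the wrap-around reasoning above concrete: a signal is needed only if the used index has passed the guest's event index since the last signalled value.

/* For reference: the event-index test used above (uapi virtio_ring.h).
 * All arithmetic is mod 2^16, which is why signalled_used must be
 * invalidated when the used index wraps without a signal. */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}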
2456
2457/* This actually signals the guest, using eventfd. */
2458void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2459{
2460	/* Signal the Guest to tell them we used something up. */
2461	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
2462		eventfd_signal(vq->call_ctx.ctx, 1);
2463}
2464EXPORT_SYMBOL_GPL(vhost_signal);
2465
2466/* And here's the combo meal deal.  Supersize me! */
2467void vhost_add_used_and_signal(struct vhost_dev *dev,
2468			       struct vhost_virtqueue *vq,
2469			       unsigned int head, int len)
2470{
2471	vhost_add_used(vq, head, len);
2472	vhost_signal(dev, vq);
2473}
2474EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2475
2476/* multi-buffer version of vhost_add_used_and_signal */
2477void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2478				 struct vhost_virtqueue *vq,
2479				 struct vring_used_elem *heads, unsigned count)
2480{
2481	vhost_add_used_n(vq, heads, count);
2482	vhost_signal(dev, vq);
2483}
2484EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2485
2486/* return true if we're sure that the available ring is empty */
2487bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2488{
2489	__virtio16 avail_idx;
2490	int r;
2491
2492	if (vq->avail_idx != vq->last_avail_idx)
2493		return false;
2494
2495	r = vhost_get_avail_idx(vq, &avail_idx);
2496	if (unlikely(r))
2497		return false;
2498	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2499
2500	return vq->avail_idx == vq->last_avail_idx;
2501}
2502EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2503
2504/* OK, now we need to know about added descriptors. */
2505bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2506{
2507	__virtio16 avail_idx;
2508	int r;
2509
2510	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2511		return false;
2512	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2513	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2514		r = vhost_update_used_flags(vq);
2515		if (r) {
2516			vq_err(vq, "Failed to enable notification at %p: %d\n",
2517			       &vq->used->flags, r);
2518			return false;
2519		}
2520	} else {
2521		r = vhost_update_avail_event(vq, vq->avail_idx);
2522		if (r) {
2523			vq_err(vq, "Failed to update avail event index at %p: %d\n",
2524			       vhost_avail_event(vq), r);
2525			return false;
2526		}
2527	}
2528	/* They could have slipped one in as we were doing that: make
2529	 * sure it's written, then check again. */
2530	smp_mb();
2531	r = vhost_get_avail_idx(vq, &avail_idx);
2532	if (r) {
2533		vq_err(vq, "Failed to check avail idx at %p: %d\n",
2534		       &vq->avail->idx, r);
2535		return false;
2536	}
2537
2538	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
2539}
2540EXPORT_SYMBOL_GPL(vhost_enable_notify);
2541
2542/* We don't need to be notified again. */
2543void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2544{
2545	int r;
2546
2547	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2548		return;
2549	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2550	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2551		r = vhost_update_used_flags(vq);
2552		if (r)
2553			vq_err(vq, "Failed to disable notification at %p: %d\n",
2554			       &vq->used->flags, r);
2555	}
2556}
2557EXPORT_SYMBOL_GPL(vhost_disable_notify);
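These two helpers are normally used as a pair around a backend's service loop: keep notifications disabled while there is work, and once the ring looks empty, re-enable them and check one more time, because the guest may have added a buffer before the used-flags or avail-event write became visible. A minimal sketch of that pattern, assuming a hypothetical drain_ring() helper that consumes whatever is currently available:

/* Sketch only: the usual enable/disable dance around a service loop.
 * drain_ring() is hypothetical and stands for the backend's own loop. */
static void drain_ring(struct vhost_dev *dev, struct vhost_virtqueue *vq);

static void poll_until_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	vhost_disable_notify(dev, vq);
	for (;;) {
		drain_ring(dev, vq);
		if (!vhost_enable_notify(dev, vq))
			break;		/* truly empty; wait for the next kick */
		/* A buffer slipped in while notifications were off. */
		vhost_disable_notify(dev, vq);
	}
}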
2558
2559/* Create a new message. */
2560struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2561{
2562	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2563	if (!node)
2564		return NULL;
2565
2566	/* Make sure all padding within the structure is initialized. */
2567	memset(&node->msg, 0, sizeof node->msg);
2568	node->vq = vq;
2569	node->msg.type = type;
2570	return node;
2571}
2572EXPORT_SYMBOL_GPL(vhost_new_msg);
2573
2574void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2575		       struct vhost_msg_node *node)
2576{
2577	spin_lock(&dev->iotlb_lock);
2578	list_add_tail(&node->node, head);
2579	spin_unlock(&dev->iotlb_lock);
2580
2581	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
2582}
2583EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2584
2585struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2586					 struct list_head *head)
2587{
2588	struct vhost_msg_node *node = NULL;
2589
2590	spin_lock(&dev->iotlb_lock);
2591	if (!list_empty(head)) {
2592		node = list_first_entry(head, struct vhost_msg_node,
2593					node);
2594		list_del(&node->node);
2595	}
2596	spin_unlock(&dev->iotlb_lock);
2597
2598	return node;
2599}
2600EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
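These queues carry the IOTLB protocol: when translate_desc() hits a missing mapping, vhost_iotlb_miss() enqueues a miss message that userspace reads from the vhost character device and answers with an update. The sketch below shows the rough shape of the userspace side; it assumes the vhost_msg_v2 layout from <linux/vhost_types.h> with the VHOST_BACKEND_F_IOTLB_MSG_V2 backend feature negotiated, look_up_uaddr() is a hypothetical helper, and all error handling is omitted.

/* Rough userspace sketch (not a reference implementation): answer one
 * IOTLB miss by installing a mapping for the faulting IOVA range. */
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/vhost_types.h>

/* Hypothetical: translate a guest IOVA range to a host virtual address. */
extern __u64 look_up_uaddr(__u64 iova, __u64 size);

static void serve_one_iotlb_miss(int vhost_fd)
{
	struct vhost_msg_v2 msg;

	if (read(vhost_fd, &msg, sizeof(msg)) != sizeof(msg))
		return;
	if (msg.type != VHOST_IOTLB_MSG_V2 || msg.iotlb.type != VHOST_IOTLB_MISS)
		return;

	msg.iotlb.uaddr = look_up_uaddr(msg.iotlb.iova, msg.iotlb.size);
	msg.iotlb.perm = VHOST_ACCESS_RW;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	write(vhost_fd, &msg, sizeof(msg));
}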
2601
2602void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
2603{
2604	struct vhost_virtqueue *vq;
2605	int i;
2606
2607	mutex_lock(&dev->mutex);
2608	for (i = 0; i < dev->nvqs; ++i) {
2609		vq = dev->vqs[i];
2610		mutex_lock(&vq->mutex);
2611		vq->acked_backend_features = features;
2612		mutex_unlock(&vq->mutex);
2613	}
2614	mutex_unlock(&dev->mutex);
2615}
2616EXPORT_SYMBOL_GPL(vhost_set_backend_features);
2617
2618static int __init vhost_init(void)
2619{
2620	return 0;
2621}
2622
2623static void __exit vhost_exit(void)
2624{
2625}
2626
2627module_init(vhost_init);
2628module_exit(vhost_exit);
2629
2630MODULE_VERSION("0.0.1");
2631MODULE_LICENSE("GPL v2");
2632MODULE_AUTHOR("Michael S. Tsirkin");
2633MODULE_DESCRIPTION("Host kernel accelerator for virtio");
v3.15
 
   1/* Copyright (C) 2009 Red Hat, Inc.
   2 * Copyright (C) 2006 Rusty Russell IBM Corporation
   3 *
   4 * Author: Michael S. Tsirkin <mst@redhat.com>
   5 *
   6 * Inspiration, some code, and most witty comments come from
   7 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.
  10 *
  11 * Generic code for virtio server in host kernel.
  12 */
  13
  14#include <linux/eventfd.h>
  15#include <linux/vhost.h>
  16#include <linux/uio.h>
  17#include <linux/mm.h>
  18#include <linux/mmu_context.h>
  19#include <linux/miscdevice.h>
  20#include <linux/mutex.h>
  21#include <linux/rcupdate.h>
  22#include <linux/poll.h>
  23#include <linux/file.h>
  24#include <linux/highmem.h>
  25#include <linux/slab.h>
 
  26#include <linux/kthread.h>
  27#include <linux/cgroup.h>
  28#include <linux/module.h>
  29
  30#include "vhost.h"
  31
  32enum {
  33	VHOST_MEMORY_MAX_NREGIONS = 64,
  34	VHOST_MEMORY_F_LOG = 0x1,
  35};
  36
  37#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
  38#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
  39
  40static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
  41			    poll_table *pt)
  42{
  43	struct vhost_poll *poll;
  44
  45	poll = container_of(pt, struct vhost_poll, table);
  46	poll->wqh = wqh;
  47	add_wait_queue(wqh, &poll->wait);
  48}
  49
  50static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
  51			     void *key)
  52{
  53	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
 
  54
  55	if (!((unsigned long)key & poll->mask))
  56		return 0;
  57
  58	vhost_poll_queue(poll);
  59	return 0;
  60}
  61
  62void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
  63{
  64	INIT_LIST_HEAD(&work->node);
  65	work->fn = fn;
  66	init_waitqueue_head(&work->done);
  67	work->flushing = 0;
  68	work->queue_seq = work->done_seq = 0;
  69}
  70EXPORT_SYMBOL_GPL(vhost_work_init);
  71
  72/* Init poll structure */
  73void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
  74		     unsigned long mask, struct vhost_dev *dev)
  75{
  76	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
  77	init_poll_funcptr(&poll->table, vhost_poll_func);
  78	poll->mask = mask;
  79	poll->dev = dev;
  80	poll->wqh = NULL;
  81
  82	vhost_work_init(&poll->work, fn);
  83}
  84EXPORT_SYMBOL_GPL(vhost_poll_init);
  85
  86/* Start polling a file. We add ourselves to file's wait queue. The caller must
  87 * keep a reference to a file until after vhost_poll_stop is called. */
  88int vhost_poll_start(struct vhost_poll *poll, struct file *file)
  89{
  90	unsigned long mask;
  91	int ret = 0;
  92
  93	if (poll->wqh)
  94		return 0;
  95
  96	mask = file->f_op->poll(file, &poll->table);
  97	if (mask)
  98		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
  99	if (mask & POLLERR) {
 100		if (poll->wqh)
 101			remove_wait_queue(poll->wqh, &poll->wait);
 102		ret = -EINVAL;
 103	}
 104
 105	return ret;
 106}
 107EXPORT_SYMBOL_GPL(vhost_poll_start);
 108
 109/* Stop polling a file. After this function returns, it becomes safe to drop the
 110 * file reference. You must also flush afterwards. */
 111void vhost_poll_stop(struct vhost_poll *poll)
 112{
 113	if (poll->wqh) {
 114		remove_wait_queue(poll->wqh, &poll->wait);
 115		poll->wqh = NULL;
 116	}
 117}
 118EXPORT_SYMBOL_GPL(vhost_poll_stop);
 119
 120static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 121				unsigned seq)
 122{
 123	int left;
 124
 125	spin_lock_irq(&dev->work_lock);
 126	left = seq - work->done_seq;
 127	spin_unlock_irq(&dev->work_lock);
 128	return left <= 0;
 129}
 130
 131void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 132{
 133	unsigned seq;
 134	int flushing;
 135
 136	spin_lock_irq(&dev->work_lock);
 137	seq = work->queue_seq;
 138	work->flushing++;
 139	spin_unlock_irq(&dev->work_lock);
 140	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 141	spin_lock_irq(&dev->work_lock);
 142	flushing = --work->flushing;
 143	spin_unlock_irq(&dev->work_lock);
 144	BUG_ON(flushing < 0);
 145}
 146EXPORT_SYMBOL_GPL(vhost_work_flush);
 147
 148/* Flush any work that has been scheduled. When calling this, don't hold any
 149 * locks that are also used by the callback. */
 150void vhost_poll_flush(struct vhost_poll *poll)
 151{
 152	vhost_work_flush(poll->dev, &poll->work);
 153}
 154EXPORT_SYMBOL_GPL(vhost_poll_flush);
 155
 156void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 157{
 158	unsigned long flags;
 
 159
 160	spin_lock_irqsave(&dev->work_lock, flags);
 161	if (list_empty(&work->node)) {
 162		list_add_tail(&work->node, &dev->work_list);
 163		work->queue_seq++;
 164		spin_unlock_irqrestore(&dev->work_lock, flags);
 
 165		wake_up_process(dev->worker);
 166	} else {
 167		spin_unlock_irqrestore(&dev->work_lock, flags);
 168	}
 169}
 170EXPORT_SYMBOL_GPL(vhost_work_queue);
 171
 172void vhost_poll_queue(struct vhost_poll *poll)
 173{
 174	vhost_work_queue(poll->dev, &poll->work);
 175}
 176EXPORT_SYMBOL_GPL(vhost_poll_queue);
 177
 178static void vhost_vq_reset(struct vhost_dev *dev,
 179			   struct vhost_virtqueue *vq)
 180{
 181	vq->num = 1;
 182	vq->desc = NULL;
 183	vq->avail = NULL;
 184	vq->used = NULL;
 185	vq->last_avail_idx = 0;
 186	vq->avail_idx = 0;
 187	vq->last_used_idx = 0;
 188	vq->signalled_used = 0;
 189	vq->signalled_used_valid = false;
 190	vq->used_flags = 0;
 191	vq->log_used = false;
 192	vq->log_addr = -1ull;
 193	vq->private_data = NULL;
 
 
 194	vq->log_base = NULL;
 195	vq->error_ctx = NULL;
 196	vq->error = NULL;
 197	vq->kick = NULL;
 198	vq->call_ctx = NULL;
 199	vq->call = NULL;
 200	vq->log_ctx = NULL;
 201}
 202
 203static int vhost_worker(void *data)
 204{
 205	struct vhost_dev *dev = data;
 206	struct vhost_work *work = NULL;
 207	unsigned uninitialized_var(seq);
 208	mm_segment_t oldfs = get_fs();
 209
 210	set_fs(USER_DS);
 211	use_mm(dev->mm);
 212
 213	for (;;) {
 214		/* mb paired w/ kthread_stop */
 215		set_current_state(TASK_INTERRUPTIBLE);
 216
 217		spin_lock_irq(&dev->work_lock);
 218		if (work) {
 219			work->done_seq = seq;
 220			if (work->flushing)
 221				wake_up_all(&work->done);
 222		}
 223
 224		if (kthread_should_stop()) {
 225			spin_unlock_irq(&dev->work_lock);
 226			__set_current_state(TASK_RUNNING);
 227			break;
 228		}
 229		if (!list_empty(&dev->work_list)) {
 230			work = list_first_entry(&dev->work_list,
 231						struct vhost_work, node);
 232			list_del_init(&work->node);
 233			seq = work->queue_seq;
 234		} else
 235			work = NULL;
 236		spin_unlock_irq(&dev->work_lock);
 237
 238		if (work) {
 239			__set_current_state(TASK_RUNNING);
 
 240			work->fn(work);
 
 241			if (need_resched())
 242				schedule();
 243		} else
 244			schedule();
 245
 246	}
 247	unuse_mm(dev->mm);
 248	set_fs(oldfs);
 249	return 0;
 250}
 251
 252static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 253{
 254	kfree(vq->indirect);
 255	vq->indirect = NULL;
 256	kfree(vq->log);
 257	vq->log = NULL;
 258	kfree(vq->heads);
 259	vq->heads = NULL;
 260}
 261
 262/* Helper to allocate iovec buffers for all vqs. */
 263static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 264{
 265	struct vhost_virtqueue *vq;
 266	int i;
 267
 268	for (i = 0; i < dev->nvqs; ++i) {
 269		vq = dev->vqs[i];
 270		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
 271				       GFP_KERNEL);
 272		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
 273		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
 274		if (!vq->indirect || !vq->log || !vq->heads)
 275			goto err_nomem;
 276	}
 277	return 0;
 278
 279err_nomem:
 280	for (; i >= 0; --i)
 281		vhost_vq_free_iovecs(dev->vqs[i]);
 282	return -ENOMEM;
 283}
 284
 285static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 286{
 287	int i;
 288
 289	for (i = 0; i < dev->nvqs; ++i)
 290		vhost_vq_free_iovecs(dev->vqs[i]);
 291}
 292
 293void vhost_dev_init(struct vhost_dev *dev,
 294		    struct vhost_virtqueue **vqs, int nvqs)
 295{
 296	struct vhost_virtqueue *vq;
 297	int i;
 298
 299	dev->vqs = vqs;
 300	dev->nvqs = nvqs;
 301	mutex_init(&dev->mutex);
 302	dev->log_ctx = NULL;
 303	dev->log_file = NULL;
 304	dev->memory = NULL;
 305	dev->mm = NULL;
 306	spin_lock_init(&dev->work_lock);
 307	INIT_LIST_HEAD(&dev->work_list);
 308	dev->worker = NULL;
 309
 310	for (i = 0; i < dev->nvqs; ++i) {
 311		vq = dev->vqs[i];
 312		vq->log = NULL;
 313		vq->indirect = NULL;
 314		vq->heads = NULL;
 315		vq->dev = dev;
 316		mutex_init(&vq->mutex);
 317		vhost_vq_reset(dev, vq);
 318		if (vq->handle_kick)
 319			vhost_poll_init(&vq->poll, vq->handle_kick,
 320					POLLIN, dev);
 321	}
 322}
 323EXPORT_SYMBOL_GPL(vhost_dev_init);
 324
 325/* Caller should have device mutex */
 326long vhost_dev_check_owner(struct vhost_dev *dev)
 327{
 328	/* Are you the owner? If not, I don't think you mean to do that */
 329	return dev->mm == current->mm ? 0 : -EPERM;
 330}
 331EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 332
 333struct vhost_attach_cgroups_struct {
 334	struct vhost_work work;
 335	struct task_struct *owner;
 336	int ret;
 337};
 338
 339static void vhost_attach_cgroups_work(struct vhost_work *work)
 340{
 341	struct vhost_attach_cgroups_struct *s;
 342
 343	s = container_of(work, struct vhost_attach_cgroups_struct, work);
 344	s->ret = cgroup_attach_task_all(s->owner, current);
 345}
 346
 347static int vhost_attach_cgroups(struct vhost_dev *dev)
 348{
 349	struct vhost_attach_cgroups_struct attach;
 350
 351	attach.owner = current;
 352	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 353	vhost_work_queue(dev, &attach.work);
 354	vhost_work_flush(dev, &attach.work);
 355	return attach.ret;
 356}
 357
 358/* Caller should have device mutex */
 359bool vhost_dev_has_owner(struct vhost_dev *dev)
 360{
 361	return dev->mm;
 362}
 363EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 364
 365/* Caller should have device mutex */
 366long vhost_dev_set_owner(struct vhost_dev *dev)
 367{
 368	struct task_struct *worker;
 369	int err;
 370
 371	/* Is there an owner already? */
 372	if (vhost_dev_has_owner(dev)) {
 373		err = -EBUSY;
 374		goto err_mm;
 375	}
 376
 377	/* No owner, become one */
 378	dev->mm = get_task_mm(current);
 379	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 380	if (IS_ERR(worker)) {
 381		err = PTR_ERR(worker);
 382		goto err_worker;
 383	}
 384
 385	dev->worker = worker;
 386	wake_up_process(worker);	/* avoid contributing to loadavg */
 387
 388	err = vhost_attach_cgroups(dev);
 389	if (err)
 390		goto err_cgroup;
 391
 392	err = vhost_dev_alloc_iovecs(dev);
 393	if (err)
 394		goto err_cgroup;
 395
 396	return 0;
 397err_cgroup:
 398	kthread_stop(worker);
 399	dev->worker = NULL;
 
 
 400err_worker:
 401	if (dev->mm)
 402		mmput(dev->mm);
 403	dev->mm = NULL;
 404err_mm:
 405	return err;
 406}
 407EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 408
 409struct vhost_memory *vhost_dev_reset_owner_prepare(void)
 410{
 411	return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 412}
 413EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 414
 415/* Caller should have device mutex */
 416void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
 417{
 418	vhost_dev_cleanup(dev, true);
 419
 420	/* Restore memory to default empty mapping. */
 421	memory->nregions = 0;
 422	RCU_INIT_POINTER(dev->memory, memory);
 423}
 424EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 425
 426void vhost_dev_stop(struct vhost_dev *dev)
 427{
 428	int i;
 429
 430	for (i = 0; i < dev->nvqs; ++i) {
 431		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 432			vhost_poll_stop(&dev->vqs[i]->poll);
 433			vhost_poll_flush(&dev->vqs[i]->poll);
 434		}
 435	}
 436}
 437EXPORT_SYMBOL_GPL(vhost_dev_stop);
 438
 439/* Caller should have device mutex if and only if locked is set */
 440void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 441{
 442	int i;
 443
 444	for (i = 0; i < dev->nvqs; ++i) {
 445		if (dev->vqs[i]->error_ctx)
 446			eventfd_ctx_put(dev->vqs[i]->error_ctx);
 447		if (dev->vqs[i]->error)
 448			fput(dev->vqs[i]->error);
 449		if (dev->vqs[i]->kick)
 450			fput(dev->vqs[i]->kick);
 451		if (dev->vqs[i]->call_ctx)
 452			eventfd_ctx_put(dev->vqs[i]->call_ctx);
 453		if (dev->vqs[i]->call)
 454			fput(dev->vqs[i]->call);
 455		vhost_vq_reset(dev, dev->vqs[i]);
 456	}
 457	vhost_dev_free_iovecs(dev);
 458	if (dev->log_ctx)
 459		eventfd_ctx_put(dev->log_ctx);
 460	dev->log_ctx = NULL;
 461	if (dev->log_file)
 462		fput(dev->log_file);
 463	dev->log_file = NULL;
 464	/* No one will access memory at this point */
 465	kfree(rcu_dereference_protected(dev->memory,
 466					locked ==
 467						lockdep_is_held(&dev->mutex)));
 468	RCU_INIT_POINTER(dev->memory, NULL);
 469	WARN_ON(!list_empty(&dev->work_list));
 
 
 470	if (dev->worker) {
 471		kthread_stop(dev->worker);
 472		dev->worker = NULL;
 
 473	}
 474	if (dev->mm)
 475		mmput(dev->mm);
 476	dev->mm = NULL;
 477}
 478EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 479
 480static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 481{
 482	u64 a = addr / VHOST_PAGE_SIZE / 8;
 483
 484	/* Make sure 64 bit math will not overflow. */
 485	if (a > ULONG_MAX - (unsigned long)log_base ||
 486	    a + (unsigned long)log_base > ULONG_MAX)
 487		return 0;
 488
 489	return access_ok(VERIFY_WRITE, log_base + a,
 490			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 491}
 492
 493/* Caller should have vq mutex and device mutex. */
 494static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 495			       int log_all)
 496{
 497	int i;
 498
 499	if (!mem)
 500		return 0;
 501
 502	for (i = 0; i < mem->nregions; ++i) {
 503		struct vhost_memory_region *m = mem->regions + i;
 504		unsigned long a = m->userspace_addr;
 505		if (m->memory_size > ULONG_MAX)
 506			return 0;
 507		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
 508				    m->memory_size))
 509			return 0;
 510		else if (log_all && !log_access_ok(log_base,
 511						   m->guest_phys_addr,
 512						   m->memory_size))
 513			return 0;
 514	}
 515	return 1;
 516}
 517
 518/* Can we switch to this memory table? */
 519/* Caller should have device mutex but not vq mutex */
 520static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 521			    int log_all)
 522{
 523	int i;
 524
 525	for (i = 0; i < d->nvqs; ++i) {
 526		int ok;
 
 
 527		mutex_lock(&d->vqs[i]->mutex);
 
 528		/* If ring is inactive, will check when it's enabled. */
 529		if (d->vqs[i]->private_data)
 530			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
 531						 log_all);
 532		else
 533			ok = 1;
 534		mutex_unlock(&d->vqs[i]->mutex);
 535		if (!ok)
 536			return 0;
 537	}
 538	return 1;
 
 539}
 540
 541static int vq_access_ok(struct vhost_dev *d, unsigned int num,
 542			struct vring_desc __user *desc,
 543			struct vring_avail __user *avail,
 544			struct vring_used __user *used)
 545{
 546	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 547	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 548	       access_ok(VERIFY_READ, avail,
 549			 sizeof *avail + num * sizeof *avail->ring + s) &&
 550	       access_ok(VERIFY_WRITE, used,
 551			sizeof *used + num * sizeof *used->ring + s);
 552}
 
 553
 554/* Can we log writes? */
 555/* Caller should have device mutex but not vq mutex */
 556int vhost_log_access_ok(struct vhost_dev *dev)
 557{
 558	struct vhost_memory *mp;
 559
 560	mp = rcu_dereference_protected(dev->memory,
 561				       lockdep_is_held(&dev->mutex));
 562	return memory_access_ok(dev, mp, 1);
 563}
 564EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 565
 566/* Verify access for write logging. */
 567/* Caller should have vq mutex and device mutex */
 568static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 569			    void __user *log_base)
 570{
 571	struct vhost_memory *mp;
 572	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 573
 574	mp = rcu_dereference_protected(vq->dev->memory,
 575				       lockdep_is_held(&vq->mutex));
 576	return vq_memory_access_ok(log_base, mp,
 577			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
 578		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
 579					sizeof *vq->used +
 580					vq->num * sizeof *vq->used->ring + s));
 581}
 582
 583/* Can we start vq? */
 584/* Caller should have vq mutex and device mutex */
 585int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 586{
 587	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
 588		vq_log_access_ok(vq->dev, vq, vq->log_base);
 
 
 589}
 590EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 591
 592static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 593{
 594	struct vhost_memory mem, *newmem, *oldmem;
 
 
 595	unsigned long size = offsetof(struct vhost_memory, regions);
 
 596
 597	if (copy_from_user(&mem, m, size))
 598		return -EFAULT;
 599	if (mem.padding)
 600		return -EOPNOTSUPP;
 601	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
 602		return -E2BIG;
 603	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
 
 604	if (!newmem)
 605		return -ENOMEM;
 606
 607	memcpy(newmem, &mem, size);
 608	if (copy_from_user(newmem->regions, m->regions,
 609			   mem.nregions * sizeof *m->regions)) {
 610		kfree(newmem);
 611		return -EFAULT;
 612	}
 613
 614	if (!memory_access_ok(d, newmem,
 615			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
 616		kfree(newmem);
 617		return -EFAULT;
 618	}
 619	oldmem = rcu_dereference_protected(d->memory,
 620					   lockdep_is_held(&d->mutex));
 621	rcu_assign_pointer(d->memory, newmem);
 622	synchronize_rcu();
 623	kfree(oldmem);
 
 
 624	return 0;
 625}
 626
 627long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 628{
 629	struct file *eventfp, *filep = NULL;
 630	bool pollstart = false, pollstop = false;
 631	struct eventfd_ctx *ctx = NULL;
 632	u32 __user *idxp = argp;
 633	struct vhost_virtqueue *vq;
 634	struct vhost_vring_state s;
 635	struct vhost_vring_file f;
 636	struct vhost_vring_addr a;
 637	u32 idx;
 638	long r;
 639
 640	r = get_user(idx, idxp);
 641	if (r < 0)
 642		return r;
 643	if (idx >= d->nvqs)
 644		return -ENOBUFS;
 645
 
 646	vq = d->vqs[idx];
 647
 648	mutex_lock(&vq->mutex);
 649
 650	switch (ioctl) {
 651	case VHOST_SET_VRING_NUM:
 652		/* Resizing ring with an active backend?
 653		 * You don't want to do that. */
 654		if (vq->private_data) {
 655			r = -EBUSY;
 656			break;
 657		}
 658		if (copy_from_user(&s, argp, sizeof s)) {
 659			r = -EFAULT;
 660			break;
 661		}
 662		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
 663			r = -EINVAL;
 664			break;
 665		}
 666		vq->num = s.num;
 667		break;
 668	case VHOST_SET_VRING_BASE:
 669		/* Moving base with an active backend?
 670		 * You don't want to do that. */
 671		if (vq->private_data) {
 672			r = -EBUSY;
 673			break;
 674		}
 675		if (copy_from_user(&s, argp, sizeof s)) {
 676			r = -EFAULT;
 677			break;
 678		}
 679		if (s.num > 0xffff) {
 680			r = -EINVAL;
 681			break;
 682		}
 683		vq->last_avail_idx = s.num;
 684		/* Forget the cached index value. */
 685		vq->avail_idx = vq->last_avail_idx;
 686		break;
 687	case VHOST_GET_VRING_BASE:
 688		s.index = idx;
 689		s.num = vq->last_avail_idx;
 690		if (copy_to_user(argp, &s, sizeof s))
 691			r = -EFAULT;
 692		break;
 693	case VHOST_SET_VRING_ADDR:
 694		if (copy_from_user(&a, argp, sizeof a)) {
 695			r = -EFAULT;
 696			break;
 697		}
 698		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
 699			r = -EOPNOTSUPP;
 700			break;
 701		}
 702		/* For 32bit, verify that the top 32bits of the user
 703		   data are set to zero. */
 704		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
 705		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
 706		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
 707			r = -EFAULT;
 708			break;
 709		}
 710		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
 711		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
 712		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
 713			r = -EINVAL;
 714			break;
 715		}
 716
 717		/* We only verify access here if a backend is configured.
 718		 * If it is not, we skip the check, since the size might not
 719		 * have been set up yet; we will verify when the backend is configured. */
 720		if (vq->private_data) {
 721			if (!vq_access_ok(d, vq->num,
 722				(void __user *)(unsigned long)a.desc_user_addr,
 723				(void __user *)(unsigned long)a.avail_user_addr,
 724				(void __user *)(unsigned long)a.used_user_addr)) {
 725				r = -EINVAL;
 726				break;
 727			}
 728
 729			/* Also validate log access for used ring if enabled. */
 730			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
 731			    !log_access_ok(vq->log_base, a.log_guest_addr,
 732					   sizeof *vq->used +
 733					   vq->num * sizeof *vq->used->ring)) {
 734				r = -EINVAL;
 735				break;
 736			}
 737		}
 738
 739		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
 740		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
 741		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
 742		vq->log_addr = a.log_guest_addr;
 743		vq->used = (void __user *)(unsigned long)a.used_user_addr;
 744		break;
 745	case VHOST_SET_VRING_KICK:
 746		if (copy_from_user(&f, argp, sizeof f)) {
 747			r = -EFAULT;
 748			break;
 749		}
 750		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 751		if (IS_ERR(eventfp)) {
 752			r = PTR_ERR(eventfp);
 753			break;
 754		}
 755		if (eventfp != vq->kick) {
 756			pollstop = (filep = vq->kick) != NULL;
 757			pollstart = (vq->kick = eventfp) != NULL;
 758		} else
 759			filep = eventfp;
 760		break;
 761	case VHOST_SET_VRING_CALL:
 762		if (copy_from_user(&f, argp, sizeof f)) {
 763			r = -EFAULT;
 764			break;
 765		}
 766		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 767		if (IS_ERR(eventfp)) {
 768			r = PTR_ERR(eventfp);
 769			break;
 770		}
 771		if (eventfp != vq->call) {
 772			filep = vq->call;
 773			ctx = vq->call_ctx;
 774			vq->call = eventfp;
 775			vq->call_ctx = eventfp ?
 776				eventfd_ctx_fileget(eventfp) : NULL;
 777		} else
 778			filep = eventfp;
 779		break;
 780	case VHOST_SET_VRING_ERR:
 781		if (copy_from_user(&f, argp, sizeof f)) {
 782			r = -EFAULT;
 783			break;
 784		}
 785		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 786		if (IS_ERR(eventfp)) {
 787			r = PTR_ERR(eventfp);
 788			break;
 789		}
 790		if (eventfp != vq->error) {
 791			filep = vq->error;
 792			vq->error = eventfp;
 793			ctx = vq->error_ctx;
 794			vq->error_ctx = eventfp ?
 795				eventfd_ctx_fileget(eventfp) : NULL;
 796		} else
 797			filep = eventfp;
 798		break;
 799	default:
 800		r = -ENOIOCTLCMD;
 801	}
 802
 803	if (pollstop && vq->handle_kick)
 804		vhost_poll_stop(&vq->poll);
 805
 806	if (ctx)
 807		eventfd_ctx_put(ctx);
 808	if (filep)
 809		fput(filep);
 810
 811	if (pollstart && vq->handle_kick)
 812		r = vhost_poll_start(&vq->poll, vq->kick);
 813
 814	mutex_unlock(&vq->mutex);
 815
 816	if (pollstop && vq->handle_kick)
 817		vhost_poll_flush(&vq->poll);
 818	return r;
 819}
 820EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
 821
 822/* Caller must have device mutex */
 823long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 824{
 825	struct file *eventfp, *filep = NULL;
 826	struct eventfd_ctx *ctx = NULL;
 827	u64 p;
 828	long r;
 829	int i, fd;
 830
 831	/* If you are not the owner, you can become one */
 832	if (ioctl == VHOST_SET_OWNER) {
 833		r = vhost_dev_set_owner(d);
 834		goto done;
 835	}
 836
 837	/* You must be the owner to do anything else */
 838	r = vhost_dev_check_owner(d);
 839	if (r)
 840		goto done;
 841
 842	switch (ioctl) {
 843	case VHOST_SET_MEM_TABLE:
 844		r = vhost_set_memory(d, argp);
 845		break;
 846	case VHOST_SET_LOG_BASE:
 847		if (copy_from_user(&p, argp, sizeof p)) {
 848			r = -EFAULT;
 849			break;
 850		}
 851		if ((u64)(unsigned long)p != p) {
 852			r = -EFAULT;
 853			break;
 854		}
 855		for (i = 0; i < d->nvqs; ++i) {
 856			struct vhost_virtqueue *vq;
 857			void __user *base = (void __user *)(unsigned long)p;
 858			vq = d->vqs[i];
 859			mutex_lock(&vq->mutex);
 860			/* If ring is inactive, will check when it's enabled. */
 861			if (vq->private_data && !vq_log_access_ok(d, vq, base))
 862				r = -EFAULT;
 863			else
 864				vq->log_base = base;
 865			mutex_unlock(&vq->mutex);
 866		}
 867		break;
 868	case VHOST_SET_LOG_FD:
 869		r = get_user(fd, (int __user *)argp);
 870		if (r < 0)
 871			break;
 872		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
 873		if (IS_ERR(eventfp)) {
 874			r = PTR_ERR(eventfp);
 875			break;
 876		}
 877		if (eventfp != d->log_file) {
 878			filep = d->log_file;
 879			ctx = d->log_ctx;
 880			d->log_ctx = eventfp ?
 881				eventfd_ctx_fileget(eventfp) : NULL;
 882		} else
 883			filep = eventfp;
 884		for (i = 0; i < d->nvqs; ++i) {
 885			mutex_lock(&d->vqs[i]->mutex);
 886			d->vqs[i]->log_ctx = d->log_ctx;
 887			mutex_unlock(&d->vqs[i]->mutex);
 888		}
 889		if (ctx)
 890			eventfd_ctx_put(ctx);
 891		if (filep)
 892			fput(filep);
 893		break;
 894	default:
 895		r = -ENOIOCTLCMD;
 896		break;
 897	}
 898done:
 899	return r;
 900}
 901EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
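For orientation, the ioctls dispatched here and in vhost_vring_ioctl() are issued by userspace (QEMU, for example) in roughly the order below when bringing a device up. This is a simplified sketch against the uapi in <linux/vhost.h>: the memory table and ring addresses are prepared by the caller, per-device backend ioctls (such as setting the net backend fd) are not shown, and error handling is omitted.

/* Illustrative bring-up sequence only; not a complete or robust setup. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <linux/vhost.h>

static void setup_vring(int vhost_fd, unsigned int index,
			struct vhost_vring_addr *a)
{
	struct vhost_vring_state num  = { .index = index, .num = 256 };
	struct vhost_vring_state base = { .index = index, .num = 0 };
	struct vhost_vring_file kick  = { .index = index, .fd = eventfd(0, 0) };
	struct vhost_vring_file call  = { .index = index, .fd = eventfd(0, 0) };

	a->index = index;
	ioctl(vhost_fd, VHOST_SET_VRING_NUM, &num);	/* ring size */
	ioctl(vhost_fd, VHOST_SET_VRING_BASE, &base);	/* starting avail index */
	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, a);	/* desc/avail/used addresses */
	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);	/* guest -> host doorbell */
	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);	/* host -> guest interrupt */
}

static int setup_device(const char *dev_path, struct vhost_memory *mem,
			struct vhost_vring_addr *addrs, int nvqs)
{
	int fd = open(dev_path, O_RDWR);
	int i;

	ioctl(fd, VHOST_SET_OWNER);		/* claim the device, start worker */
	ioctl(fd, VHOST_SET_MEM_TABLE, mem);	/* guest memory layout */
	for (i = 0; i < nvqs; i++)
		setup_vring(fd, i, &addrs[i]);
	return fd;
}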
 902
 903static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 904						     __u64 addr, __u32 len)
 905{
 906	struct vhost_memory_region *reg;
 907	int i;
 908
 909	/* linear search is not brilliant, but we really have on the order of 6
 910	 * regions in practice */
 911	for (i = 0; i < mem->nregions; ++i) {
 912		reg = mem->regions + i;
 913		if (reg->guest_phys_addr <= addr &&
 914		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
 915			return reg;
 916	}
 917	return NULL;
 918}
 919
 920/* TODO: This is really inefficient.  We need something like get_user()
 921 * (instruction directly accesses the data, with an exception table entry
 922 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 923 */
 924static int set_bit_to_user(int nr, void __user *addr)
 925{
 926	unsigned long log = (unsigned long)addr;
 927	struct page *page;
 928	void *base;
 929	int bit = nr + (log % PAGE_SIZE) * 8;
 930	int r;
 931
 932	r = get_user_pages_fast(log, 1, 1, &page);
 933	if (r < 0)
 934		return r;
 935	BUG_ON(r != 1);
 936	base = kmap_atomic(page);
 937	set_bit(bit, base);
 938	kunmap_atomic(base);
 939	set_page_dirty_lock(page);
 940	put_page(page);
 941	return 0;
 942}
 943
 944static int log_write(void __user *log_base,
 945		     u64 write_address, u64 write_length)
 946{
 947	u64 write_page = write_address / VHOST_PAGE_SIZE;
 948	int r;
 949
 950	if (!write_length)
 951		return 0;
 952	write_length += write_address % VHOST_PAGE_SIZE;
 953	for (;;) {
 954		u64 base = (u64)(unsigned long)log_base;
 955		u64 log = base + write_page / 8;
 956		int bit = write_page % 8;
 957		if ((u64)(unsigned long)log != log)
 958			return -EFAULT;
 959		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
 960		if (r < 0)
 961			return r;
 962		if (write_length <= VHOST_PAGE_SIZE)
 963			break;
 964		write_length -= VHOST_PAGE_SIZE;
 965		write_page += 1;
 966	}
 967	return r;
 968}
 969
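The dirty log written by log_write() is a bitmap with one bit per VHOST_PAGE_SIZE page of guest memory: the page number selects byte write_page / 8 of the bitmap and bit write_page % 8 within it. A short worked example of that arithmetic, assuming VHOST_PAGE_SIZE is 4096 (illustration only):

/* Worked example of the bitmap addressing above (illustration only). */
static void log_bit_example(void)
{
	u64 write_address = 0x12345;		/* guest physical address */
	u64 write_page = write_address / 4096;	/* page 18 (0x12) */
	u64 byte = write_page / 8;		/* byte 2 of the log bitmap */
	int bit = write_page % 8;		/* bit 2 within that byte */

	pr_info("addr %#llx dirties log byte %llu, bit %d\n",
		write_address, byte, bit);
}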
 970int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 971		    unsigned int log_num, u64 len)
 972{
 973	int i, r;
 974
 975	/* Make sure data written is seen before log. */
 976	smp_wmb();
 977	for (i = 0; i < log_num; ++i) {
 978		u64 l = min(log[i].len, len);
 979		r = log_write(vq->log_base, log[i].addr, l);
 980		if (r < 0)
 981			return r;
 982		len -= l;
 983		if (!len) {
 984			if (vq->log_ctx)
 985				eventfd_signal(vq->log_ctx, 1);
 986			return 0;
 987		}
 988	}
 989	/* Length written exceeds what we have stored. This is a bug. */
 990	BUG();
 991	return 0;
 992}
 993EXPORT_SYMBOL_GPL(vhost_log_write);
 994
 995static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 996{
 997	void __user *used;
 998	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
 999		return -EFAULT;
1000	if (unlikely(vq->log_used)) {
1001		/* Make sure the flag is seen before log. */
1002		smp_wmb();
1003		/* Log used flag write. */
1004		used = &vq->used->flags;
1005		log_write(vq->log_base, vq->log_addr +
1006			  (used - (void __user *)vq->used),
1007			  sizeof vq->used->flags);
1008		if (vq->log_ctx)
1009			eventfd_signal(vq->log_ctx, 1);
1010	}
1011	return 0;
1012}
1013
1014static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1015{
1016	if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
1017		return -EFAULT;
1018	if (unlikely(vq->log_used)) {
1019		void __user *used;
1020		/* Make sure the event is seen before log. */
1021		smp_wmb();
1022		/* Log avail event write */
1023		used = vhost_avail_event(vq);
1024		log_write(vq->log_base, vq->log_addr +
1025			  (used - (void __user *)vq->used),
1026			  sizeof *vhost_avail_event(vq));
1027		if (vq->log_ctx)
1028			eventfd_signal(vq->log_ctx, 1);
1029	}
1030	return 0;
1031}
1032
1033int vhost_init_used(struct vhost_virtqueue *vq)
1034{
 
1035	int r;
 
 
1036	if (!vq->private_data)
1037		return 0;
1038
 
 
1039	r = vhost_update_used_flags(vq);
1040	if (r)
1041		return r;
1042	vq->signalled_used_valid = false;
1043	return get_user(vq->last_used_idx, &vq->used->idx);
1044}
1045EXPORT_SYMBOL_GPL(vhost_init_used);
1046
1047static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
1048			  struct iovec iov[], int iov_size)
1049{
1050	const struct vhost_memory_region *reg;
1051	struct vhost_memory *mem;
 
1052	struct iovec *_iov;
1053	u64 s = 0;
1054	int ret = 0;
1055
1056	rcu_read_lock();
1057
1058	mem = rcu_dereference(dev->memory);
1059	while ((u64)len > s) {
1060		u64 size;
1061		if (unlikely(ret >= iov_size)) {
1062			ret = -ENOBUFS;
1063			break;
1064		}
1065		reg = find_region(mem, addr, len);
1066		if (unlikely(!reg)) {
1067			ret = -EFAULT;
1068			break;
1069		}
 
1070		_iov = iov + ret;
1071		size = reg->memory_size - addr + reg->guest_phys_addr;
1072		_iov->iov_len = min((u64)len - s, size);
1073		_iov->iov_base = (void __user *)(unsigned long)
1074			(reg->userspace_addr + addr - reg->guest_phys_addr);
1075		s += size;
1076		addr += size;
1077		++ret;
1078	}
1079
1080	rcu_read_unlock();
 
1081	return ret;
1082}
1083
1084/* Each buffer in the virtqueues is actually a chain of descriptors.  This
1085 * function returns the next descriptor in the chain,
1086 * or -1U if we're at the end. */
1087static unsigned next_desc(struct vring_desc *desc)
1088{
1089	unsigned int next;
1090
1091	/* If this descriptor says it doesn't chain, we're done. */
1092	if (!(desc->flags & VRING_DESC_F_NEXT))
1093		return -1U;
1094
1095	/* Check they're not leading us off end of descriptors. */
1096	next = desc->next;
1097	/* Make sure compiler knows to grab that: we don't want it changing! */
1098	/* We will use the result as an index in an array, so most
1099	 * architectures only need a compiler barrier here. */
1100	read_barrier_depends();
1101
1102	return next;
1103}
1104
1105static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1106			struct iovec iov[], unsigned int iov_size,
1107			unsigned int *out_num, unsigned int *in_num,
1108			struct vhost_log *log, unsigned int *log_num,
1109			struct vring_desc *indirect)
1110{
1111	struct vring_desc desc;
1112	unsigned int i = 0, count, found = 0;
1113	int ret;
 
 
1114
1115	/* Sanity check */
1116	if (unlikely(indirect->len % sizeof desc)) {
1117		vq_err(vq, "Invalid length in indirect descriptor: "
1118		       "len 0x%llx not multiple of 0x%zx\n",
1119		       (unsigned long long)indirect->len,
1120		       sizeof desc);
1121		return -EINVAL;
1122	}
1123
1124	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
1125			     UIO_MAXIOV);
1126	if (unlikely(ret < 0)) {
1127		vq_err(vq, "Translation failure %d in indirect.\n", ret);
 
1128		return ret;
1129	}
1130
1131	/* We will use the result as an address to read from, so most
1132	 * architectures only need a compiler barrier here. */
1133	read_barrier_depends();
1134
1135	count = indirect->len / sizeof desc;
1136	/* Buffers are chained via a 16 bit next field, so
1137	 * we can have at most 2^16 of these. */
1138	if (unlikely(count > USHRT_MAX + 1)) {
1139		vq_err(vq, "Indirect buffer length too big: %d\n",
1140		       indirect->len);
1141		return -E2BIG;
1142	}
1143
1144	do {
1145		unsigned iov_count = *in_num + *out_num;
1146		if (unlikely(++found > count)) {
1147			vq_err(vq, "Loop detected: last one at %u "
1148			       "indirect size %u\n",
1149			       i, count);
1150			return -EINVAL;
1151		}
1152		if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1153					      vq->indirect, sizeof desc))) {
1154			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1155			       i, (size_t)indirect->addr + i * sizeof desc);
1156			return -EINVAL;
1157		}
1158		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
1159			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1160			       i, (size_t)indirect->addr + i * sizeof desc);
1161			return -EINVAL;
1162		}
1163
1164		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1165				     iov_size - iov_count);
1166		if (unlikely(ret < 0)) {
1167			vq_err(vq, "Translation failure %d indirect idx %d\n",
1168			       ret, i);
 
1169			return ret;
1170		}
1171		/* If this is an input descriptor, increment that count. */
1172		if (desc.flags & VRING_DESC_F_WRITE) {
1173			*in_num += ret;
1174			if (unlikely(log)) {
1175				log[*log_num].addr = desc.addr;
1176				log[*log_num].len = desc.len;
1177				++*log_num;
1178			}
1179		} else {
1180			/* If it's an output descriptor, they're all supposed
1181			 * to come before any input descriptors. */
1182			if (unlikely(*in_num)) {
1183				vq_err(vq, "Indirect descriptor "
1184				       "has out after in: idx %d\n", i);
1185				return -EINVAL;
1186			}
1187			*out_num += ret;
1188		}
1189	} while ((i = next_desc(&desc)) != -1);
1190	return 0;
1191}
1192
1193/* This looks in the virtqueue for the first available buffer, and converts
1194 * it to an iovec for convenient access.  Since descriptors consist of some
1195 * number of output then some number of input descriptors, it's actually two
1196 * iovecs, but we pack them into one and note how many of each there were.
1197 *
1198 * This function returns the descriptor number found, or vq->num (which is
1199 * never a valid descriptor number) if none was found.  A negative code is
1200 * returned on error. */
1201int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1202		      struct iovec iov[], unsigned int iov_size,
1203		      unsigned int *out_num, unsigned int *in_num,
1204		      struct vhost_log *log, unsigned int *log_num)
1205{
1206	struct vring_desc desc;
1207	unsigned int i, head, found = 0;
1208	u16 last_avail_idx;
1209	int ret;
 
 
1210
1211	/* Check it isn't doing very strange things with descriptor numbers. */
1212	last_avail_idx = vq->last_avail_idx;
1213	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
1214		vq_err(vq, "Failed to access avail idx at %p\n",
1215		       &vq->avail->idx);
1216		return -EFAULT;
1217	}
1218
1219	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1220		vq_err(vq, "Guest moved avail index from %u to %u",
1221		       last_avail_idx, vq->avail_idx);
1222		return -EFAULT;
1223	}
 
 
1224
1225	/* If there's nothing new since last we looked, return invalid. */
1226	if (vq->avail_idx == last_avail_idx)
1227		return vq->num;
 
 
1228
1229	/* Only get avail ring entries after they have been exposed by guest. */
1230	smp_rmb();
1231
1232	/* Grab the next descriptor number they're advertising, and increment
1233	 * the index we've seen. */
1234	if (unlikely(__get_user(head,
1235				&vq->avail->ring[last_avail_idx % vq->num]))) {
1236		vq_err(vq, "Failed to read head: idx %d address %p\n",
1237		       last_avail_idx,
1238		       &vq->avail->ring[last_avail_idx % vq->num]);
1239		return -EFAULT;
1240	}
1241
1242	/* If their number is silly, that's an error. */
1243	if (unlikely(head >= vq->num)) {
1244		vq_err(vq, "Guest says index %u > %u is available",
1245		       head, vq->num);
1246		return -EINVAL;
1247	}
1248
1249	/* When we start there are no input or output descriptors. */
1250	*out_num = *in_num = 0;
1251	if (unlikely(log))
1252		*log_num = 0;
1253
1254	i = head;
1255	do {
1256		unsigned iov_count = *in_num + *out_num;
1257		if (unlikely(i >= vq->num)) {
1258			vq_err(vq, "Desc index is %u > %u, head = %u",
1259			       i, vq->num, head);
1260			return -EINVAL;
1261		}
1262		if (unlikely(++found > vq->num)) {
1263			vq_err(vq, "Loop detected: last one at %u "
1264			       "vq size %u head %u\n",
1265			       i, vq->num, head);
1266			return -EINVAL;
1267		}
1268		ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
1269		if (unlikely(ret)) {
1270			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1271			       i, vq->desc + i);
1272			return -EFAULT;
1273		}
1274		if (desc.flags & VRING_DESC_F_INDIRECT) {
1275			ret = get_indirect(dev, vq, iov, iov_size,
1276					   out_num, in_num,
1277					   log, log_num, &desc);
1278			if (unlikely(ret < 0)) {
1279				vq_err(vq, "Failure detected "
1280				       "in indirect descriptor at idx %d\n", i);
1281				return ret;
1282			}
1283			continue;
1284		}
1285
1286		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1287				     iov_size - iov_count);
1288		if (unlikely(ret < 0)) {
1289			vq_err(vq, "Translation failure %d descriptor idx %d\n",
1290			       ret, i);
1291			return ret;
1292		}
1293		if (desc.flags & VRING_DESC_F_WRITE) {
1294			/* If this is an input descriptor,
1295			 * increment that count. */
1296			*in_num += ret;
1297			if (unlikely(log)) {
1298				log[*log_num].addr = desc.addr;
1299				log[*log_num].len = desc.len;
1300				++*log_num;
1301			}
1302		} else {
1303			/* If it's an output descriptor, they're all supposed
1304			 * to come before any input descriptors. */
1305			if (unlikely(*in_num)) {
1306				vq_err(vq, "Descriptor has out after in: "
1307				       "idx %d\n", i);
1308				return -EINVAL;
1309			}
1310			*out_num += ret;
1311		}
1312	} while ((i = next_desc(&desc)) != -1);
1313
1314	/* On success, increment avail index. */
1315	vq->last_avail_idx++;
1316
1317	/* Assume notifications from the guest are disabled at this point;
1318	 * if they aren't, we would need to update the avail_event index. */
1319	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
1320	return head;
1321}
1322EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
1323
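/* Illustrative, hypothetical sketch (not part of this driver) of the three
 * possible outcomes of vhost_get_vq_desc().  The function name, the -EAGAIN
 * convention for an empty ring and the caller-supplied iovec array are
 * assumptions; only the vhost_get_vq_desc() signature and its return
 * conventions come from the code above. */
static int example_one_buffer(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			      struct iovec *iov, unsigned int iov_size)
{
	unsigned int out, in;
	int head;

	head = vhost_get_vq_desc(dev, vq, iov, iov_size, &out, &in,
				 NULL, NULL);
	if (head < 0)
		return head;		/* -EFAULT/-EINVAL, already reported via vq_err() */
	if (head == vq->num)
		return -EAGAIN;		/* example convention: nothing available */

	/* The first 'out' iovecs map readable guest output buffers, the next
	 * 'in' iovecs map writable guest input buffers; remember 'head' so
	 * the buffer can later be completed with vhost_add_used(). */
	return head;
}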
1324/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
1325void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
1326{
1327	vq->last_avail_idx -= n;
1328}
1329EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
1330
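/* Illustrative sketch (assumed, not from this file) of the error path that
 * vhost_discard_vq_desc() exists for: a backend that has already pulled a
 * descriptor off the avail ring but then fails before completing the buffer
 * can rewind last_avail_idx so the same head is handed out again later.
 * The function name, local array size and the simulated failure are
 * assumptions. */
static int example_try_one(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	struct iovec iov[64];		/* arbitrary example size */
	unsigned int out, in;
	int head, err;

	head = vhost_get_vq_desc(dev, vq, iov, ARRAY_SIZE(iov),
				 &out, &in, NULL, NULL);
	if (head < 0)
		return head;
	if (head == vq->num)
		return 0;		/* nothing available */

	/* Hypothetical backend step that may fail, e.g. a socket send. */
	err = 0;			/* pretend success for the sketch */
	if (err < 0) {
		/* Could not consume the buffer: give the descriptor back so
		 * the same head is returned on the next attempt. */
		vhost_discard_vq_desc(vq, 1);
		return err;
	}

	vhost_add_used_and_signal(dev, vq, head, 0);
	return 0;
}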
1331/* After we've used one of their buffers, we tell them about it.  We'll then
1332 * want to notify the guest, using eventfd. */
1333int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1334{
1335	struct vring_used_elem heads = { head, len };
1336
1337	return vhost_add_used_n(vq, &heads, 1);
1338}
1339EXPORT_SYMBOL_GPL(vhost_add_used);
1340
1341static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1342			    struct vring_used_elem *heads,
1343			    unsigned count)
1344{
1345	struct vring_used_elem __user *used;
1346	u16 old, new;
1347	int start;
1348
1349	start = vq->last_used_idx % vq->num;
1350	used = vq->used->ring + start;
1351	if (count == 1) {
1352		if (__put_user(heads[0].id, &used->id)) {
1353			vq_err(vq, "Failed to write used id");
1354			return -EFAULT;
1355		}
1356		if (__put_user(heads[0].len, &used->len)) {
1357			vq_err(vq, "Failed to write used len");
1358			return -EFAULT;
1359		}
1360	} else if (__copy_to_user(used, heads, count * sizeof *used)) {
1361		vq_err(vq, "Failed to write used");
1362		return -EFAULT;
1363	}
1364	if (unlikely(vq->log_used)) {
1365		/* Make sure data is seen before log. */
1366		smp_wmb();
1367		/* Log used ring entry write. */
1368		log_write(vq->log_base,
1369			  vq->log_addr +
1370			   ((void __user *)used - (void __user *)vq->used),
1371			  count * sizeof *used);
1372	}
1373	old = vq->last_used_idx;
1374	new = (vq->last_used_idx += count);
1375	/* If the driver goes a very long time without signalling, the
1376	 * used index might wrap around. If that happens, invalidate the
1377	 * signalled_used index we stored. TODO: make sure the driver
1378	 * signals at least once per 2^16 entries and remove this. */
1379	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1380		vq->signalled_used_valid = false;
1381	return 0;
1382}
1383
1384/* After we've used one of their buffers, we tell them about it.  We'll then
1385 * want to notify the guest, using eventfd. */
1386int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1387		     unsigned count)
1388{
1389	int start, n, r;
1390
1391	start = vq->last_used_idx % vq->num;
1392	n = vq->num - start;
1393	if (n < count) {
1394		r = __vhost_add_used_n(vq, heads, n);
1395		if (r < 0)
1396			return r;
1397		heads += n;
1398		count -= n;
1399	}
1400	r = __vhost_add_used_n(vq, heads, count);
1401
1402	/* Make sure buffer is written before we update index. */
1403	smp_wmb();
1404	if (put_user(vq->last_used_idx, &vq->used->idx)) {
1405		vq_err(vq, "Failed to increment used idx");
1406		return -EFAULT;
1407	}
1408	if (unlikely(vq->log_used)) {
1409		/* Log used index update. */
1410		log_write(vq->log_base,
1411			  vq->log_addr + offsetof(struct vring_used, idx),
1412			  sizeof vq->used->idx);
1413		if (vq->log_ctx)
1414			eventfd_signal(vq->log_ctx, 1);
1415	}
1416	return r;
1417}
1418EXPORT_SYMBOL_GPL(vhost_add_used_n);
1419
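/* Illustrative worked example of the wrap handling above, with assumed
 * numbers.  Suppose vq->num == 256, vq->last_used_idx == 1274 and
 * count == 10.  Then start == 1274 % 256 == 250 and n == 256 - 250 == 6:
 * the first __vhost_add_used_n() call fills used ring slots 250..255, and
 * the second call fills the remaining 4 entries into slots 0..3.  Only
 * after both writes does the smp_wmb()/put_user() pair publish the new
 * used->idx (1284) to the guest. */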
1420static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1421{
1422	__u16 old, new, event;
1423	bool v;
1424	/* Flush out used index updates. This is paired
1425	 * with the barrier that the Guest executes when enabling
1426	 * interrupts. */
1427	smp_mb();
1428
1429	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1430	    unlikely(vq->avail_idx == vq->last_avail_idx))
1431		return true;
1432
1433	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1434		__u16 flags;
1435		if (__get_user(flags, &vq->avail->flags)) {
1436			vq_err(vq, "Failed to get flags");
1437			return true;
1438		}
1439		return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
1440	}
1441	old = vq->signalled_used;
1442	v = vq->signalled_used_valid;
1443	new = vq->signalled_used = vq->last_used_idx;
1444	vq->signalled_used_valid = true;
1445
1446	if (unlikely(!v))
1447		return true;
1448
1449	if (get_user(event, vhost_used_event(vq))) {
1450		vq_err(vq, "Failed to get used event idx");
1451		return true;
1452	}
1453	return vring_need_event(event, new, old);
1454}
1455
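/* Illustrative worked example of the event-index test above, with assumed
 * values.  vring_need_event(event, new, old), from
 * include/uapi/linux/virtio_ring.h, evaluates
 *     (u16)(new - event - 1) < (u16)(new - old)
 * Suppose the last interrupt was sent at old == 10, we have now reached
 * new == 15, and the guest asked (via the used_event field read through
 * vhost_used_event()) to be interrupted once entry 12 is used: then
 * (15 - 12 - 1) == 2 < (15 - 10) == 5, so the guest is signalled.  Had the
 * guest set used_event to 20, the left-hand side would wrap to 65530 and
 * no interrupt would be raised yet. */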
1456/* This actually signals the guest, using eventfd. */
1457void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1458{
1459	/* Signal the Guest to tell them we used something up. */
1460	if (vq->call_ctx && vhost_notify(dev, vq))
1461		eventfd_signal(vq->call_ctx, 1);
1462}
1463EXPORT_SYMBOL_GPL(vhost_signal);
1464
1465/* And here's the combo meal deal.  Supersize me! */
1466void vhost_add_used_and_signal(struct vhost_dev *dev,
1467			       struct vhost_virtqueue *vq,
1468			       unsigned int head, int len)
1469{
1470	vhost_add_used(vq, head, len);
1471	vhost_signal(dev, vq);
1472}
1473EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
1474
1475/* multi-buffer version of vhost_add_used_and_signal */
1476void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1477				 struct vhost_virtqueue *vq,
1478				 struct vring_used_elem *heads, unsigned count)
1479{
1480	vhost_add_used_n(vq, heads, count);
1481	vhost_signal(dev, vq);
1482}
1483EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
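/* Hypothetical sketch (not part of this driver) of batched completion with
 * vhost_add_used_and_signal_n().  The batch size and the way heads are
 * collected are assumptions; the vhost_* calls and the vring_used_elem
 * fields come from the code above. */
static void example_complete_batch(struct vhost_dev *dev,
				   struct vhost_virtqueue *vq)
{
	struct vring_used_elem heads[16];	/* arbitrary example batch */
	struct iovec iov[64];			/* arbitrary example size */
	unsigned int out, in, done = 0;
	int head;

	while (done < ARRAY_SIZE(heads)) {
		head = vhost_get_vq_desc(dev, vq, iov, ARRAY_SIZE(iov),
					 &out, &in, NULL, NULL);
		if (head < 0 || head == vq->num)
			break;
		/* ... consume the buffer; this tx-style example writes
		 * nothing back into the guest buffer ... */
		heads[done].id = head;
		heads[done].len = 0;
		done++;
	}
	if (done)
		/* One used-index update and at most one eventfd signal for
		 * the whole batch, instead of one per buffer. */
		vhost_add_used_and_signal_n(dev, vq, heads, done);
}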
1484
1485/* OK, now we need to know about added descriptors. */
1486bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1487{
1488	u16 avail_idx;
1489	int r;
1490
1491	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1492		return false;
1493	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1494	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1495		r = vhost_update_used_flags(vq);
1496		if (r) {
1497			vq_err(vq, "Failed to enable notification at %p: %d\n",
1498			       &vq->used->flags, r);
1499			return false;
1500		}
1501	} else {
1502		r = vhost_update_avail_event(vq, vq->avail_idx);
1503		if (r) {
1504			vq_err(vq, "Failed to update avail event index at %p: %d\n",
1505			       vhost_avail_event(vq), r);
1506			return false;
1507		}
1508	}
1509	/* They could have slipped one in as we were doing that: make
1510	 * sure it's written, then check again. */
1511	smp_mb();
1512	r = __get_user(avail_idx, &vq->avail->idx);
1513	if (r) {
1514		vq_err(vq, "Failed to check avail idx at %p: %d\n",
1515		       &vq->avail->idx, r);
1516		return false;
1517	}
1518
1519	return avail_idx != vq->avail_idx;
1520}
1521EXPORT_SYMBOL_GPL(vhost_enable_notify);
1522
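/* Hypothetical sketch of the usual notification dance built on
 * vhost_disable_notify()/vhost_enable_notify(), modelled on in-kernel
 * backends such as vhost-net; the handler name and loop shape are
 * assumptions.  The race-closing recheck relies on vhost_enable_notify()
 * returning true when new buffers slipped in while notifications were off. */
static void example_handle_kick(struct vhost_dev *dev,
				struct vhost_virtqueue *vq)
{
	struct iovec iov[64];		/* arbitrary example size */
	unsigned int out, in;
	int head;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(dev, vq);	/* we are polling, stop guest kicks */

	for (;;) {
		head = vhost_get_vq_desc(dev, vq, iov, ARRAY_SIZE(iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;
		if (head == vq->num) {
			/* Ring looks empty: re-enable notifications, then
			 * recheck in case the guest raced with us. */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		/* ... consume the buffer ... */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}
	mutex_unlock(&vq->mutex);
}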
1523/* We don't need to be notified again. */
1524void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1525{
1526	int r;
1527
1528	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1529		return;
1530	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1531	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1532		r = vhost_update_used_flags(vq);
1533		if (r)
1534			vq_err(vq, "Failed to disable notification at %p: %d\n",
1535			       &vq->used->flags, r);
1536	}
1537}
1538EXPORT_SYMBOL_GPL(vhost_disable_notify);
1539
1540static int __init vhost_init(void)
1541{
1542	return 0;
1543}
1544
1545static void __exit vhost_exit(void)
1546{
1547}
1548
1549module_init(vhost_init);
1550module_exit(vhost_exit);
1551
1552MODULE_VERSION("0.0.1");
1553MODULE_LICENSE("GPL v2");
1554MODULE_AUTHOR("Michael S. Tsirkin");
1555MODULE_DESCRIPTION("Host kernel accelerator for virtio");