v3.1 (drivers/vhost/vhost.c)
   1/* Copyright (C) 2009 Red Hat, Inc.
   2 * Copyright (C) 2006 Rusty Russell IBM Corporation
   3 *
   4 * Author: Michael S. Tsirkin <mst@redhat.com>
   5 *
   6 * Inspiration, some code, and most witty comments come from
   7 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.
  10 *
  11 * Generic code for virtio server in host kernel.
  12 */
  13
  14#include <linux/eventfd.h>
  15#include <linux/vhost.h>
  16#include <linux/virtio_net.h>
  17#include <linux/mm.h>
  18#include <linux/mmu_context.h>
  19#include <linux/miscdevice.h>
  20#include <linux/mutex.h>
  21#include <linux/rcupdate.h>
  22#include <linux/poll.h>
  23#include <linux/file.h>
  24#include <linux/highmem.h>
  25#include <linux/slab.h>
  26#include <linux/kthread.h>
  27#include <linux/cgroup.h>
  28
  29#include <linux/net.h>
  30#include <linux/if_packet.h>
  31#include <linux/if_arp.h>
  32
  33#include "vhost.h"
  34
  35enum {
  36	VHOST_MEMORY_MAX_NREGIONS = 64,
  37	VHOST_MEMORY_F_LOG = 0x1,
  38};
  39
  40static unsigned vhost_zcopy_mask __read_mostly;
  41
  42#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
  43#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
  44
  45static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
  46			    poll_table *pt)
  47{
  48	struct vhost_poll *poll;
  49
  50	poll = container_of(pt, struct vhost_poll, table);
  51	poll->wqh = wqh;
  52	add_wait_queue(wqh, &poll->wait);
  53}
  54
  55static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
  56			     void *key)
  57{
  58	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
  59
  60	if (!((unsigned long)key & poll->mask))
  61		return 0;
  62
  63	vhost_poll_queue(poll);
  64	return 0;
  65}
  66
  67static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
  68{
  69	INIT_LIST_HEAD(&work->node);
  70	work->fn = fn;
  71	init_waitqueue_head(&work->done);
  72	work->flushing = 0;
  73	work->queue_seq = work->done_seq = 0;
  74}
  75
  76/* Init poll structure */
  77void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
  78		     unsigned long mask, struct vhost_dev *dev)
  79{
  80	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
  81	init_poll_funcptr(&poll->table, vhost_poll_func);
  82	poll->mask = mask;
  83	poll->dev = dev;
  84
  85	vhost_work_init(&poll->work, fn);
  86}
  87
  88/* Start polling a file. We add ourselves to file's wait queue. The caller must
  89 * keep a reference to a file until after vhost_poll_stop is called. */
  90void vhost_poll_start(struct vhost_poll *poll, struct file *file)
  91{
  92	unsigned long mask;
  93
  94	mask = file->f_op->poll(file, &poll->table);
  95	if (mask)
  96		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
  97}
  98
  99/* Stop polling a file. After this function returns, it becomes safe to drop the
 100 * file reference. You must also flush afterwards. */
 101void vhost_poll_stop(struct vhost_poll *poll)
 102{
 103	remove_wait_queue(poll->wqh, &poll->wait);
 104}
 105
 106static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 107				unsigned seq)
 108{
 109	int left;
 110
 111	spin_lock_irq(&dev->work_lock);
 112	left = seq - work->done_seq;
 113	spin_unlock_irq(&dev->work_lock);
 114	return left <= 0;
 115}
 116
 117static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 118{
 119	unsigned seq;
 120	int flushing;
 121
 122	spin_lock_irq(&dev->work_lock);
 123	seq = work->queue_seq;
 124	work->flushing++;
 125	spin_unlock_irq(&dev->work_lock);
 126	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 127	spin_lock_irq(&dev->work_lock);
 128	flushing = --work->flushing;
 129	spin_unlock_irq(&dev->work_lock);
 130	BUG_ON(flushing < 0);
 131}
 132
 133/* Flush any work that has been scheduled. When calling this, don't hold any
 134 * locks that are also used by the callback. */
 135void vhost_poll_flush(struct vhost_poll *poll)
 136{
 137	vhost_work_flush(poll->dev, &poll->work);
 138}
 139
 140static inline void vhost_work_queue(struct vhost_dev *dev,
 141				    struct vhost_work *work)
 142{
 143	unsigned long flags;
 144
 145	spin_lock_irqsave(&dev->work_lock, flags);
 146	if (list_empty(&work->node)) {
 147		list_add_tail(&work->node, &dev->work_list);
 148		work->queue_seq++;
 149		wake_up_process(dev->worker);
 150	}
 151	spin_unlock_irqrestore(&dev->work_lock, flags);
 152}
 153
 154void vhost_poll_queue(struct vhost_poll *poll)
 155{
 156	vhost_work_queue(poll->dev, &poll->work);
 157}
 158
 159static void vhost_vq_reset(struct vhost_dev *dev,
 160			   struct vhost_virtqueue *vq)
 161{
 162	vq->num = 1;
 163	vq->desc = NULL;
 164	vq->avail = NULL;
 165	vq->used = NULL;
 166	vq->last_avail_idx = 0;
 167	vq->avail_idx = 0;
 168	vq->last_used_idx = 0;
 169	vq->signalled_used = 0;
 170	vq->signalled_used_valid = false;
 171	vq->used_flags = 0;
 172	vq->log_used = false;
 173	vq->log_addr = -1ull;
 174	vq->vhost_hlen = 0;
 175	vq->sock_hlen = 0;
 176	vq->private_data = NULL;
 177	vq->log_base = NULL;
 178	vq->error_ctx = NULL;
 179	vq->error = NULL;
 180	vq->kick = NULL;
 181	vq->call_ctx = NULL;
 182	vq->call = NULL;
 183	vq->log_ctx = NULL;
 184	vq->upend_idx = 0;
 185	vq->done_idx = 0;
 186	vq->ubufs = NULL;
 187}
 188
 189static int vhost_worker(void *data)
 190{
 191	struct vhost_dev *dev = data;
 192	struct vhost_work *work = NULL;
 193	unsigned uninitialized_var(seq);
 194
 195	use_mm(dev->mm);
 196
 197	for (;;) {
 198		/* mb paired w/ kthread_stop */
 199		set_current_state(TASK_INTERRUPTIBLE);
 200
 201		spin_lock_irq(&dev->work_lock);
 202		if (work) {
 203			work->done_seq = seq;
 204			if (work->flushing)
 205				wake_up_all(&work->done);
 206		}
 207
 208		if (kthread_should_stop()) {
 209			spin_unlock_irq(&dev->work_lock);
 210			__set_current_state(TASK_RUNNING);
 211			break;
 212		}
 213		if (!list_empty(&dev->work_list)) {
 214			work = list_first_entry(&dev->work_list,
 215						struct vhost_work, node);
 216			list_del_init(&work->node);
 217			seq = work->queue_seq;
 218		} else
 219			work = NULL;
 220		spin_unlock_irq(&dev->work_lock);
 221
 222		if (work) {
 223			__set_current_state(TASK_RUNNING);
 224			work->fn(work);
 225		} else
 226			schedule();
 227
 228	}
 229	unuse_mm(dev->mm);
 230	return 0;
 231}
 232
 233static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 234{
 235	kfree(vq->indirect);
 236	vq->indirect = NULL;
 237	kfree(vq->log);
 238	vq->log = NULL;
 239	kfree(vq->heads);
 240	vq->heads = NULL;
 241	kfree(vq->ubuf_info);
 242	vq->ubuf_info = NULL;
 243}
 244
 245void vhost_enable_zcopy(int vq)
 246{
 247	vhost_zcopy_mask |= 0x1 << vq;
 248}
 249
 250/* Helper to allocate iovec buffers for all vqs. */
 251static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 252{
 253	int i;
 254	bool zcopy;
 255
 256	for (i = 0; i < dev->nvqs; ++i) {
 257		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
 258					       UIO_MAXIOV, GFP_KERNEL);
 259		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
 260					  GFP_KERNEL);
 261		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
 262					    UIO_MAXIOV, GFP_KERNEL);
 263		zcopy = vhost_zcopy_mask & (0x1 << i);
 264		if (zcopy)
 265			dev->vqs[i].ubuf_info =
 266				kmalloc(sizeof *dev->vqs[i].ubuf_info *
 267					UIO_MAXIOV, GFP_KERNEL);
 268		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
 269			!dev->vqs[i].heads ||
 270			(zcopy && !dev->vqs[i].ubuf_info))
 271			goto err_nomem;
 272	}
 273	return 0;
 274
 275err_nomem:
 276	for (; i >= 0; --i)
 277		vhost_vq_free_iovecs(&dev->vqs[i]);
 278	return -ENOMEM;
 279}
 280
 281static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 282{
 283	int i;
 284
 285	for (i = 0; i < dev->nvqs; ++i)
 286		vhost_vq_free_iovecs(&dev->vqs[i]);
 287}
 288
 289long vhost_dev_init(struct vhost_dev *dev,
 290		    struct vhost_virtqueue *vqs, int nvqs)
 291{
 292	int i;
 293
 294	dev->vqs = vqs;
 295	dev->nvqs = nvqs;
 296	mutex_init(&dev->mutex);
 297	dev->log_ctx = NULL;
 298	dev->log_file = NULL;
 299	dev->memory = NULL;
 300	dev->mm = NULL;
 301	spin_lock_init(&dev->work_lock);
 302	INIT_LIST_HEAD(&dev->work_list);
 303	dev->worker = NULL;
 304
 305	for (i = 0; i < dev->nvqs; ++i) {
 306		dev->vqs[i].log = NULL;
 307		dev->vqs[i].indirect = NULL;
 308		dev->vqs[i].heads = NULL;
 309		dev->vqs[i].ubuf_info = NULL;
 310		dev->vqs[i].dev = dev;
 311		mutex_init(&dev->vqs[i].mutex);
 312		vhost_vq_reset(dev, dev->vqs + i);
 313		if (dev->vqs[i].handle_kick)
 314			vhost_poll_init(&dev->vqs[i].poll,
 315					dev->vqs[i].handle_kick, POLLIN, dev);
 316	}
 317
 318	return 0;
 319}
 320
 321/* Caller should have device mutex */
 322long vhost_dev_check_owner(struct vhost_dev *dev)
 323{
 324	/* Are you the owner? If not, I don't think you mean to do that */
 325	return dev->mm == current->mm ? 0 : -EPERM;
 326}
 327
 328struct vhost_attach_cgroups_struct {
 329	struct vhost_work work;
 330	struct task_struct *owner;
 331	int ret;
 332};
 333
 334static void vhost_attach_cgroups_work(struct vhost_work *work)
 335{
 336	struct vhost_attach_cgroups_struct *s;
 337
 338	s = container_of(work, struct vhost_attach_cgroups_struct, work);
 339	s->ret = cgroup_attach_task_all(s->owner, current);
 340}
 341
 342static int vhost_attach_cgroups(struct vhost_dev *dev)
 343{
 344	struct vhost_attach_cgroups_struct attach;
 345
 346	attach.owner = current;
 347	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 348	vhost_work_queue(dev, &attach.work);
 349	vhost_work_flush(dev, &attach.work);
 350	return attach.ret;
 351}
 352
 353/* Caller should have device mutex */
 354static long vhost_dev_set_owner(struct vhost_dev *dev)
 355{
 356	struct task_struct *worker;
 357	int err;
 358
 359	/* Is there an owner already? */
 360	if (dev->mm) {
 361		err = -EBUSY;
 362		goto err_mm;
 363	}
 364
 365	/* No owner, become one */
 366	dev->mm = get_task_mm(current);
 367	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 368	if (IS_ERR(worker)) {
 369		err = PTR_ERR(worker);
 370		goto err_worker;
 371	}
 372
 373	dev->worker = worker;
 374	wake_up_process(worker);	/* avoid contributing to loadavg */
 375
 376	err = vhost_attach_cgroups(dev);
 377	if (err)
 378		goto err_cgroup;
 379
 380	err = vhost_dev_alloc_iovecs(dev);
 381	if (err)
 382		goto err_cgroup;
 383
 384	return 0;
 385err_cgroup:
 386	kthread_stop(worker);
 387	dev->worker = NULL;
 388err_worker:
 389	if (dev->mm)
 390		mmput(dev->mm);
 391	dev->mm = NULL;
 392err_mm:
 393	return err;
 394}
 395
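/*
 * Editor's illustration (not part of vhost.c): a minimal userspace sketch of
 * how a process becomes the owner of a vhost device, assuming vhost-net is
 * available as /dev/vhost-net. Error handling is kept to a minimum.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int become_vhost_owner(void)
{
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-net");
		return -1;
	}
	/* Reaches vhost_dev_set_owner() above: records our mm, spawns the
	 * "vhost-<pid>" worker kthread and attaches it to our cgroups.
	 * A second VHOST_SET_OWNER on the same fd fails with EBUSY. */
	if (ioctl(fd, VHOST_SET_OWNER) < 0) {
		perror("VHOST_SET_OWNER");
		return -1;
	}
	return fd;
}

int main(void)
{
	return become_vhost_owner() < 0 ? 1 : 0;
}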
 396/* Caller should have device mutex */
 397long vhost_dev_reset_owner(struct vhost_dev *dev)
 398{
 399	struct vhost_memory *memory;
 400
 401	/* Restore memory to default empty mapping. */
 402	memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 403	if (!memory)
 404		return -ENOMEM;
 405
 406	vhost_dev_cleanup(dev);
 407
 408	memory->nregions = 0;
 409	RCU_INIT_POINTER(dev->memory, memory);
 410	return 0;
 411}
 412
 413/* The lower device driver may complete DMAs out of order. upend_idx tracks
 414 * the end of used idx, done_idx tracks the head of used idx. Once the lower
 415 * device has completed DMAs contiguously, we signal the used idx to the KVM
 416 * guest.
 417 */
 418int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
 419{
 420	int i;
 421	int j = 0;
 422
 423	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
 424		if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) {
 425			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
 426			vhost_add_used_and_signal(vq->dev, vq,
 427						  vq->heads[i].id, 0);
 428			++j;
 429		} else
 430			break;
 431	}
 432	if (j)
 433		vq->done_idx = i;
 434	return j;
 435}
 436
 437/* Caller should have device mutex */
 438void vhost_dev_cleanup(struct vhost_dev *dev)
 439{
 440	int i;
 441
 442	for (i = 0; i < dev->nvqs; ++i) {
 443		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
 444			vhost_poll_stop(&dev->vqs[i].poll);
 445			vhost_poll_flush(&dev->vqs[i].poll);
 446		}
 447		/* Wait for all lower device DMAs done. */
 448		if (dev->vqs[i].ubufs)
 449			vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);
 450
 451		/* Signal guest as appropriate. */
 452		vhost_zerocopy_signal_used(&dev->vqs[i]);
 453
 454		if (dev->vqs[i].error_ctx)
 455			eventfd_ctx_put(dev->vqs[i].error_ctx);
 456		if (dev->vqs[i].error)
 457			fput(dev->vqs[i].error);
 458		if (dev->vqs[i].kick)
 459			fput(dev->vqs[i].kick);
 460		if (dev->vqs[i].call_ctx)
 461			eventfd_ctx_put(dev->vqs[i].call_ctx);
 462		if (dev->vqs[i].call)
 463			fput(dev->vqs[i].call);
 464		vhost_vq_reset(dev, dev->vqs + i);
 465	}
 466	vhost_dev_free_iovecs(dev);
 467	if (dev->log_ctx)
 468		eventfd_ctx_put(dev->log_ctx);
 469	dev->log_ctx = NULL;
 470	if (dev->log_file)
 471		fput(dev->log_file);
 472	dev->log_file = NULL;
 473	/* No one will access memory at this point */
 474	kfree(rcu_dereference_protected(dev->memory,
 475					lockdep_is_held(&dev->mutex)));
 476	RCU_INIT_POINTER(dev->memory, NULL);
 477	WARN_ON(!list_empty(&dev->work_list));
 478	if (dev->worker) {
 479		kthread_stop(dev->worker);
 480		dev->worker = NULL;
 481	}
 482	if (dev->mm)
 483		mmput(dev->mm);
 484	dev->mm = NULL;
 485}
 486
 487static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 488{
 489	u64 a = addr / VHOST_PAGE_SIZE / 8;
 490
 491	/* Make sure 64 bit math will not overflow. */
 492	if (a > ULONG_MAX - (unsigned long)log_base ||
 493	    a + (unsigned long)log_base > ULONG_MAX)
 494		return 0;
 495
 496	return access_ok(VERIFY_WRITE, log_base + a,
 497			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 498}
 499
 500/* Caller should have vq mutex and device mutex. */
 501static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 502			       int log_all)
 503{
 504	int i;
 505
 506	if (!mem)
 507		return 0;
 508
 509	for (i = 0; i < mem->nregions; ++i) {
 510		struct vhost_memory_region *m = mem->regions + i;
 511		unsigned long a = m->userspace_addr;
 512		if (m->memory_size > ULONG_MAX)
 513			return 0;
 514		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
 515				    m->memory_size))
 516			return 0;
 517		else if (log_all && !log_access_ok(log_base,
 518						   m->guest_phys_addr,
 519						   m->memory_size))
 520			return 0;
 521	}
 522	return 1;
 523}
 524
 525/* Can we switch to this memory table? */
 526/* Caller should have device mutex but not vq mutex */
 527static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 528			    int log_all)
 529{
 530	int i;
 531
 532	for (i = 0; i < d->nvqs; ++i) {
 533		int ok;
 534		mutex_lock(&d->vqs[i].mutex);
 535		/* If ring is inactive, will check when it's enabled. */
 536		if (d->vqs[i].private_data)
 537			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
 538						 log_all);
 539		else
 540			ok = 1;
 541		mutex_unlock(&d->vqs[i].mutex);
 542		if (!ok)
 543			return 0;
 544	}
 545	return 1;
 546}
 547
 548static int vq_access_ok(struct vhost_dev *d, unsigned int num,
 549			struct vring_desc __user *desc,
 550			struct vring_avail __user *avail,
 551			struct vring_used __user *used)
 552{
 553	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 554	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 555	       access_ok(VERIFY_READ, avail,
 556			 sizeof *avail + num * sizeof *avail->ring + s) &&
 557	       access_ok(VERIFY_WRITE, used,
 558			sizeof *used + num * sizeof *used->ring + s);
 559}
 560
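/*
 * Editor's illustration (not part of vhost.c): the byte counts that
 * vq_access_ok() hands to access_ok(), written out for a ring of num entries
 * using local stand-ins for the vring structs. The extra two bytes ("s")
 * hold used_event/avail_event when VIRTIO_RING_F_EVENT_IDX was negotiated.
 */
#include <stdio.h>
#include <stdint.h>

struct vring_desc      { uint64_t addr; uint32_t len; uint16_t flags, next; };
struct vring_avail     { uint16_t flags, idx, ring[]; };
struct vring_used_elem { uint32_t id, len; };
struct vring_used      { uint16_t flags, idx; struct vring_used_elem ring[]; };

int main(void)
{
	unsigned num = 256;	/* ring size: a power of two, at most 0xffff */
	size_t s = 2;		/* event idx negotiated */

	printf("desc:  %zu bytes\n", num * sizeof(struct vring_desc));	  /* 4096 */
	printf("avail: %zu bytes\n",
	       sizeof(struct vring_avail) + num * sizeof(uint16_t) + s); /* 518 */
	printf("used:  %zu bytes\n",
	       sizeof(struct vring_used) +
	       num * sizeof(struct vring_used_elem) + s);		  /* 2054 */
	return 0;
}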
 561/* Can we log writes? */
 562/* Caller should have device mutex but not vq mutex */
 563int vhost_log_access_ok(struct vhost_dev *dev)
 564{
 565	struct vhost_memory *mp;
 566
 567	mp = rcu_dereference_protected(dev->memory,
 568				       lockdep_is_held(&dev->mutex));
 569	return memory_access_ok(dev, mp, 1);
 570}
 571
 572/* Verify access for write logging. */
 573/* Caller should have vq mutex and device mutex */
 574static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 575			    void __user *log_base)
 576{
 577	struct vhost_memory *mp;
 578	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 579
 580	mp = rcu_dereference_protected(vq->dev->memory,
 581				       lockdep_is_held(&vq->mutex));
 582	return vq_memory_access_ok(log_base, mp,
 583			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
 584		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
 585					sizeof *vq->used +
 586					vq->num * sizeof *vq->used->ring + s));
 587}
 588
 589/* Can we start vq? */
 590/* Caller should have vq mutex and device mutex */
 591int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 592{
 593	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
 594		vq_log_access_ok(vq->dev, vq, vq->log_base);
 595}
 596
 597static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 598{
 599	struct vhost_memory mem, *newmem, *oldmem;
 600	unsigned long size = offsetof(struct vhost_memory, regions);
 601
 602	if (copy_from_user(&mem, m, size))
 603		return -EFAULT;
 604	if (mem.padding)
 605		return -EOPNOTSUPP;
 606	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
 607		return -E2BIG;
 608	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
 609	if (!newmem)
 610		return -ENOMEM;
 611
 612	memcpy(newmem, &mem, size);
 613	if (copy_from_user(newmem->regions, m->regions,
 614			   mem.nregions * sizeof *m->regions)) {
 615		kfree(newmem);
 616		return -EFAULT;
 617	}
 618
 619	if (!memory_access_ok(d, newmem,
 620			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
 621		kfree(newmem);
 622		return -EFAULT;
 623	}
 624	oldmem = rcu_dereference_protected(d->memory,
 625					   lockdep_is_held(&d->mutex));
 626	rcu_assign_pointer(d->memory, newmem);
 627	synchronize_rcu();
 628	kfree(oldmem);
 629	return 0;
 630}
 631
 632static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
 633{
 634	struct file *eventfp, *filep = NULL,
 635		    *pollstart = NULL, *pollstop = NULL;
 636	struct eventfd_ctx *ctx = NULL;
 637	u32 __user *idxp = argp;
 638	struct vhost_virtqueue *vq;
 639	struct vhost_vring_state s;
 640	struct vhost_vring_file f;
 641	struct vhost_vring_addr a;
 642	u32 idx;
 643	long r;
 644
 645	r = get_user(idx, idxp);
 646	if (r < 0)
 647		return r;
 648	if (idx >= d->nvqs)
 649		return -ENOBUFS;
 650
 651	vq = d->vqs + idx;
 652
 653	mutex_lock(&vq->mutex);
 654
 655	switch (ioctl) {
 656	case VHOST_SET_VRING_NUM:
 657		/* Resizing ring with an active backend?
 658		 * You don't want to do that. */
 659		if (vq->private_data) {
 660			r = -EBUSY;
 661			break;
 662		}
 663		if (copy_from_user(&s, argp, sizeof s)) {
 664			r = -EFAULT;
 665			break;
 666		}
 667		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
 668			r = -EINVAL;
 669			break;
 670		}
 671		vq->num = s.num;
 672		break;
 673	case VHOST_SET_VRING_BASE:
 674		/* Moving base with an active backend?
 675		 * You don't want to do that. */
 676		if (vq->private_data) {
 677			r = -EBUSY;
 678			break;
 679		}
 680		if (copy_from_user(&s, argp, sizeof s)) {
 681			r = -EFAULT;
 682			break;
 683		}
 684		if (s.num > 0xffff) {
 685			r = -EINVAL;
 686			break;
 687		}
 688		vq->last_avail_idx = s.num;
 689		/* Forget the cached index value. */
 690		vq->avail_idx = vq->last_avail_idx;
 691		break;
 692	case VHOST_GET_VRING_BASE:
 693		s.index = idx;
 694		s.num = vq->last_avail_idx;
 695		if (copy_to_user(argp, &s, sizeof s))
 696			r = -EFAULT;
 697		break;
 698	case VHOST_SET_VRING_ADDR:
 699		if (copy_from_user(&a, argp, sizeof a)) {
 700			r = -EFAULT;
 701			break;
 702		}
 703		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
 704			r = -EOPNOTSUPP;
 705			break;
 706		}
 707		/* For 32bit, verify that the top 32bits of the user
 708		   data are set to zero. */
 709		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
 710		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
 711		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
 712			r = -EFAULT;
 713			break;
 714		}
 715		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
 716		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
 717		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
 718			r = -EINVAL;
 719			break;
 720		}
 721
 722		/* We only verify access here if backend is configured.
 723		 * If it is not, we don't as size might not have been setup.
 724		 * We will verify when backend is configured. */
 725		if (vq->private_data) {
 726			if (!vq_access_ok(d, vq->num,
 727				(void __user *)(unsigned long)a.desc_user_addr,
 728				(void __user *)(unsigned long)a.avail_user_addr,
 729				(void __user *)(unsigned long)a.used_user_addr)) {
 730				r = -EINVAL;
 731				break;
 732			}
 733
 734			/* Also validate log access for used ring if enabled. */
 735			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
 736			    !log_access_ok(vq->log_base, a.log_guest_addr,
 737					   sizeof *vq->used +
 738					   vq->num * sizeof *vq->used->ring)) {
 739				r = -EINVAL;
 740				break;
 741			}
 742		}
 743
 744		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
 745		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
 746		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
 747		vq->log_addr = a.log_guest_addr;
 748		vq->used = (void __user *)(unsigned long)a.used_user_addr;
 749		break;
 750	case VHOST_SET_VRING_KICK:
 751		if (copy_from_user(&f, argp, sizeof f)) {
 752			r = -EFAULT;
 753			break;
 754		}
 755		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 756		if (IS_ERR(eventfp)) {
 757			r = PTR_ERR(eventfp);
 758			break;
 759		}
 760		if (eventfp != vq->kick) {
 761			pollstop = filep = vq->kick;
 762			pollstart = vq->kick = eventfp;
 763		} else
 764			filep = eventfp;
 765		break;
 766	case VHOST_SET_VRING_CALL:
 767		if (copy_from_user(&f, argp, sizeof f)) {
 768			r = -EFAULT;
 769			break;
 770		}
 771		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 772		if (IS_ERR(eventfp)) {
 773			r = PTR_ERR(eventfp);
 774			break;
 775		}
 776		if (eventfp != vq->call) {
 777			filep = vq->call;
 778			ctx = vq->call_ctx;
 779			vq->call = eventfp;
 780			vq->call_ctx = eventfp ?
 781				eventfd_ctx_fileget(eventfp) : NULL;
 782		} else
 783			filep = eventfp;
 784		break;
 785	case VHOST_SET_VRING_ERR:
 786		if (copy_from_user(&f, argp, sizeof f)) {
 787			r = -EFAULT;
 788			break;
 789		}
 790		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 791		if (IS_ERR(eventfp)) {
 792			r = PTR_ERR(eventfp);
 793			break;
 794		}
 795		if (eventfp != vq->error) {
 796			filep = vq->error;
 797			vq->error = eventfp;
 798			ctx = vq->error_ctx;
 799			vq->error_ctx = eventfp ?
 800				eventfd_ctx_fileget(eventfp) : NULL;
 801		} else
 802			filep = eventfp;
 803		break;
 804	default:
 805		r = -ENOIOCTLCMD;
 806	}
 807
 808	if (pollstop && vq->handle_kick)
 809		vhost_poll_stop(&vq->poll);
 810
 811	if (ctx)
 812		eventfd_ctx_put(ctx);
 813	if (filep)
 814		fput(filep);
 815
 816	if (pollstart && vq->handle_kick)
 817		vhost_poll_start(&vq->poll, vq->kick);
 818
 819	mutex_unlock(&vq->mutex);
 820
 821	if (pollstop && vq->handle_kick)
 822		vhost_poll_flush(&vq->poll);
 823	return r;
 824}
 825
 826/* Caller must have device mutex */
 827long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
 828{
 829	void __user *argp = (void __user *)arg;
 830	struct file *eventfp, *filep = NULL;
 831	struct eventfd_ctx *ctx = NULL;
 832	u64 p;
 833	long r;
 834	int i, fd;
 835
 836	/* If you are not the owner, you can become one */
 837	if (ioctl == VHOST_SET_OWNER) {
 838		r = vhost_dev_set_owner(d);
 839		goto done;
 840	}
 841
 842	/* You must be the owner to do anything else */
 843	r = vhost_dev_check_owner(d);
 844	if (r)
 845		goto done;
 846
 847	switch (ioctl) {
 848	case VHOST_SET_MEM_TABLE:
 849		r = vhost_set_memory(d, argp);
 850		break;
 851	case VHOST_SET_LOG_BASE:
 852		if (copy_from_user(&p, argp, sizeof p)) {
 853			r = -EFAULT;
 854			break;
 855		}
 856		if ((u64)(unsigned long)p != p) {
 857			r = -EFAULT;
 858			break;
 859		}
 860		for (i = 0; i < d->nvqs; ++i) {
 861			struct vhost_virtqueue *vq;
 862			void __user *base = (void __user *)(unsigned long)p;
 863			vq = d->vqs + i;
 864			mutex_lock(&vq->mutex);
 865			/* If ring is inactive, will check when it's enabled. */
 866			if (vq->private_data && !vq_log_access_ok(d, vq, base))
 867				r = -EFAULT;
 868			else
 869				vq->log_base = base;
 870			mutex_unlock(&vq->mutex);
 871		}
 872		break;
 873	case VHOST_SET_LOG_FD:
 874		r = get_user(fd, (int __user *)argp);
 875		if (r < 0)
 876			break;
 877		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
 878		if (IS_ERR(eventfp)) {
 879			r = PTR_ERR(eventfp);
 880			break;
 881		}
 882		if (eventfp != d->log_file) {
 883			filep = d->log_file;
 884			ctx = d->log_ctx;
 885			d->log_ctx = eventfp ?
 886				eventfd_ctx_fileget(eventfp) : NULL;
 887		} else
 888			filep = eventfp;
 889		for (i = 0; i < d->nvqs; ++i) {
 890			mutex_lock(&d->vqs[i].mutex);
 891			d->vqs[i].log_ctx = d->log_ctx;
 892			mutex_unlock(&d->vqs[i].mutex);
 893		}
 894		if (ctx)
 895			eventfd_ctx_put(ctx);
 896		if (filep)
 897			fput(filep);
 898		break;
 899	default:
 900		r = vhost_set_vring(d, ioctl, argp);
 901		break;
 902	}
 903done:
 904	return r;
 905}
 906
 907static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 908						     __u64 addr, __u32 len)
 909{
 910	struct vhost_memory_region *reg;
 911	int i;
 912
 913	/* linear search is not brilliant, but we really have on the order of 6
 914	 * regions in practice */
 915	for (i = 0; i < mem->nregions; ++i) {
 916		reg = mem->regions + i;
 917		if (reg->guest_phys_addr <= addr &&
 918		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
 919			return reg;
 920	}
 921	return NULL;
 922}
 923
 924/* TODO: This is really inefficient.  We need something like get_user()
 925 * (instruction directly accesses the data, with an exception table entry
 926 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 927 */
 928static int set_bit_to_user(int nr, void __user *addr)
 929{
 930	unsigned long log = (unsigned long)addr;
 931	struct page *page;
 932	void *base;
 933	int bit = nr + (log % PAGE_SIZE) * 8;
 934	int r;
 935
 936	r = get_user_pages_fast(log, 1, 1, &page);
 937	if (r < 0)
 938		return r;
 939	BUG_ON(r != 1);
 940	base = kmap_atomic(page, KM_USER0);
 941	set_bit(bit, base);
 942	kunmap_atomic(base, KM_USER0);
 943	set_page_dirty_lock(page);
 944	put_page(page);
 945	return 0;
 946}
 947
 948static int log_write(void __user *log_base,
 949		     u64 write_address, u64 write_length)
 950{
 951	u64 write_page = write_address / VHOST_PAGE_SIZE;
 952	int r;
 953
 954	if (!write_length)
 955		return 0;
 956	write_length += write_address % VHOST_PAGE_SIZE;
 957	for (;;) {
 958		u64 base = (u64)(unsigned long)log_base;
 959		u64 log = base + write_page / 8;
 960		int bit = write_page % 8;
 961		if ((u64)(unsigned long)log != log)
 962			return -EFAULT;
 963		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
 964		if (r < 0)
 965			return r;
 966		if (write_length <= VHOST_PAGE_SIZE)
 967			break;
 968		write_length -= VHOST_PAGE_SIZE;
 969		write_page += 1;
 970	}
 971	return r;
 972}
 973
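/*
 * Editor's illustration (not part of vhost.c): the same dirty-log math that
 * log_write() performs, applied to a local bitmap instead of a userspace one.
 * One bit per VHOST_PAGE_SIZE page; a write of len bytes at guest address
 * addr dirties every page the range touches.
 */
#include <stdio.h>
#include <stdint.h>

#define VHOST_PAGE_SIZE 0x1000

static void log_write_model(uint8_t *log, uint64_t addr, uint64_t len)
{
	uint64_t page = addr / VHOST_PAGE_SIZE;

	if (!len)
		return;
	len += addr % VHOST_PAGE_SIZE;	/* account for offset into first page */
	for (;;) {
		log[page / 8] |= 1u << (page % 8);	/* set_bit_to_user() */
		if (len <= VHOST_PAGE_SIZE)
			break;
		len -= VHOST_PAGE_SIZE;
		page++;
	}
}

int main(void)
{
	uint8_t log[16] = { 0 };

	/* An 8KiB write starting 16 bytes into page 2 touches pages 2, 3, 4. */
	log_write_model(log, 2 * VHOST_PAGE_SIZE + 16, 8192);
	printf("log[0] = 0x%02x\n", log[0]);	/* 0x1c: bits 2, 3 and 4 set */
	return 0;
}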
 974int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 975		    unsigned int log_num, u64 len)
 976{
 977	int i, r;
 978
 979	/* Make sure data written is seen before log. */
 980	smp_wmb();
 981	for (i = 0; i < log_num; ++i) {
 982		u64 l = min(log[i].len, len);
 983		r = log_write(vq->log_base, log[i].addr, l);
 984		if (r < 0)
 985			return r;
 986		len -= l;
 987		if (!len) {
 988			if (vq->log_ctx)
 989				eventfd_signal(vq->log_ctx, 1);
 990			return 0;
 991		}
 992	}
 993	/* Length written exceeds what we have stored. This is a bug. */
 994	BUG();
 995	return 0;
 996}
 997
 998static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 999{
1000	void __user *used;
1001	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
1002		return -EFAULT;
1003	if (unlikely(vq->log_used)) {
1004		/* Make sure the flag is seen before log. */
1005		smp_wmb();
1006		/* Log used flag write. */
1007		used = &vq->used->flags;
1008		log_write(vq->log_base, vq->log_addr +
1009			  (used - (void __user *)vq->used),
1010			  sizeof vq->used->flags);
1011		if (vq->log_ctx)
1012			eventfd_signal(vq->log_ctx, 1);
1013	}
1014	return 0;
1015}
1016
1017static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1018{
1019	if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
1020		return -EFAULT;
1021	if (unlikely(vq->log_used)) {
1022		void __user *used;
1023		/* Make sure the event is seen before log. */
1024		smp_wmb();
1025		/* Log avail event write */
1026		used = vhost_avail_event(vq);
1027		log_write(vq->log_base, vq->log_addr +
1028			  (used - (void __user *)vq->used),
1029			  sizeof *vhost_avail_event(vq));
1030		if (vq->log_ctx)
1031			eventfd_signal(vq->log_ctx, 1);
1032	}
1033	return 0;
1034}
1035
1036int vhost_init_used(struct vhost_virtqueue *vq)
1037{
1038	int r;
1039	if (!vq->private_data)
1040		return 0;
1041
1042	r = vhost_update_used_flags(vq);
1043	if (r)
1044		return r;
1045	vq->signalled_used_valid = false;
1046	return get_user(vq->last_used_idx, &vq->used->idx);
1047}
1048
1049static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
1050			  struct iovec iov[], int iov_size)
1051{
1052	const struct vhost_memory_region *reg;
1053	struct vhost_memory *mem;
1054	struct iovec *_iov;
1055	u64 s = 0;
1056	int ret = 0;
1057
1058	rcu_read_lock();
1059
1060	mem = rcu_dereference(dev->memory);
1061	while ((u64)len > s) {
1062		u64 size;
1063		if (unlikely(ret >= iov_size)) {
1064			ret = -ENOBUFS;
1065			break;
1066		}
1067		reg = find_region(mem, addr, len);
1068		if (unlikely(!reg)) {
1069			ret = -EFAULT;
1070			break;
1071		}
1072		_iov = iov + ret;
1073		size = reg->memory_size - addr + reg->guest_phys_addr;
1074		_iov->iov_len = min((u64)len, size);
1075		_iov->iov_base = (void __user *)(unsigned long)
1076			(reg->userspace_addr + addr - reg->guest_phys_addr);
1077		s += size;
1078		addr += size;
1079		++ret;
1080	}
1081
1082	rcu_read_unlock();
1083	return ret;
1084}
1085
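/*
 * Editor's illustration (not part of vhost.c): the guest-physical to
 * userspace-virtual translation done by translate_desc(), using a toy
 * two-region table. A buffer that straddles a region boundary comes back as
 * two iovec entries. The region addresses are made up for the example.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/uio.h>

struct region { uint64_t gpa, size, uva; };

static const struct region regions[2] = {
	{ 0x0000, 0x8000, 0x7f0000000000ull },
	{ 0x8000, 0x8000, 0x7f1000000000ull },
};

static int translate(uint64_t addr, uint32_t len, struct iovec *iov, int iov_size)
{
	int ret = 0;
	uint64_t done = 0;

	while (len > done) {
		const struct region *reg = NULL;
		uint64_t size;
		int i;

		if (ret >= iov_size)
			return -1;	/* -ENOBUFS in the kernel */
		for (i = 0; i < 2; i++)	/* find_region() */
			if (regions[i].gpa <= addr &&
			    addr <= regions[i].gpa + regions[i].size - 1)
				reg = &regions[i];
		if (!reg)
			return -1;	/* -EFAULT in the kernel */
		size = reg->size - (addr - reg->gpa);	/* bytes left in region */
		iov[ret].iov_len = len - done < size ? len - done : size;
		iov[ret].iov_base = (void *)(uintptr_t)
			(reg->uva + addr - reg->gpa);
		done += iov[ret].iov_len;
		addr += iov[ret].iov_len;
		ret++;
	}
	return ret;
}

int main(void)
{
	struct iovec iov[4];
	int i, n = translate(0x7f00, 0x200, iov, 4);	/* crosses a boundary */

	for (i = 0; i < n; i++)
		printf("iov[%d]: base %p len %zu\n", i, iov[i].iov_base, iov[i].iov_len);
	return n < 0;
}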
1086/* Each buffer in the virtqueues is actually a chain of descriptors.  This
1087 * function returns the next descriptor in the chain,
1088 * or -1U if we're at the end. */
1089static unsigned next_desc(struct vring_desc *desc)
1090{
1091	unsigned int next;
1092
1093	/* If this descriptor says it doesn't chain, we're done. */
1094	if (!(desc->flags & VRING_DESC_F_NEXT))
1095		return -1U;
1096
1097	/* Check they're not leading us off end of descriptors. */
1098	next = desc->next;
1099	/* Make sure compiler knows to grab that: we don't want it changing! */
1100	/* We will use the result as an index in an array, so most
1101	 * architectures only need a compiler barrier here. */
1102	read_barrier_depends();
1103
1104	return next;
1105}
1106
1107static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1108			struct iovec iov[], unsigned int iov_size,
1109			unsigned int *out_num, unsigned int *in_num,
1110			struct vhost_log *log, unsigned int *log_num,
1111			struct vring_desc *indirect)
1112{
1113	struct vring_desc desc;
1114	unsigned int i = 0, count, found = 0;
1115	int ret;
1116
1117	/* Sanity check */
1118	if (unlikely(indirect->len % sizeof desc)) {
1119		vq_err(vq, "Invalid length in indirect descriptor: "
1120		       "len 0x%llx not multiple of 0x%zx\n",
1121		       (unsigned long long)indirect->len,
1122		       sizeof desc);
1123		return -EINVAL;
1124	}
1125
1126	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
1127			     UIO_MAXIOV);
1128	if (unlikely(ret < 0)) {
1129		vq_err(vq, "Translation failure %d in indirect.\n", ret);
1130		return ret;
1131	}
1132
1133	/* We will use the result as an address to read from, so most
1134	 * architectures only need a compiler barrier here. */
1135	read_barrier_depends();
1136
1137	count = indirect->len / sizeof desc;
1138	/* Buffers are chained via a 16 bit next field, so
1139	 * we can have at most 2^16 of these. */
1140	if (unlikely(count > USHRT_MAX + 1)) {
1141		vq_err(vq, "Indirect buffer length too big: %d\n",
1142		       indirect->len);
1143		return -E2BIG;
1144	}
1145
1146	do {
1147		unsigned iov_count = *in_num + *out_num;
1148		if (unlikely(++found > count)) {
1149			vq_err(vq, "Loop detected: last one at %u "
1150			       "indirect size %u\n",
1151			       i, count);
1152			return -EINVAL;
1153		}
1154		if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1155					      vq->indirect, sizeof desc))) {
1156			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1157			       i, (size_t)indirect->addr + i * sizeof desc);
1158			return -EINVAL;
1159		}
1160		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
1161			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1162			       i, (size_t)indirect->addr + i * sizeof desc);
1163			return -EINVAL;
1164		}
1165
1166		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1167				     iov_size - iov_count);
1168		if (unlikely(ret < 0)) {
1169			vq_err(vq, "Translation failure %d indirect idx %d\n",
1170			       ret, i);
1171			return ret;
1172		}
1173		/* If this is an input descriptor, increment that count. */
1174		if (desc.flags & VRING_DESC_F_WRITE) {
1175			*in_num += ret;
1176			if (unlikely(log)) {
1177				log[*log_num].addr = desc.addr;
1178				log[*log_num].len = desc.len;
1179				++*log_num;
1180			}
1181		} else {
1182			/* If it's an output descriptor, they're all supposed
1183			 * to come before any input descriptors. */
1184			if (unlikely(*in_num)) {
1185				vq_err(vq, "Indirect descriptor "
1186				       "has out after in: idx %d\n", i);
1187				return -EINVAL;
1188			}
1189			*out_num += ret;
1190		}
1191	} while ((i = next_desc(&desc)) != -1);
1192	return 0;
1193}
1194
 1195/* This looks in the virtqueue for the first available buffer, and converts
1196 * it to an iovec for convenient access.  Since descriptors consist of some
1197 * number of output then some number of input descriptors, it's actually two
1198 * iovecs, but we pack them into one and note how many of each there were.
1199 *
1200 * This function returns the descriptor number found, or vq->num (which is
1201 * never a valid descriptor number) if none was found.  A negative code is
1202 * returned on error. */
1203int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1204		      struct iovec iov[], unsigned int iov_size,
1205		      unsigned int *out_num, unsigned int *in_num,
1206		      struct vhost_log *log, unsigned int *log_num)
1207{
1208	struct vring_desc desc;
1209	unsigned int i, head, found = 0;
1210	u16 last_avail_idx;
1211	int ret;
1212
1213	/* Check it isn't doing very strange things with descriptor numbers. */
1214	last_avail_idx = vq->last_avail_idx;
1215	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
1216		vq_err(vq, "Failed to access avail idx at %p\n",
1217		       &vq->avail->idx);
1218		return -EFAULT;
1219	}
1220
1221	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1222		vq_err(vq, "Guest moved used index from %u to %u",
1223		       last_avail_idx, vq->avail_idx);
1224		return -EFAULT;
1225	}
1226
1227	/* If there's nothing new since last we looked, return invalid. */
1228	if (vq->avail_idx == last_avail_idx)
1229		return vq->num;
1230
1231	/* Only get avail ring entries after they have been exposed by guest. */
1232	smp_rmb();
1233
1234	/* Grab the next descriptor number they're advertising, and increment
1235	 * the index we've seen. */
1236	if (unlikely(__get_user(head,
1237				&vq->avail->ring[last_avail_idx % vq->num]))) {
1238		vq_err(vq, "Failed to read head: idx %d address %p\n",
1239		       last_avail_idx,
1240		       &vq->avail->ring[last_avail_idx % vq->num]);
1241		return -EFAULT;
1242	}
1243
1244	/* If their number is silly, that's an error. */
1245	if (unlikely(head >= vq->num)) {
1246		vq_err(vq, "Guest says index %u > %u is available",
1247		       head, vq->num);
1248		return -EINVAL;
1249	}
1250
1251	/* When we start there are none of either input nor output. */
1252	*out_num = *in_num = 0;
1253	if (unlikely(log))
1254		*log_num = 0;
1255
1256	i = head;
1257	do {
1258		unsigned iov_count = *in_num + *out_num;
1259		if (unlikely(i >= vq->num)) {
1260			vq_err(vq, "Desc index is %u > %u, head = %u",
1261			       i, vq->num, head);
1262			return -EINVAL;
1263		}
1264		if (unlikely(++found > vq->num)) {
1265			vq_err(vq, "Loop detected: last one at %u "
1266			       "vq size %u head %u\n",
1267			       i, vq->num, head);
1268			return -EINVAL;
1269		}
1270		ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
1271		if (unlikely(ret)) {
1272			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1273			       i, vq->desc + i);
1274			return -EFAULT;
1275		}
1276		if (desc.flags & VRING_DESC_F_INDIRECT) {
1277			ret = get_indirect(dev, vq, iov, iov_size,
1278					   out_num, in_num,
1279					   log, log_num, &desc);
1280			if (unlikely(ret < 0)) {
1281				vq_err(vq, "Failure detected "
1282				       "in indirect descriptor at idx %d\n", i);
1283				return ret;
1284			}
1285			continue;
1286		}
1287
1288		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1289				     iov_size - iov_count);
1290		if (unlikely(ret < 0)) {
1291			vq_err(vq, "Translation failure %d descriptor idx %d\n",
1292			       ret, i);
1293			return ret;
1294		}
1295		if (desc.flags & VRING_DESC_F_WRITE) {
1296			/* If this is an input descriptor,
1297			 * increment that count. */
1298			*in_num += ret;
1299			if (unlikely(log)) {
1300				log[*log_num].addr = desc.addr;
1301				log[*log_num].len = desc.len;
1302				++*log_num;
1303			}
1304		} else {
1305			/* If it's an output descriptor, they're all supposed
1306			 * to come before any input descriptors. */
1307			if (unlikely(*in_num)) {
1308				vq_err(vq, "Descriptor has out after in: "
1309				       "idx %d\n", i);
1310				return -EINVAL;
1311			}
1312			*out_num += ret;
1313		}
1314	} while ((i = next_desc(&desc)) != -1);
1315
1316	/* On success, increment avail index. */
1317	vq->last_avail_idx++;
1318
1319	/* Assume notifications from guest are disabled at this point,
1320	 * if they aren't we would need to update avail_event index. */
1321	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
1322	return head;
1323}
1324
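/*
 * Editor's illustration (not part of vhost.c): how vhost_get_vq_desc()
 * consumes the avail ring. The guest publishes avail->idx; vhost keeps a
 * private last_avail_idx and reads head indices from
 * ring[last_avail_idx % num]. Both indices are free-running u16 values, so
 * "nothing new" is plain equality and only the ring offset is taken mod num.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM 8	/* ring size, a power of two */

struct avail { uint16_t flags, idx, ring[NUM]; };

int main(void)
{
	struct avail a = { 0 };
	uint16_t last_avail_idx = 0;

	/* Guest side: make descriptor chains headed by 3 and 5 available. */
	a.ring[a.idx % NUM] = 3; a.idx++;
	a.ring[a.idx % NUM] = 5; a.idx++;

	/* Host side: drain everything new since we last looked. */
	while (last_avail_idx != a.idx) {
		uint16_t head = a.ring[last_avail_idx % NUM];

		if (head >= NUM)	/* "If their number is silly..." */
			return 1;
		printf("got head %u\n", head);
		last_avail_idx++;
	}
	return 0;
}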
1325/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
1326void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
1327{
1328	vq->last_avail_idx -= n;
1329}
1330
1331/* After we've used one of their buffers, we tell them about it.  We'll then
1332 * want to notify the guest, using eventfd. */
1333int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1334{
1335	struct vring_used_elem __user *used;
1336
1337	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
1338	 * next entry in that used ring. */
1339	used = &vq->used->ring[vq->last_used_idx % vq->num];
1340	if (__put_user(head, &used->id)) {
1341		vq_err(vq, "Failed to write used id");
1342		return -EFAULT;
1343	}
1344	if (__put_user(len, &used->len)) {
1345		vq_err(vq, "Failed to write used len");
1346		return -EFAULT;
1347	}
1348	/* Make sure buffer is written before we update index. */
1349	smp_wmb();
1350	if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
1351		vq_err(vq, "Failed to increment used idx");
1352		return -EFAULT;
1353	}
1354	if (unlikely(vq->log_used)) {
1355		/* Make sure data is seen before log. */
1356		smp_wmb();
1357		/* Log used ring entry write. */
1358		log_write(vq->log_base,
1359			  vq->log_addr +
1360			   ((void __user *)used - (void __user *)vq->used),
1361			  sizeof *used);
1362		/* Log used index update. */
1363		log_write(vq->log_base,
1364			  vq->log_addr + offsetof(struct vring_used, idx),
1365			  sizeof vq->used->idx);
1366		if (vq->log_ctx)
1367			eventfd_signal(vq->log_ctx, 1);
1368	}
1369	vq->last_used_idx++;
1370	/* If the driver never bothers to signal in a very long while,
1371	 * used index might wrap around. If that happens, invalidate
1372	 * signalled_used index we stored. TODO: make sure driver
1373	 * signals at least once in 2^16 and remove this. */
1374	if (unlikely(vq->last_used_idx == vq->signalled_used))
1375		vq->signalled_used_valid = false;
1376	return 0;
1377}
1378
1379static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1380			    struct vring_used_elem *heads,
1381			    unsigned count)
1382{
1383	struct vring_used_elem __user *used;
1384	u16 old, new;
1385	int start;
1386
1387	start = vq->last_used_idx % vq->num;
1388	used = vq->used->ring + start;
1389	if (__copy_to_user(used, heads, count * sizeof *used)) {
1390		vq_err(vq, "Failed to write used");
1391		return -EFAULT;
1392	}
1393	if (unlikely(vq->log_used)) {
1394		/* Make sure data is seen before log. */
1395		smp_wmb();
1396		/* Log used ring entry write. */
1397		log_write(vq->log_base,
1398			  vq->log_addr +
1399			   ((void __user *)used - (void __user *)vq->used),
1400			  count * sizeof *used);
1401	}
1402	old = vq->last_used_idx;
1403	new = (vq->last_used_idx += count);
1404	/* If the driver never bothers to signal in a very long while,
1405	 * used index might wrap around. If that happens, invalidate
1406	 * signalled_used index we stored. TODO: make sure driver
1407	 * signals at least once in 2^16 and remove this. */
1408	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1409		vq->signalled_used_valid = false;
1410	return 0;
1411}
1412
1413/* After we've used one of their buffers, we tell them about it.  We'll then
1414 * want to notify the guest, using eventfd. */
1415int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1416		     unsigned count)
1417{
1418	int start, n, r;
1419
1420	start = vq->last_used_idx % vq->num;
1421	n = vq->num - start;
1422	if (n < count) {
1423		r = __vhost_add_used_n(vq, heads, n);
1424		if (r < 0)
1425			return r;
1426		heads += n;
1427		count -= n;
1428	}
1429	r = __vhost_add_used_n(vq, heads, count);
1430
1431	/* Make sure buffer is written before we update index. */
1432	smp_wmb();
1433	if (put_user(vq->last_used_idx, &vq->used->idx)) {
1434		vq_err(vq, "Failed to increment used idx");
1435		return -EFAULT;
1436	}
1437	if (unlikely(vq->log_used)) {
1438		/* Log used index update. */
1439		log_write(vq->log_base,
1440			  vq->log_addr + offsetof(struct vring_used, idx),
1441			  sizeof vq->used->idx);
1442		if (vq->log_ctx)
1443			eventfd_signal(vq->log_ctx, 1);
1444	}
1445	return r;
1446}
1447
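/*
 * Editor's illustration (not part of vhost.c): the wraparound split in
 * vhost_add_used_n(). A batch of used entries that would run past the end of
 * the ring is written as two copies: the tail of the ring first, then the
 * wrapped remainder at slot 0. The index itself is free-running.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NUM 8

static uint32_t ring[NUM];	/* stands in for vq->used->ring */

static void add_used_n(uint16_t *last_used_idx, const uint32_t *heads,
		       unsigned count)
{
	unsigned start = *last_used_idx % NUM;
	unsigned n = NUM - start;	/* slots before the ring wraps */

	if (n > count)
		n = count;
	memcpy(ring + start, heads, n * sizeof *ring);
	memcpy(ring, heads + n, (count - n) * sizeof *ring);
	*last_used_idx += count;	/* u16 arithmetic wraps naturally */
}

int main(void)
{
	uint16_t last_used_idx = 6;	/* next write lands at slot 6 of 8 */
	uint32_t heads[4] = { 10, 11, 12, 13 };
	int i;

	add_used_n(&last_used_idx, heads, 4);
	for (i = 0; i < NUM; i++)
		printf("ring[%d] = %u\n", i, ring[i]);	/* 12 13 0 0 0 0 10 11 */
	return 0;
}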
1448static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1449{
1450	__u16 old, new, event;
1451	bool v;
1452	/* Flush out used index updates. This is paired
1453	 * with the barrier that the Guest executes when enabling
1454	 * interrupts. */
1455	smp_mb();
1456
1457	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1458	    unlikely(vq->avail_idx == vq->last_avail_idx))
1459		return true;
1460
1461	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1462		__u16 flags;
1463		if (__get_user(flags, &vq->avail->flags)) {
1464			vq_err(vq, "Failed to get flags");
1465			return true;
1466		}
1467		return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
1468	}
1469	old = vq->signalled_used;
1470	v = vq->signalled_used_valid;
1471	new = vq->signalled_used = vq->last_used_idx;
1472	vq->signalled_used_valid = true;
1473
1474	if (unlikely(!v))
1475		return true;
1476
1477	if (get_user(event, vhost_used_event(vq))) {
1478		vq_err(vq, "Failed to get used event idx");
1479		return true;
1480	}
1481	return vring_need_event(event, new, old);
1482}
1483
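/*
 * Editor's illustration (not part of vhost.c): the event-index test behind
 * vhost_notify(). This is vring_need_event() from
 * include/linux/virtio_ring.h: signal iff the guest's event index fell
 * inside the window of used entries just published, computed in modular u16
 * arithmetic so the free-running indices may wrap.
 */
#include <stdio.h>
#include <stdint.h>

static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* Guest asked to be signalled once entry 5 has been used. */
	printf("%d\n", vring_need_event(5, 6, 4));		/* 1: 5 is in (4, 6] */
	printf("%d\n", vring_need_event(5, 4, 2));		/* 0: not there yet */
	printf("%d\n", vring_need_event(0xffff, 1, 0xfffe));	/* 1: wraps correctly */
	return 0;
}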
1484/* This actually signals the guest, using eventfd. */
1485void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1486{
 1487	/* Signal the Guest, tell them we used something up. */
1488	if (vq->call_ctx && vhost_notify(dev, vq))
1489		eventfd_signal(vq->call_ctx, 1);
1490}
1491
1492/* And here's the combo meal deal.  Supersize me! */
1493void vhost_add_used_and_signal(struct vhost_dev *dev,
1494			       struct vhost_virtqueue *vq,
1495			       unsigned int head, int len)
1496{
1497	vhost_add_used(vq, head, len);
1498	vhost_signal(dev, vq);
1499}
1500
1501/* multi-buffer version of vhost_add_used_and_signal */
1502void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1503				 struct vhost_virtqueue *vq,
1504				 struct vring_used_elem *heads, unsigned count)
1505{
1506	vhost_add_used_n(vq, heads, count);
1507	vhost_signal(dev, vq);
1508}
1509
1510/* OK, now we need to know about added descriptors. */
1511bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1512{
1513	u16 avail_idx;
1514	int r;
1515
1516	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1517		return false;
1518	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1519	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1520		r = vhost_update_used_flags(vq);
1521		if (r) {
1522			vq_err(vq, "Failed to enable notification at %p: %d\n",
1523			       &vq->used->flags, r);
1524			return false;
1525		}
1526	} else {
1527		r = vhost_update_avail_event(vq, vq->avail_idx);
1528		if (r) {
1529			vq_err(vq, "Failed to update avail event index at %p: %d\n",
1530			       vhost_avail_event(vq), r);
1531			return false;
1532		}
1533	}
1534	/* They could have slipped one in as we were doing that: make
1535	 * sure it's written, then check again. */
1536	smp_mb();
1537	r = __get_user(avail_idx, &vq->avail->idx);
1538	if (r) {
1539		vq_err(vq, "Failed to check avail idx at %p: %d\n",
1540		       &vq->avail->idx, r);
1541		return false;
1542	}
1543
1544	return avail_idx != vq->avail_idx;
1545}
1546
1547/* We don't need to be notified again. */
1548void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1549{
1550	int r;
1551
1552	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1553		return;
1554	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1555	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1556		r = vhost_update_used_flags(vq);
1557		if (r)
 1558			vq_err(vq, "Failed to disable notification at %p: %d\n",
1559			       &vq->used->flags, r);
1560	}
1561}
1562
1563static void vhost_zerocopy_done_signal(struct kref *kref)
1564{
1565	struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
1566						    kref);
1567	wake_up(&ubufs->wait);
1568}
1569
1570struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
1571					bool zcopy)
1572{
1573	struct vhost_ubuf_ref *ubufs;
1574	/* No zero copy backend? Nothing to count. */
1575	if (!zcopy)
1576		return NULL;
1577	ubufs = kmalloc(sizeof *ubufs, GFP_KERNEL);
1578	if (!ubufs)
1579		return ERR_PTR(-ENOMEM);
1580	kref_init(&ubufs->kref);
1581	init_waitqueue_head(&ubufs->wait);
1582	ubufs->vq = vq;
1583	return ubufs;
1584}
1585
1586void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
1587{
1588	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1589}
1590
1591void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
1592{
1593	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1594	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
1595	kfree(ubufs);
1596}
1597
1598void vhost_zerocopy_callback(void *arg)
1599{
1600	struct ubuf_info *ubuf = arg;
1601	struct vhost_ubuf_ref *ubufs = ubuf->arg;
1602	struct vhost_virtqueue *vq = ubufs->vq;
1603
 1604	/* set len = VHOST_DMA_DONE_LEN to mark this descriptor's buffers as DMA done */
1605	vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
1606	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1607}
v4.10.11 (drivers/vhost/vhost.c)
   1/* Copyright (C) 2009 Red Hat, Inc.
   2 * Copyright (C) 2006 Rusty Russell IBM Corporation
   3 *
   4 * Author: Michael S. Tsirkin <mst@redhat.com>
   5 *
   6 * Inspiration, some code, and most witty comments come from
   7 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.
  10 *
  11 * Generic code for virtio server in host kernel.
  12 */
  13
  14#include <linux/eventfd.h>
  15#include <linux/vhost.h>
  16#include <linux/uio.h>
  17#include <linux/mm.h>
  18#include <linux/mmu_context.h>
  19#include <linux/miscdevice.h>
  20#include <linux/mutex.h>
  21#include <linux/poll.h>
  22#include <linux/file.h>
  23#include <linux/highmem.h>
  24#include <linux/slab.h>
  25#include <linux/vmalloc.h>
  26#include <linux/kthread.h>
  27#include <linux/cgroup.h>
  28#include <linux/module.h>
  29#include <linux/sort.h>
  30#include <linux/interval_tree_generic.h>
  31
  32#include "vhost.h"
  33
  34static ushort max_mem_regions = 64;
  35module_param(max_mem_regions, ushort, 0444);
  36MODULE_PARM_DESC(max_mem_regions,
  37	"Maximum number of memory regions in memory map. (default: 64)");
  38static int max_iotlb_entries = 2048;
  39module_param(max_iotlb_entries, int, 0444);
  40MODULE_PARM_DESC(max_iotlb_entries,
  41	"Maximum number of iotlb entries. (default: 2048)");
  42
  43enum {
  44	VHOST_MEMORY_F_LOG = 0x1,
  45};
  46
  47#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
  48#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
  49
  50INTERVAL_TREE_DEFINE(struct vhost_umem_node,
  51		     rb, __u64, __subtree_last,
  52		     START, LAST, static inline, vhost_umem_interval_tree);
  53
  54#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
  55static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
  56{
  57	vq->user_be = !virtio_legacy_is_little_endian();
  58}
  59
  60static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
  61{
  62	vq->user_be = true;
  63}
  64
  65static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
  66{
  67	vq->user_be = false;
  68}
  69
  70static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
  71{
  72	struct vhost_vring_state s;
  73
  74	if (vq->private_data)
  75		return -EBUSY;
  76
  77	if (copy_from_user(&s, argp, sizeof(s)))
  78		return -EFAULT;
  79
  80	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
  81	    s.num != VHOST_VRING_BIG_ENDIAN)
  82		return -EINVAL;
  83
  84	if (s.num == VHOST_VRING_BIG_ENDIAN)
  85		vhost_enable_cross_endian_big(vq);
  86	else
  87		vhost_enable_cross_endian_little(vq);
  88
  89	return 0;
  90}
  91
  92static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
  93				   int __user *argp)
  94{
  95	struct vhost_vring_state s = {
  96		.index = idx,
  97		.num = vq->user_be
  98	};
  99
 100	if (copy_to_user(argp, &s, sizeof(s)))
 101		return -EFAULT;
 102
 103	return 0;
 104}
 105
 106static void vhost_init_is_le(struct vhost_virtqueue *vq)
 107{
 108	/* Note for legacy virtio: user_be is initialized at reset time
 109	 * according to the host endianness. If userspace does not set an
 110	 * explicit endianness, the default behavior is native endian, as
 111	 * expected by legacy virtio.
 112	 */
 113	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
 114}
 115#else
 116static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
 117{
 118}
 119
 120static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
 121{
 122	return -ENOIOCTLCMD;
 123}
 124
 125static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 126				   int __user *argp)
 127{
 128	return -ENOIOCTLCMD;
 129}
 130
 131static void vhost_init_is_le(struct vhost_virtqueue *vq)
 132{
 133	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
 134		|| virtio_legacy_is_little_endian();
 135}
 136#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
 137
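/*
 * Editor's illustration (not part of vhost.c): what vq->is_le decides.
 * VIRTIO_F_VERSION_1 rings are always little-endian; legacy rings use the
 * guest's native byte order, which with CONFIG_VHOST_CROSS_ENDIAN_LEGACY may
 * differ from the host's. A sketch of the resulting 16-bit accessor, using
 * glibc's <endian.h> helpers; vhost16_to_cpu_model is a made-up name.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <endian.h>

static uint16_t vhost16_to_cpu_model(bool is_le, uint16_t raw)
{
	/* is_le: the ring holds little-endian values; otherwise big-endian
	 * (a legacy big-endian guest). */
	return is_le ? le16toh(raw) : be16toh(raw);
}

int main(void)
{
	uint16_t raw = 0x3412;

	printf("is_le=1 -> 0x%04x\n", vhost16_to_cpu_model(true, raw));
	printf("is_le=0 -> 0x%04x\n", vhost16_to_cpu_model(false, raw));
	return 0;
}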
 138static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 139{
 140	vhost_init_is_le(vq);
 141}
 142
 143struct vhost_flush_struct {
 144	struct vhost_work work;
 145	struct completion wait_event;
 146};
 147
 148static void vhost_flush_work(struct vhost_work *work)
 149{
 150	struct vhost_flush_struct *s;
 151
 152	s = container_of(work, struct vhost_flush_struct, work);
 153	complete(&s->wait_event);
 154}
 155
 156static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 157			    poll_table *pt)
 158{
 159	struct vhost_poll *poll;
 160
 161	poll = container_of(pt, struct vhost_poll, table);
 162	poll->wqh = wqh;
 163	add_wait_queue(wqh, &poll->wait);
 164}
 165
 166static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 167			     void *key)
 168{
 169	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
 170
 171	if (!((unsigned long)key & poll->mask))
 172		return 0;
 173
 174	vhost_poll_queue(poll);
 175	return 0;
 176}
 177
 178void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 179{
 180	clear_bit(VHOST_WORK_QUEUED, &work->flags);
 181	work->fn = fn;
 182	init_waitqueue_head(&work->done);
 183}
 184EXPORT_SYMBOL_GPL(vhost_work_init);
 185
 186/* Init poll structure */
 187void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 188		     unsigned long mask, struct vhost_dev *dev)
 189{
 190	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 191	init_poll_funcptr(&poll->table, vhost_poll_func);
 192	poll->mask = mask;
 193	poll->dev = dev;
 194	poll->wqh = NULL;
 195
 196	vhost_work_init(&poll->work, fn);
 197}
 198EXPORT_SYMBOL_GPL(vhost_poll_init);
 199
 200/* Start polling a file. We add ourselves to file's wait queue. The caller must
 201 * keep a reference to a file until after vhost_poll_stop is called. */
 202int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 203{
 204	unsigned long mask;
 205	int ret = 0;
 206
 207	if (poll->wqh)
 208		return 0;
 209
 210	mask = file->f_op->poll(file, &poll->table);
 211	if (mask)
 212		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
 213	if (mask & POLLERR) {
 214		if (poll->wqh)
 215			remove_wait_queue(poll->wqh, &poll->wait);
 216		ret = -EINVAL;
 217	}
 218
 219	return ret;
 220}
 221EXPORT_SYMBOL_GPL(vhost_poll_start);
 222
 223/* Stop polling a file. After this function returns, it becomes safe to drop the
 224 * file reference. You must also flush afterwards. */
 225void vhost_poll_stop(struct vhost_poll *poll)
 226{
 227	if (poll->wqh) {
 228		remove_wait_queue(poll->wqh, &poll->wait);
 229		poll->wqh = NULL;
 230	}
 231}
 232EXPORT_SYMBOL_GPL(vhost_poll_stop);
 233
 234void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 235{
 236	struct vhost_flush_struct flush;
 237
 238	if (dev->worker) {
 239		init_completion(&flush.wait_event);
 240		vhost_work_init(&flush.work, vhost_flush_work);
 241
 242		vhost_work_queue(dev, &flush.work);
 243		wait_for_completion(&flush.wait_event);
 244	}
 245}
 246EXPORT_SYMBOL_GPL(vhost_work_flush);
 247
 248/* Flush any work that has been scheduled. When calling this, don't hold any
 249 * locks that are also used by the callback. */
 250void vhost_poll_flush(struct vhost_poll *poll)
 251{
 252	vhost_work_flush(poll->dev, &poll->work);
 253}
 254EXPORT_SYMBOL_GPL(vhost_poll_flush);
 255
 256void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 257{
 258	if (!dev->worker)
 259		return;
 260
 261	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
 262		/* We can only add the work to the list after we're
 263		 * sure it was not in the list.
 264		 * test_and_set_bit() implies a memory barrier.
 265		 */
 266		llist_add(&work->node, &dev->work_list);
 267		wake_up_process(dev->worker);
 268	}
 269}
 270EXPORT_SYMBOL_GPL(vhost_work_queue);
 271
 272/* A lockless hint for busy polling code to exit the loop */
 273bool vhost_has_work(struct vhost_dev *dev)
 274{
 275	return !llist_empty(&dev->work_list);
 276}
 277EXPORT_SYMBOL_GPL(vhost_has_work);
 278
 279void vhost_poll_queue(struct vhost_poll *poll)
 280{
 281	vhost_work_queue(poll->dev, &poll->work);
 282}
 283EXPORT_SYMBOL_GPL(vhost_poll_queue);
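
/*
 * Example (editorial sketch, compiled out): running a one-shot work item on
 * the device worker and waiting for it, the same pattern vhost_flush_work
 * above and vhost_attach_cgroups_work below use. example_fn is a
 * hypothetical vhost_work_fn_t callback.
 */
#if 0
static void example_run_on_worker(struct vhost_dev *dev)
{
	struct vhost_work work;

	vhost_work_init(&work, example_fn);
	vhost_work_queue(dev, &work);	/* executes in the vhost-<pid> kthread */
	vhost_work_flush(dev, &work);	/* returns once example_fn has run */
}
#endif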
 284
 285static void vhost_vq_reset(struct vhost_dev *dev,
 286			   struct vhost_virtqueue *vq)
 287{
 288	vq->num = 1;
 289	vq->desc = NULL;
 290	vq->avail = NULL;
 291	vq->used = NULL;
 292	vq->last_avail_idx = 0;
 293	vq->last_used_event = 0;
 294	vq->avail_idx = 0;
 295	vq->last_used_idx = 0;
 296	vq->signalled_used = 0;
 297	vq->signalled_used_valid = false;
 298	vq->used_flags = 0;
 299	vq->log_used = false;
 300	vq->log_addr = -1ull;
 301	vq->private_data = NULL;
 302	vq->acked_features = 0;
 303	vq->log_base = NULL;
 304	vq->error_ctx = NULL;
 305	vq->error = NULL;
 306	vq->kick = NULL;
 307	vq->call_ctx = NULL;
 308	vq->call = NULL;
 309	vq->log_ctx = NULL;
 310	vhost_reset_is_le(vq);
 311	vhost_disable_cross_endian(vq);
 312	vq->busyloop_timeout = 0;
 313	vq->umem = NULL;
 314	vq->iotlb = NULL;
 315}
 316
 317static int vhost_worker(void *data)
 318{
 319	struct vhost_dev *dev = data;
 320	struct vhost_work *work, *work_next;
 321	struct llist_node *node;
 322	mm_segment_t oldfs = get_fs();
 323
 324	set_fs(USER_DS);
 325	use_mm(dev->mm);
 326
 327	for (;;) {
 328		/* mb paired w/ kthread_stop */
 329		set_current_state(TASK_INTERRUPTIBLE);
 330
 331		if (kthread_should_stop()) {
 332			__set_current_state(TASK_RUNNING);
 333			break;
 334		}
 335
 336		node = llist_del_all(&dev->work_list);
 337		if (!node)
 338			schedule();
 339
 340		node = llist_reverse_order(node);
 341		/* make sure flag is seen after deletion */
 342		smp_wmb();
 343		llist_for_each_entry_safe(work, work_next, node, node) {
 344			clear_bit(VHOST_WORK_QUEUED, &work->flags);
 345			__set_current_state(TASK_RUNNING);
 346			work->fn(work);
 347			if (need_resched())
 348				schedule();
 349		}
 350	}
 351	unuse_mm(dev->mm);
 352	set_fs(oldfs);
 353	return 0;
 354}
 355
 356static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 357{
 358	kfree(vq->indirect);
 359	vq->indirect = NULL;
 360	kfree(vq->log);
 361	vq->log = NULL;
 362	kfree(vq->heads);
 363	vq->heads = NULL;
 364}
 365
 366/* Helper to allocate iovec buffers for all vqs. */
 367static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 368{
 369	struct vhost_virtqueue *vq;
 370	int i;
 371
 372	for (i = 0; i < dev->nvqs; ++i) {
 373		vq = dev->vqs[i];
 374		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
 375				       GFP_KERNEL);
 376		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
 377		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
 378		if (!vq->indirect || !vq->log || !vq->heads)
 379			goto err_nomem;
 380	}
 381	return 0;
 382
 383err_nomem:
 384	for (; i >= 0; --i)
 385		vhost_vq_free_iovecs(dev->vqs[i]);
 386	return -ENOMEM;
 387}
 388
 389static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 390{
 391	int i;
 392
 393	for (i = 0; i < dev->nvqs; ++i)
 394		vhost_vq_free_iovecs(dev->vqs[i]);
 395}
 396
 397void vhost_dev_init(struct vhost_dev *dev,
 398		    struct vhost_virtqueue **vqs, int nvqs)
 399{
 400	struct vhost_virtqueue *vq;
 401	int i;
 402
 403	dev->vqs = vqs;
 404	dev->nvqs = nvqs;
 405	mutex_init(&dev->mutex);
 406	dev->log_ctx = NULL;
 407	dev->log_file = NULL;
 408	dev->umem = NULL;
 409	dev->iotlb = NULL;
 410	dev->mm = NULL;
 411	dev->worker = NULL;
 412	init_llist_head(&dev->work_list);
 413	init_waitqueue_head(&dev->wait);
 414	INIT_LIST_HEAD(&dev->read_list);
 415	INIT_LIST_HEAD(&dev->pending_list);
 416	spin_lock_init(&dev->iotlb_lock);
 417
 418
 419	for (i = 0; i < dev->nvqs; ++i) {
 420		vq = dev->vqs[i];
 421		vq->log = NULL;
 422		vq->indirect = NULL;
 423		vq->heads = NULL;
 424		vq->dev = dev;
 425		mutex_init(&vq->mutex);
 426		vhost_vq_reset(dev, vq);
 427		if (vq->handle_kick)
 428			vhost_poll_init(&vq->poll, vq->handle_kick,
 429					POLLIN, dev);
 430	}
 431}
 432EXPORT_SYMBOL_GPL(vhost_dev_init);
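
/*
 * Example (editorial sketch, compiled out): a minimal open path in the style
 * of vhost-net. example_handle_kick is a hypothetical vhost_work_fn_t; the
 * worker thread itself is only created later, by VHOST_SET_OWNER.
 */
#if 0
static int example_open(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	struct vhost_virtqueue **vqs;

	vq = kzalloc(sizeof(*vq), GFP_KERNEL);
	vqs = kmalloc(sizeof(*vqs), GFP_KERNEL);
	if (!vq || !vqs) {
		kfree(vq);
		kfree(vqs);
		return -ENOMEM;
	}
	vq->handle_kick = example_handle_kick;
	vqs[0] = vq;
	vhost_dev_init(dev, vqs, 1);
	return 0;
}
#endif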
 433
 434/* Caller should have device mutex */
 435long vhost_dev_check_owner(struct vhost_dev *dev)
 436{
 437	/* Are you the owner? If not, I don't think you mean to do that */
 438	return dev->mm == current->mm ? 0 : -EPERM;
 439}
 440EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 441
 442struct vhost_attach_cgroups_struct {
 443	struct vhost_work work;
 444	struct task_struct *owner;
 445	int ret;
 446};
 447
 448static void vhost_attach_cgroups_work(struct vhost_work *work)
 449{
 450	struct vhost_attach_cgroups_struct *s;
 451
 452	s = container_of(work, struct vhost_attach_cgroups_struct, work);
 453	s->ret = cgroup_attach_task_all(s->owner, current);
 454}
 455
 456static int vhost_attach_cgroups(struct vhost_dev *dev)
 457{
 458	struct vhost_attach_cgroups_struct attach;
 459
 460	attach.owner = current;
 461	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 462	vhost_work_queue(dev, &attach.work);
 463	vhost_work_flush(dev, &attach.work);
 464	return attach.ret;
 465}
 466
 467/* Caller should have device mutex */
 468bool vhost_dev_has_owner(struct vhost_dev *dev)
 469{
 470	return dev->mm;
 471}
 472EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 473
 474/* Caller should have device mutex */
 475long vhost_dev_set_owner(struct vhost_dev *dev)
 476{
 477	struct task_struct *worker;
 478	int err;
 479
 480	/* Is there an owner already? */
 481	if (vhost_dev_has_owner(dev)) {
 482		err = -EBUSY;
 483		goto err_mm;
 484	}
 485
 486	/* No owner, become one */
 487	dev->mm = get_task_mm(current);
 488	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 489	if (IS_ERR(worker)) {
 490		err = PTR_ERR(worker);
 491		goto err_worker;
 492	}
 493
 494	dev->worker = worker;
 495	wake_up_process(worker);	/* avoid contributing to loadavg */
 496
 497	err = vhost_attach_cgroups(dev);
 498	if (err)
 499		goto err_cgroup;
 500
 501	err = vhost_dev_alloc_iovecs(dev);
 502	if (err)
 503		goto err_cgroup;
 504
 505	return 0;
 506err_cgroup:
 507	kthread_stop(worker);
 508	dev->worker = NULL;
 509err_worker:
 510	if (dev->mm)
 511		mmput(dev->mm);
 512	dev->mm = NULL;
 513err_mm:
 514	return err;
 515}
 516EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 517
 518static void *vhost_kvzalloc(unsigned long size)
 519{
 520	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 521
 522	if (!n)
 523		n = vzalloc(size);
 524	return n;
 525}
 526
 527struct vhost_umem *vhost_dev_reset_owner_prepare(void)
 528{
 529	return vhost_kvzalloc(sizeof(struct vhost_umem));
 530}
 531EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 532
 533/* Caller should have device mutex */
 534void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
 535{
 536	int i;
 537
 538	vhost_dev_cleanup(dev, true);
 539
 540	/* Restore memory to default empty mapping. */
 541	INIT_LIST_HEAD(&umem->umem_list);
 542	dev->umem = umem;
 543	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
 544	 * VQs aren't running.
 545	 */
 546	for (i = 0; i < dev->nvqs; ++i)
 547		dev->vqs[i]->umem = umem;
 548}
 549EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 550
 551void vhost_dev_stop(struct vhost_dev *dev)
 552{
 553	int i;
 554
 555	for (i = 0; i < dev->nvqs; ++i) {
 556		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 557			vhost_poll_stop(&dev->vqs[i]->poll);
 558			vhost_poll_flush(&dev->vqs[i]->poll);
 559		}
 560	}
 561}
 562EXPORT_SYMBOL_GPL(vhost_dev_stop);
 563
 564static void vhost_umem_free(struct vhost_umem *umem,
 565			    struct vhost_umem_node *node)
 566{
 567	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
 568	list_del(&node->link);
 569	kfree(node);
 570	umem->numem--;
 571}
 572
 573static void vhost_umem_clean(struct vhost_umem *umem)
 574{
 575	struct vhost_umem_node *node, *tmp;
 576
 577	if (!umem)
 578		return;
 579
 580	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
 581		vhost_umem_free(umem, node);
 582
 583	kvfree(umem);
 584}
 585
 586static void vhost_clear_msg(struct vhost_dev *dev)
 587{
 588	struct vhost_msg_node *node, *n;
 589
 590	spin_lock(&dev->iotlb_lock);
 591
 592	list_for_each_entry_safe(node, n, &dev->read_list, node) {
 593		list_del(&node->node);
 594		kfree(node);
 595	}
 596
 597	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
 598		list_del(&node->node);
 599		kfree(node);
 600	}
 601
 602	spin_unlock(&dev->iotlb_lock);
 603}
 604
 605/* Caller should have device mutex if and only if locked is set */
 606void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 607{
 608	int i;
 609
 610	for (i = 0; i < dev->nvqs; ++i) {
 611		if (dev->vqs[i]->error_ctx)
 612			eventfd_ctx_put(dev->vqs[i]->error_ctx);
 613		if (dev->vqs[i]->error)
 614			fput(dev->vqs[i]->error);
 615		if (dev->vqs[i]->kick)
 616			fput(dev->vqs[i]->kick);
 617		if (dev->vqs[i]->call_ctx)
 618			eventfd_ctx_put(dev->vqs[i]->call_ctx);
 619		if (dev->vqs[i]->call)
 620			fput(dev->vqs[i]->call);
 621		vhost_vq_reset(dev, dev->vqs[i]);
 622	}
 623	vhost_dev_free_iovecs(dev);
 624	if (dev->log_ctx)
 625		eventfd_ctx_put(dev->log_ctx);
 626	dev->log_ctx = NULL;
 627	if (dev->log_file)
 628		fput(dev->log_file);
 629	dev->log_file = NULL;
 630	/* No one will access memory at this point */
 631	vhost_umem_clean(dev->umem);
 632	dev->umem = NULL;
 633	vhost_umem_clean(dev->iotlb);
 634	dev->iotlb = NULL;
 635	vhost_clear_msg(dev);
 636	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
 637	WARN_ON(!llist_empty(&dev->work_list));
 638	if (dev->worker) {
 639		kthread_stop(dev->worker);
 640		dev->worker = NULL;
 641	}
 642	if (dev->mm)
 643		mmput(dev->mm);
 644	dev->mm = NULL;
 645}
 646EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 647
 648static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 649{
 650	u64 a = addr / VHOST_PAGE_SIZE / 8;
 651
 652	/* Make sure 64 bit math will not overflow. */
 653	if (a > ULONG_MAX - (unsigned long)log_base ||
 654	    a + (unsigned long)log_base > ULONG_MAX)
 655		return 0;
 656
 657	return access_ok(VERIFY_WRITE, log_base + a,
 658			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 659}
 660
 661static bool vhost_overflow(u64 uaddr, u64 size)
 662{
 663	/* Make sure 64 bit math will not overflow. */
 664	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
 665}
 666
 667/* Caller should have vq mutex and device mutex. */
 668static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
 669			       int log_all)
 670{
 671	struct vhost_umem_node *node;
 672
 673	if (!umem)
 674		return 0;
 675
 676	list_for_each_entry(node, &umem->umem_list, link) {
 677		unsigned long a = node->userspace_addr;
 678
 679		if (vhost_overflow(node->userspace_addr, node->size))
 680			return 0;
 681
 683		if (!access_ok(VERIFY_WRITE, (void __user *)a,
 684				    node->size))
 685			return 0;
 686		else if (log_all && !log_access_ok(log_base,
 687						   node->start,
 688						   node->size))
 689			return 0;
 690	}
 691	return 1;
 692}
 693
 694/* Can we switch to this memory table? */
 695/* Caller should have device mutex but not vq mutex */
 696static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
 697			    int log_all)
 698{
 699	int i;
 700
 701	for (i = 0; i < d->nvqs; ++i) {
 702		int ok;
 703		bool log;
 704
 705		mutex_lock(&d->vqs[i]->mutex);
 706		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
 707		/* If ring is inactive, will check when it's enabled. */
 708		if (d->vqs[i]->private_data)
 709			ok = vq_memory_access_ok(d->vqs[i]->log_base,
 710						 umem, log);
 711		else
 712			ok = 1;
 713		mutex_unlock(&d->vqs[i]->mutex);
 714		if (!ok)
 715			return 0;
 716	}
 717	return 1;
 718}
 719
 720static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 721			  struct iovec iov[], int iov_size, int access);
 722
 723static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
 724			      const void *from, unsigned size)
 725{
 726	int ret;
 727
 728	if (!vq->iotlb)
 729		return __copy_to_user(to, from, size);
 730	else {
 731		/* This function should be called after iotlb
 732		 * prefetch, which means we're sure that all of the
 733		 * vq memory can be accessed through the iotlb, so
 734		 * -EAGAIN should not happen in this case.
 735		 */
 736		/* TODO: add a faster path */
 737		struct iov_iter t;
 738		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
 739				     ARRAY_SIZE(vq->iotlb_iov),
 740				     VHOST_ACCESS_WO);
 741		if (ret < 0)
 742			goto out;
 743		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
 744		ret = copy_to_iter(from, size, &t);
 745		if (ret == size)
 746			ret = 0;
 747	}
 748out:
 749	return ret;
 750}
 751
 752static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
 753				void __user *from, unsigned size)
 754{
 755	int ret;
 756
 757	if (!vq->iotlb)
 758		return __copy_from_user(to, from, size);
 759	else {
 760		/* This function should be called after iotlb
 761		 * prefetch, which means we're sure that the vq
 762		 * memory can be accessed through the iotlb, so
 763		 * -EAGAIN should not happen in this case.
 764		 */
 765		/* TODO: add a faster path */
 766		struct iov_iter f;
 767		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
 768				     ARRAY_SIZE(vq->iotlb_iov),
 769				     VHOST_ACCESS_RO);
 770		if (ret < 0) {
 771			vq_err(vq, "IOTLB translation failure: uaddr "
 772			       "%p size 0x%llx\n", from,
 773			       (unsigned long long) size);
 774			goto out;
 775		}
 776		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
 777		ret = copy_from_iter(to, size, &f);
 778		if (ret == size)
 779			ret = 0;
 780	}
 781
 782out:
 783	return ret;
 784}
 785
 786static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 787				     void __user *addr, unsigned size)
 788{
 789	int ret;
 790
 791	/* This function should be called after iotlb
 792	 * prefetch, which means we're sure that the vq
 793	 * memory can be accessed through the iotlb, so
 794	 * -EAGAIN should not happen in this case.
 795	 */
 796	/* TODO: add a faster path */
 797	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
 798			     ARRAY_SIZE(vq->iotlb_iov),
 799			     VHOST_ACCESS_RO);
 800	if (ret < 0) {
 801		vq_err(vq, "IOTLB translation failure: uaddr "
 802			"%p size 0x%llx\n", addr,
 803			(unsigned long long) size);
 804		return NULL;
 805	}
 806
 807	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
 808		vq_err(vq, "Non atomic userspace memory access: uaddr "
 809			"%p size 0x%llx\n", addr,
 810			(unsigned long long) size);
 811		return NULL;
 812	}
 813
 814	return vq->iotlb_iov[0].iov_base;
 815}
 816
 817#define vhost_put_user(vq, x, ptr) \
 818({ \
 819	int ret = -EFAULT; \
 820	if (!vq->iotlb) { \
 821		ret = __put_user(x, ptr); \
 822	} else { \
 823		__typeof__(ptr) to = \
 824			(__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
 825		if (to != NULL) \
 826			ret = __put_user(x, to); \
 827		else \
 828			ret = -EFAULT;	\
 829	} \
 830	ret; \
 831})
 832
 833#define vhost_get_user(vq, x, ptr) \
 834({ \
 835	int ret; \
 836	if (!vq->iotlb) { \
 837		ret = __get_user(x, ptr); \
 838	} else { \
 839		__typeof__(ptr) from = \
 840			(__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
 841		if (from != NULL) \
 842			ret = __get_user(x, from); \
 843		else \
 844			ret = -EFAULT; \
 845	} \
 846	ret; \
 847})
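
/*
 * Example (editorial sketch, compiled out): the accessors above are meant for
 * single ring fields; reading the avail index looks like this (see
 * vhost_get_vq_desc() below for the real call site).
 */
#if 0
	__virtio16 avail_idx;

	if (vhost_get_user(vq, avail_idx, &vq->avail->idx))
		return -EFAULT;	/* fault, or no IOTLB entry after prefetch */
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
#endif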
 848
 849static void vhost_dev_lock_vqs(struct vhost_dev *d)
 850{
 851	int i;
 852	for (i = 0; i < d->nvqs; ++i)
 853		mutex_lock(&d->vqs[i]->mutex);
 854}
 855
 856static void vhost_dev_unlock_vqs(struct vhost_dev *d)
 857{
 858	int i;
 859	for (i = 0; i < d->nvqs; ++i)
 860		mutex_unlock(&d->vqs[i]->mutex);
 861}
 862
 863static int vhost_new_umem_range(struct vhost_umem *umem,
 864				u64 start, u64 size, u64 end,
 865				u64 userspace_addr, int perm)
 866{
 867	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
 868
 869	if (!node)
 870		return -ENOMEM;
 871
 872	if (umem->numem == max_iotlb_entries) {
 873		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
 874		vhost_umem_free(umem, tmp);
 875	}
 876
 877	node->start = start;
 878	node->size = size;
 879	node->last = end;
 880	node->userspace_addr = userspace_addr;
 881	node->perm = perm;
 882	INIT_LIST_HEAD(&node->link);
 883	list_add_tail(&node->link, &umem->umem_list);
 884	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
 885	umem->numem++;
 886
 887	return 0;
 888}
 889
 890static void vhost_del_umem_range(struct vhost_umem *umem,
 891				 u64 start, u64 end)
 892{
 893	struct vhost_umem_node *node;
 894
 895	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
 896							   start, end)))
 897		vhost_umem_free(umem, node);
 898}
 899
 900static void vhost_iotlb_notify_vq(struct vhost_dev *d,
 901				  struct vhost_iotlb_msg *msg)
 902{
 903	struct vhost_msg_node *node, *n;
 904
 905	spin_lock(&d->iotlb_lock);
 906
 907	list_for_each_entry_safe(node, n, &d->pending_list, node) {
 908		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
 909		if (msg->iova <= vq_msg->iova &&
 910		    msg->iova + msg->size - 1 >= vq_msg->iova &&
 911		    vq_msg->type == VHOST_IOTLB_MISS) {
 912			vhost_poll_queue(&node->vq->poll);
 913			list_del(&node->node);
 914			kfree(node);
 915		}
 916	}
 917
 918	spin_unlock(&d->iotlb_lock);
 919}
 920
 921static int umem_access_ok(u64 uaddr, u64 size, int access)
 922{
 923	unsigned long a = uaddr;
 924
 925	/* Make sure 64 bit math will not overflow. */
 926	if (vhost_overflow(uaddr, size))
 927		return -EFAULT;
 928
 929	if ((access & VHOST_ACCESS_RO) &&
 930	    !access_ok(VERIFY_READ, (void __user *)a, size))
 931		return -EFAULT;
 932	if ((access & VHOST_ACCESS_WO) &&
 933	    !access_ok(VERIFY_WRITE, (void __user *)a, size))
 934		return -EFAULT;
 935	return 0;
 936}
 937
 938static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 939				   struct vhost_iotlb_msg *msg)
 940{
 941	int ret = 0;
 942
 943	vhost_dev_lock_vqs(dev);
 944	switch (msg->type) {
 945	case VHOST_IOTLB_UPDATE:
 946		if (!dev->iotlb) {
 947			ret = -EFAULT;
 948			break;
 949		}
 950		if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
 951			ret = -EFAULT;
 952			break;
 953		}
 954		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
 955					 msg->iova + msg->size - 1,
 956					 msg->uaddr, msg->perm)) {
 957			ret = -ENOMEM;
 958			break;
 959		}
 960		vhost_iotlb_notify_vq(dev, msg);
 961		break;
 962	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
 963		vhost_del_umem_range(dev->iotlb, msg->iova,
 964				     msg->iova + msg->size - 1);
 965		break;
 966	default:
 967		ret = -EINVAL;
 968		break;
 969	}
 970
 971	vhost_dev_unlock_vqs(dev);
 972	return ret;
 973}

 974ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
 975			     struct iov_iter *from)
 976{
 977	struct vhost_msg_node node;
 978	unsigned size = sizeof(struct vhost_msg);
 979	size_t ret;
 980	int err;
 981
 982	if (iov_iter_count(from) < size)
 983		return 0;
 984	ret = copy_from_iter(&node.msg, size, from);
 985	if (ret != size)
 986		goto done;
 987
 988	switch (node.msg.type) {
 989	case VHOST_IOTLB_MSG:
 990		err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
 991		if (err)
 992			ret = err;
 993		break;
 994	default:
 995		ret = -EINVAL;
 996		break;
 997	}
 998
 999done:
1000	return ret;
1001}
1002EXPORT_SYMBOL(vhost_chr_write_iter);
1003
1004unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1005			    poll_table *wait)
1006{
1007	unsigned int mask = 0;
1008
1009	poll_wait(file, &dev->wait, wait);
1010
1011	if (!list_empty(&dev->read_list))
1012		mask |= POLLIN | POLLRDNORM;
1013
1014	return mask;
1015}
1016EXPORT_SYMBOL(vhost_chr_poll);
1017
1018ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1019			    int noblock)
1020{
1021	DEFINE_WAIT(wait);
1022	struct vhost_msg_node *node;
1023	ssize_t ret = 0;
1024	unsigned size = sizeof(struct vhost_msg);
1025
1026	if (iov_iter_count(to) < size)
1027		return 0;
1028
1029	while (1) {
1030		if (!noblock)
1031			prepare_to_wait(&dev->wait, &wait,
1032					TASK_INTERRUPTIBLE);
1033
1034		node = vhost_dequeue_msg(dev, &dev->read_list);
1035		if (node)
1036			break;
1037		if (noblock) {
1038			ret = -EAGAIN;
1039			break;
1040		}
1041		if (signal_pending(current)) {
1042			ret = -ERESTARTSYS;
1043			break;
1044		}
1045		if (!dev->iotlb) {
1046			ret = -EBADFD;
1047			break;
1048		}
1049
1050		schedule();
1051	}
1052
1053	if (!noblock)
1054		finish_wait(&dev->wait, &wait);
1055
1056	if (node) {
1057		ret = copy_to_iter(&node->msg, size, to);
1058
1059		if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
1060			kfree(node);
1061			return ret;
1062		}
1063
1064		vhost_enqueue_msg(dev, &dev->pending_list, node);
1065	}
1066
1067	return ret;
1068}
1069EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
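
/*
 * Example (editorial sketch, compiled out): the userspace half of the IOTLB
 * miss protocol carried over this character device. translate() and
 * region_size_covering() are hypothetical; error handling is elided.
 */
#if 0
	struct vhost_msg msg;

	read(vhost_fd, &msg, sizeof(msg));	/* blocks until a miss arrives */
	if (msg.type == VHOST_IOTLB_MSG && msg.iotlb.type == VHOST_IOTLB_MISS) {
		msg.iotlb.type = VHOST_IOTLB_UPDATE;
		msg.iotlb.uaddr = translate(msg.iotlb.iova);
		msg.iotlb.size = region_size_covering(msg.iotlb.iova);
		msg.iotlb.perm = VHOST_ACCESS_RW;
		write(vhost_fd, &msg, sizeof(msg));
	}
#endif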
1070
1071static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1072{
1073	struct vhost_dev *dev = vq->dev;
1074	struct vhost_msg_node *node;
1075	struct vhost_iotlb_msg *msg;
1076
1077	node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
1078	if (!node)
1079		return -ENOMEM;
1080
1081	msg = &node->msg.iotlb;
1082	msg->type = VHOST_IOTLB_MISS;
1083	msg->iova = iova;
1084	msg->perm = access;
1085
1086	vhost_enqueue_msg(dev, &dev->read_list, node);
1087
1088	return 0;
1089}
1090
1091static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1092			struct vring_desc __user *desc,
1093			struct vring_avail __user *avail,
1094			struct vring_used __user *used)
1096{
1097	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1098
1099	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
1100	       access_ok(VERIFY_READ, avail,
1101			 sizeof *avail + num * sizeof *avail->ring + s) &&
1102	       access_ok(VERIFY_WRITE, used,
1103			sizeof *used + num * sizeof *used->ring + s);
1104}
1105
1106static int iotlb_access_ok(struct vhost_virtqueue *vq,
1107			   int access, u64 addr, u64 len)
1108{
1109	const struct vhost_umem_node *node;
1110	struct vhost_umem *umem = vq->iotlb;
1111	u64 s = 0, size;
1112
1113	while (len > s) {
1114		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1115							   addr,
1116							   addr + len - 1);
1117		if (node == NULL || node->start > addr) {
1118			vhost_iotlb_miss(vq, addr, access);
1119			return false;
1120		} else if (!(node->perm & access)) {
1121			/* Report the possible access violation by
1122			 * requesting another translation from userspace.
1123			 */
1124			return false;
1125		}
1126
1127		size = node->size - addr + node->start;
1128		s += size;
1129		addr += size;
1130	}
1131
1132	return true;
1133}
1134
1135int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
1136{
1137	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1138	unsigned int num = vq->num;
1139
1140	if (!vq->iotlb)
1141		return 1;
1142
1143	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
1144			       num * sizeof *vq->desc) &&
1145	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
1146			       sizeof *vq->avail +
1147			       num * sizeof *vq->avail->ring + s) &&
1148	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
1149			       sizeof *vq->used +
1150			       num * sizeof *vq->used->ring + s);
1151}
1152EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
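
/*
 * Example (editorial sketch, compiled out): backends call the prefetch helper
 * before touching the ring and bail out on a miss; the vq is re-polled once
 * userspace answers with a VHOST_IOTLB_UPDATE (see vhost_iotlb_notify_vq()).
 */
#if 0
	mutex_lock(&vq->mutex);
	if (!vq_iotlb_prefetch(vq)) {
		/* A miss request was queued for userspace; try again later. */
		mutex_unlock(&vq->mutex);
		return;
	}
	/* All ring memory is now translatable; vhost_get_user() and
	 * vhost_put_user() will not see -EAGAIN from here on. */
#endif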
1153
1154/* Can we log writes? */
1155/* Caller should have device mutex but not vq mutex */
1156int vhost_log_access_ok(struct vhost_dev *dev)
1157{
1158	return memory_access_ok(dev, dev->umem, 1);
1159}
1160EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1161
1162/* Verify access for write logging. */
1163/* Caller should have vq mutex and device mutex */
1164static int vq_log_access_ok(struct vhost_virtqueue *vq,
1165			    void __user *log_base)
1166{
1167	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1168
1169	return vq_memory_access_ok(log_base, vq->umem,
1170				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1171		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
1172					sizeof *vq->used +
1173					vq->num * sizeof *vq->used->ring + s));
1174}
1175
1176/* Can we start vq? */
1177/* Caller should have vq mutex and device mutex */
1178int vhost_vq_access_ok(struct vhost_virtqueue *vq)
1179{
1180	if (vq->iotlb) {
1181		/* When a device IOTLB is in use, accesses are
1182		 * validated during prefetching instead.
1183		 */
1184		return 1;
1185	}
1186	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
1187		vq_log_access_ok(vq, vq->log_base);
1188}
1189EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1190
1191static struct vhost_umem *vhost_umem_alloc(void)
1192{
1193	struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem));
1194
1195	if (!umem)
1196		return NULL;
1197
1198	umem->umem_tree = RB_ROOT;
1199	umem->numem = 0;
1200	INIT_LIST_HEAD(&umem->umem_list);
1201
1202	return umem;
1203}
1204
1205static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1206{
1207	struct vhost_memory mem, *newmem;
1208	struct vhost_memory_region *region;
1209	struct vhost_umem *newumem, *oldumem;
1210	unsigned long size = offsetof(struct vhost_memory, regions);
1211	int i;
1212
1213	if (copy_from_user(&mem, m, size))
1214		return -EFAULT;
1215	if (mem.padding)
1216		return -EOPNOTSUPP;
1217	if (mem.nregions > max_mem_regions)
1218		return -E2BIG;
1219	newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
1220	if (!newmem)
1221		return -ENOMEM;
1222
1223	memcpy(newmem, &mem, size);
1224	if (copy_from_user(newmem->regions, m->regions,
1225			   mem.nregions * sizeof *m->regions)) {
1226		kvfree(newmem);
1227		return -EFAULT;
1228	}
1229
1230	newumem = vhost_umem_alloc();
1231	if (!newumem) {
1232		kvfree(newmem);
1233		return -ENOMEM;
1234	}
1235
1236	for (region = newmem->regions;
1237	     region < newmem->regions + mem.nregions;
1238	     region++) {
1239		if (vhost_new_umem_range(newumem,
1240					 region->guest_phys_addr,
1241					 region->memory_size,
1242					 region->guest_phys_addr +
1243					 region->memory_size - 1,
1244					 region->userspace_addr,
1245					 VHOST_ACCESS_RW))
1246			goto err;
1247	}
1248
1249	if (!memory_access_ok(d, newumem, 0))
1250		goto err;
1251
1252	oldumem = d->umem;
1253	d->umem = newumem;
1254
1255	/* All memory accesses are done under some VQ mutex. */
1256	for (i = 0; i < d->nvqs; ++i) {
1257		mutex_lock(&d->vqs[i]->mutex);
1258		d->vqs[i]->umem = newumem;
1259		mutex_unlock(&d->vqs[i]->mutex);
1260	}
1261
1262	kvfree(newmem);
1263	vhost_umem_clean(oldumem);
1264	return 0;
1265
1266err:
1267	vhost_umem_clean(newumem);
1268	kvfree(newmem);
1269	return -EFAULT;
1270}
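
/*
 * Example (editorial sketch, compiled out): the userspace side of
 * VHOST_SET_MEM_TABLE, describing one region that maps guest physical
 * [0, size) onto a mmap()ed buffer. Error handling is elided.
 */
#if 0
	struct vhost_memory *mem;

	mem = calloc(1, sizeof(*mem) + sizeof(mem->regions[0]));
	mem->nregions = 1;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].memory_size = size;
	mem->regions[0].userspace_addr = (uint64_t)(uintptr_t)guest_ram;
	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
#endif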
1271
1272long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
1273{
1274	struct file *eventfp, *filep = NULL;
1275	bool pollstart = false, pollstop = false;
1276	struct eventfd_ctx *ctx = NULL;
1277	u32 __user *idxp = argp;
1278	struct vhost_virtqueue *vq;
1279	struct vhost_vring_state s;
1280	struct vhost_vring_file f;
1281	struct vhost_vring_addr a;
1282	u32 idx;
1283	long r;
1284
1285	r = get_user(idx, idxp);
1286	if (r < 0)
1287		return r;
1288	if (idx >= d->nvqs)
1289		return -ENOBUFS;
1290
1291	vq = d->vqs[idx];
1292
1293	mutex_lock(&vq->mutex);
1294
1295	switch (ioctl) {
1296	case VHOST_SET_VRING_NUM:
1297		/* Resizing ring with an active backend?
1298		 * You don't want to do that. */
1299		if (vq->private_data) {
1300			r = -EBUSY;
1301			break;
1302		}
1303		if (copy_from_user(&s, argp, sizeof s)) {
1304			r = -EFAULT;
1305			break;
1306		}
1307		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
1308			r = -EINVAL;
1309			break;
1310		}
1311		vq->num = s.num;
1312		break;
1313	case VHOST_SET_VRING_BASE:
1314		/* Moving base with an active backend?
1315		 * You don't want to do that. */
1316		if (vq->private_data) {
1317			r = -EBUSY;
1318			break;
1319		}
1320		if (copy_from_user(&s, argp, sizeof s)) {
1321			r = -EFAULT;
1322			break;
1323		}
1324		if (s.num > 0xffff) {
1325			r = -EINVAL;
1326			break;
1327		}
1328		vq->last_avail_idx = vq->last_used_event = s.num;
1329		/* Forget the cached index value. */
1330		vq->avail_idx = vq->last_avail_idx;
1331		break;
1332	case VHOST_GET_VRING_BASE:
1333		s.index = idx;
1334		s.num = vq->last_avail_idx;
1335		if (copy_to_user(argp, &s, sizeof s))
1336			r = -EFAULT;
1337		break;
1338	case VHOST_SET_VRING_ADDR:
1339		if (copy_from_user(&a, argp, sizeof a)) {
1340			r = -EFAULT;
1341			break;
1342		}
1343		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
1344			r = -EOPNOTSUPP;
1345			break;
1346		}
1347		/* For 32bit, verify that the top 32bits of the user
1348		   data are set to zero. */
1349		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1350		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1351		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
1352			r = -EFAULT;
1353			break;
1354		}
1355
1356		/* Make sure it's safe to cast pointers to vring types. */
1357		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1358		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1359		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1360		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1361		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
1362			r = -EINVAL;
1363			break;
1364		}
1365
1366		/* We only verify access here if the backend is configured.
1367		 * If it is not, we skip the check, as the size might not
1368		 * have been set up yet; we verify when the backend is configured. */
1369		if (vq->private_data) {
1370			if (!vq_access_ok(vq, vq->num,
1371				(void __user *)(unsigned long)a.desc_user_addr,
1372				(void __user *)(unsigned long)a.avail_user_addr,
1373				(void __user *)(unsigned long)a.used_user_addr)) {
1374				r = -EINVAL;
1375				break;
1376			}
1377
1378			/* Also validate log access for used ring if enabled. */
1379			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
1380			    !log_access_ok(vq->log_base, a.log_guest_addr,
1381					   sizeof *vq->used +
1382					   vq->num * sizeof *vq->used->ring)) {
1383				r = -EINVAL;
1384				break;
1385			}
1386		}
1387
1388		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1389		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1390		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1391		vq->log_addr = a.log_guest_addr;
1392		vq->used = (void __user *)(unsigned long)a.used_user_addr;
1393		break;
1394	case VHOST_SET_VRING_KICK:
1395		if (copy_from_user(&f, argp, sizeof f)) {
1396			r = -EFAULT;
1397			break;
1398		}
1399		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
1400		if (IS_ERR(eventfp)) {
1401			r = PTR_ERR(eventfp);
1402			break;
1403		}
1404		if (eventfp != vq->kick) {
1405			pollstop = (filep = vq->kick) != NULL;
1406			pollstart = (vq->kick = eventfp) != NULL;
1407		} else
1408			filep = eventfp;
1409		break;
1410	case VHOST_SET_VRING_CALL:
1411		if (copy_from_user(&f, argp, sizeof f)) {
1412			r = -EFAULT;
1413			break;
1414		}
1415		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
1416		if (IS_ERR(eventfp)) {
1417			r = PTR_ERR(eventfp);
1418			break;
1419		}
1420		if (eventfp != vq->call) {
1421			filep = vq->call;
1422			ctx = vq->call_ctx;
1423			vq->call = eventfp;
1424			vq->call_ctx = eventfp ?
1425				eventfd_ctx_fileget(eventfp) : NULL;
1426		} else
1427			filep = eventfp;
1428		break;
1429	case VHOST_SET_VRING_ERR:
1430		if (copy_from_user(&f, argp, sizeof f)) {
1431			r = -EFAULT;
1432			break;
1433		}
1434		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
1435		if (IS_ERR(eventfp)) {
1436			r = PTR_ERR(eventfp);
1437			break;
1438		}
1439		if (eventfp != vq->error) {
1440			filep = vq->error;
1441			vq->error = eventfp;
1442			ctx = vq->error_ctx;
1443			vq->error_ctx = eventfp ?
1444				eventfd_ctx_fileget(eventfp) : NULL;
1445		} else
1446			filep = eventfp;
1447		break;
1448	case VHOST_SET_VRING_ENDIAN:
1449		r = vhost_set_vring_endian(vq, argp);
1450		break;
1451	case VHOST_GET_VRING_ENDIAN:
1452		r = vhost_get_vring_endian(vq, idx, argp);
1453		break;
1454	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1455		if (copy_from_user(&s, argp, sizeof(s))) {
1456			r = -EFAULT;
1457			break;
1458		}
1459		vq->busyloop_timeout = s.num;
1460		break;
1461	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1462		s.index = idx;
1463		s.num = vq->busyloop_timeout;
1464		if (copy_to_user(argp, &s, sizeof(s)))
1465			r = -EFAULT;
1466		break;
1467	default:
1468		r = -ENOIOCTLCMD;
1469	}
1470
1471	if (pollstop && vq->handle_kick)
1472		vhost_poll_stop(&vq->poll);
1473
1474	if (ctx)
1475		eventfd_ctx_put(ctx);
1476	if (filep)
1477		fput(filep);
1478
1479	if (pollstart && vq->handle_kick)
1480		r = vhost_poll_start(&vq->poll, vq->kick);
1481
1482	mutex_unlock(&vq->mutex);
1483
1484	if (pollstop && vq->handle_kick)
1485		vhost_poll_flush(&vq->poll);
1486	return r;
1487}
1488EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
1489
1490int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1491{
1492	struct vhost_umem *niotlb, *oiotlb;
1493	int i;
1494
1495	niotlb = vhost_umem_alloc();
1496	if (!niotlb)
1497		return -ENOMEM;
1498
1499	oiotlb = d->iotlb;
1500	d->iotlb = niotlb;
1501
1502	for (i = 0; i < d->nvqs; ++i) {
1503		mutex_lock(&d->vqs[i]->mutex);
1504		d->vqs[i]->iotlb = niotlb;
1505		mutex_unlock(&d->vqs[i]->mutex);
1506	}
1507
1508	vhost_umem_clean(oiotlb);
1509
1510	return 0;
1511}
1512EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1513
1514/* Caller must have device mutex */
1515long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1516{
1517	struct file *eventfp, *filep = NULL;
1518	struct eventfd_ctx *ctx = NULL;
1519	u64 p;
1520	long r;
1521	int i, fd;
1522
1523	/* If you are not the owner, you can become one */
1524	if (ioctl == VHOST_SET_OWNER) {
1525		r = vhost_dev_set_owner(d);
1526		goto done;
1527	}
1528
1529	/* You must be the owner to do anything else */
1530	r = vhost_dev_check_owner(d);
1531	if (r)
1532		goto done;
1533
1534	switch (ioctl) {
1535	case VHOST_SET_MEM_TABLE:
1536		r = vhost_set_memory(d, argp);
1537		break;
1538	case VHOST_SET_LOG_BASE:
1539		if (copy_from_user(&p, argp, sizeof p)) {
1540			r = -EFAULT;
1541			break;
1542		}
1543		if ((u64)(unsigned long)p != p) {
1544			r = -EFAULT;
1545			break;
1546		}
1547		for (i = 0; i < d->nvqs; ++i) {
1548			struct vhost_virtqueue *vq;
1549			void __user *base = (void __user *)(unsigned long)p;
1550			vq = d->vqs[i];
1551			mutex_lock(&vq->mutex);
1552			/* If ring is inactive, will check when it's enabled. */
1553			if (vq->private_data && !vq_log_access_ok(vq, base))
1554				r = -EFAULT;
1555			else
1556				vq->log_base = base;
1557			mutex_unlock(&vq->mutex);
1558		}
1559		break;
1560	case VHOST_SET_LOG_FD:
1561		r = get_user(fd, (int __user *)argp);
1562		if (r < 0)
1563			break;
1564		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
1565		if (IS_ERR(eventfp)) {
1566			r = PTR_ERR(eventfp);
1567			break;
1568		}
1569		if (eventfp != d->log_file) {
1570			filep = d->log_file;
1571			d->log_file = eventfp;
1572			ctx = d->log_ctx;
1573			d->log_ctx = eventfp ?
1574				eventfd_ctx_fileget(eventfp) : NULL;
1575		} else
1576			filep = eventfp;
1577		for (i = 0; i < d->nvqs; ++i) {
1578			mutex_lock(&d->vqs[i]->mutex);
1579			d->vqs[i]->log_ctx = d->log_ctx;
1580			mutex_unlock(&d->vqs[i]->mutex);
1581		}
1582		if (ctx)
1583			eventfd_ctx_put(ctx);
1584		if (filep)
1585			fput(filep);
1586		break;
1587	default:
1588		r = -ENOIOCTLCMD;
1589		break;
1590	}
1591done:
1592	return r;
1593}
1594EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
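
/*
 * Example (editorial sketch, compiled out): the usual userspace setup order
 * against the two ioctl handlers above, as a QEMU-like frontend would issue
 * it. All identifiers are hypothetical; error handling is elided.
 */
#if 0
	struct vhost_vring_state state = { .index = 0, .num = 256 };
	struct vhost_vring_file file = { .index = 0 };

	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);		/* creates the worker */
	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
	ioctl(vhost_fd, VHOST_SET_VRING_NUM, &state);	/* power of 2, <= 0xffff */
	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr);
	file.fd = kick_eventfd;
	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &file);
	file.fd = call_eventfd;
	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &file);
#endif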
1595
1596/* TODO: This is really inefficient.  We need something like get_user()
1597 * (instruction directly accesses the data, with an exception table entry
1598 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
1599 */
1600static int set_bit_to_user(int nr, void __user *addr)
1601{
1602	unsigned long log = (unsigned long)addr;
1603	struct page *page;
1604	void *base;
1605	int bit = nr + (log % PAGE_SIZE) * 8;
1606	int r;
1607
1608	r = get_user_pages_fast(log, 1, 1, &page);
1609	if (r < 0)
1610		return r;
1611	BUG_ON(r != 1);
1612	base = kmap_atomic(page);
1613	set_bit(bit, base);
1614	kunmap_atomic(base);
1615	set_page_dirty_lock(page);
1616	put_page(page);
1617	return 0;
1618}
1619
1620static int log_write(void __user *log_base,
1621		     u64 write_address, u64 write_length)
1622{
1623	u64 write_page = write_address / VHOST_PAGE_SIZE;
1624	int r;
1625
1626	if (!write_length)
1627		return 0;
1628	write_length += write_address % VHOST_PAGE_SIZE;
1629	for (;;) {
1630		u64 base = (u64)(unsigned long)log_base;
1631		u64 log = base + write_page / 8;
1632		int bit = write_page % 8;
1633		if ((u64)(unsigned long)log != log)
1634			return -EFAULT;
1635		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1636		if (r < 0)
1637			return r;
1638		if (write_length <= VHOST_PAGE_SIZE)
1639			break;
1640		write_length -= VHOST_PAGE_SIZE;
1641		write_page += 1;
1642	}
1643	return r;
1644}
1645
1646int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1647		    unsigned int log_num, u64 len)
1648{
1649	int i, r;
1650
1651	/* Make sure data written is seen before log. */
1652	smp_wmb();
1653	for (i = 0; i < log_num; ++i) {
1654		u64 l = min(log[i].len, len);
1655		r = log_write(vq->log_base, log[i].addr, l);
1656		if (r < 0)
1657			return r;
1658		len -= l;
1659		if (!len) {
1660			if (vq->log_ctx)
1661				eventfd_signal(vq->log_ctx, 1);
1662			return 0;
1663		}
1664	}
1665	/* Length written exceeds what we have stored. This is a bug. */
1666	BUG();
1667	return 0;
1668}
1669EXPORT_SYMBOL_GPL(vhost_log_write);
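
/*
 * Example (editorial sketch, compiled out): the log is a userspace bitmap
 * with one bit per VHOST_PAGE_SIZE page of guest memory, which is what
 * log_write() computes above. A hypothetical userspace consumer checks a
 * page like this:
 */
#if 0
	uint8_t *log_base;	/* bitmap registered via VHOST_SET_LOG_BASE */
	uint64_t pfn = guest_addr / VHOST_PAGE_SIZE;
	int dirty = (log_base[pfn / 8] >> (pfn % 8)) & 1;
#endif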
1670
1671static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1672{
1673	void __user *used;
1674	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
1675			   &vq->used->flags) < 0)
1676		return -EFAULT;
1677	if (unlikely(vq->log_used)) {
1678		/* Make sure the flag is seen before log. */
1679		smp_wmb();
1680		/* Log used flag write. */
1681		used = &vq->used->flags;
1682		log_write(vq->log_base, vq->log_addr +
1683			  (used - (void __user *)vq->used),
1684			  sizeof vq->used->flags);
1685		if (vq->log_ctx)
1686			eventfd_signal(vq->log_ctx, 1);
1687	}
1688	return 0;
1689}
1690
1691static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1692{
1693	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
1694			   vhost_avail_event(vq)))
1695		return -EFAULT;
1696	if (unlikely(vq->log_used)) {
1697		void __user *used;
1698		/* Make sure the event is seen before log. */
1699		smp_wmb();
1700		/* Log avail event write */
1701		used = vhost_avail_event(vq);
1702		log_write(vq->log_base, vq->log_addr +
1703			  (used - (void __user *)vq->used),
1704			  sizeof *vhost_avail_event(vq));
1705		if (vq->log_ctx)
1706			eventfd_signal(vq->log_ctx, 1);
1707	}
1708	return 0;
1709}
1710
1711int vhost_vq_init_access(struct vhost_virtqueue *vq)
1712{
1713	__virtio16 last_used_idx;
1714	int r;
1715	bool is_le = vq->is_le;
1716
1717	if (!vq->private_data)
1718		return 0;
1719
1720	vhost_init_is_le(vq);
1721
1722	r = vhost_update_used_flags(vq);
1723	if (r)
1724		goto err;
1725	vq->signalled_used_valid = false;
1726	if (!vq->iotlb &&
1727	    !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
1728		r = -EFAULT;
1729		goto err;
1730	}
1731	r = vhost_get_user(vq, last_used_idx, &vq->used->idx);
1732	if (r) {
1733		vq_err(vq, "Can't access used idx at %p\n",
1734		       &vq->used->idx);
1735		goto err;
1736	}
1737	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
1738	return 0;
1739
1740err:
1741	vq->is_le = is_le;
1742	return r;
1743}
1744EXPORT_SYMBOL_GPL(vhost_vq_init_access);
1745
1746static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
1747			  struct iovec iov[], int iov_size, int access)
1748{
1749	const struct vhost_umem_node *node;
1750	struct vhost_dev *dev = vq->dev;
1751	struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
1752	struct iovec *_iov;
1753	u64 s = 0;
1754	int ret = 0;
1755
1756	while ((u64)len > s) {
1757		u64 size;
1758		if (unlikely(ret >= iov_size)) {
1759			ret = -ENOBUFS;
1760			break;
1761		}
1762
1763		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1764							addr, addr + len - 1);
1765		if (node == NULL || node->start > addr) {
1766			if (umem != dev->iotlb) {
1767				ret = -EFAULT;
1768				break;
1769			}
1770			ret = -EAGAIN;
1771			break;
1772		} else if (!(node->perm & access)) {
1773			ret = -EPERM;
1774			break;
1775		}
1776
1777		_iov = iov + ret;
1778		size = node->size - addr + node->start;
1779		_iov->iov_len = min((u64)len - s, size);
1780		_iov->iov_base = (void __user *)(unsigned long)
1781			(node->userspace_addr + addr - node->start);
1782		s += size;
1783		addr += size;
1784		++ret;
1785	}
1786
1787	if (ret == -EAGAIN)
1788		vhost_iotlb_miss(vq, addr, access);
1789	return ret;
1790}
1791
1792/* Each buffer in the virtqueues is actually a chain of descriptors.  This
1793 * function returns the next descriptor in the chain,
1794 * or -1U if we're at the end. */
1795static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
1796{
1797	unsigned int next;
1798
1799	/* If this descriptor says it doesn't chain, we're done. */
1800	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
1801		return -1U;
1802
1803	/* Check they're not leading us off end of descriptors. */
1804	next = vhost16_to_cpu(vq, desc->next);
1805	/* Make sure compiler knows to grab that: we don't want it changing! */
1806	/* We will use the result as an index in an array, so most
1807	 * architectures only need a compiler barrier here. */
1808	read_barrier_depends();
1809
1810	return next;
1811}
1812
1813static int get_indirect(struct vhost_virtqueue *vq,
1814			struct iovec iov[], unsigned int iov_size,
1815			unsigned int *out_num, unsigned int *in_num,
1816			struct vhost_log *log, unsigned int *log_num,
1817			struct vring_desc *indirect)
1818{
1819	struct vring_desc desc;
1820	unsigned int i = 0, count, found = 0;
1821	u32 len = vhost32_to_cpu(vq, indirect->len);
1822	struct iov_iter from;
1823	int ret, access;
1824
1825	/* Sanity check */
1826	if (unlikely(len % sizeof desc)) {
1827		vq_err(vq, "Invalid length in indirect descriptor: "
1828		       "len 0x%llx not multiple of 0x%zx\n",
1829		       (unsigned long long)len,
1830		       sizeof desc);
1831		return -EINVAL;
1832	}
1833
1834	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
1835			     UIO_MAXIOV, VHOST_ACCESS_RO);
1836	if (unlikely(ret < 0)) {
1837		if (ret != -EAGAIN)
1838			vq_err(vq, "Translation failure %d in indirect.\n", ret);
1839		return ret;
1840	}
1841	iov_iter_init(&from, READ, vq->indirect, ret, len);
1842
1843	/* We will use the result as an address to read from, so most
1844	 * architectures only need a compiler barrier here. */
1845	read_barrier_depends();
1846
1847	count = len / sizeof desc;
1848	/* Buffers are chained via a 16 bit next field, so
1849	 * we can have at most 2^16 of these. */
1850	if (unlikely(count > USHRT_MAX + 1)) {
1851		vq_err(vq, "Indirect buffer length too big: %d\n",
1852		       indirect->len);
1853		return -E2BIG;
1854	}
1855
1856	do {
1857		unsigned iov_count = *in_num + *out_num;
1858		if (unlikely(++found > count)) {
1859			vq_err(vq, "Loop detected: last one at %u "
1860			       "indirect size %u\n",
1861			       i, count);
1862			return -EINVAL;
1863		}
1864		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
1865			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1866			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1867			return -EINVAL;
1868		}
1869		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
1870			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1871			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1872			return -EINVAL;
1873		}
1874
1875		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
1876			access = VHOST_ACCESS_WO;
1877		else
1878			access = VHOST_ACCESS_RO;
1879
1880		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
1881				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
1882				     iov_size - iov_count, access);
1883		if (unlikely(ret < 0)) {
1884			if (ret != -EAGAIN)
1885				vq_err(vq, "Translation failure %d indirect idx %d\n",
1886					ret, i);
1887			return ret;
1888		}
1889		/* If this is an input descriptor, increment that count. */
1890		if (access == VHOST_ACCESS_WO) {
1891			*in_num += ret;
1892			if (unlikely(log)) {
1893				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
1894				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
1895				++*log_num;
1896			}
1897		} else {
1898			/* If it's an output descriptor, they're all supposed
1899			 * to come before any input descriptors. */
1900			if (unlikely(*in_num)) {
1901				vq_err(vq, "Indirect descriptor "
1902				       "has out after in: idx %d\n", i);
1903				return -EINVAL;
1904			}
1905			*out_num += ret;
1906		}
1907	} while ((i = next_desc(vq, &desc)) != -1);
1908	return 0;
1909}
1910
1911/* This looks in the virtqueue for the first available buffer, and converts
1912 * it to an iovec for convenient access.  Since descriptors consist of some
1913 * number of output then some number of input descriptors, it's actually two
1914 * iovecs, but we pack them into one and note how many of each there were.
1915 *
1916 * This function returns the descriptor number found, or vq->num (which is
1917 * never a valid descriptor number) if none was found.  A negative code is
1918 * returned on error. */
1919int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1920		      struct iovec iov[], unsigned int iov_size,
1921		      unsigned int *out_num, unsigned int *in_num,
1922		      struct vhost_log *log, unsigned int *log_num)
1923{
1924	struct vring_desc desc;
1925	unsigned int i, head, found = 0;
1926	u16 last_avail_idx;
1927	__virtio16 avail_idx;
1928	__virtio16 ring_head;
1929	int ret, access;
1930
1931	/* Check it isn't doing very strange things with descriptor numbers. */
1932	last_avail_idx = vq->last_avail_idx;
1933	if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) {
1934		vq_err(vq, "Failed to access avail idx at %p\n",
1935		       &vq->avail->idx);
1936		return -EFAULT;
1937	}
1938	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
1939
1940	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1941		vq_err(vq, "Guest moved avail index from %u to %u",
1942		       last_avail_idx, vq->avail_idx);
1943		return -EFAULT;
1944	}
1945
1946	/* If there's nothing new since last we looked, return invalid. */
1947	if (vq->avail_idx == last_avail_idx)
1948		return vq->num;
1949
1950	/* Only get avail ring entries after they have been exposed by guest. */
1951	smp_rmb();
1952
1953	/* Grab the next descriptor number they're advertising, and increment
1954	 * the index we've seen. */
1955	if (unlikely(vhost_get_user(vq, ring_head,
1956		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
1957		vq_err(vq, "Failed to read head: idx %d address %p\n",
1958		       last_avail_idx,
1959		       &vq->avail->ring[last_avail_idx % vq->num]);
1960		return -EFAULT;
1961	}
1962
1963	head = vhost16_to_cpu(vq, ring_head);
1964
1965	/* If their number is silly, that's an error. */
1966	if (unlikely(head >= vq->num)) {
1967		vq_err(vq, "Guest says index %u > %u is available",
1968		       head, vq->num);
1969		return -EINVAL;
1970	}
1971
1972	/* When we start there are neither input nor output descriptors. */
1973	*out_num = *in_num = 0;
1974	if (unlikely(log))
1975		*log_num = 0;
1976
1977	i = head;
1978	do {
1979		unsigned iov_count = *in_num + *out_num;
1980		if (unlikely(i >= vq->num)) {
1981			vq_err(vq, "Desc index is %u > %u, head = %u",
1982			       i, vq->num, head);
1983			return -EINVAL;
1984		}
1985		if (unlikely(++found > vq->num)) {
1986			vq_err(vq, "Loop detected: last one at %u "
1987			       "vq size %u head %u\n",
1988			       i, vq->num, head);
1989			return -EINVAL;
1990		}
1991		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
1992					   sizeof desc);
1993		if (unlikely(ret)) {
1994			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1995			       i, vq->desc + i);
1996			return -EFAULT;
1997		}
1998		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
1999			ret = get_indirect(vq, iov, iov_size,
2000					   out_num, in_num,
2001					   log, log_num, &desc);
2002			if (unlikely(ret < 0)) {
2003				if (ret != -EAGAIN)
2004					vq_err(vq, "Failure detected "
2005						"in indirect descriptor at idx %d\n", i);
2006				return ret;
2007			}
2008			continue;
2009		}
2010
2011		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2012			access = VHOST_ACCESS_WO;
2013		else
2014			access = VHOST_ACCESS_RO;
2015		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2016				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2017				     iov_size - iov_count, access);
2018		if (unlikely(ret < 0)) {
2019			if (ret != -EAGAIN)
2020				vq_err(vq, "Translation failure %d descriptor idx %d\n",
2021					ret, i);
2022			return ret;
2023		}
2024		if (access == VHOST_ACCESS_WO) {
2025			/* If this is an input descriptor,
2026			 * increment that count. */
2027			*in_num += ret;
2028			if (unlikely(log)) {
2029				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2030				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2031				++*log_num;
2032			}
2033		} else {
2034			/* If it's an output descriptor, they're all supposed
2035			 * to come before any input descriptors. */
2036			if (unlikely(*in_num)) {
2037				vq_err(vq, "Descriptor has out after in: "
2038				       "idx %d\n", i);
2039				return -EINVAL;
2040			}
2041			*out_num += ret;
2042		}
2043	} while ((i = next_desc(vq, &desc)) != -1);
2044
2045	/* On success, increment avail index. */
2046	vq->last_avail_idx++;
2047
2048	/* Assume notifications from guest are disabled at this point,
2049	 * if they aren't we would need to update avail_event index. */
2050	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2051	return head;
2052}
2053EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
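
/*
 * Example (editorial sketch, compiled out): the canonical consumption loop a
 * backend runs from its handle_kick callback, including the notify re-check
 * that closes the race with the guest. example_process() is hypothetical.
 */
#if 0
static void example_drain(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned int out, in;
	int head, len;

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;		/* fault or translation failure */
		if (head == vq->num) {	/* ring looks empty */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				/* A buffer slipped in; keep polling. */
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;		/* sleep until the next kick */
		}
		len = example_process(vq->iov, out, in);
		vhost_add_used_and_signal(dev, vq, head, len);
	}
}
#endif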
2054
2055/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2056void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2057{
2058	vq->last_avail_idx -= n;
2059}
2060EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2061
2062/* After we've used one of their buffers, we tell them about it.  We'll then
2063 * want to notify the guest, using eventfd. */
2064int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2065{
2066	struct vring_used_elem heads = {
2067		cpu_to_vhost32(vq, head),
2068		cpu_to_vhost32(vq, len)
2069	};
2070
2071	return vhost_add_used_n(vq, &heads, 1);
2072}
2073EXPORT_SYMBOL_GPL(vhost_add_used);
2074
2075static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2076			    struct vring_used_elem *heads,
2077			    unsigned count)
2078{
2079	struct vring_used_elem __user *used;
2080	u16 old, new;
2081	int start;
2082
2083	start = vq->last_used_idx & (vq->num - 1);
2084	used = vq->used->ring + start;
2085	if (count == 1) {
2086		if (vhost_put_user(vq, heads[0].id, &used->id)) {
2087			vq_err(vq, "Failed to write used id");
2088			return -EFAULT;
2089		}
2090		if (vhost_put_user(vq, heads[0].len, &used->len)) {
2091			vq_err(vq, "Failed to write used len");
2092			return -EFAULT;
2093		}
2094	} else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
2095		vq_err(vq, "Failed to write used");
2096		return -EFAULT;
2097	}
2098	if (unlikely(vq->log_used)) {
2099		/* Make sure data is seen before log. */
2100		smp_wmb();
2101		/* Log used ring entry write. */
2102		log_write(vq->log_base,
2103			  vq->log_addr +
2104			   ((void __user *)used - (void __user *)vq->used),
2105			  count * sizeof *used);
2106	}
2107	old = vq->last_used_idx;
2108	new = (vq->last_used_idx += count);
2109	/* If the driver never bothers to signal in a very long while,
2110	 * used index might wrap around. If that happens, invalidate
2111	 * signalled_used index we stored. TODO: make sure driver
2112	 * signals at least once in 2^16 and remove this. */
2113	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2114		vq->signalled_used_valid = false;
2115	return 0;
2116}
2117
2118/* After we've used one of their buffers, we tell them about it.  We'll then
2119 * want to notify the guest, using eventfd. */
2120int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2121		     unsigned count)
2122{
2123	int start, n, r;
2124
2125	start = vq->last_used_idx & (vq->num - 1);
2126	n = vq->num - start;
2127	if (n < count) {
2128		r = __vhost_add_used_n(vq, heads, n);
2129		if (r < 0)
2130			return r;
2131		heads += n;
2132		count -= n;
2133	}
2134	r = __vhost_add_used_n(vq, heads, count);
2135
2136	/* Make sure buffer is written before we update index. */
2137	smp_wmb();
2138	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
2139			   &vq->used->idx)) {
2140		vq_err(vq, "Failed to increment used idx");
2141		return -EFAULT;
2142	}
2143	if (unlikely(vq->log_used)) {
2144		/* Log used index update. */
2145		log_write(vq->log_base,
2146			  vq->log_addr + offsetof(struct vring_used, idx),
2147			  sizeof vq->used->idx);
2148		if (vq->log_ctx)
2149			eventfd_signal(vq->log_ctx, 1);
2150	}
2151	return r;
2152}
2153EXPORT_SYMBOL_GPL(vhost_add_used_n);
2154
2155static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2156{
2157	__u16 old, new;
2158	__virtio16 event;
2159	bool v;
2160
2161	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2162	    unlikely(vq->avail_idx == vq->last_avail_idx))
2163		return true;
2164
2165	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2166		__virtio16 flags;
2167		/* Flush out used index updates. This is paired
2168		 * with the barrier that the Guest executes when enabling
2169		 * interrupts. */
2170		smp_mb();
2171		if (vhost_get_user(vq, flags, &vq->avail->flags)) {
2172			vq_err(vq, "Failed to get flags");
2173			return true;
2174		}
2175		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2176	}
2177	old = vq->signalled_used;
2178	v = vq->signalled_used_valid;
2179	new = vq->signalled_used = vq->last_used_idx;
2180	vq->signalled_used_valid = true;
2181
2182	if (unlikely(!v))
2183		return true;
2184
2185	/* We're sure there's no need to notify the guest when both of
2186	 * the following conditions are met:
2187	 * 1) the cached used event is ahead of new
2188	 * 2) the old-to-new update does not cross the cached used event. */
2189	if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
2190	    !vring_need_event(vq->last_used_event, new, old))
2191		return false;
2192
2193	/* Flush out used index updates. This is paired
2194	 * with the barrier that the Guest executes when enabling
2195	 * interrupts. */
2196	smp_mb();
2197
2198	if (vhost_get_user(vq, event, vhost_used_event(vq))) {
2199		vq_err(vq, "Failed to get used event idx");
2200		return true;
2201	}
2202	vq->last_used_event = vhost16_to_cpu(vq, event);
2203
2204	return vring_need_event(vq->last_used_event, new, old);
2205}
2206
2207/* This actually signals the guest, using eventfd. */
2208void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2209{
2210	/* Signal the Guest to tell them we've used something up. */
2211	if (vq->call_ctx && vhost_notify(dev, vq))
2212		eventfd_signal(vq->call_ctx, 1);
2213}
2214EXPORT_SYMBOL_GPL(vhost_signal);
2215
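/* Note: vq->call_ctx is the eventfd userspace handed us via
 * VHOST_SET_VRING_CALL.  Under KVM that same eventfd is normally also
 * registered as an irqfd, so the eventfd_signal() above turns
 * directly into a guest interrupt with no trip through userspace. */
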
2216/* And here's the combo meal deal.  Supersize me! */
2217void vhost_add_used_and_signal(struct vhost_dev *dev,
2218			       struct vhost_virtqueue *vq,
2219			       unsigned int head, int len)
2220{
2221	vhost_add_used(vq, head, len);
2222	vhost_signal(dev, vq);
2223}
2224EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2225
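/* Usage sketch (hypothetical kick handler, modelled loosely on
 * callers such as drivers/vhost/net.c): pop one descriptor chain,
 * consume it, then retire it with the combo above: */
static void example_handle_kick(struct vhost_dev *dev,
				struct vhost_virtqueue *vq)
{
	unsigned out, in;
	int head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				     &out, &in, NULL, NULL);

	if (head < 0 || head == vq->num)
		return;		/* error, or nothing available */

	/* ... consume the out/in iovecs here ... */

	vhost_add_used_and_signal(dev, vq, head, 0);
}
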
2226/* Multi-buffer version of vhost_add_used_and_signal. */
2227void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2228				 struct vhost_virtqueue *vq,
2229				 struct vring_used_elem *heads, unsigned count)
2230{
2231	vhost_add_used_n(vq, heads, count);
2232	vhost_signal(dev, vq);
2233}
2234EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2235
2236/* Return true if we're sure that the available ring is empty. */
2237bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2238{
2239	__virtio16 avail_idx;
2240	int r;
2241
2242	r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
2243	if (r)
2244		return false;
2245
2246	return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
2247}
2248EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2249
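/* Usage sketch (hypothetical; the in-tree caller is the busy-polling
 * path in drivers/vhost/net.c): spin briefly instead of sleeping
 * while the ring still looks empty: */
static void example_busy_poll(struct vhost_dev *dev,
			      struct vhost_virtqueue *vq,
			      unsigned long timeout_jiffies)
{
	unsigned long end = jiffies + timeout_jiffies;

	while (vhost_vq_avail_empty(dev, vq) && time_before(jiffies, end))
		cpu_relax();
}
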
2250/* OK, now we need to know about added descriptors. */
2251bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2252{
2253	__virtio16 avail_idx;
2254	int r;
2255
2256	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2257		return false;
2258	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2259	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2260		r = vhost_update_used_flags(vq);
2261		if (r) {
2262			vq_err(vq, "Failed to enable notification at %p: %d\n",
2263			       &vq->used->flags, r);
2264			return false;
2265		}
2266	} else {
2267		r = vhost_update_avail_event(vq, vq->avail_idx);
2268		if (r) {
2269			vq_err(vq, "Failed to update avail event index at %p: %d\n",
2270			       vhost_avail_event(vq), r);
2271			return false;
2272		}
2273	}
2274	/* They could have slipped one in as we were doing that: make
2275	 * sure it's written, then check again. */
2276	smp_mb();
2277	r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
2278	if (r) {
2279		vq_err(vq, "Failed to check avail idx at %p: %d\n",
2280		       &vq->avail->idx, r);
2281		return false;
2282	}
2283
2284	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
2285}
2286EXPORT_SYMBOL_GPL(vhost_enable_notify);
2287
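/* Usage sketch (hypothetical): the return value above is what closes
 * the classic race where the guest posts a buffer just as the worker
 * decides the ring is empty.  The canonical caller shape: */
static void example_drain(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned out, in;
	int head;

	vhost_disable_notify(dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;			/* ring error */
		if (head == vq->num) {		/* looks empty */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				/* one slipped in: re-disable, rescan */
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;	/* truly empty: wait for a kick */
		}
		/* ... consume the chain ... */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}
}
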
2288/* We don't need to be notified again. */
2289void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2290{
2291	int r;
2292
2293	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2294		return;
2295	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2296	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2297		r = vhost_update_used_flags(vq);
2298		if (r)
2299			vq_err(vq, "Failed to disable notification at %p: %d\n",
2300			       &vq->used->flags, r);
2301	}
2302}
2303EXPORT_SYMBOL_GPL(vhost_disable_notify);
2304
2305/* Create a new message. */
2306struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2307{
2308	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2309	if (!node)
2310		return NULL;
2311	node->vq = vq;
2312	node->msg.type = type;
2313	return node;
2314}
2315EXPORT_SYMBOL_GPL(vhost_new_msg);
2316
2317void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2318		       struct vhost_msg_node *node)
2319{
2320	spin_lock(&dev->iotlb_lock);
2321	list_add_tail(&node->node, head);
2322	spin_unlock(&dev->iotlb_lock);
2323
2324	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
2325}
2326EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2327
2328struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2329					 struct list_head *head)
2330{
2331	struct vhost_msg_node *node = NULL;
2332
2333	spin_lock(&dev->iotlb_lock);
2334	if (!list_empty(head)) {
2335		node = list_first_entry(head, struct vhost_msg_node,
2336					node);
2337		list_del(&node->node);
2338	}
2339	spin_unlock(&dev->iotlb_lock);
2340
2341	return node;
2342}
2343EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
2344
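/* Usage sketch (hypothetical, modelled on this file's IOTLB miss
 * reporting): the enqueue/dequeue pair above ferries vhost_msg items
 * between the kernel and the userspace backend, which read()s them
 * off the device fd.  A miss report is produced roughly like so: */
static int example_report_iotlb_miss(struct vhost_virtqueue *vq,
				     u64 iova, u8 perm)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MISS);

	if (!node)
		return -ENOMEM;

	node->msg.iotlb.iova = iova;
	node->msg.iotlb.perm = perm;
	node->msg.iotlb.type = VHOST_IOTLB_MISS;

	/* dev->read_list feeds the fd userspace polls and reads */
	vhost_enqueue_msg(dev, &dev->read_list, node);
	return 0;
}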
2345
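/* The core is a pure library module: it only exports helpers for the
 * actual device modules (vhost_net, vhost_scsi, ...), so there is
 * nothing to register at init or tear down at exit. */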
2346static int __init vhost_init(void)
2347{
2348	return 0;
2349}
2350
2351static void __exit vhost_exit(void)
2352{
2353}
2354
2355module_init(vhost_init);
2356module_exit(vhost_exit);
2357
2358MODULE_VERSION("0.0.1");
2359MODULE_LICENSE("GPL v2");
2360MODULE_AUTHOR("Michael S. Tsirkin");
2361MODULE_DESCRIPTION("Host kernel accelerator for virtio");