drivers/vhost/vhost.c (v3.1)
   1/* Copyright (C) 2009 Red Hat, Inc.
   2 * Copyright (C) 2006 Rusty Russell IBM Corporation
   3 *
   4 * Author: Michael S. Tsirkin <mst@redhat.com>
   5 *
   6 * Inspiration, some code, and most witty comments come from
   7 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.
  10 *
  11 * Generic code for virtio server in host kernel.
  12 */
  13
  14#include <linux/eventfd.h>
  15#include <linux/vhost.h>
  16#include <linux/virtio_net.h>
  17#include <linux/mm.h>
  18#include <linux/mmu_context.h>
  19#include <linux/miscdevice.h>
  20#include <linux/mutex.h>
  21#include <linux/rcupdate.h>
  22#include <linux/poll.h>
  23#include <linux/file.h>
  24#include <linux/highmem.h>
  25#include <linux/slab.h>
  26#include <linux/kthread.h>
  27#include <linux/cgroup.h>
  28
  29#include <linux/net.h>
  30#include <linux/if_packet.h>
  31#include <linux/if_arp.h>
  32
  33#include "vhost.h"
  34
  35enum {
  36	VHOST_MEMORY_MAX_NREGIONS = 64,
  37	VHOST_MEMORY_F_LOG = 0x1,
  38};
  39
  40static unsigned vhost_zcopy_mask __read_mostly;
  41
  42#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
  43#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
  44
  45static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
  46			    poll_table *pt)
  47{
  48	struct vhost_poll *poll;
  49
  50	poll = container_of(pt, struct vhost_poll, table);
  51	poll->wqh = wqh;
  52	add_wait_queue(wqh, &poll->wait);
  53}
  54
  55static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
  56			     void *key)
  57{
  58	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
  59
  60	if (!((unsigned long)key & poll->mask))
  61		return 0;
  62
  63	vhost_poll_queue(poll);
  64	return 0;
  65}
  66
  67static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
  68{
  69	INIT_LIST_HEAD(&work->node);
  70	work->fn = fn;
  71	init_waitqueue_head(&work->done);
  72	work->flushing = 0;
  73	work->queue_seq = work->done_seq = 0;
  74}
  75
  76/* Init poll structure */
  77void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
  78		     unsigned long mask, struct vhost_dev *dev)
  79{
  80	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
  81	init_poll_funcptr(&poll->table, vhost_poll_func);
  82	poll->mask = mask;
  83	poll->dev = dev;
  84
  85	vhost_work_init(&poll->work, fn);
  86}
  87
  88/* Start polling a file. We add ourselves to file's wait queue. The caller must
  89 * keep a reference to a file until after vhost_poll_stop is called. */
  90void vhost_poll_start(struct vhost_poll *poll, struct file *file)
  91{
  92	unsigned long mask;
  93
  94	mask = file->f_op->poll(file, &poll->table);
  95	if (mask)
  96		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
  97}
  98
  99/* Stop polling a file. After this function returns, it becomes safe to drop the
 100 * file reference. You must also flush afterwards. */
 101void vhost_poll_stop(struct vhost_poll *poll)
 102{
 103	remove_wait_queue(poll->wqh, &poll->wait);
 104}
 105
 106static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 107				unsigned seq)
 108{
 109	int left;
 110
 111	spin_lock_irq(&dev->work_lock);
 112	left = seq - work->done_seq;
 113	spin_unlock_irq(&dev->work_lock);
 114	return left <= 0;
 115}
 116
 117static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 118{
 119	unsigned seq;
 120	int flushing;
 121
 122	spin_lock_irq(&dev->work_lock);
 123	seq = work->queue_seq;
 124	work->flushing++;
 125	spin_unlock_irq(&dev->work_lock);
 126	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 127	spin_lock_irq(&dev->work_lock);
 128	flushing = --work->flushing;
 129	spin_unlock_irq(&dev->work_lock);
 130	BUG_ON(flushing < 0);
 131}
 132
 133/* Flush any work that has been scheduled. When calling this, don't hold any
 134 * locks that are also used by the callback. */
 135void vhost_poll_flush(struct vhost_poll *poll)
 136{
 137	vhost_work_flush(poll->dev, &poll->work);
 138}
 139
 140static inline void vhost_work_queue(struct vhost_dev *dev,
 141				    struct vhost_work *work)
 142{
 143	unsigned long flags;
 144
 145	spin_lock_irqsave(&dev->work_lock, flags);
 146	if (list_empty(&work->node)) {
 147		list_add_tail(&work->node, &dev->work_list);
 148		work->queue_seq++;
 149		wake_up_process(dev->worker);
 150	}
 151	spin_unlock_irqrestore(&dev->work_lock, flags);
 152}
 153
 154void vhost_poll_queue(struct vhost_poll *poll)
 155{
 156	vhost_work_queue(poll->dev, &poll->work);
 157}
 158
 159static void vhost_vq_reset(struct vhost_dev *dev,
 160			   struct vhost_virtqueue *vq)
 161{
 162	vq->num = 1;
 163	vq->desc = NULL;
 164	vq->avail = NULL;
 165	vq->used = NULL;
 166	vq->last_avail_idx = 0;
 167	vq->avail_idx = 0;
 168	vq->last_used_idx = 0;
 169	vq->signalled_used = 0;
 170	vq->signalled_used_valid = false;
 171	vq->used_flags = 0;
 172	vq->log_used = false;
 173	vq->log_addr = -1ull;
 174	vq->vhost_hlen = 0;
 175	vq->sock_hlen = 0;
 176	vq->private_data = NULL;
 177	vq->log_base = NULL;
 178	vq->error_ctx = NULL;
 179	vq->error = NULL;
 180	vq->kick = NULL;
 181	vq->call_ctx = NULL;
 182	vq->call = NULL;
 183	vq->log_ctx = NULL;
 184	vq->upend_idx = 0;
 185	vq->done_idx = 0;
 186	vq->ubufs = NULL;
 187}
 188
 189static int vhost_worker(void *data)
 190{
 191	struct vhost_dev *dev = data;
 192	struct vhost_work *work = NULL;
 193	unsigned uninitialized_var(seq);
 194
 195	use_mm(dev->mm);
 196
 197	for (;;) {
 198		/* mb paired w/ kthread_stop */
 199		set_current_state(TASK_INTERRUPTIBLE);
 200
 201		spin_lock_irq(&dev->work_lock);
 202		if (work) {
 203			work->done_seq = seq;
 204			if (work->flushing)
 205				wake_up_all(&work->done);
 206		}
 207
 208		if (kthread_should_stop()) {
 209			spin_unlock_irq(&dev->work_lock);
 210			__set_current_state(TASK_RUNNING);
 211			break;
 212		}
 213		if (!list_empty(&dev->work_list)) {
 214			work = list_first_entry(&dev->work_list,
 215						struct vhost_work, node);
 216			list_del_init(&work->node);
 217			seq = work->queue_seq;
 218		} else
 219			work = NULL;
 220		spin_unlock_irq(&dev->work_lock);
 221
 222		if (work) {
 223			__set_current_state(TASK_RUNNING);
 224			work->fn(work);
 225		} else
 226			schedule();
 227
 228	}
 229	unuse_mm(dev->mm);
 230	return 0;
 231}
 232
 233static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 234{
 235	kfree(vq->indirect);
 236	vq->indirect = NULL;
 237	kfree(vq->log);
 238	vq->log = NULL;
 239	kfree(vq->heads);
 240	vq->heads = NULL;
 241	kfree(vq->ubuf_info);
 242	vq->ubuf_info = NULL;
 243}
 244
 245void vhost_enable_zcopy(int vq)
 246{
 247	vhost_zcopy_mask |= 0x1 << vq;
 248}
 249
 250/* Helper to allocate iovec buffers for all vqs. */
 251static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 252{
 253	int i;
 254	bool zcopy;
 255
 256	for (i = 0; i < dev->nvqs; ++i) {
 257		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
 258					       UIO_MAXIOV, GFP_KERNEL);
 259		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
 260					  GFP_KERNEL);
 261		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
 262					    UIO_MAXIOV, GFP_KERNEL);
 263		zcopy = vhost_zcopy_mask & (0x1 << i);
 264		if (zcopy)
 265			dev->vqs[i].ubuf_info =
 266				kmalloc(sizeof *dev->vqs[i].ubuf_info *
 267					UIO_MAXIOV, GFP_KERNEL);
 268		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
 269			!dev->vqs[i].heads ||
 270			(zcopy && !dev->vqs[i].ubuf_info))
 271			goto err_nomem;
 272	}
 273	return 0;
 274
 275err_nomem:
 276	for (; i >= 0; --i)
 277		vhost_vq_free_iovecs(&dev->vqs[i]);
 278	return -ENOMEM;
 279}
 280
 281static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 282{
 283	int i;
 284
 285	for (i = 0; i < dev->nvqs; ++i)
 286		vhost_vq_free_iovecs(&dev->vqs[i]);
 287}
 288
 289long vhost_dev_init(struct vhost_dev *dev,
 290		    struct vhost_virtqueue *vqs, int nvqs)
 291{
 292	int i;
 293
 294	dev->vqs = vqs;
 295	dev->nvqs = nvqs;
 296	mutex_init(&dev->mutex);
 297	dev->log_ctx = NULL;
 298	dev->log_file = NULL;
 299	dev->memory = NULL;
 300	dev->mm = NULL;
 301	spin_lock_init(&dev->work_lock);
 302	INIT_LIST_HEAD(&dev->work_list);
 303	dev->worker = NULL;
 304
 305	for (i = 0; i < dev->nvqs; ++i) {
 306		dev->vqs[i].log = NULL;
 307		dev->vqs[i].indirect = NULL;
 308		dev->vqs[i].heads = NULL;
 309		dev->vqs[i].ubuf_info = NULL;
 310		dev->vqs[i].dev = dev;
 311		mutex_init(&dev->vqs[i].mutex);
 312		vhost_vq_reset(dev, dev->vqs + i);
 313		if (dev->vqs[i].handle_kick)
 314			vhost_poll_init(&dev->vqs[i].poll,
 315					dev->vqs[i].handle_kick, POLLIN, dev);
 316	}
 317
 318	return 0;
 319}
 320
 321/* Caller should have device mutex */
 322long vhost_dev_check_owner(struct vhost_dev *dev)
 323{
 324	/* Are you the owner? If not, I don't think you mean to do that */
 325	return dev->mm == current->mm ? 0 : -EPERM;
 326}
 327
 328struct vhost_attach_cgroups_struct {
 329	struct vhost_work work;
 330	struct task_struct *owner;
 331	int ret;
 332};
 333
 334static void vhost_attach_cgroups_work(struct vhost_work *work)
 335{
 336	struct vhost_attach_cgroups_struct *s;
 337
 338	s = container_of(work, struct vhost_attach_cgroups_struct, work);
 339	s->ret = cgroup_attach_task_all(s->owner, current);
 340}
 341
 342static int vhost_attach_cgroups(struct vhost_dev *dev)
 343{
 344	struct vhost_attach_cgroups_struct attach;
 345
 346	attach.owner = current;
 347	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 348	vhost_work_queue(dev, &attach.work);
 349	vhost_work_flush(dev, &attach.work);
 350	return attach.ret;
 351}
 352
 353/* Caller should have device mutex */
 354static long vhost_dev_set_owner(struct vhost_dev *dev)
 355{
 356	struct task_struct *worker;
 357	int err;
 358
 359	/* Is there an owner already? */
 360	if (dev->mm) {
 361		err = -EBUSY;
 362		goto err_mm;
 363	}
 364
 365	/* No owner, become one */
 366	dev->mm = get_task_mm(current);
 367	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 368	if (IS_ERR(worker)) {
 369		err = PTR_ERR(worker);
 370		goto err_worker;
 371	}
 372
 373	dev->worker = worker;
 374	wake_up_process(worker);	/* avoid contributing to loadavg */
 375
 376	err = vhost_attach_cgroups(dev);
 377	if (err)
 378		goto err_cgroup;
 379
 380	err = vhost_dev_alloc_iovecs(dev);
 381	if (err)
 382		goto err_cgroup;
 383
 384	return 0;
 385err_cgroup:
 386	kthread_stop(worker);
 387	dev->worker = NULL;
 388err_worker:
 389	if (dev->mm)
 390		mmput(dev->mm);
 391	dev->mm = NULL;
 392err_mm:
 393	return err;
 394}
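/*
 * Illustrative sketch, not part of this file: how userspace typically
 * claims a vhost device before any other setup.  "/dev/vhost-net" is the
 * misc device node registered by the vhost_net backend; the function name
 * below is a placeholder for this example.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vhost_open_and_own_example(void)
{
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0)
		return -1;
	/* Ties the device to this process's mm and starts the worker thread. */
	if (ioctl(fd, VHOST_SET_OWNER, NULL) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}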
 395
 396/* Caller should have device mutex */
 397long vhost_dev_reset_owner(struct vhost_dev *dev)
 398{
 399	struct vhost_memory *memory;
 400
 401	/* Restore memory to default empty mapping. */
 402	memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 403	if (!memory)
 404		return -ENOMEM;
 405
 406	vhost_dev_cleanup(dev);
 407
 408	memory->nregions = 0;
 409	RCU_INIT_POINTER(dev->memory, memory);
 410	return 0;
 411}
 412
 413/* In case of DMA done not in order in lower device driver for some reason.
 414 * upend_idx is used to track end of used idx, done_idx is used to track head
 415 * of used idx. Once lower device DMA done contiguously, we will signal KVM
 416 * guest used idx.
 417 */
 418int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
 419{
 420	int i;
 421	int j = 0;
 422
 423	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
 424		if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) {
 425			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
 426			vhost_add_used_and_signal(vq->dev, vq,
 427						  vq->heads[i].id, 0);
 428			++j;
 429		} else
 430			break;
 431	}
 432	if (j)
 433		vq->done_idx = i;
 434	return j;
 435}
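/*
 * Worked example (added for illustration, not in the original source):
 * suppose upend_idx == 5 and done_idx == 2, so entries 2..4 are still owned
 * by the lower device.  If DMA finishes for entries 3 and 4 first, their
 * heads[].len become VHOST_DMA_DONE_LEN but nothing is reported, because
 * entry 2 at the head is still pending.  Once entry 2 completes, the next
 * call walks entries 2, 3 and 4 in order, signals three used buffers to
 * the guest, and advances done_idx to 5.
 */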
 436
 437/* Caller should have device mutex */
 438void vhost_dev_cleanup(struct vhost_dev *dev)
 439{
 440	int i;
 441
 442	for (i = 0; i < dev->nvqs; ++i) {
 443		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
 444			vhost_poll_stop(&dev->vqs[i].poll);
 445			vhost_poll_flush(&dev->vqs[i].poll);
 446		}
 447		/* Wait for all lower device DMAs done. */
 448		if (dev->vqs[i].ubufs)
 449			vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);
 450
 451		/* Signal guest as appropriate. */
 452		vhost_zerocopy_signal_used(&dev->vqs[i]);
 453
 454		if (dev->vqs[i].error_ctx)
 455			eventfd_ctx_put(dev->vqs[i].error_ctx);
 456		if (dev->vqs[i].error)
 457			fput(dev->vqs[i].error);
 458		if (dev->vqs[i].kick)
 459			fput(dev->vqs[i].kick);
 460		if (dev->vqs[i].call_ctx)
 461			eventfd_ctx_put(dev->vqs[i].call_ctx);
 462		if (dev->vqs[i].call)
 463			fput(dev->vqs[i].call);
 464		vhost_vq_reset(dev, dev->vqs + i);
 465	}
 466	vhost_dev_free_iovecs(dev);
 467	if (dev->log_ctx)
 468		eventfd_ctx_put(dev->log_ctx);
 469	dev->log_ctx = NULL;
 470	if (dev->log_file)
 471		fput(dev->log_file);
 472	dev->log_file = NULL;
 473	/* No one will access memory at this point */
 474	kfree(rcu_dereference_protected(dev->memory,
 475					lockdep_is_held(&dev->mutex)));
 476	RCU_INIT_POINTER(dev->memory, NULL);
 477	WARN_ON(!list_empty(&dev->work_list));
 478	if (dev->worker) {
 479		kthread_stop(dev->worker);
 480		dev->worker = NULL;
 481	}
 482	if (dev->mm)
 483		mmput(dev->mm);
 484	dev->mm = NULL;
 485}
 486
 487static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 488{
 489	u64 a = addr / VHOST_PAGE_SIZE / 8;
 490
 491	/* Make sure 64 bit math will not overflow. */
 492	if (a > ULONG_MAX - (unsigned long)log_base ||
 493	    a + (unsigned long)log_base > ULONG_MAX)
 494		return 0;
 495
 496	return access_ok(VERIFY_WRITE, log_base + a,
 497			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 498}
 499
 500/* Caller should have vq mutex and device mutex. */
 501static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 502			       int log_all)
 503{
 504	int i;
 505
 506	if (!mem)
 507		return 0;
 508
 509	for (i = 0; i < mem->nregions; ++i) {
 510		struct vhost_memory_region *m = mem->regions + i;
 511		unsigned long a = m->userspace_addr;
 512		if (m->memory_size > ULONG_MAX)
 513			return 0;
 514		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
 515				    m->memory_size))
 516			return 0;
 517		else if (log_all && !log_access_ok(log_base,
 518						   m->guest_phys_addr,
 519						   m->memory_size))
 520			return 0;
 521	}
 522	return 1;
 523}
 524
 525/* Can we switch to this memory table? */
 526/* Caller should have device mutex but not vq mutex */
 527static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 528			    int log_all)
 529{
 530	int i;
 531
 532	for (i = 0; i < d->nvqs; ++i) {
 533		int ok;
 534		mutex_lock(&d->vqs[i].mutex);
 535		/* If ring is inactive, will check when it's enabled. */
 536		if (d->vqs[i].private_data)
 537			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
 538						 log_all);
 539		else
 540			ok = 1;
 541		mutex_unlock(&d->vqs[i].mutex);
 542		if (!ok)
 543			return 0;
 544	}
 545	return 1;
 546}
 547
 548static int vq_access_ok(struct vhost_dev *d, unsigned int num,
 549			struct vring_desc __user *desc,
 550			struct vring_avail __user *avail,
 551			struct vring_used __user *used)
 552{
 553	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 554	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 555	       access_ok(VERIFY_READ, avail,
 556			 sizeof *avail + num * sizeof *avail->ring + s) &&
 557	       access_ok(VERIFY_WRITE, used,
 558			sizeof *used + num * sizeof *used->ring + s);
 559}
 560
 561/* Can we log writes? */
 562/* Caller should have device mutex but not vq mutex */
 563int vhost_log_access_ok(struct vhost_dev *dev)
 564{
 565	struct vhost_memory *mp;
 566
 567	mp = rcu_dereference_protected(dev->memory,
 568				       lockdep_is_held(&dev->mutex));
 569	return memory_access_ok(dev, mp, 1);
 570}
 571
 572/* Verify access for write logging. */
 573/* Caller should have vq mutex and device mutex */
 574static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 575			    void __user *log_base)
 576{
 577	struct vhost_memory *mp;
 578	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 579
 580	mp = rcu_dereference_protected(vq->dev->memory,
 581				       lockdep_is_held(&vq->mutex));
 582	return vq_memory_access_ok(log_base, mp,
 583			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
 584		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
 585					sizeof *vq->used +
 586					vq->num * sizeof *vq->used->ring + s));
 587}
 588
 589/* Can we start vq? */
 590/* Caller should have vq mutex and device mutex */
 591int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 592{
 593	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
 594		vq_log_access_ok(vq->dev, vq, vq->log_base);
 595}
 596
 597static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 598{
 599	struct vhost_memory mem, *newmem, *oldmem;
 600	unsigned long size = offsetof(struct vhost_memory, regions);
 601
 602	if (copy_from_user(&mem, m, size))
 603		return -EFAULT;
 604	if (mem.padding)
 605		return -EOPNOTSUPP;
 606	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
 607		return -E2BIG;
 608	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
 609	if (!newmem)
 610		return -ENOMEM;
 611
 612	memcpy(newmem, &mem, size);
 613	if (copy_from_user(newmem->regions, m->regions,
 614			   mem.nregions * sizeof *m->regions)) {
 615		kfree(newmem);
 616		return -EFAULT;
 617	}
 618
 619	if (!memory_access_ok(d, newmem,
 620			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
 621		kfree(newmem);
 622		return -EFAULT;
 623	}
 624	oldmem = rcu_dereference_protected(d->memory,
 625					   lockdep_is_held(&d->mutex));
 626	rcu_assign_pointer(d->memory, newmem);
 627	synchronize_rcu();
 628	kfree(oldmem);
 629	return 0;
 630}
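/*
 * Illustrative sketch, not part of this file: how userspace might feed a
 * memory table to the ioctl above.  A single region covering all of guest
 * RAM is assumed; a real user (e.g. QEMU) builds one region per RAM block.
 * The function name is a placeholder.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vhost_set_mem_table_example(int vhost_fd, void *guest_ram,
				       unsigned long long ram_size)
{
	struct vhost_memory *mem;
	int ret;

	mem = calloc(1, sizeof(*mem) + sizeof(mem->regions[0]));
	if (!mem)
		return -1;
	mem->nregions = 1;
	mem->regions[0].guest_phys_addr = 0;
	mem->regions[0].memory_size = ram_size;
	mem->regions[0].userspace_addr = (unsigned long)guest_ram;

	ret = ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
	free(mem);
	return ret;
}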
 631
 632static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
 633{
 634	struct file *eventfp, *filep = NULL,
 635		    *pollstart = NULL, *pollstop = NULL;
 636	struct eventfd_ctx *ctx = NULL;
 637	u32 __user *idxp = argp;
 638	struct vhost_virtqueue *vq;
 639	struct vhost_vring_state s;
 640	struct vhost_vring_file f;
 641	struct vhost_vring_addr a;
 642	u32 idx;
 643	long r;
 644
 645	r = get_user(idx, idxp);
 646	if (r < 0)
 647		return r;
 648	if (idx >= d->nvqs)
 649		return -ENOBUFS;
 650
 651	vq = d->vqs + idx;
 652
 653	mutex_lock(&vq->mutex);
 654
 655	switch (ioctl) {
 656	case VHOST_SET_VRING_NUM:
 657		/* Resizing ring with an active backend?
 658		 * You don't want to do that. */
 659		if (vq->private_data) {
 660			r = -EBUSY;
 661			break;
 662		}
 663		if (copy_from_user(&s, argp, sizeof s)) {
 664			r = -EFAULT;
 665			break;
 666		}
 667		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
 668			r = -EINVAL;
 669			break;
 670		}
 671		vq->num = s.num;
 672		break;
 673	case VHOST_SET_VRING_BASE:
 674		/* Moving base with an active backend?
 675		 * You don't want to do that. */
 676		if (vq->private_data) {
 677			r = -EBUSY;
 678			break;
 679		}
 680		if (copy_from_user(&s, argp, sizeof s)) {
 681			r = -EFAULT;
 682			break;
 683		}
 684		if (s.num > 0xffff) {
 685			r = -EINVAL;
 686			break;
 687		}
 688		vq->last_avail_idx = s.num;
 689		/* Forget the cached index value. */
 690		vq->avail_idx = vq->last_avail_idx;
 691		break;
 692	case VHOST_GET_VRING_BASE:
 693		s.index = idx;
 694		s.num = vq->last_avail_idx;
 695		if (copy_to_user(argp, &s, sizeof s))
 696			r = -EFAULT;
 697		break;
 698	case VHOST_SET_VRING_ADDR:
 699		if (copy_from_user(&a, argp, sizeof a)) {
 700			r = -EFAULT;
 701			break;
 702		}
 703		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
 704			r = -EOPNOTSUPP;
 705			break;
 706		}
 707		/* For 32bit, verify that the top 32bits of the user
 708		   data are set to zero. */
 709		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
 710		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
 711		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
 712			r = -EFAULT;
 713			break;
 714		}
 715		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
 716		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
 717		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
 718			r = -EINVAL;
 719			break;
 720		}
 721
 722		/* We only verify access here if backend is configured.
 723		 * If it is not, we don't as size might not have been setup.
 724		 * We will verify when backend is configured. */
 725		if (vq->private_data) {
 726			if (!vq_access_ok(d, vq->num,
 727				(void __user *)(unsigned long)a.desc_user_addr,
 728				(void __user *)(unsigned long)a.avail_user_addr,
 729				(void __user *)(unsigned long)a.used_user_addr)) {
 730				r = -EINVAL;
 731				break;
 732			}
 733
 734			/* Also validate log access for used ring if enabled. */
 735			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
 736			    !log_access_ok(vq->log_base, a.log_guest_addr,
 737					   sizeof *vq->used +
 738					   vq->num * sizeof *vq->used->ring)) {
 739				r = -EINVAL;
 740				break;
 741			}
 742		}
 743
 744		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
 745		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
 746		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
 747		vq->log_addr = a.log_guest_addr;
 748		vq->used = (void __user *)(unsigned long)a.used_user_addr;
 749		break;
 750	case VHOST_SET_VRING_KICK:
 751		if (copy_from_user(&f, argp, sizeof f)) {
 752			r = -EFAULT;
 753			break;
 754		}
 755		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 756		if (IS_ERR(eventfp)) {
 757			r = PTR_ERR(eventfp);
 758			break;
 759		}
 760		if (eventfp != vq->kick) {
 761			pollstop = filep = vq->kick;
 762			pollstart = vq->kick = eventfp;
 763		} else
 764			filep = eventfp;
 765		break;
 766	case VHOST_SET_VRING_CALL:
 767		if (copy_from_user(&f, argp, sizeof f)) {
 768			r = -EFAULT;
 769			break;
 770		}
 771		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 772		if (IS_ERR(eventfp)) {
 773			r = PTR_ERR(eventfp);
 774			break;
 775		}
 776		if (eventfp != vq->call) {
 777			filep = vq->call;
 778			ctx = vq->call_ctx;
 779			vq->call = eventfp;
 780			vq->call_ctx = eventfp ?
 781				eventfd_ctx_fileget(eventfp) : NULL;
 782		} else
 783			filep = eventfp;
 784		break;
 785	case VHOST_SET_VRING_ERR:
 786		if (copy_from_user(&f, argp, sizeof f)) {
 787			r = -EFAULT;
 788			break;
 789		}
 790		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 791		if (IS_ERR(eventfp)) {
 792			r = PTR_ERR(eventfp);
 793			break;
 794		}
 795		if (eventfp != vq->error) {
 796			filep = vq->error;
 797			vq->error = eventfp;
 798			ctx = vq->error_ctx;
 799			vq->error_ctx = eventfp ?
 800				eventfd_ctx_fileget(eventfp) : NULL;
 801		} else
 802			filep = eventfp;
 803		break;
 804	default:
 805		r = -ENOIOCTLCMD;
 806	}
 807
 808	if (pollstop && vq->handle_kick)
 809		vhost_poll_stop(&vq->poll);
 810
 811	if (ctx)
 812		eventfd_ctx_put(ctx);
 813	if (filep)
 814		fput(filep);
 815
 816	if (pollstart && vq->handle_kick)
 817		vhost_poll_start(&vq->poll, vq->kick);
 818
 819	mutex_unlock(&vq->mutex);
 820
 821	if (pollstop && vq->handle_kick)
 822		vhost_poll_flush(&vq->poll);
 823	return r;
 824}
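/*
 * Illustrative sketch, not part of this file: the usual order in which
 * userspace programs one virtqueue through the ioctls handled above.  The
 * ring size must be a power of two no larger than 0xffff (checked in
 * VHOST_SET_VRING_NUM) and the ring addresses must be aligned as checked
 * in VHOST_SET_VRING_ADDR.  All names and parameters here are placeholders
 * chosen for the example.
 */
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <linux/vhost.h>

static int vhost_setup_vring_example(int vhost_fd, unsigned idx, unsigned qsize,
				     void *desc, void *avail, void *used)
{
	struct vhost_vring_state num = { .index = idx, .num = qsize };
	struct vhost_vring_state base = { .index = idx, .num = 0 };
	struct vhost_vring_addr addr = {
		.index = idx,
		.desc_user_addr = (unsigned long)desc,
		.avail_user_addr = (unsigned long)avail,
		.used_user_addr = (unsigned long)used,
	};
	struct vhost_vring_file kick = { .index = idx, .fd = eventfd(0, 0) };
	struct vhost_vring_file call = { .index = idx, .fd = eventfd(0, 0) };

	if (ioctl(vhost_fd, VHOST_SET_VRING_NUM, &num) ||
	    ioctl(vhost_fd, VHOST_SET_VRING_BASE, &base) ||
	    ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr) ||
	    ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick) ||
	    ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call))
		return -1;
	return 0;
}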
 825
 826/* Caller must have device mutex */
 827long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
 828{
 829	void __user *argp = (void __user *)arg;
 830	struct file *eventfp, *filep = NULL;
 831	struct eventfd_ctx *ctx = NULL;
 832	u64 p;
 833	long r;
 834	int i, fd;
 835
 836	/* If you are not the owner, you can become one */
 837	if (ioctl == VHOST_SET_OWNER) {
 838		r = vhost_dev_set_owner(d);
 839		goto done;
 840	}
 841
 842	/* You must be the owner to do anything else */
 843	r = vhost_dev_check_owner(d);
 844	if (r)
 845		goto done;
 846
 847	switch (ioctl) {
 848	case VHOST_SET_MEM_TABLE:
 849		r = vhost_set_memory(d, argp);
 850		break;
 851	case VHOST_SET_LOG_BASE:
 852		if (copy_from_user(&p, argp, sizeof p)) {
 853			r = -EFAULT;
 854			break;
 855		}
 856		if ((u64)(unsigned long)p != p) {
 857			r = -EFAULT;
 858			break;
 859		}
 860		for (i = 0; i < d->nvqs; ++i) {
 861			struct vhost_virtqueue *vq;
 862			void __user *base = (void __user *)(unsigned long)p;
 863			vq = d->vqs + i;
 864			mutex_lock(&vq->mutex);
 865			/* If ring is inactive, will check when it's enabled. */
 866			if (vq->private_data && !vq_log_access_ok(d, vq, base))
 867				r = -EFAULT;
 868			else
 869				vq->log_base = base;
 870			mutex_unlock(&vq->mutex);
 871		}
 872		break;
 873	case VHOST_SET_LOG_FD:
 874		r = get_user(fd, (int __user *)argp);
 875		if (r < 0)
 876			break;
 877		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
 878		if (IS_ERR(eventfp)) {
 879			r = PTR_ERR(eventfp);
 880			break;
 881		}
 882		if (eventfp != d->log_file) {
 883			filep = d->log_file;
 884			ctx = d->log_ctx;
 885			d->log_ctx = eventfp ?
 886				eventfd_ctx_fileget(eventfp) : NULL;
 887		} else
 888			filep = eventfp;
 889		for (i = 0; i < d->nvqs; ++i) {
 890			mutex_lock(&d->vqs[i].mutex);
 891			d->vqs[i].log_ctx = d->log_ctx;
 892			mutex_unlock(&d->vqs[i].mutex);
 893		}
 894		if (ctx)
 895			eventfd_ctx_put(ctx);
 896		if (filep)
 897			fput(filep);
 898		break;
 899	default:
 900		r = vhost_set_vring(d, ioctl, argp);
 901		break;
 902	}
 903done:
 904	return r;
 905}
 906
 907static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 908						     __u64 addr, __u32 len)
 909{
 910	struct vhost_memory_region *reg;
 911	int i;
 912
 913	/* linear search is not brilliant, but we really have on the order of 6
 914	 * regions in practice */
 915	for (i = 0; i < mem->nregions; ++i) {
 916		reg = mem->regions + i;
 917		if (reg->guest_phys_addr <= addr &&
 918		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
 919			return reg;
 920	}
 921	return NULL;
 922}
 923
 924/* TODO: This is really inefficient.  We need something like get_user()
 925 * (instruction directly accesses the data, with an exception table entry
 926 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 927 */
 928static int set_bit_to_user(int nr, void __user *addr)
 929{
 930	unsigned long log = (unsigned long)addr;
 931	struct page *page;
 932	void *base;
 933	int bit = nr + (log % PAGE_SIZE) * 8;
 934	int r;
 935
 936	r = get_user_pages_fast(log, 1, 1, &page);
 937	if (r < 0)
 938		return r;
 939	BUG_ON(r != 1);
 940	base = kmap_atomic(page, KM_USER0);
 941	set_bit(bit, base);
 942	kunmap_atomic(base, KM_USER0);
 943	set_page_dirty_lock(page);
 944	put_page(page);
 945	return 0;
 946}
 947
 948static int log_write(void __user *log_base,
 949		     u64 write_address, u64 write_length)
 950{
 951	u64 write_page = write_address / VHOST_PAGE_SIZE;
 952	int r;
 953
 954	if (!write_length)
 955		return 0;
 956	write_length += write_address % VHOST_PAGE_SIZE;
 957	for (;;) {
 958		u64 base = (u64)(unsigned long)log_base;
 959		u64 log = base + write_page / 8;
 960		int bit = write_page % 8;
 961		if ((u64)(unsigned long)log != log)
 962			return -EFAULT;
 963		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
 964		if (r < 0)
 965			return r;
 966		if (write_length <= VHOST_PAGE_SIZE)
 967			break;
 968		write_length -= VHOST_PAGE_SIZE;
 969		write_page += 1;
 970	}
 971	return r;
 972}
 973
 974int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 975		    unsigned int log_num, u64 len)
 976{
 977	int i, r;
 978
 979	/* Make sure data written is seen before log. */
 980	smp_wmb();
 981	for (i = 0; i < log_num; ++i) {
 982		u64 l = min(log[i].len, len);
 983		r = log_write(vq->log_base, log[i].addr, l);
 984		if (r < 0)
 985			return r;
 986		len -= l;
 987		if (!len) {
 988			if (vq->log_ctx)
 989				eventfd_signal(vq->log_ctx, 1);
 990			return 0;
 991		}
 992	}
 993	/* Length written exceeds what we have stored. This is a bug. */
 994	BUG();
 995	return 0;
 996}
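/*
 * Illustrative sketch, not part of this file: one way the consumer of the
 * dirty log (e.g. live migration code in userspace) could scan the bitmap
 * that log_write() fills in.  Each bit covers one VHOST_PAGE_SIZE (4096
 * byte) page of guest-physical memory; byte-wise little-endian bit
 * numbering is assumed here, matching set_bit() on little-endian hosts.
 * "mark_page_dirty" is a placeholder for the consumer's real action.
 */
static void vhost_scan_dirty_log_example(unsigned char *log_base,
					 unsigned long long mem_size)
{
	unsigned long long npages = mem_size / 4096;
	unsigned long long page;

	for (page = 0; page < npages; page++) {
		if (log_base[page / 8] & (1u << (page % 8))) {
			/* mark_page_dirty(page); -- placeholder action */
			log_base[page / 8] &= ~(1u << (page % 8));
		}
	}
}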
 997
 998static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 999{
1000	void __user *used;
1001	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
1002		return -EFAULT;
1003	if (unlikely(vq->log_used)) {
1004		/* Make sure the flag is seen before log. */
1005		smp_wmb();
1006		/* Log used flag write. */
1007		used = &vq->used->flags;
1008		log_write(vq->log_base, vq->log_addr +
1009			  (used - (void __user *)vq->used),
1010			  sizeof vq->used->flags);
1011		if (vq->log_ctx)
1012			eventfd_signal(vq->log_ctx, 1);
1013	}
1014	return 0;
1015}
1016
1017static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1018{
1019	if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
1020		return -EFAULT;
1021	if (unlikely(vq->log_used)) {
1022		void __user *used;
1023		/* Make sure the event is seen before log. */
1024		smp_wmb();
1025		/* Log avail event write */
1026		used = vhost_avail_event(vq);
1027		log_write(vq->log_base, vq->log_addr +
1028			  (used - (void __user *)vq->used),
1029			  sizeof *vhost_avail_event(vq));
1030		if (vq->log_ctx)
1031			eventfd_signal(vq->log_ctx, 1);
1032	}
1033	return 0;
1034}
1035
1036int vhost_init_used(struct vhost_virtqueue *vq)
1037{
1038	int r;
1039	if (!vq->private_data)
1040		return 0;
1041
1042	r = vhost_update_used_flags(vq);
1043	if (r)
1044		return r;
1045	vq->signalled_used_valid = false;
1046	return get_user(vq->last_used_idx, &vq->used->idx);
1047}
1048
1049static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
1050			  struct iovec iov[], int iov_size)
1051{
1052	const struct vhost_memory_region *reg;
1053	struct vhost_memory *mem;
1054	struct iovec *_iov;
1055	u64 s = 0;
1056	int ret = 0;
1057
1058	rcu_read_lock();
1059
1060	mem = rcu_dereference(dev->memory);
1061	while ((u64)len > s) {
1062		u64 size;
1063		if (unlikely(ret >= iov_size)) {
1064			ret = -ENOBUFS;
1065			break;
1066		}
1067		reg = find_region(mem, addr, len);
1068		if (unlikely(!reg)) {
1069			ret = -EFAULT;
1070			break;
1071		}
1072		_iov = iov + ret;
1073		size = reg->memory_size - addr + reg->guest_phys_addr;
1074		_iov->iov_len = min((u64)len, size);
1075		_iov->iov_base = (void __user *)(unsigned long)
1076			(reg->userspace_addr + addr - reg->guest_phys_addr);
1077		s += size;
1078		addr += size;
1079		++ret;
1080	}
1081
1082	rcu_read_unlock();
1083	return ret;
1084}
1085
1086/* Each buffer in the virtqueues is actually a chain of descriptors.  This
1087 * function returns the next descriptor in the chain,
1088 * or -1U if we're at the end. */
1089static unsigned next_desc(struct vring_desc *desc)
1090{
1091	unsigned int next;
1092
1093	/* If this descriptor says it doesn't chain, we're done. */
1094	if (!(desc->flags & VRING_DESC_F_NEXT))
1095		return -1U;
1096
1097	/* Check they're not leading us off end of descriptors. */
1098	next = desc->next;
1099	/* Make sure compiler knows to grab that: we don't want it changing! */
1100	/* We will use the result as an index in an array, so most
1101	 * architectures only need a compiler barrier here. */
1102	read_barrier_depends();
1103
1104	return next;
1105}
1106
1107static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1108			struct iovec iov[], unsigned int iov_size,
1109			unsigned int *out_num, unsigned int *in_num,
1110			struct vhost_log *log, unsigned int *log_num,
1111			struct vring_desc *indirect)
1112{
1113	struct vring_desc desc;
1114	unsigned int i = 0, count, found = 0;
1115	int ret;
1116
1117	/* Sanity check */
1118	if (unlikely(indirect->len % sizeof desc)) {
1119		vq_err(vq, "Invalid length in indirect descriptor: "
1120		       "len 0x%llx not multiple of 0x%zx\n",
1121		       (unsigned long long)indirect->len,
1122		       sizeof desc);
1123		return -EINVAL;
1124	}
1125
1126	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
1127			     UIO_MAXIOV);
1128	if (unlikely(ret < 0)) {
1129		vq_err(vq, "Translation failure %d in indirect.\n", ret);
1130		return ret;
1131	}
1132
1133	/* We will use the result as an address to read from, so most
1134	 * architectures only need a compiler barrier here. */
1135	read_barrier_depends();
1136
1137	count = indirect->len / sizeof desc;
1138	/* Buffers are chained via a 16 bit next field, so
1139	 * we can have at most 2^16 of these. */
1140	if (unlikely(count > USHRT_MAX + 1)) {
1141		vq_err(vq, "Indirect buffer length too big: %d\n",
1142		       indirect->len);
1143		return -E2BIG;
1144	}
1145
1146	do {
1147		unsigned iov_count = *in_num + *out_num;
1148		if (unlikely(++found > count)) {
1149			vq_err(vq, "Loop detected: last one at %u "
1150			       "indirect size %u\n",
1151			       i, count);
1152			return -EINVAL;
1153		}
1154		if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1155					      vq->indirect, sizeof desc))) {
1156			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1157			       i, (size_t)indirect->addr + i * sizeof desc);
1158			return -EINVAL;
1159		}
1160		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
1161			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1162			       i, (size_t)indirect->addr + i * sizeof desc);
1163			return -EINVAL;
1164		}
1165
1166		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1167				     iov_size - iov_count);
1168		if (unlikely(ret < 0)) {
1169			vq_err(vq, "Translation failure %d indirect idx %d\n",
1170			       ret, i);
1171			return ret;
1172		}
1173		/* If this is an input descriptor, increment that count. */
1174		if (desc.flags & VRING_DESC_F_WRITE) {
1175			*in_num += ret;
1176			if (unlikely(log)) {
1177				log[*log_num].addr = desc.addr;
1178				log[*log_num].len = desc.len;
1179				++*log_num;
1180			}
1181		} else {
1182			/* If it's an output descriptor, they're all supposed
1183			 * to come before any input descriptors. */
1184			if (unlikely(*in_num)) {
1185				vq_err(vq, "Indirect descriptor "
1186				       "has out after in: idx %d\n", i);
1187				return -EINVAL;
1188			}
1189			*out_num += ret;
1190		}
1191	} while ((i = next_desc(&desc)) != -1);
1192	return 0;
1193}
1194
1195/* This looks in the virtqueue for the first available buffer, and converts
1196 * it to an iovec for convenient access.  Since descriptors consist of some
1197 * number of output then some number of input descriptors, it's actually two
1198 * iovecs, but we pack them into one and note how many of each there were.
1199 *
1200 * This function returns the descriptor number found, or vq->num (which is
1201 * never a valid descriptor number) if none was found.  A negative code is
1202 * returned on error. */
1203int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1204		      struct iovec iov[], unsigned int iov_size,
1205		      unsigned int *out_num, unsigned int *in_num,
1206		      struct vhost_log *log, unsigned int *log_num)
1207{
1208	struct vring_desc desc;
1209	unsigned int i, head, found = 0;
1210	u16 last_avail_idx;
1211	int ret;
1212
1213	/* Check it isn't doing very strange things with descriptor numbers. */
1214	last_avail_idx = vq->last_avail_idx;
1215	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
1216		vq_err(vq, "Failed to access avail idx at %p\n",
1217		       &vq->avail->idx);
1218		return -EFAULT;
1219	}
1220
1221	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1222		vq_err(vq, "Guest moved used index from %u to %u",
1223		       last_avail_idx, vq->avail_idx);
1224		return -EFAULT;
1225	}
1226
1227	/* If there's nothing new since last we looked, return invalid. */
1228	if (vq->avail_idx == last_avail_idx)
1229		return vq->num;
1230
1231	/* Only get avail ring entries after they have been exposed by guest. */
1232	smp_rmb();
1233
1234	/* Grab the next descriptor number they're advertising, and increment
1235	 * the index we've seen. */
1236	if (unlikely(__get_user(head,
1237				&vq->avail->ring[last_avail_idx % vq->num]))) {
1238		vq_err(vq, "Failed to read head: idx %d address %p\n",
1239		       last_avail_idx,
1240		       &vq->avail->ring[last_avail_idx % vq->num]);
1241		return -EFAULT;
1242	}
1243
1244	/* If their number is silly, that's an error. */
1245	if (unlikely(head >= vq->num)) {
1246		vq_err(vq, "Guest says index %u > %u is available",
1247		       head, vq->num);
1248		return -EINVAL;
1249	}
1250
1251	/* When we start there are none of either input nor output. */
1252	*out_num = *in_num = 0;
1253	if (unlikely(log))
1254		*log_num = 0;
1255
1256	i = head;
1257	do {
1258		unsigned iov_count = *in_num + *out_num;
1259		if (unlikely(i >= vq->num)) {
1260			vq_err(vq, "Desc index is %u > %u, head = %u",
1261			       i, vq->num, head);
1262			return -EINVAL;
1263		}
1264		if (unlikely(++found > vq->num)) {
1265			vq_err(vq, "Loop detected: last one at %u "
1266			       "vq size %u head %u\n",
1267			       i, vq->num, head);
1268			return -EINVAL;
1269		}
1270		ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
1271		if (unlikely(ret)) {
1272			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1273			       i, vq->desc + i);
1274			return -EFAULT;
1275		}
1276		if (desc.flags & VRING_DESC_F_INDIRECT) {
1277			ret = get_indirect(dev, vq, iov, iov_size,
1278					   out_num, in_num,
1279					   log, log_num, &desc);
1280			if (unlikely(ret < 0)) {
1281				vq_err(vq, "Failure detected "
1282				       "in indirect descriptor at idx %d\n", i);
1283				return ret;
1284			}
1285			continue;
1286		}
1287
1288		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1289				     iov_size - iov_count);
1290		if (unlikely(ret < 0)) {
1291			vq_err(vq, "Translation failure %d descriptor idx %d\n",
1292			       ret, i);
1293			return ret;
1294		}
1295		if (desc.flags & VRING_DESC_F_WRITE) {
1296			/* If this is an input descriptor,
1297			 * increment that count. */
1298			*in_num += ret;
1299			if (unlikely(log)) {
1300				log[*log_num].addr = desc.addr;
1301				log[*log_num].len = desc.len;
1302				++*log_num;
1303			}
1304		} else {
1305			/* If it's an output descriptor, they're all supposed
1306			 * to come before any input descriptors. */
1307			if (unlikely(*in_num)) {
1308				vq_err(vq, "Descriptor has out after in: "
1309				       "idx %d\n", i);
1310				return -EINVAL;
1311			}
1312			*out_num += ret;
1313		}
1314	} while ((i = next_desc(&desc)) != -1);
1315
1316	/* On success, increment avail index. */
1317	vq->last_avail_idx++;
1318
1319	/* Assume notifications from guest are disabled at this point,
1320	 * if they aren't we would need to update avail_event index. */
1321	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
1322	return head;
1323}
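/*
 * Illustrative sketch, not part of this file: the loop shape a backend's
 * handle_kick work function typically builds around vhost_get_vq_desc(),
 * modelled loosely on vhost_net.  Processing of the returned iovecs is
 * left as a placeholder comment.
 */
static void example_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	unsigned out, in;
	int head;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq->dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (unlikely(head < 0))
			break;
		if (head == vq->num) {
			/* Ring empty: re-enable notifications, then recheck. */
			if (unlikely(vhost_enable_notify(vq->dev, vq))) {
				vhost_disable_notify(vq->dev, vq);
				continue;
			}
			break;
		}
		/* process vq->iov[0 .. out + in - 1] here (placeholder) */
		vhost_add_used_and_signal(vq->dev, vq, head, 0);
	}
	mutex_unlock(&vq->mutex);
}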
1324
1325/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
1326void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
1327{
1328	vq->last_avail_idx -= n;
1329}
1330
1331/* After we've used one of their buffers, we tell them about it.  We'll then
1332 * want to notify the guest, using eventfd. */
1333int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1334{
1335	struct vring_used_elem __user *used;
1336
1337	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
1338	 * next entry in that used ring. */
1339	used = &vq->used->ring[vq->last_used_idx % vq->num];
1340	if (__put_user(head, &used->id)) {
1341		vq_err(vq, "Failed to write used id");
1342		return -EFAULT;
1343	}
1344	if (__put_user(len, &used->len)) {
1345		vq_err(vq, "Failed to write used len");
1346		return -EFAULT;
1347	}
1348	/* Make sure buffer is written before we update index. */
1349	smp_wmb();
1350	if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
1351		vq_err(vq, "Failed to increment used idx");
1352		return -EFAULT;
1353	}
1354	if (unlikely(vq->log_used)) {
1355		/* Make sure data is seen before log. */
1356		smp_wmb();
1357		/* Log used ring entry write. */
1358		log_write(vq->log_base,
1359			  vq->log_addr +
1360			   ((void __user *)used - (void __user *)vq->used),
1361			  sizeof *used);
1362		/* Log used index update. */
1363		log_write(vq->log_base,
1364			  vq->log_addr + offsetof(struct vring_used, idx),
1365			  sizeof vq->used->idx);
1366		if (vq->log_ctx)
1367			eventfd_signal(vq->log_ctx, 1);
1368	}
1369	vq->last_used_idx++;
1370	/* If the driver never bothers to signal in a very long while,
1371	 * used index might wrap around. If that happens, invalidate
1372	 * signalled_used index we stored. TODO: make sure driver
1373	 * signals at least once in 2^16 and remove this. */
1374	if (unlikely(vq->last_used_idx == vq->signalled_used))
1375		vq->signalled_used_valid = false;
1376	return 0;
1377}
1378
1379static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1380			    struct vring_used_elem *heads,
1381			    unsigned count)
1382{
1383	struct vring_used_elem __user *used;
1384	u16 old, new;
1385	int start;
1386
1387	start = vq->last_used_idx % vq->num;
1388	used = vq->used->ring + start;
1389	if (__copy_to_user(used, heads, count * sizeof *used)) {
1390		vq_err(vq, "Failed to write used");
1391		return -EFAULT;
1392	}
1393	if (unlikely(vq->log_used)) {
1394		/* Make sure data is seen before log. */
1395		smp_wmb();
1396		/* Log used ring entry write. */
1397		log_write(vq->log_base,
1398			  vq->log_addr +
1399			   ((void __user *)used - (void __user *)vq->used),
1400			  count * sizeof *used);
1401	}
1402	old = vq->last_used_idx;
1403	new = (vq->last_used_idx += count);
1404	/* If the driver never bothers to signal in a very long while,
1405	 * used index might wrap around. If that happens, invalidate
1406	 * signalled_used index we stored. TODO: make sure driver
1407	 * signals at least once in 2^16 and remove this. */
1408	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1409		vq->signalled_used_valid = false;
1410	return 0;
1411}
1412
1413/* After we've used one of their buffers, we tell them about it.  We'll then
1414 * want to notify the guest, using eventfd. */
1415int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1416		     unsigned count)
1417{
1418	int start, n, r;
1419
1420	start = vq->last_used_idx % vq->num;
1421	n = vq->num - start;
1422	if (n < count) {
1423		r = __vhost_add_used_n(vq, heads, n);
1424		if (r < 0)
1425			return r;
1426		heads += n;
1427		count -= n;
1428	}
1429	r = __vhost_add_used_n(vq, heads, count);
1430
1431	/* Make sure buffer is written before we update index. */
1432	smp_wmb();
1433	if (put_user(vq->last_used_idx, &vq->used->idx)) {
1434		vq_err(vq, "Failed to increment used idx");
1435		return -EFAULT;
1436	}
1437	if (unlikely(vq->log_used)) {
1438		/* Log used index update. */
1439		log_write(vq->log_base,
1440			  vq->log_addr + offsetof(struct vring_used, idx),
1441			  sizeof vq->used->idx);
1442		if (vq->log_ctx)
1443			eventfd_signal(vq->log_ctx, 1);
1444	}
1445	return r;
1446}
1447
1448static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1449{
1450	__u16 old, new, event;
1451	bool v;
1452	/* Flush out used index updates. This is paired
1453	 * with the barrier that the Guest executes when enabling
1454	 * interrupts. */
1455	smp_mb();
1456
1457	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1458	    unlikely(vq->avail_idx == vq->last_avail_idx))
1459		return true;
1460
1461	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1462		__u16 flags;
1463		if (__get_user(flags, &vq->avail->flags)) {
1464			vq_err(vq, "Failed to get flags");
1465			return true;
1466		}
1467		return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
1468	}
1469	old = vq->signalled_used;
1470	v = vq->signalled_used_valid;
1471	new = vq->signalled_used = vq->last_used_idx;
1472	vq->signalled_used_valid = true;
1473
1474	if (unlikely(!v))
1475		return true;
1476
1477	if (get_user(event, vhost_used_event(vq))) {
1478		vq_err(vq, "Failed to get used event idx");
1479		return true;
1480	}
1481	return vring_need_event(event, new, old);
1482}
1483
1484/* This actually signals the guest, using eventfd. */
1485void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1486{
1487	/* Signal the Guest tell them we used something up. */
1488	if (vq->call_ctx && vhost_notify(dev, vq))
1489		eventfd_signal(vq->call_ctx, 1);
1490}
1491
1492/* And here's the combo meal deal.  Supersize me! */
1493void vhost_add_used_and_signal(struct vhost_dev *dev,
1494			       struct vhost_virtqueue *vq,
1495			       unsigned int head, int len)
1496{
1497	vhost_add_used(vq, head, len);
1498	vhost_signal(dev, vq);
1499}
1500
1501/* multi-buffer version of vhost_add_used_and_signal */
1502void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1503				 struct vhost_virtqueue *vq,
1504				 struct vring_used_elem *heads, unsigned count)
1505{
1506	vhost_add_used_n(vq, heads, count);
1507	vhost_signal(dev, vq);
1508}
1509
1510/* OK, now we need to know about added descriptors. */
1511bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1512{
1513	u16 avail_idx;
1514	int r;
1515
1516	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1517		return false;
1518	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1519	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1520		r = vhost_update_used_flags(vq);
1521		if (r) {
1522			vq_err(vq, "Failed to enable notification at %p: %d\n",
1523			       &vq->used->flags, r);
1524			return false;
1525		}
1526	} else {
1527		r = vhost_update_avail_event(vq, vq->avail_idx);
1528		if (r) {
1529			vq_err(vq, "Failed to update avail event index at %p: %d\n",
1530			       vhost_avail_event(vq), r);
1531			return false;
1532		}
1533	}
1534	/* They could have slipped one in as we were doing that: make
1535	 * sure it's written, then check again. */
1536	smp_mb();
1537	r = __get_user(avail_idx, &vq->avail->idx);
1538	if (r) {
1539		vq_err(vq, "Failed to check avail idx at %p: %d\n",
1540		       &vq->avail->idx, r);
1541		return false;
1542	}
1543
1544	return avail_idx != vq->avail_idx;
1545}
1546
1547/* We don't need to be notified again. */
1548void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1549{
1550	int r;
1551
1552	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1553		return;
1554	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1555	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1556		r = vhost_update_used_flags(vq);
1557		if (r)
1558			vq_err(vq, "Failed to enable notification at %p: %d\n",
1559			       &vq->used->flags, r);
1560	}
1561}
1562
1563static void vhost_zerocopy_done_signal(struct kref *kref)
1564{
1565	struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
1566						    kref);
1567	wake_up(&ubufs->wait);
1568}
1569
1570struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
1571					bool zcopy)
1572{
1573	struct vhost_ubuf_ref *ubufs;
1574	/* No zero copy backend? Nothing to count. */
1575	if (!zcopy)
1576		return NULL;
1577	ubufs = kmalloc(sizeof *ubufs, GFP_KERNEL);
1578	if (!ubufs)
1579		return ERR_PTR(-ENOMEM);
1580	kref_init(&ubufs->kref);
1581	init_waitqueue_head(&ubufs->wait);
1582	ubufs->vq = vq;
1583	return ubufs;
1584}
1585
1586void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
1587{
1588	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1589}
1590
1591void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
1592{
1593	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1594	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
1595	kfree(ubufs);
1596}
1597
1598void vhost_zerocopy_callback(void *arg)
1599{
1600	struct ubuf_info *ubuf = arg;
1601	struct vhost_ubuf_ref *ubufs = ubuf->arg;
1602	struct vhost_virtqueue *vq = ubufs->vq;
1603
1604	/* set len = 1 to mark this desc buffers done DMA */
1605	vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
1606	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1607}
drivers/vhost/vhost.c (v4.6)
   1/* Copyright (C) 2009 Red Hat, Inc.
   2 * Copyright (C) 2006 Rusty Russell IBM Corporation
   3 *
   4 * Author: Michael S. Tsirkin <mst@redhat.com>
   5 *
   6 * Inspiration, some code, and most witty comments come from
   7 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.
  10 *
  11 * Generic code for virtio server in host kernel.
  12 */
  13
  14#include <linux/eventfd.h>
  15#include <linux/vhost.h>
  16#include <linux/uio.h>
  17#include <linux/mm.h>
  18#include <linux/mmu_context.h>
  19#include <linux/miscdevice.h>
  20#include <linux/mutex.h>
  21#include <linux/poll.h>
  22#include <linux/file.h>
  23#include <linux/highmem.h>
  24#include <linux/slab.h>
  25#include <linux/vmalloc.h>
  26#include <linux/kthread.h>
  27#include <linux/cgroup.h>
  28#include <linux/module.h>
  29#include <linux/sort.h>
  30
  31#include "vhost.h"
  32
  33static ushort max_mem_regions = 64;
  34module_param(max_mem_regions, ushort, 0444);
  35MODULE_PARM_DESC(max_mem_regions,
  36	"Maximum number of memory regions in memory map. (default: 64)");
  37
  38enum {
  39	VHOST_MEMORY_F_LOG = 0x1,
  40};
  41
  42#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
  43#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
  44
  45#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
  46static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
  47{
  48	vq->user_be = !virtio_legacy_is_little_endian();
  49}
  50
  51static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
  52{
  53	vq->user_be = true;
  54}
  55
  56static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
  57{
  58	vq->user_be = false;
  59}
  60
  61static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
  62{
  63	struct vhost_vring_state s;
  64
  65	if (vq->private_data)
  66		return -EBUSY;
  67
  68	if (copy_from_user(&s, argp, sizeof(s)))
  69		return -EFAULT;
  70
  71	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
  72	    s.num != VHOST_VRING_BIG_ENDIAN)
  73		return -EINVAL;
  74
  75	if (s.num == VHOST_VRING_BIG_ENDIAN)
  76		vhost_enable_cross_endian_big(vq);
  77	else
  78		vhost_enable_cross_endian_little(vq);
  79
  80	return 0;
  81}
  82
  83static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
  84				   int __user *argp)
  85{
  86	struct vhost_vring_state s = {
  87		.index = idx,
  88		.num = vq->user_be
  89	};
  90
  91	if (copy_to_user(argp, &s, sizeof(s)))
  92		return -EFAULT;
  93
  94	return 0;
  95}
  96
  97static void vhost_init_is_le(struct vhost_virtqueue *vq)
  98{
  99	/* Note for legacy virtio: user_be is initialized at reset time
 100	 * according to the host endianness. If userspace does not set an
 101	 * explicit endianness, the default behavior is native endian, as
 102	 * expected by legacy virtio.
 103	 */
 104	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
 105}
 106#else
 107static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
 108{
 109}
 110
 111static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
 112{
 113	return -ENOIOCTLCMD;
 114}
 115
 116static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 117				   int __user *argp)
 118{
 119	return -ENOIOCTLCMD;
 120}
 121
 122static void vhost_init_is_le(struct vhost_virtqueue *vq)
 123{
 124	if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
 125		vq->is_le = true;
 126}
 127#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
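/*
 * Illustrative sketch, not part of this file: with
 * CONFIG_VHOST_CROSS_ENDIAN_LEGACY, userspace can pin a legacy ring's
 * endianness before attaching a backend, e.g. for a big-endian guest on a
 * little-endian host.  The function name is a placeholder.
 */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vhost_set_vring_big_endian_example(int vhost_fd, unsigned idx)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = VHOST_VRING_BIG_ENDIAN,
	};

	return ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s);
}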
 128
 129static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 130{
 131	vq->is_le = virtio_legacy_is_little_endian();
 132}
 133
 134static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 135			    poll_table *pt)
 136{
 137	struct vhost_poll *poll;
 138
 139	poll = container_of(pt, struct vhost_poll, table);
 140	poll->wqh = wqh;
 141	add_wait_queue(wqh, &poll->wait);
 142}
 143
 144static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 145			     void *key)
 146{
 147	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
 148
 149	if (!((unsigned long)key & poll->mask))
 150		return 0;
 151
 152	vhost_poll_queue(poll);
 153	return 0;
 154}
 155
 156void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 157{
 158	INIT_LIST_HEAD(&work->node);
 159	work->fn = fn;
 160	init_waitqueue_head(&work->done);
 161	work->flushing = 0;
 162	work->queue_seq = work->done_seq = 0;
 163}
 164EXPORT_SYMBOL_GPL(vhost_work_init);
 165
 166/* Init poll structure */
 167void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 168		     unsigned long mask, struct vhost_dev *dev)
 169{
 170	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 171	init_poll_funcptr(&poll->table, vhost_poll_func);
 172	poll->mask = mask;
 173	poll->dev = dev;
 174	poll->wqh = NULL;
 175
 176	vhost_work_init(&poll->work, fn);
 177}
 178EXPORT_SYMBOL_GPL(vhost_poll_init);
 179
 180/* Start polling a file. We add ourselves to file's wait queue. The caller must
 181 * keep a reference to a file until after vhost_poll_stop is called. */
 182int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 183{
 184	unsigned long mask;
 185	int ret = 0;
 186
 187	if (poll->wqh)
 188		return 0;
 189
 190	mask = file->f_op->poll(file, &poll->table);
 191	if (mask)
 192		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
 193	if (mask & POLLERR) {
 194		if (poll->wqh)
 195			remove_wait_queue(poll->wqh, &poll->wait);
 196		ret = -EINVAL;
 197	}
 198
 199	return ret;
 200}
 201EXPORT_SYMBOL_GPL(vhost_poll_start);
 202
 203/* Stop polling a file. After this function returns, it becomes safe to drop the
 204 * file reference. You must also flush afterwards. */
 205void vhost_poll_stop(struct vhost_poll *poll)
 206{
 207	if (poll->wqh) {
 208		remove_wait_queue(poll->wqh, &poll->wait);
 209		poll->wqh = NULL;
 210	}
 211}
 212EXPORT_SYMBOL_GPL(vhost_poll_stop);
 213
 214static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 215				unsigned seq)
 216{
 217	int left;
 218
 219	spin_lock_irq(&dev->work_lock);
 220	left = seq - work->done_seq;
 221	spin_unlock_irq(&dev->work_lock);
 222	return left <= 0;
 223}
 224
 225void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 226{
 227	unsigned seq;
 228	int flushing;
 229
 230	spin_lock_irq(&dev->work_lock);
 231	seq = work->queue_seq;
 232	work->flushing++;
 233	spin_unlock_irq(&dev->work_lock);
 234	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 235	spin_lock_irq(&dev->work_lock);
 236	flushing = --work->flushing;
 237	spin_unlock_irq(&dev->work_lock);
 238	BUG_ON(flushing < 0);
 239}
 240EXPORT_SYMBOL_GPL(vhost_work_flush);
 241
 242/* Flush any work that has been scheduled. When calling this, don't hold any
 243 * locks that are also used by the callback. */
 244void vhost_poll_flush(struct vhost_poll *poll)
 245{
 246	vhost_work_flush(poll->dev, &poll->work);
 247}
 248EXPORT_SYMBOL_GPL(vhost_poll_flush);
 249
 250void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 251{
 252	unsigned long flags;
 253
 254	spin_lock_irqsave(&dev->work_lock, flags);
 255	if (list_empty(&work->node)) {
 256		list_add_tail(&work->node, &dev->work_list);
 257		work->queue_seq++;
 258		spin_unlock_irqrestore(&dev->work_lock, flags);
 259		wake_up_process(dev->worker);
 260	} else {
 261		spin_unlock_irqrestore(&dev->work_lock, flags);
 262	}
 263}
 264EXPORT_SYMBOL_GPL(vhost_work_queue);
 265
 266/* A lockless hint for busy polling code to exit the loop */
 267bool vhost_has_work(struct vhost_dev *dev)
 268{
 269	return !list_empty(&dev->work_list);
 270}
 271EXPORT_SYMBOL_GPL(vhost_has_work);
 272
 273void vhost_poll_queue(struct vhost_poll *poll)
 274{
 275	vhost_work_queue(poll->dev, &poll->work);
 276}
 277EXPORT_SYMBOL_GPL(vhost_poll_queue);
 278
 279static void vhost_vq_reset(struct vhost_dev *dev,
 280			   struct vhost_virtqueue *vq)
 281{
 282	vq->num = 1;
 283	vq->desc = NULL;
 284	vq->avail = NULL;
 285	vq->used = NULL;
 286	vq->last_avail_idx = 0;
 287	vq->avail_idx = 0;
 288	vq->last_used_idx = 0;
 289	vq->signalled_used = 0;
 290	vq->signalled_used_valid = false;
 291	vq->used_flags = 0;
 292	vq->log_used = false;
 293	vq->log_addr = -1ull;
 294	vq->private_data = NULL;
 295	vq->acked_features = 0;
 296	vq->log_base = NULL;
 297	vq->error_ctx = NULL;
 298	vq->error = NULL;
 299	vq->kick = NULL;
 300	vq->call_ctx = NULL;
 301	vq->call = NULL;
 302	vq->log_ctx = NULL;
 303	vq->memory = NULL;
 304	vhost_reset_is_le(vq);
 305	vhost_disable_cross_endian(vq);
 306	vq->busyloop_timeout = 0;
 307}
 308
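/* Per-device worker thread: vhost_worker() below runs queued work items one
 * at a time in the context of the owner's mm (use_mm()), records done_seq and
 * wakes any flushers after each callback, sleeps when the work list is empty
 * and exits when vhost_dev_cleanup() calls kthread_stop(). */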
 309static int vhost_worker(void *data)
 310{
 311	struct vhost_dev *dev = data;
 312	struct vhost_work *work = NULL;
 313	unsigned uninitialized_var(seq);
 314	mm_segment_t oldfs = get_fs();
 315
 316	set_fs(USER_DS);
 317	use_mm(dev->mm);
 318
 319	for (;;) {
 320		/* mb paired w/ kthread_stop */
 321		set_current_state(TASK_INTERRUPTIBLE);
 322
 323		spin_lock_irq(&dev->work_lock);
 324		if (work) {
 325			work->done_seq = seq;
 326			if (work->flushing)
 327				wake_up_all(&work->done);
 328		}
 329
 330		if (kthread_should_stop()) {
 331			spin_unlock_irq(&dev->work_lock);
 332			__set_current_state(TASK_RUNNING);
 333			break;
 334		}
 335		if (!list_empty(&dev->work_list)) {
 336			work = list_first_entry(&dev->work_list,
 337						struct vhost_work, node);
 338			list_del_init(&work->node);
 339			seq = work->queue_seq;
 340		} else
 341			work = NULL;
 342		spin_unlock_irq(&dev->work_lock);
 343
 344		if (work) {
 345			__set_current_state(TASK_RUNNING);
 346			work->fn(work);
 347			if (need_resched())
 348				schedule();
 349		} else
 350			schedule();
 351
 352	}
 353	unuse_mm(dev->mm);
 354	set_fs(oldfs);
 355	return 0;
 356}
 357
 358static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 359{
 360	kfree(vq->indirect);
 361	vq->indirect = NULL;
 362	kfree(vq->log);
 363	vq->log = NULL;
 364	kfree(vq->heads);
 365	vq->heads = NULL;
 366}
 367
 368/* Helper to allocate iovec buffers for all vqs. */
 369static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 370{
 371	struct vhost_virtqueue *vq;
 372	int i;
 373
 374	for (i = 0; i < dev->nvqs; ++i) {
 375		vq = dev->vqs[i];
 376		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
 377				       GFP_KERNEL);
 378		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
 379		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
 380		if (!vq->indirect || !vq->log || !vq->heads)
 381			goto err_nomem;
 382	}
 383	return 0;
 384
 385err_nomem:
 386	for (; i >= 0; --i)
 387		vhost_vq_free_iovecs(dev->vqs[i]);
 388	return -ENOMEM;
 389}
 390
 391static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 392{
 393	int i;
 394
 395	for (i = 0; i < dev->nvqs; ++i)
 396		vhost_vq_free_iovecs(dev->vqs[i]);
 397}
 398
 399void vhost_dev_init(struct vhost_dev *dev,
 400		    struct vhost_virtqueue **vqs, int nvqs)
 401{
 402	struct vhost_virtqueue *vq;
 403	int i;
 404
 405	dev->vqs = vqs;
 406	dev->nvqs = nvqs;
 407	mutex_init(&dev->mutex);
 408	dev->log_ctx = NULL;
 409	dev->log_file = NULL;
 410	dev->memory = NULL;
 411	dev->mm = NULL;
 412	spin_lock_init(&dev->work_lock);
 413	INIT_LIST_HEAD(&dev->work_list);
 414	dev->worker = NULL;
 415
 416	for (i = 0; i < dev->nvqs; ++i) {
 417		vq = dev->vqs[i];
 418		vq->log = NULL;
 419		vq->indirect = NULL;
 420		vq->heads = NULL;
 421		vq->dev = dev;
 422		mutex_init(&vq->mutex);
 423		vhost_vq_reset(dev, vq);
 424		if (vq->handle_kick)
 425			vhost_poll_init(&vq->poll, vq->handle_kick,
 426					POLLIN, dev);
 427	}
 428}
 429EXPORT_SYMBOL_GPL(vhost_dev_init);
 430
 431/* Caller should have device mutex */
 432long vhost_dev_check_owner(struct vhost_dev *dev)
 433{
 434	/* Are you the owner? If not, I don't think you mean to do that */
 435	return dev->mm == current->mm ? 0 : -EPERM;
 436}
 437EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 438
 439struct vhost_attach_cgroups_struct {
 440	struct vhost_work work;
 441	struct task_struct *owner;
 442	int ret;
 443};
 444
 445static void vhost_attach_cgroups_work(struct vhost_work *work)
 446{
 447	struct vhost_attach_cgroups_struct *s;
 448
 449	s = container_of(work, struct vhost_attach_cgroups_struct, work);
 450	s->ret = cgroup_attach_task_all(s->owner, current);
 451}
 452
 453static int vhost_attach_cgroups(struct vhost_dev *dev)
 454{
 455	struct vhost_attach_cgroups_struct attach;
 456
 457	attach.owner = current;
 458	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 459	vhost_work_queue(dev, &attach.work);
 460	vhost_work_flush(dev, &attach.work);
 461	return attach.ret;
 462}
 463
 464/* Caller should have device mutex */
 465bool vhost_dev_has_owner(struct vhost_dev *dev)
 466{
 467	return dev->mm;
 468}
 469EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 470
 471/* Caller should have device mutex */
 472long vhost_dev_set_owner(struct vhost_dev *dev)
 473{
 474	struct task_struct *worker;
 475	int err;
 476
 477	/* Is there an owner already? */
 478	if (vhost_dev_has_owner(dev)) {
 479		err = -EBUSY;
 480		goto err_mm;
 481	}
 482
 483	/* No owner, become one */
 484	dev->mm = get_task_mm(current);
 485	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 486	if (IS_ERR(worker)) {
 487		err = PTR_ERR(worker);
 488		goto err_worker;
 489	}
 490
 491	dev->worker = worker;
 492	wake_up_process(worker);	/* avoid contributing to loadavg */
 493
 494	err = vhost_attach_cgroups(dev);
 495	if (err)
 496		goto err_cgroup;
 497
 498	err = vhost_dev_alloc_iovecs(dev);
 499	if (err)
 500		goto err_cgroup;
 501
 502	return 0;
 503err_cgroup:
 504	kthread_stop(worker);
 505	dev->worker = NULL;
 506err_worker:
 507	if (dev->mm)
 508		mmput(dev->mm);
 509	dev->mm = NULL;
 510err_mm:
 511	return err;
 512}
 513EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
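/* From userspace the expected setup order is roughly the following (sketch
 * only, error handling omitted; /dev/vhost-net is just one vhost device):
 *
 *	fd = open("/dev/vhost-net", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER);
 *	ioctl(fd, VHOST_SET_MEM_TABLE, &mem);
 *	ioctl(fd, VHOST_SET_VRING_NUM, &state);
 *	ioctl(fd, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(fd, VHOST_SET_VRING_KICK, &file);
 *	ioctl(fd, VHOST_SET_VRING_CALL, &file);
 *
 * VHOST_SET_OWNER must come first: it ties the device to the caller's mm and
 * starts the worker thread, and later ioctls require the caller to be the
 * owner. */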
 514
 515struct vhost_memory *vhost_dev_reset_owner_prepare(void)
 516{
 517	return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 518}
 519EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 520
 521/* Caller should have device mutex */
 522void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
 523{
 524	int i;
 525
 526	vhost_dev_cleanup(dev, true);
 527
 528	/* Restore memory to default empty mapping. */
 529	memory->nregions = 0;
 530	dev->memory = memory;
 531	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
 532	 * VQs aren't running.
 533	 */
 534	for (i = 0; i < dev->nvqs; ++i)
 535		dev->vqs[i]->memory = memory;
 536}
 537EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 538
 539void vhost_dev_stop(struct vhost_dev *dev)
 540{
 541	int i;
 542
 543	for (i = 0; i < dev->nvqs; ++i) {
 544		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 545			vhost_poll_stop(&dev->vqs[i]->poll);
 546			vhost_poll_flush(&dev->vqs[i]->poll);
 547		}
 548	}
 549}
 550EXPORT_SYMBOL_GPL(vhost_dev_stop);
 551
 552/* Caller should have device mutex if and only if locked is set */
 553void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 554{
 555	int i;
 556
 557	for (i = 0; i < dev->nvqs; ++i) {
 558		if (dev->vqs[i]->error_ctx)
 559			eventfd_ctx_put(dev->vqs[i]->error_ctx);
 560		if (dev->vqs[i]->error)
 561			fput(dev->vqs[i]->error);
 562		if (dev->vqs[i]->kick)
 563			fput(dev->vqs[i]->kick);
 564		if (dev->vqs[i]->call_ctx)
 565			eventfd_ctx_put(dev->vqs[i]->call_ctx);
 566		if (dev->vqs[i]->call)
 567			fput(dev->vqs[i]->call);
 568		vhost_vq_reset(dev, dev->vqs[i]);
 569	}
 570	vhost_dev_free_iovecs(dev);
 571	if (dev->log_ctx)
 572		eventfd_ctx_put(dev->log_ctx);
 573	dev->log_ctx = NULL;
 574	if (dev->log_file)
 575		fput(dev->log_file);
 576	dev->log_file = NULL;
 577	/* No one will access memory at this point */
 578	kvfree(dev->memory);
 579	dev->memory = NULL;
 580	WARN_ON(!list_empty(&dev->work_list));
 581	if (dev->worker) {
 582		kthread_stop(dev->worker);
 583		dev->worker = NULL;
 584	}
 585	if (dev->mm)
 586		mmput(dev->mm);
 587	dev->mm = NULL;
 588}
 589EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 590
 591static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 592{
 593	u64 a = addr / VHOST_PAGE_SIZE / 8;
 594
 595	/* Make sure 64 bit math will not overflow. */
 596	if (a > ULONG_MAX - (unsigned long)log_base ||
 597	    a + (unsigned long)log_base > ULONG_MAX)
 598		return 0;
 599
 600	return access_ok(VERIFY_WRITE, log_base + a,
 601			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 602}
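/* The dirty log is a user-supplied bitmap with one bit per VHOST_PAGE_SIZE
 * page of guest physical memory: bit (addr / VHOST_PAGE_SIZE) covers the page
 * containing guest address addr.  log_access_ok() above verifies that the
 * bitmap bytes covering guest addresses [addr, addr + sz) lie within the
 * caller's mapping. */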
 603
 604/* Caller should have vq mutex and device mutex. */
 605static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 606			       int log_all)
 607{
 608	int i;
 609
 610	if (!mem)
 611		return 0;
 612
 613	for (i = 0; i < mem->nregions; ++i) {
 614		struct vhost_memory_region *m = mem->regions + i;
 615		unsigned long a = m->userspace_addr;
 616		if (m->memory_size > ULONG_MAX)
 617			return 0;
 618		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
 619				    m->memory_size))
 620			return 0;
 621		else if (log_all && !log_access_ok(log_base,
 622						   m->guest_phys_addr,
 623						   m->memory_size))
 624			return 0;
 625	}
 626	return 1;
 627}
 628
 629/* Can we switch to this memory table? */
 630/* Caller should have device mutex but not vq mutex */
 631static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 632			    int log_all)
 633{
 634	int i;
 635
 636	for (i = 0; i < d->nvqs; ++i) {
 637		int ok;
 638		bool log;
 639
 640		mutex_lock(&d->vqs[i]->mutex);
 641		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
 642		/* If ring is inactive, will check when it's enabled. */
 643		if (d->vqs[i]->private_data)
 644			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
 645		else
 646			ok = 1;
 647		mutex_unlock(&d->vqs[i]->mutex);
 648		if (!ok)
 649			return 0;
 650	}
 651	return 1;
 652}
 653
 654static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
 655			struct vring_desc __user *desc,
 656			struct vring_avail __user *avail,
 657			struct vring_used __user *used)
 658{
 659	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 660	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 661	       access_ok(VERIFY_READ, avail,
 662			 sizeof *avail + num * sizeof *avail->ring + s) &&
 663	       access_ok(VERIFY_WRITE, used,
 664			sizeof *used + num * sizeof *used->ring + s);
 665}
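/* The extra 's' bytes above make room for used_event/avail_event: with
 * VIRTIO_RING_F_EVENT_IDX each ring carries one additional __virtio16
 * immediately after its entries (see the vhost_used_event() and
 * vhost_avail_event() helpers). */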
 666
 667/* Can we log writes? */
 668/* Caller should have device mutex but not vq mutex */
 669int vhost_log_access_ok(struct vhost_dev *dev)
 670{
 671	return memory_access_ok(dev, dev->memory, 1);
 672}
 673EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 674
 675/* Verify access for write logging. */
 676/* Caller should have vq mutex and device mutex */
 677static int vq_log_access_ok(struct vhost_virtqueue *vq,
 678			    void __user *log_base)
 679{
 680	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 681
 682	return vq_memory_access_ok(log_base, vq->memory,
 683				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
 684		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
 685					sizeof *vq->used +
 686					vq->num * sizeof *vq->used->ring + s));
 687}
 688
 689/* Can we start vq? */
 690/* Caller should have vq mutex and device mutex */
 691int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 692{
 693	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
 694		vq_log_access_ok(vq, vq->log_base);
 695}
 696EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 697
 698static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
 699{
 700	const struct vhost_memory_region *r1 = p1, *r2 = p2;
 701	if (r1->guest_phys_addr < r2->guest_phys_addr)
 702		return 1;
 703	if (r1->guest_phys_addr > r2->guest_phys_addr)
 704		return -1;
 705	return 0;
 706}
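/* Note the inverted comparison: vhost_set_memory() sorts the region table in
 * descending guest_phys_addr order, which is the order the binary search in
 * find_region() relies on. */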
 707
 708static void *vhost_kvzalloc(unsigned long size)
 709{
 710	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 711
 712	if (!n)
 713		n = vzalloc(size);
 714	return n;
 715}
 716
 717static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 718{
 719	struct vhost_memory mem, *newmem, *oldmem;
 720	unsigned long size = offsetof(struct vhost_memory, regions);
 721	int i;
 722
 723	if (copy_from_user(&mem, m, size))
 724		return -EFAULT;
 725	if (mem.padding)
 726		return -EOPNOTSUPP;
 727	if (mem.nregions > max_mem_regions)
 728		return -E2BIG;
 729	newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
 730	if (!newmem)
 731		return -ENOMEM;
 732
 733	memcpy(newmem, &mem, size);
 734	if (copy_from_user(newmem->regions, m->regions,
 735			   mem.nregions * sizeof *m->regions)) {
 736		kvfree(newmem);
 737		return -EFAULT;
 738	}
 739	sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
 740		vhost_memory_reg_sort_cmp, NULL);
 741
 742	if (!memory_access_ok(d, newmem, 0)) {
 743		kvfree(newmem);
 744		return -EFAULT;
 745	}
 746	oldmem = d->memory;
 747	d->memory = newmem;
 748
 749	/* All memory accesses are done under some VQ mutex. */
 750	for (i = 0; i < d->nvqs; ++i) {
 751		mutex_lock(&d->vqs[i]->mutex);
 752		d->vqs[i]->memory = newmem;
 753		mutex_unlock(&d->vqs[i]->mutex);
 754	}
 755	kvfree(oldmem);
 756	return 0;
 757}
 758
 759long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 760{
 761	struct file *eventfp, *filep = NULL;
 762	bool pollstart = false, pollstop = false;
 763	struct eventfd_ctx *ctx = NULL;
 764	u32 __user *idxp = argp;
 765	struct vhost_virtqueue *vq;
 766	struct vhost_vring_state s;
 767	struct vhost_vring_file f;
 768	struct vhost_vring_addr a;
 769	u32 idx;
 770	long r;
 771
 772	r = get_user(idx, idxp);
 773	if (r < 0)
 774		return r;
 775	if (idx >= d->nvqs)
 776		return -ENOBUFS;
 777
 778	vq = d->vqs[idx];
 779
 780	mutex_lock(&vq->mutex);
 781
 782	switch (ioctl) {
 783	case VHOST_SET_VRING_NUM:
 784		/* Resizing ring with an active backend?
 785		 * You don't want to do that. */
 786		if (vq->private_data) {
 787			r = -EBUSY;
 788			break;
 789		}
 790		if (copy_from_user(&s, argp, sizeof s)) {
 791			r = -EFAULT;
 792			break;
 793		}
 794		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
 795			r = -EINVAL;
 796			break;
 797		}
 798		vq->num = s.num;
 799		break;
 800	case VHOST_SET_VRING_BASE:
 801		/* Moving base with an active backend?
 802		 * You don't want to do that. */
 803		if (vq->private_data) {
 804			r = -EBUSY;
 805			break;
 806		}
 807		if (copy_from_user(&s, argp, sizeof s)) {
 808			r = -EFAULT;
 809			break;
 810		}
 811		if (s.num > 0xffff) {
 812			r = -EINVAL;
 813			break;
 814		}
 815		vq->last_avail_idx = s.num;
 816		/* Forget the cached index value. */
 817		vq->avail_idx = vq->last_avail_idx;
 818		break;
 819	case VHOST_GET_VRING_BASE:
 820		s.index = idx;
 821		s.num = vq->last_avail_idx;
 822		if (copy_to_user(argp, &s, sizeof s))
 823			r = -EFAULT;
 824		break;
 825	case VHOST_SET_VRING_ADDR:
 826		if (copy_from_user(&a, argp, sizeof a)) {
 827			r = -EFAULT;
 828			break;
 829		}
 830		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
 831			r = -EOPNOTSUPP;
 832			break;
 833		}
 834		/* For 32-bit, verify that the top 32 bits of the user
 835		   data are set to zero. */
 836		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
 837		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
 838		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
 839			r = -EFAULT;
 840			break;
 841		}
 842
 843		/* Make sure it's safe to cast pointers to vring types. */
 844		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
 845		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
 846		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
 847		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
 848		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
 849			r = -EINVAL;
 850			break;
 851		}
 852
 853		/* We only verify access here if a backend is configured.
 854		 * If it is not, we skip the check, as the size might not have
 855		 * been set up yet; we will verify when the backend is configured. */
 856		if (vq->private_data) {
 857			if (!vq_access_ok(vq, vq->num,
 858				(void __user *)(unsigned long)a.desc_user_addr,
 859				(void __user *)(unsigned long)a.avail_user_addr,
 860				(void __user *)(unsigned long)a.used_user_addr)) {
 861				r = -EINVAL;
 862				break;
 863			}
 864
 865			/* Also validate log access for used ring if enabled. */
 866			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
 867			    !log_access_ok(vq->log_base, a.log_guest_addr,
 868					   sizeof *vq->used +
 869					   vq->num * sizeof *vq->used->ring)) {
 870				r = -EINVAL;
 871				break;
 872			}
 873		}
 874
 875		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
 876		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
 877		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
 878		vq->log_addr = a.log_guest_addr;
 879		vq->used = (void __user *)(unsigned long)a.used_user_addr;
 880		break;
 881	case VHOST_SET_VRING_KICK:
 882		if (copy_from_user(&f, argp, sizeof f)) {
 883			r = -EFAULT;
 884			break;
 885		}
 886		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 887		if (IS_ERR(eventfp)) {
 888			r = PTR_ERR(eventfp);
 889			break;
 890		}
 891		if (eventfp != vq->kick) {
 892			pollstop = (filep = vq->kick) != NULL;
 893			pollstart = (vq->kick = eventfp) != NULL;
 894		} else
 895			filep = eventfp;
 896		break;
 897	case VHOST_SET_VRING_CALL:
 898		if (copy_from_user(&f, argp, sizeof f)) {
 899			r = -EFAULT;
 900			break;
 901		}
 902		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 903		if (IS_ERR(eventfp)) {
 904			r = PTR_ERR(eventfp);
 905			break;
 906		}
 907		if (eventfp != vq->call) {
 908			filep = vq->call;
 909			ctx = vq->call_ctx;
 910			vq->call = eventfp;
 911			vq->call_ctx = eventfp ?
 912				eventfd_ctx_fileget(eventfp) : NULL;
 913		} else
 914			filep = eventfp;
 915		break;
 916	case VHOST_SET_VRING_ERR:
 917		if (copy_from_user(&f, argp, sizeof f)) {
 918			r = -EFAULT;
 919			break;
 920		}
 921		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 922		if (IS_ERR(eventfp)) {
 923			r = PTR_ERR(eventfp);
 924			break;
 925		}
 926		if (eventfp != vq->error) {
 927			filep = vq->error;
 928			vq->error = eventfp;
 929			ctx = vq->error_ctx;
 930			vq->error_ctx = eventfp ?
 931				eventfd_ctx_fileget(eventfp) : NULL;
 932		} else
 933			filep = eventfp;
 934		break;
 935	case VHOST_SET_VRING_ENDIAN:
 936		r = vhost_set_vring_endian(vq, argp);
 937		break;
 938	case VHOST_GET_VRING_ENDIAN:
 939		r = vhost_get_vring_endian(vq, idx, argp);
 940		break;
 941	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
 942		if (copy_from_user(&s, argp, sizeof(s))) {
 943			r = -EFAULT;
 944			break;
 945		}
 946		vq->busyloop_timeout = s.num;
 947		break;
 948	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
 949		s.index = idx;
 950		s.num = vq->busyloop_timeout;
 951		if (copy_to_user(argp, &s, sizeof(s)))
 952			r = -EFAULT;
 953		break;
 954	default:
 955		r = -ENOIOCTLCMD;
 956	}
 957
 958	if (pollstop && vq->handle_kick)
 959		vhost_poll_stop(&vq->poll);
 960
 961	if (ctx)
 962		eventfd_ctx_put(ctx);
 963	if (filep)
 964		fput(filep);
 965
 966	if (pollstart && vq->handle_kick)
 967		r = vhost_poll_start(&vq->poll, vq->kick);
 968
 969	mutex_unlock(&vq->mutex);
 970
 971	if (pollstop && vq->handle_kick)
 972		vhost_poll_flush(&vq->poll);
 973	return r;
 974}
 975EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
 976
 977/* Caller must have device mutex */
 978long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 979{
 980	struct file *eventfp, *filep = NULL;
 981	struct eventfd_ctx *ctx = NULL;
 982	u64 p;
 983	long r;
 984	int i, fd;
 985
 986	/* If you are not the owner, you can become one */
 987	if (ioctl == VHOST_SET_OWNER) {
 988		r = vhost_dev_set_owner(d);
 989		goto done;
 990	}
 991
 992	/* You must be the owner to do anything else */
 993	r = vhost_dev_check_owner(d);
 994	if (r)
 995		goto done;
 996
 997	switch (ioctl) {
 998	case VHOST_SET_MEM_TABLE:
 999		r = vhost_set_memory(d, argp);
1000		break;
1001	case VHOST_SET_LOG_BASE:
1002		if (copy_from_user(&p, argp, sizeof p)) {
1003			r = -EFAULT;
1004			break;
1005		}
1006		if ((u64)(unsigned long)p != p) {
1007			r = -EFAULT;
1008			break;
1009		}
1010		for (i = 0; i < d->nvqs; ++i) {
1011			struct vhost_virtqueue *vq;
1012			void __user *base = (void __user *)(unsigned long)p;
1013			vq = d->vqs[i];
1014			mutex_lock(&vq->mutex);
1015			/* If ring is inactive, will check when it's enabled. */
1016			if (vq->private_data && !vq_log_access_ok(vq, base))
1017				r = -EFAULT;
1018			else
1019				vq->log_base = base;
1020			mutex_unlock(&vq->mutex);
1021		}
1022		break;
1023	case VHOST_SET_LOG_FD:
1024		r = get_user(fd, (int __user *)argp);
1025		if (r < 0)
1026			break;
1027		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
1028		if (IS_ERR(eventfp)) {
1029			r = PTR_ERR(eventfp);
1030			break;
1031		}
1032		if (eventfp != d->log_file) {
1033			filep = d->log_file;
1034			d->log_file = eventfp;
1035			ctx = d->log_ctx;
1036			d->log_ctx = eventfp ?
1037				eventfd_ctx_fileget(eventfp) : NULL;
1038		} else
1039			filep = eventfp;
1040		for (i = 0; i < d->nvqs; ++i) {
1041			mutex_lock(&d->vqs[i]->mutex);
1042			d->vqs[i]->log_ctx = d->log_ctx;
1043			mutex_unlock(&d->vqs[i]->mutex);
1044		}
1045		if (ctx)
1046			eventfd_ctx_put(ctx);
1047		if (filep)
1048			fput(filep);
1049		break;
1050	default:
1051		r = -ENOIOCTLCMD;
1052		break;
1053	}
1054done:
1055	return r;
1056}
1057EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
1058
1059static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
1060						     __u64 addr, __u32 len)
1061{
1062	const struct vhost_memory_region *reg;
1063	int start = 0, end = mem->nregions;
1064
1065	while (start < end) {
1066		int slot = start + (end - start) / 2;
1067		reg = mem->regions + slot;
1068		if (addr >= reg->guest_phys_addr)
1069			end = slot;
1070		else
1071			start = slot + 1;
1072	}
1073
1074	reg = mem->regions + start;
1075	if (addr >= reg->guest_phys_addr &&
1076		reg->guest_phys_addr + reg->memory_size > addr)
1077		return reg;
1078	return NULL;
1079}
1080
1081/* TODO: This is really inefficient.  We need something like get_user()
1082 * (instruction directly accesses the data, with an exception table entry
1083 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
1084 */
1085static int set_bit_to_user(int nr, void __user *addr)
1086{
1087	unsigned long log = (unsigned long)addr;
1088	struct page *page;
1089	void *base;
1090	int bit = nr + (log % PAGE_SIZE) * 8;
1091	int r;
1092
1093	r = get_user_pages_fast(log, 1, 1, &page);
1094	if (r < 0)
1095		return r;
1096	BUG_ON(r != 1);
1097	base = kmap_atomic(page);
1098	set_bit(bit, base);
1099	kunmap_atomic(base);
1100	set_page_dirty_lock(page);
1101	put_page(page);
1102	return 0;
1103}
1104
1105static int log_write(void __user *log_base,
1106		     u64 write_address, u64 write_length)
1107{
1108	u64 write_page = write_address / VHOST_PAGE_SIZE;
1109	int r;
1110
1111	if (!write_length)
1112		return 0;
1113	write_length += write_address % VHOST_PAGE_SIZE;
1114	for (;;) {
1115		u64 base = (u64)(unsigned long)log_base;
1116		u64 log = base + write_page / 8;
1117		int bit = write_page % 8;
1118		if ((u64)(unsigned long)log != log)
1119			return -EFAULT;
1120		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1121		if (r < 0)
1122			return r;
1123		if (write_length <= VHOST_PAGE_SIZE)
1124			break;
1125		write_length -= VHOST_PAGE_SIZE;
1126		write_page += 1;
1127	}
1128	return r;
1129}
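/* Worked example of the arithmetic above, assuming VHOST_PAGE_SIZE is 0x1000:
 * a write of 0x100 bytes at guest address 0x12345 starts in page 0x12
 * (0x12345 / 0x1000), so the bit to set lives in log byte 0x12 / 8 = 2 at bit
 * position 0x12 % 8 = 2.  Because write_length is padded by the offset into
 * the first page, a write that crosses page boundaries sets one bit for every
 * page it touches. */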
1130
1131int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1132		    unsigned int log_num, u64 len)
1133{
1134	int i, r;
1135
1136	/* Make sure data written is seen before log. */
1137	smp_wmb();
1138	for (i = 0; i < log_num; ++i) {
1139		u64 l = min(log[i].len, len);
1140		r = log_write(vq->log_base, log[i].addr, l);
1141		if (r < 0)
1142			return r;
1143		len -= l;
1144		if (!len) {
1145			if (vq->log_ctx)
1146				eventfd_signal(vq->log_ctx, 1);
1147			return 0;
1148		}
1149	}
1150	/* Length written exceeds what we have stored. This is a bug. */
1151	BUG();
1152	return 0;
1153}
1154EXPORT_SYMBOL_GPL(vhost_log_write);
1155
1156static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1157{
1158	void __user *used;
1159	if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
1160		return -EFAULT;
1161	if (unlikely(vq->log_used)) {
1162		/* Make sure the flag is seen before log. */
1163		smp_wmb();
1164		/* Log used flag write. */
1165		used = &vq->used->flags;
1166		log_write(vq->log_base, vq->log_addr +
1167			  (used - (void __user *)vq->used),
1168			  sizeof vq->used->flags);
1169		if (vq->log_ctx)
1170			eventfd_signal(vq->log_ctx, 1);
1171	}
1172	return 0;
1173}
1174
1175static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1176{
1177	if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
1178		return -EFAULT;
1179	if (unlikely(vq->log_used)) {
1180		void __user *used;
1181		/* Make sure the event is seen before log. */
1182		smp_wmb();
1183		/* Log avail event write */
1184		used = vhost_avail_event(vq);
1185		log_write(vq->log_base, vq->log_addr +
1186			  (used - (void __user *)vq->used),
1187			  sizeof *vhost_avail_event(vq));
1188		if (vq->log_ctx)
1189			eventfd_signal(vq->log_ctx, 1);
1190	}
1191	return 0;
1192}
1193
1194int vhost_vq_init_access(struct vhost_virtqueue *vq)
1195{
1196	__virtio16 last_used_idx;
1197	int r;
1198	bool is_le = vq->is_le;
1199
1200	if (!vq->private_data) {
1201		vhost_reset_is_le(vq);
1202		return 0;
1203	}
1204
1205	vhost_init_is_le(vq);
1206
1207	r = vhost_update_used_flags(vq);
1208	if (r)
1209		goto err;
1210	vq->signalled_used_valid = false;
1211	if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
1212		r = -EFAULT;
1213		goto err;
1214	}
1215	r = __get_user(last_used_idx, &vq->used->idx);
1216	if (r)
1217		goto err;
1218	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
1219	return 0;
1220err:
1221	vq->is_le = is_le;
1222	return r;
1223}
1224EXPORT_SYMBOL_GPL(vhost_vq_init_access);
1225
1226static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
1227			  struct iovec iov[], int iov_size)
1228{
1229	const struct vhost_memory_region *reg;
1230	struct vhost_memory *mem;
1231	struct iovec *_iov;
1232	u64 s = 0;
1233	int ret = 0;
1234
1235	mem = vq->memory;
1236	while ((u64)len > s) {
1237		u64 size;
1238		if (unlikely(ret >= iov_size)) {
1239			ret = -ENOBUFS;
1240			break;
1241		}
1242		reg = find_region(mem, addr, len);
1243		if (unlikely(!reg)) {
1244			ret = -EFAULT;
1245			break;
1246		}
1247		_iov = iov + ret;
1248		size = reg->memory_size - addr + reg->guest_phys_addr;
1249		_iov->iov_len = min((u64)len - s, size);
1250		_iov->iov_base = (void __user *)(unsigned long)
1251			(reg->userspace_addr + addr - reg->guest_phys_addr);
1252		s += size;
1253		addr += size;
1254		++ret;
1255	}
1256
1257	return ret;
1258}
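/* translate_desc() turns a guest-physical range into one or more iovecs
 * pointing at the corresponding userspace addresses, emitting one iovec per
 * memory region the range spans.  A range spanning more than iov_size regions
 * fails with -ENOBUFS, and a range outside every region fails with -EFAULT,
 * as a malformed descriptor from the guest should. */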
1259
1260/* Each buffer in the virtqueues is actually a chain of descriptors.  This
1261 * function returns the next descriptor in the chain,
1262 * or -1U if we're at the end. */
1263static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
1264{
1265	unsigned int next;
1266
1267	/* If this descriptor says it doesn't chain, we're done. */
1268	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
1269		return -1U;
1270
1271	/* Check they're not leading us off end of descriptors. */
1272	next = vhost16_to_cpu(vq, desc->next);
1273	/* Make sure compiler knows to grab that: we don't want it changing! */
1274	/* We will use the result as an index in an array, so most
1275	 * architectures only need a compiler barrier here. */
1276	read_barrier_depends();
1277
1278	return next;
1279}
1280
1281static int get_indirect(struct vhost_virtqueue *vq,
1282			struct iovec iov[], unsigned int iov_size,
1283			unsigned int *out_num, unsigned int *in_num,
1284			struct vhost_log *log, unsigned int *log_num,
1285			struct vring_desc *indirect)
1286{
1287	struct vring_desc desc;
1288	unsigned int i = 0, count, found = 0;
1289	u32 len = vhost32_to_cpu(vq, indirect->len);
1290	struct iov_iter from;
1291	int ret;
1292
1293	/* Sanity check */
1294	if (unlikely(len % sizeof desc)) {
1295		vq_err(vq, "Invalid length in indirect descriptor: "
1296		       "len 0x%llx not multiple of 0x%zx\n",
1297		       (unsigned long long)len,
1298		       sizeof desc);
1299		return -EINVAL;
1300	}
1301
1302	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
1303			     UIO_MAXIOV);
1304	if (unlikely(ret < 0)) {
1305		vq_err(vq, "Translation failure %d in indirect.\n", ret);
1306		return ret;
1307	}
1308	iov_iter_init(&from, READ, vq->indirect, ret, len);
1309
1310	/* We will use the result as an address to read from, so most
1311	 * architectures only need a compiler barrier here. */
1312	read_barrier_depends();
1313
1314	count = len / sizeof desc;
1315	/* Buffers are chained via a 16 bit next field, so
1316	 * we can have at most 2^16 of these. */
1317	if (unlikely(count > USHRT_MAX + 1)) {
1318		vq_err(vq, "Indirect buffer length too big: %d\n",
1319		       indirect->len);
1320		return -E2BIG;
1321	}
1322
1323	do {
1324		unsigned iov_count = *in_num + *out_num;
1325		if (unlikely(++found > count)) {
1326			vq_err(vq, "Loop detected: last one at %u "
1327			       "indirect size %u\n",
1328			       i, count);
1329			return -EINVAL;
1330		}
1331		if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
1332			     sizeof(desc))) {
1333			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1334			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1335			return -EINVAL;
1336		}
1337		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
1338			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1339			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1340			return -EINVAL;
1341		}
1342
1343		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
1344				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
1345				     iov_size - iov_count);
1346		if (unlikely(ret < 0)) {
1347			vq_err(vq, "Translation failure %d indirect idx %d\n",
1348			       ret, i);
1349			return ret;
1350		}
1351		/* If this is an input descriptor, increment that count. */
1352		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
1353			*in_num += ret;
1354			if (unlikely(log)) {
1355				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
1356				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
1357				++*log_num;
1358			}
1359		} else {
1360			/* If it's an output descriptor, they're all supposed
1361			 * to come before any input descriptors. */
1362			if (unlikely(*in_num)) {
1363				vq_err(vq, "Indirect descriptor "
1364				       "has out after in: idx %d\n", i);
1365				return -EINVAL;
1366			}
1367			*out_num += ret;
1368		}
1369	} while ((i = next_desc(vq, &desc)) != -1);
1370	return 0;
1371}
1372
 1373/* This looks in the virtqueue for the first available buffer, and converts
1374 * it to an iovec for convenient access.  Since descriptors consist of some
1375 * number of output then some number of input descriptors, it's actually two
1376 * iovecs, but we pack them into one and note how many of each there were.
1377 *
1378 * This function returns the descriptor number found, or vq->num (which is
1379 * never a valid descriptor number) if none was found.  A negative code is
1380 * returned on error. */
1381int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1382		      struct iovec iov[], unsigned int iov_size,
1383		      unsigned int *out_num, unsigned int *in_num,
1384		      struct vhost_log *log, unsigned int *log_num)
1385{
1386	struct vring_desc desc;
1387	unsigned int i, head, found = 0;
1388	u16 last_avail_idx;
1389	__virtio16 avail_idx;
1390	__virtio16 ring_head;
1391	int ret;
1392
1393	/* Check it isn't doing very strange things with descriptor numbers. */
1394	last_avail_idx = vq->last_avail_idx;
1395	if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
1396		vq_err(vq, "Failed to access avail idx at %p\n",
1397		       &vq->avail->idx);
1398		return -EFAULT;
1399	}
1400	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
1401
1402	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1403		vq_err(vq, "Guest moved used index from %u to %u",
1404		       last_avail_idx, vq->avail_idx);
1405		return -EFAULT;
1406	}
1407
1408	/* If there's nothing new since last we looked, return invalid. */
1409	if (vq->avail_idx == last_avail_idx)
1410		return vq->num;
1411
1412	/* Only get avail ring entries after they have been exposed by guest. */
1413	smp_rmb();
1414
1415	/* Grab the next descriptor number they're advertising, and increment
1416	 * the index we've seen. */
1417	if (unlikely(__get_user(ring_head,
1418				&vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
1419		vq_err(vq, "Failed to read head: idx %d address %p\n",
1420		       last_avail_idx,
1421		       &vq->avail->ring[last_avail_idx % vq->num]);
1422		return -EFAULT;
1423	}
1424
1425	head = vhost16_to_cpu(vq, ring_head);
1426
1427	/* If their number is silly, that's an error. */
1428	if (unlikely(head >= vq->num)) {
1429		vq_err(vq, "Guest says index %u > %u is available",
1430		       head, vq->num);
1431		return -EINVAL;
1432	}
1433
1434	/* When we start there are none of either input nor output. */
1435	*out_num = *in_num = 0;
1436	if (unlikely(log))
1437		*log_num = 0;
1438
1439	i = head;
1440	do {
1441		unsigned iov_count = *in_num + *out_num;
1442		if (unlikely(i >= vq->num)) {
1443			vq_err(vq, "Desc index is %u > %u, head = %u",
1444			       i, vq->num, head);
1445			return -EINVAL;
1446		}
1447		if (unlikely(++found > vq->num)) {
1448			vq_err(vq, "Loop detected: last one at %u "
1449			       "vq size %u head %u\n",
1450			       i, vq->num, head);
1451			return -EINVAL;
1452		}
1453		ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
1454		if (unlikely(ret)) {
1455			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1456			       i, vq->desc + i);
1457			return -EFAULT;
1458		}
1459		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
1460			ret = get_indirect(vq, iov, iov_size,
1461					   out_num, in_num,
1462					   log, log_num, &desc);
1463			if (unlikely(ret < 0)) {
1464				vq_err(vq, "Failure detected "
1465				       "in indirect descriptor at idx %d\n", i);
1466				return ret;
1467			}
1468			continue;
1469		}
1470
1471		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
1472				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
1473				     iov_size - iov_count);
1474		if (unlikely(ret < 0)) {
1475			vq_err(vq, "Translation failure %d descriptor idx %d\n",
1476			       ret, i);
1477			return ret;
1478		}
1479		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
1480			/* If this is an input descriptor,
1481			 * increment that count. */
1482			*in_num += ret;
1483			if (unlikely(log)) {
1484				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
1485				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
1486				++*log_num;
1487			}
1488		} else {
1489			/* If it's an output descriptor, they're all supposed
1490			 * to come before any input descriptors. */
1491			if (unlikely(*in_num)) {
1492				vq_err(vq, "Descriptor has out after in: "
1493				       "idx %d\n", i);
1494				return -EINVAL;
1495			}
1496			*out_num += ret;
1497		}
1498	} while ((i = next_desc(vq, &desc)) != -1);
1499
1500	/* On success, increment avail index. */
1501	vq->last_avail_idx++;
1502
 1503	/* Assume notifications from guest are disabled at this point;
 1504	 * if they aren't, we would need to update the avail_event index. */
1505	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
1506	return head;
1507}
1508EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
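/* Rough shape of a backend's handle_kick loop built on top of this (names and
 * error handling are illustrative only):
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0 || head == vq->num)
 *			break;
 *		...consume the out/in iovecs, producing len bytes...
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 *
 * A negative return means the ring is corrupted; head == vq->num simply means
 * nothing is pending. */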
1509
1510/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
1511void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
1512{
1513	vq->last_avail_idx -= n;
1514}
1515EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
1516
1517/* After we've used one of their buffers, we tell them about it.  We'll then
1518 * want to notify the guest, using eventfd. */
1519int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1520{
1521	struct vring_used_elem heads = {
1522		cpu_to_vhost32(vq, head),
1523		cpu_to_vhost32(vq, len)
1524	};
1525
1526	return vhost_add_used_n(vq, &heads, 1);
1527}
1528EXPORT_SYMBOL_GPL(vhost_add_used);
1529
1530static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1531			    struct vring_used_elem *heads,
1532			    unsigned count)
1533{
1534	struct vring_used_elem __user *used;
1535	u16 old, new;
1536	int start;
1537
1538	start = vq->last_used_idx & (vq->num - 1);
1539	used = vq->used->ring + start;
1540	if (count == 1) {
1541		if (__put_user(heads[0].id, &used->id)) {
1542			vq_err(vq, "Failed to write used id");
1543			return -EFAULT;
1544		}
1545		if (__put_user(heads[0].len, &used->len)) {
1546			vq_err(vq, "Failed to write used len");
1547			return -EFAULT;
1548		}
1549	} else if (__copy_to_user(used, heads, count * sizeof *used)) {
1550		vq_err(vq, "Failed to write used");
1551		return -EFAULT;
1552	}
1553	if (unlikely(vq->log_used)) {
1554		/* Make sure data is seen before log. */
1555		smp_wmb();
1556		/* Log used ring entry write. */
1557		log_write(vq->log_base,
1558			  vq->log_addr +
1559			   ((void __user *)used - (void __user *)vq->used),
1560			  count * sizeof *used);
1561	}
1562	old = vq->last_used_idx;
1563	new = (vq->last_used_idx += count);
1564	/* If the driver never bothers to signal in a very long while,
1565	 * used index might wrap around. If that happens, invalidate
1566	 * signalled_used index we stored. TODO: make sure driver
1567	 * signals at least once in 2^16 and remove this. */
1568	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1569		vq->signalled_used_valid = false;
1570	return 0;
1571}
1572
1573/* After we've used one of their buffers, we tell them about it.  We'll then
1574 * want to notify the guest, using eventfd. */
1575int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1576		     unsigned count)
1577{
1578	int start, n, r;
1579
1580	start = vq->last_used_idx & (vq->num - 1);
1581	n = vq->num - start;
1582	if (n < count) {
1583		r = __vhost_add_used_n(vq, heads, n);
1584		if (r < 0)
1585			return r;
1586		heads += n;
1587		count -= n;
1588	}
1589	r = __vhost_add_used_n(vq, heads, count);
1590
1591	/* Make sure buffer is written before we update index. */
1592	smp_wmb();
1593	if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
1594		vq_err(vq, "Failed to increment used idx");
1595		return -EFAULT;
1596	}
1597	if (unlikely(vq->log_used)) {
1598		/* Log used index update. */
1599		log_write(vq->log_base,
1600			  vq->log_addr + offsetof(struct vring_used, idx),
1601			  sizeof vq->used->idx);
1602		if (vq->log_ctx)
1603			eventfd_signal(vq->log_ctx, 1);
1604	}
1605	return r;
1606}
1607EXPORT_SYMBOL_GPL(vhost_add_used_n);
1608
1609static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1610{
1611	__u16 old, new;
1612	__virtio16 event;
1613	bool v;
1614	/* Flush out used index updates. This is paired
1615	 * with the barrier that the Guest executes when enabling
1616	 * interrupts. */
1617	smp_mb();
1618
1619	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1620	    unlikely(vq->avail_idx == vq->last_avail_idx))
1621		return true;
1622
1623	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
1624		__virtio16 flags;
1625		if (__get_user(flags, &vq->avail->flags)) {
1626			vq_err(vq, "Failed to get flags");
1627			return true;
1628		}
1629		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
1630	}
1631	old = vq->signalled_used;
1632	v = vq->signalled_used_valid;
1633	new = vq->signalled_used = vq->last_used_idx;
1634	vq->signalled_used_valid = true;
1635
1636	if (unlikely(!v))
1637		return true;
1638
1639	if (__get_user(event, vhost_used_event(vq))) {
1640		vq_err(vq, "Failed to get used event idx");
1641		return true;
1642	}
1643	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
1644}
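/* With VIRTIO_RING_F_EVENT_IDX the guest publishes a used_event index and we
 * only signal when the used index moves past it.  vring_need_event(), from
 * include/uapi/linux/virtio_ring.h, encodes that wrap-safe test as
 *
 *	(u16)(new - event - 1) < (u16)(new - old)
 *
 * which holds exactly when event lies in [old, new), i.e. within the batch of
 * entries added since we last signalled. */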
1645
1646/* This actually signals the guest, using eventfd. */
1647void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1648{
 1649	/* Signal the Guest to tell them we used something up. */
1650	if (vq->call_ctx && vhost_notify(dev, vq))
1651		eventfd_signal(vq->call_ctx, 1);
1652}
1653EXPORT_SYMBOL_GPL(vhost_signal);
1654
1655/* And here's the combo meal deal.  Supersize me! */
1656void vhost_add_used_and_signal(struct vhost_dev *dev,
1657			       struct vhost_virtqueue *vq,
1658			       unsigned int head, int len)
1659{
1660	vhost_add_used(vq, head, len);
1661	vhost_signal(dev, vq);
1662}
1663EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
1664
1665/* multi-buffer version of vhost_add_used_and_signal */
1666void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1667				 struct vhost_virtqueue *vq,
1668				 struct vring_used_elem *heads, unsigned count)
1669{
1670	vhost_add_used_n(vq, heads, count);
1671	vhost_signal(dev, vq);
1672}
1673EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
1674
 1675/* Return true if we're sure that the available ring is empty */
1676bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1677{
1678	__virtio16 avail_idx;
1679	int r;
1680
1681	r = __get_user(avail_idx, &vq->avail->idx);
1682	if (r)
1683		return false;
1684
1685	return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
1686}
1687EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
1688
1689/* OK, now we need to know about added descriptors. */
1690bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1691{
1692	__virtio16 avail_idx;
1693	int r;
1694
1695	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1696		return false;
1697	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1698	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
1699		r = vhost_update_used_flags(vq);
1700		if (r) {
1701			vq_err(vq, "Failed to enable notification at %p: %d\n",
1702			       &vq->used->flags, r);
1703			return false;
1704		}
1705	} else {
1706		r = vhost_update_avail_event(vq, vq->avail_idx);
1707		if (r) {
1708			vq_err(vq, "Failed to update avail event index at %p: %d\n",
1709			       vhost_avail_event(vq), r);
1710			return false;
1711		}
1712	}
1713	/* They could have slipped one in as we were doing that: make
1714	 * sure it's written, then check again. */
1715	smp_mb();
1716	r = __get_user(avail_idx, &vq->avail->idx);
1717	if (r) {
1718		vq_err(vq, "Failed to check avail idx at %p: %d\n",
1719		       &vq->avail->idx, r);
1720		return false;
1721	}
1722
1723	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
1724}
1725EXPORT_SYMBOL_GPL(vhost_enable_notify);
1726
1727/* We don't need to be notified again. */
1728void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1729{
1730	int r;
1731
1732	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1733		return;
1734	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1735	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
1736		r = vhost_update_used_flags(vq);
1737		if (r)
 1738			vq_err(vq, "Failed to disable notification at %p: %d\n",
1739			       &vq->used->flags, r);
1740	}
1741}
1742EXPORT_SYMBOL_GPL(vhost_disable_notify);
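/* Backends typically pair these two: disable notifications, drain the ring
 * with vhost_get_vq_desc() until it is empty, then call vhost_enable_notify().
 * If that reports new buffers slipped in while notifications were off,
 * disable again and keep processing; otherwise go back to sleep and wait for
 * the next kick. */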
1743
1744static int __init vhost_init(void)
1745{
1746	return 0;
1747}
1748
1749static void __exit vhost_exit(void)
1750{
1751}
1752
1753module_init(vhost_init);
1754module_exit(vhost_exit);
1755
1756MODULE_VERSION("0.0.1");
1757MODULE_LICENSE("GPL v2");
1758MODULE_AUTHOR("Michael S. Tsirkin");
1759MODULE_DESCRIPTION("Host kernel accelerator for virtio");