drivers/vhost/vhost.c (v3.1)
 
   1/* Copyright (C) 2009 Red Hat, Inc.
   2 * Copyright (C) 2006 Rusty Russell IBM Corporation
   3 *
   4 * Author: Michael S. Tsirkin <mst@redhat.com>
   5 *
   6 * Inspiration, some code, and most witty comments come from
   7 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.
  10 *
  11 * Generic code for virtio server in host kernel.
  12 */
  13
  14#include <linux/eventfd.h>
  15#include <linux/vhost.h>
  16#include <linux/virtio_net.h>
  17#include <linux/mm.h>
  18#include <linux/mmu_context.h>
  19#include <linux/miscdevice.h>
  20#include <linux/mutex.h>
  21#include <linux/rcupdate.h>
  22#include <linux/poll.h>
  23#include <linux/file.h>
  24#include <linux/highmem.h>
  25#include <linux/slab.h>
 
  26#include <linux/kthread.h>
  27#include <linux/cgroup.h>
  28
  29#include <linux/net.h>
  30#include <linux/if_packet.h>
  31#include <linux/if_arp.h>
 
 
  32
  33#include "vhost.h"
  34
  35enum {
  36	VHOST_MEMORY_MAX_NREGIONS = 64,
  37	VHOST_MEMORY_F_LOG = 0x1,
  38};
  39
  40static unsigned vhost_zcopy_mask __read_mostly;
  41
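/* With VIRTIO_RING_F_EVENT_IDX negotiated, the guest stores its used_event
 * index immediately after the available ring, and the device side stores
 * avail_event immediately after the used ring; the two macros below simply
 * point one element past ring[vq->num] to reach those fields. */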
  42#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
  43#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
 
  44
  45static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
  46			    poll_table *pt)
  47{
  48	struct vhost_poll *poll;
  49
  50	poll = container_of(pt, struct vhost_poll, table);
  51	poll->wqh = wqh;
  52	add_wait_queue(wqh, &poll->wait);
  53}
  54
  55static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
  56			     void *key)
  57{
  58	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
  59
  60	if (!((unsigned long)key & poll->mask))
  61		return 0;
  62
  63	vhost_poll_queue(poll);
  64	return 0;
  65}
  66
  67static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
  68{
  69	INIT_LIST_HEAD(&work->node);
  70	work->fn = fn;
  71	init_waitqueue_head(&work->done);
  72	work->flushing = 0;
  73	work->queue_seq = work->done_seq = 0;
  74}
 
  75
  76/* Init poll structure */
  77void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
  78		     unsigned long mask, struct vhost_dev *dev)
  79{
  80	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
  81	init_poll_funcptr(&poll->table, vhost_poll_func);
  82	poll->mask = mask;
  83	poll->dev = dev;
 
  84
  85	vhost_work_init(&poll->work, fn);
  86}
 
  87
  88/* Start polling a file. We add ourselves to file's wait queue. The caller must
  89 * keep a reference to a file until after vhost_poll_stop is called. */
  90void vhost_poll_start(struct vhost_poll *poll, struct file *file)
  91{
  92	unsigned long mask;
  93
  94	mask = file->f_op->poll(file, &poll->table);
  95	if (mask)
  96		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
  97}
 
  98
  99/* Stop polling a file. After this function returns, it becomes safe to drop the
 100 * file reference. You must also flush afterwards. */
 101void vhost_poll_stop(struct vhost_poll *poll)
 102{
 103	remove_wait_queue(poll->wqh, &poll->wait);
 104}
 
 105
 106static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 107				unsigned seq)
 108{
 109	int left;
 110
 111	spin_lock_irq(&dev->work_lock);
 112	left = seq - work->done_seq;
 113	spin_unlock_irq(&dev->work_lock);
 114	return left <= 0;
 115}
 116
 117static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 118{
 119	unsigned seq;
 120	int flushing;
 121
 122	spin_lock_irq(&dev->work_lock);
 123	seq = work->queue_seq;
 124	work->flushing++;
 125	spin_unlock_irq(&dev->work_lock);
 126	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 127	spin_lock_irq(&dev->work_lock);
 128	flushing = --work->flushing;
 129	spin_unlock_irq(&dev->work_lock);
 130	BUG_ON(flushing < 0);
 131}
 
 132
 133/* Flush any work that has been scheduled. When calling this, don't hold any
 134 * locks that are also used by the callback. */
 135void vhost_poll_flush(struct vhost_poll *poll)
 136{
 137	vhost_work_flush(poll->dev, &poll->work);
 138}
 
 139
 140static inline void vhost_work_queue(struct vhost_dev *dev,
 141				    struct vhost_work *work)
 142{
 143	unsigned long flags;
 
 144
 145	spin_lock_irqsave(&dev->work_lock, flags);
 146	if (list_empty(&work->node)) {
 147		list_add_tail(&work->node, &dev->work_list);
 148		work->queue_seq++;
 
 
 149		wake_up_process(dev->worker);
 150	}
 151	spin_unlock_irqrestore(&dev->work_lock, flags);
 152}
 153
 154void vhost_poll_queue(struct vhost_poll *poll)
 155{
 156	vhost_work_queue(poll->dev, &poll->work);
 157}
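
/* Illustrative usage sketch (not taken from the original file; the handler
 * handle_kick_sample(), poll_setup_sample() and the kick_file argument are
 * placeholders, modelled on how a backend such as drivers/vhost/net.c wires
 * up its kick handlers): */

static void handle_kick_sample(struct vhost_work *work)
{
	struct vhost_virtqueue *vq =
		container_of(work, struct vhost_virtqueue, poll.work);

	/* Always runs in the per-device worker thread, never from the
	 * wait-queue wakeup path (vhost_poll_wakeup only queues work). */
	mutex_lock(&vq->mutex);
	/* ... consume the ring here ... */
	mutex_unlock(&vq->mutex);
}

static void poll_setup_sample(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			      struct file *kick_file)
{
	vhost_poll_init(&vq->poll, handle_kick_sample, POLLIN, dev);
	vhost_poll_start(&vq->poll, kick_file);
	/* ... later, before dropping the file reference ... */
	vhost_poll_stop(&vq->poll);
	vhost_poll_flush(&vq->poll);
}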
 158
 159static void vhost_vq_reset(struct vhost_dev *dev,
 160			   struct vhost_virtqueue *vq)
 161{
 162	vq->num = 1;
 163	vq->desc = NULL;
 164	vq->avail = NULL;
 165	vq->used = NULL;
 166	vq->last_avail_idx = 0;
 167	vq->avail_idx = 0;
 168	vq->last_used_idx = 0;
 169	vq->signalled_used = 0;
 170	vq->signalled_used_valid = false;
 171	vq->used_flags = 0;
 172	vq->log_used = false;
 173	vq->log_addr = -1ull;
 174	vq->vhost_hlen = 0;
 175	vq->sock_hlen = 0;
 176	vq->private_data = NULL;
 
 
 177	vq->log_base = NULL;
 178	vq->error_ctx = NULL;
 179	vq->error = NULL;
 180	vq->kick = NULL;
 181	vq->call_ctx = NULL;
 182	vq->call = NULL;
 183	vq->log_ctx = NULL;
 184	vq->upend_idx = 0;
 185	vq->done_idx = 0;
 186	vq->ubufs = NULL;
 187}
 188
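/* Per-device worker thread: each pass publishes the done_seq of the item it
 * just ran (so vhost_work_flush() can stop waiting on work->done), then
 * either pops the next item off dev->work_list and runs its fn with the
 * owner's mm made current via use_mm(), or sleeps until vhost_work_queue()
 * wakes it up. */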
 189static int vhost_worker(void *data)
 190{
 191	struct vhost_dev *dev = data;
 192	struct vhost_work *work = NULL;
 193	unsigned uninitialized_var(seq);
 
 194
 
 195	use_mm(dev->mm);
 196
 197	for (;;) {
 198		/* mb paired w/ kthread_stop */
 199		set_current_state(TASK_INTERRUPTIBLE);
 200
 201		spin_lock_irq(&dev->work_lock);
 202		if (work) {
 203			work->done_seq = seq;
 204			if (work->flushing)
 205				wake_up_all(&work->done);
 206		}
 207
 208		if (kthread_should_stop()) {
 209			spin_unlock_irq(&dev->work_lock);
 210			__set_current_state(TASK_RUNNING);
 211			break;
 212		}
 213		if (!list_empty(&dev->work_list)) {
 214			work = list_first_entry(&dev->work_list,
 215						struct vhost_work, node);
 216			list_del_init(&work->node);
 217			seq = work->queue_seq;
 218		} else
 219			work = NULL;
 220		spin_unlock_irq(&dev->work_lock);
 221
 222		if (work) {
 223			__set_current_state(TASK_RUNNING);
 224			work->fn(work);
 225		} else
 226			schedule();
 227
 228	}
 229	unuse_mm(dev->mm);
 
 230	return 0;
 231}
 232
 233static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 234{
 235	kfree(vq->indirect);
 236	vq->indirect = NULL;
 237	kfree(vq->log);
 238	vq->log = NULL;
 239	kfree(vq->heads);
 240	vq->heads = NULL;
 241	kfree(vq->ubuf_info);
 242	vq->ubuf_info = NULL;
 243}
 244
 245void vhost_enable_zcopy(int vq)
 246{
 247	vhost_zcopy_mask |= 0x1 << vq;
 248}
 249
 250/* Helper to allocate iovec buffers for all vqs. */
 251static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 252{
 
 253	int i;
 254	bool zcopy;
 255
 256	for (i = 0; i < dev->nvqs; ++i) {
 257		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
 258					       UIO_MAXIOV, GFP_KERNEL);
 259		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
 260					  GFP_KERNEL);
 261		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
 262					    UIO_MAXIOV, GFP_KERNEL);
 263		zcopy = vhost_zcopy_mask & (0x1 << i);
 264		if (zcopy)
 265			dev->vqs[i].ubuf_info =
 266				kmalloc(sizeof *dev->vqs[i].ubuf_info *
 267					UIO_MAXIOV, GFP_KERNEL);
 268		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
 269			!dev->vqs[i].heads ||
 270			(zcopy && !dev->vqs[i].ubuf_info))
 271			goto err_nomem;
 272	}
 273	return 0;
 274
 275err_nomem:
 276	for (; i >= 0; --i)
 277		vhost_vq_free_iovecs(&dev->vqs[i]);
 278	return -ENOMEM;
 279}
 280
 281static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 282{
 283	int i;
 284
 285	for (i = 0; i < dev->nvqs; ++i)
 286		vhost_vq_free_iovecs(&dev->vqs[i]);
 287}
 288
 289long vhost_dev_init(struct vhost_dev *dev,
 290		    struct vhost_virtqueue *vqs, int nvqs)
 291{
 292	int i;
 293
 294	dev->vqs = vqs;
 295	dev->nvqs = nvqs;
 296	mutex_init(&dev->mutex);
 297	dev->log_ctx = NULL;
 298	dev->log_file = NULL;
 299	dev->memory = NULL;
 300	dev->mm = NULL;
 301	spin_lock_init(&dev->work_lock);
 302	INIT_LIST_HEAD(&dev->work_list);
 303	dev->worker = NULL;
 304
 305	for (i = 0; i < dev->nvqs; ++i) {
 306		dev->vqs[i].log = NULL;
 307		dev->vqs[i].indirect = NULL;
 308		dev->vqs[i].heads = NULL;
 309		dev->vqs[i].ubuf_info = NULL;
 310		dev->vqs[i].dev = dev;
 311		mutex_init(&dev->vqs[i].mutex);
 312		vhost_vq_reset(dev, dev->vqs + i);
 313		if (dev->vqs[i].handle_kick)
 314			vhost_poll_init(&dev->vqs[i].poll,
 315					dev->vqs[i].handle_kick, POLLIN, dev);
 316	}
 317
 318	return 0;
 319}
 
 320
 321/* Caller should have device mutex */
 322long vhost_dev_check_owner(struct vhost_dev *dev)
 323{
 324	/* Are you the owner? If not, I don't think you mean to do that */
 325	return dev->mm == current->mm ? 0 : -EPERM;
 326}
 
 327
 328struct vhost_attach_cgroups_struct {
 329	struct vhost_work work;
 330	struct task_struct *owner;
 331	int ret;
 332};
 333
 334static void vhost_attach_cgroups_work(struct vhost_work *work)
 335{
 336	struct vhost_attach_cgroups_struct *s;
 337
 338	s = container_of(work, struct vhost_attach_cgroups_struct, work);
 339	s->ret = cgroup_attach_task_all(s->owner, current);
 340}
 341
 342static int vhost_attach_cgroups(struct vhost_dev *dev)
 343{
 344	struct vhost_attach_cgroups_struct attach;
 345
 346	attach.owner = current;
 347	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 348	vhost_work_queue(dev, &attach.work);
 349	vhost_work_flush(dev, &attach.work);
 350	return attach.ret;
 351}
 352
 353/* Caller should have device mutex */
 354static long vhost_dev_set_owner(struct vhost_dev *dev)
 355{
 356	struct task_struct *worker;
 357	int err;
 358
 359	/* Is there an owner already? */
 360	if (dev->mm) {
 361		err = -EBUSY;
 362		goto err_mm;
 363	}
 364
 365	/* No owner, become one */
 366	dev->mm = get_task_mm(current);
 367	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 368	if (IS_ERR(worker)) {
 369		err = PTR_ERR(worker);
 370		goto err_worker;
 371	}
 372
 373	dev->worker = worker;
 374	wake_up_process(worker);	/* avoid contributing to loadavg */
 375
 376	err = vhost_attach_cgroups(dev);
 377	if (err)
 378		goto err_cgroup;
 379
 380	err = vhost_dev_alloc_iovecs(dev);
 381	if (err)
 382		goto err_cgroup;
 383
 384	return 0;
 385err_cgroup:
 386	kthread_stop(worker);
 387	dev->worker = NULL;
 388err_worker:
 389	if (dev->mm)
 390		mmput(dev->mm);
 391	dev->mm = NULL;
 392err_mm:
 393	return err;
 394}
 
 395
 396/* Caller should have device mutex */
 397long vhost_dev_reset_owner(struct vhost_dev *dev)
 398{
 399	struct vhost_memory *memory;
 
 
 400
 401	/* Restore memory to default empty mapping. */
 402	memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 403	if (!memory)
 404		return -ENOMEM;
 405
 406	vhost_dev_cleanup(dev);
 407
 408	memory->nregions = 0;
 409	RCU_INIT_POINTER(dev->memory, memory);
 410	return 0;
 411}
 
 412
 413/* In case of DMA done not in order in lower device driver for some reason.
 414 * upend_idx is used to track end of used idx, done_idx is used to track head
 415 * of used idx. Once lower device DMA done contiguously, we will signal KVM
 416 * guest used idx.
 417 */
 418int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
 419{
 420	int i;
 421	int j = 0;
 422
 423	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
 424		if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) {
 425			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
 426			vhost_add_used_and_signal(vq->dev, vq,
 427						  vq->heads[i].id, 0);
 428			++j;
 429		} else
 430			break;
 431	}
 432	if (j)
 433		vq->done_idx = i;
 434	return j;
 435}
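/* Worked example for the loop above (illustrative indices): with
 * done_idx == 3 and upend_idx == 7, entries 3..6 are outstanding zero-copy
 * buffers.  If the lower device has completed 3, 4 and 6 (their heads[].len
 * set to VHOST_DMA_DONE_LEN), the loop reports 3 and 4 to the guest, stops at
 * the still-pending entry 5 and leaves done_idx == 5; entry 6 is only
 * reported once 5 completes. */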
 436
 437/* Caller should have device mutex */
 438void vhost_dev_cleanup(struct vhost_dev *dev)
 439{
 440	int i;
 441
 442	for (i = 0; i < dev->nvqs; ++i) {
 443		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
 444			vhost_poll_stop(&dev->vqs[i].poll);
 445			vhost_poll_flush(&dev->vqs[i].poll);
 446		}
 447		/* Wait for all lower device DMAs done. */
 448		if (dev->vqs[i].ubufs)
 449			vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);
 450
 451		/* Signal guest as appropriate. */
 452		vhost_zerocopy_signal_used(&dev->vqs[i]);
 453
 454		if (dev->vqs[i].error_ctx)
 455			eventfd_ctx_put(dev->vqs[i].error_ctx);
 456		if (dev->vqs[i].error)
 457			fput(dev->vqs[i].error);
 458		if (dev->vqs[i].kick)
 459			fput(dev->vqs[i].kick);
 460		if (dev->vqs[i].call_ctx)
 461			eventfd_ctx_put(dev->vqs[i].call_ctx);
 462		if (dev->vqs[i].call)
 463			fput(dev->vqs[i].call);
 464		vhost_vq_reset(dev, dev->vqs + i);
 465	}
 466	vhost_dev_free_iovecs(dev);
 467	if (dev->log_ctx)
 468		eventfd_ctx_put(dev->log_ctx);
 469	dev->log_ctx = NULL;
 470	if (dev->log_file)
 471		fput(dev->log_file);
 472	dev->log_file = NULL;
 473	/* No one will access memory at this point */
 474	kfree(rcu_dereference_protected(dev->memory,
 475					lockdep_is_held(&dev->mutex)));
 476	RCU_INIT_POINTER(dev->memory, NULL);
 477	WARN_ON(!list_empty(&dev->work_list));
 478	if (dev->worker) {
 479		kthread_stop(dev->worker);
 480		dev->worker = NULL;
 481	}
 482	if (dev->mm)
 483		mmput(dev->mm);
 484	dev->mm = NULL;
 485}
 
 486
 487static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 488{
 489	u64 a = addr / VHOST_PAGE_SIZE / 8;
 490
 491	/* Make sure 64 bit math will not overflow. */
 492	if (a > ULONG_MAX - (unsigned long)log_base ||
 493	    a + (unsigned long)log_base > ULONG_MAX)
 494		return 0;
 495
 496	return access_ok(VERIFY_WRITE, log_base + a,
 497			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 498}
 499
 500/* Caller should have vq mutex and device mutex. */
 501static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 502			       int log_all)
 503{
 504	int i;
 505
 506	if (!mem)
 507		return 0;
 508
 509	for (i = 0; i < mem->nregions; ++i) {
 510		struct vhost_memory_region *m = mem->regions + i;
 511		unsigned long a = m->userspace_addr;
 512		if (m->memory_size > ULONG_MAX)
 513			return 0;
 514		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
 515				    m->memory_size))
 516			return 0;
 
 
 517		else if (log_all && !log_access_ok(log_base,
 518						   m->guest_phys_addr,
 519						   m->memory_size))
 520			return 0;
 521	}
 522	return 1;
 523}
 524
 525/* Can we switch to this memory table? */
 526/* Caller should have device mutex but not vq mutex */
 527static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 528			    int log_all)
 529{
 530	int i;
 531
 532	for (i = 0; i < d->nvqs; ++i) {
 533		int ok;
 534		mutex_lock(&d->vqs[i].mutex);
 535		/* If ring is inactive, will check when it's enabled. */
 536		if (d->vqs[i].private_data)
 537			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
 538						 log_all);
 539		else
 540			ok = 1;
 541		mutex_unlock(&d->vqs[i].mutex);
 542		if (!ok)
 543			return 0;
 544	}
 545	return 1;
 
 546}
 547
 548static int vq_access_ok(struct vhost_dev *d, unsigned int num,
 549			struct vring_desc __user *desc,
 550			struct vring_avail __user *avail,
 551			struct vring_used __user *used)
 552{
 553	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 554	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 555	       access_ok(VERIFY_READ, avail,
 556			 sizeof *avail + num * sizeof *avail->ring + s) &&
 557	       access_ok(VERIFY_WRITE, used,
 558			sizeof *used + num * sizeof *used->ring + s);
 559}
 
 560
 561/* Can we log writes? */
 562/* Caller should have device mutex but not vq mutex */
 563int vhost_log_access_ok(struct vhost_dev *dev)
 564{
 565	struct vhost_memory *mp;
 566
 567	mp = rcu_dereference_protected(dev->memory,
 568				       lockdep_is_held(&dev->mutex));
 569	return memory_access_ok(dev, mp, 1);
 570}
 
 571
 572/* Verify access for write logging. */
 573/* Caller should have vq mutex and device mutex */
 574static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 575			    void __user *log_base)
 576{
 577	struct vhost_memory *mp;
 578	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 579
 580	mp = rcu_dereference_protected(vq->dev->memory,
 581				       lockdep_is_held(&vq->mutex));
 582	return vq_memory_access_ok(log_base, mp,
 583			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
 584		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
 585					sizeof *vq->used +
 586					vq->num * sizeof *vq->used->ring + s));
 587}
 588
 589/* Can we start vq? */
 590/* Caller should have vq mutex and device mutex */
 591int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 592{
 593	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
 594		vq_log_access_ok(vq->dev, vq, vq->log_base);
 595}
 596
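/* Replace the device's memory table: the header and regions are copied in
 * from userspace and validated, and only then is the new table published with
 * rcu_assign_pointer().  The old table is freed after synchronize_rcu(), so
 * translate_desc() readers under rcu_read_lock() always see either the
 * complete old mapping or the complete new one. */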
 597static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 598{
 599	struct vhost_memory mem, *newmem, *oldmem;
 
 
 600	unsigned long size = offsetof(struct vhost_memory, regions);
 
 601
 602	if (copy_from_user(&mem, m, size))
 603		return -EFAULT;
 604	if (mem.padding)
 605		return -EOPNOTSUPP;
 606	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
 607		return -E2BIG;
 608	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
 
 609	if (!newmem)
 610		return -ENOMEM;
 611
 612	memcpy(newmem, &mem, size);
 613	if (copy_from_user(newmem->regions, m->regions,
 614			   mem.nregions * sizeof *m->regions)) {
 615		kfree(newmem);
 616		return -EFAULT;
 617	}
 618
 619	if (!memory_access_ok(d, newmem,
 620			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
 621		kfree(newmem);
 622		return -EFAULT;
 623	}
 624	oldmem = rcu_dereference_protected(d->memory,
 625					   lockdep_is_held(&d->mutex));
 626	rcu_assign_pointer(d->memory, newmem);
 627	synchronize_rcu();
 628	kfree(oldmem);
 
 
 629	return 0;
 630}
 631
 632static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
 633{
 634	struct file *eventfp, *filep = NULL,
 635		    *pollstart = NULL, *pollstop = NULL;
 636	struct eventfd_ctx *ctx = NULL;
 637	u32 __user *idxp = argp;
 638	struct vhost_virtqueue *vq;
 639	struct vhost_vring_state s;
 640	struct vhost_vring_file f;
 641	struct vhost_vring_addr a;
 642	u32 idx;
 643	long r;
 644
 645	r = get_user(idx, idxp);
 646	if (r < 0)
 647		return r;
 648	if (idx >= d->nvqs)
 649		return -ENOBUFS;
 650
 651	vq = d->vqs + idx;
 652
 653	mutex_lock(&vq->mutex);
 654
 655	switch (ioctl) {
 656	case VHOST_SET_VRING_NUM:
 657		/* Resizing ring with an active backend?
 658		 * You don't want to do that. */
 659		if (vq->private_data) {
 660			r = -EBUSY;
 661			break;
 662		}
 663		if (copy_from_user(&s, argp, sizeof s)) {
 664			r = -EFAULT;
 665			break;
 666		}
 667		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
 668			r = -EINVAL;
 669			break;
 670		}
 671		vq->num = s.num;
 672		break;
 673	case VHOST_SET_VRING_BASE:
 674		/* Moving base with an active backend?
 675		 * You don't want to do that. */
 676		if (vq->private_data) {
 677			r = -EBUSY;
 678			break;
 679		}
 680		if (copy_from_user(&s, argp, sizeof s)) {
 681			r = -EFAULT;
 682			break;
 683		}
 684		if (s.num > 0xffff) {
 685			r = -EINVAL;
 686			break;
 687		}
 688		vq->last_avail_idx = s.num;
 689		/* Forget the cached index value. */
 690		vq->avail_idx = vq->last_avail_idx;
 691		break;
 692	case VHOST_GET_VRING_BASE:
 693		s.index = idx;
 694		s.num = vq->last_avail_idx;
 695		if (copy_to_user(argp, &s, sizeof s))
 696			r = -EFAULT;
 697		break;
 698	case VHOST_SET_VRING_ADDR:
 699		if (copy_from_user(&a, argp, sizeof a)) {
 700			r = -EFAULT;
 701			break;
 702		}
 703		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
 704			r = -EOPNOTSUPP;
 705			break;
 706		}
 707		/* For 32bit, verify that the top 32bits of the user
 708		   data are set to zero. */
 709		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
 710		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
 711		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
 712			r = -EFAULT;
 713			break;
 714		}
 715		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
 716		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
 717		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
 718			r = -EINVAL;
 719			break;
 720		}
 721
 722		/* We only verify access here if backend is configured.
 723		 * If it is not, we don't as size might not have been setup.
 724		 * We will verify when backend is configured. */
 725		if (vq->private_data) {
 726			if (!vq_access_ok(d, vq->num,
 727				(void __user *)(unsigned long)a.desc_user_addr,
 728				(void __user *)(unsigned long)a.avail_user_addr,
 729				(void __user *)(unsigned long)a.used_user_addr)) {
 730				r = -EINVAL;
 731				break;
 732			}
 733
 734			/* Also validate log access for used ring if enabled. */
 735			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
 736			    !log_access_ok(vq->log_base, a.log_guest_addr,
 737					   sizeof *vq->used +
 738					   vq->num * sizeof *vq->used->ring)) {
 739				r = -EINVAL;
 740				break;
 741			}
 742		}
 743
 744		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
 745		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
 746		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
 747		vq->log_addr = a.log_guest_addr;
 748		vq->used = (void __user *)(unsigned long)a.used_user_addr;
 749		break;
 750	case VHOST_SET_VRING_KICK:
 751		if (copy_from_user(&f, argp, sizeof f)) {
 752			r = -EFAULT;
 753			break;
 754		}
 755		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 756		if (IS_ERR(eventfp)) {
 757			r = PTR_ERR(eventfp);
 758			break;
 759		}
 760		if (eventfp != vq->kick) {
 761			pollstop = filep = vq->kick;
 762			pollstart = vq->kick = eventfp;
 763		} else
 764			filep = eventfp;
 765		break;
 766	case VHOST_SET_VRING_CALL:
 767		if (copy_from_user(&f, argp, sizeof f)) {
 768			r = -EFAULT;
 769			break;
 770		}
 771		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 772		if (IS_ERR(eventfp)) {
 773			r = PTR_ERR(eventfp);
 774			break;
 775		}
 776		if (eventfp != vq->call) {
 777			filep = vq->call;
 778			ctx = vq->call_ctx;
 779			vq->call = eventfp;
 780			vq->call_ctx = eventfp ?
 781				eventfd_ctx_fileget(eventfp) : NULL;
 782		} else
 783			filep = eventfp;
 784		break;
 785	case VHOST_SET_VRING_ERR:
 786		if (copy_from_user(&f, argp, sizeof f)) {
 787			r = -EFAULT;
 788			break;
 789		}
 790		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 791		if (IS_ERR(eventfp)) {
 792			r = PTR_ERR(eventfp);
 793			break;
 794		}
 795		if (eventfp != vq->error) {
 796			filep = vq->error;
 797			vq->error = eventfp;
 798			ctx = vq->error_ctx;
 799			vq->error_ctx = eventfp ?
 800				eventfd_ctx_fileget(eventfp) : NULL;
 801		} else
 802			filep = eventfp;
 803		break;
 804	default:
 805		r = -ENOIOCTLCMD;
 806	}
 807
 808	if (pollstop && vq->handle_kick)
 809		vhost_poll_stop(&vq->poll);
 810
 811	if (ctx)
 812		eventfd_ctx_put(ctx);
 813	if (filep)
 814		fput(filep);
 815
 816	if (pollstart && vq->handle_kick)
 817		vhost_poll_start(&vq->poll, vq->kick);
 818
 819	mutex_unlock(&vq->mutex);
 820
 821	if (pollstop && vq->handle_kick)
 822		vhost_poll_flush(&vq->poll);
 823	return r;
 824}
 825
 826/* Caller must have device mutex */
 827long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
 828{
 829	void __user *argp = (void __user *)arg;
 830	struct file *eventfp, *filep = NULL;
 831	struct eventfd_ctx *ctx = NULL;
 832	u64 p;
 833	long r;
 834	int i, fd;
 835
 836	/* If you are not the owner, you can become one */
 837	if (ioctl == VHOST_SET_OWNER) {
 838		r = vhost_dev_set_owner(d);
 839		goto done;
 840	}
 841
 842	/* You must be the owner to do anything else */
 843	r = vhost_dev_check_owner(d);
 844	if (r)
 845		goto done;
 846
 847	switch (ioctl) {
 848	case VHOST_SET_MEM_TABLE:
 849		r = vhost_set_memory(d, argp);
 850		break;
 851	case VHOST_SET_LOG_BASE:
 852		if (copy_from_user(&p, argp, sizeof p)) {
 853			r = -EFAULT;
 854			break;
 855		}
 856		if ((u64)(unsigned long)p != p) {
 857			r = -EFAULT;
 858			break;
 859		}
 860		for (i = 0; i < d->nvqs; ++i) {
 861			struct vhost_virtqueue *vq;
 862			void __user *base = (void __user *)(unsigned long)p;
 863			vq = d->vqs + i;
 864			mutex_lock(&vq->mutex);
 865			/* If ring is inactive, will check when it's enabled. */
 866			if (vq->private_data && !vq_log_access_ok(d, vq, base))
 867				r = -EFAULT;
 868			else
 869				vq->log_base = base;
 870			mutex_unlock(&vq->mutex);
 871		}
 872		break;
 873	case VHOST_SET_LOG_FD:
 874		r = get_user(fd, (int __user *)argp);
 875		if (r < 0)
 876			break;
 877		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
 878		if (IS_ERR(eventfp)) {
 879			r = PTR_ERR(eventfp);
 880			break;
 881		}
 882		if (eventfp != d->log_file) {
 883			filep = d->log_file;
 884			ctx = d->log_ctx;
 885			d->log_ctx = eventfp ?
 886				eventfd_ctx_fileget(eventfp) : NULL;
 887		} else
 888			filep = eventfp;
 889		for (i = 0; i < d->nvqs; ++i) {
 890			mutex_lock(&d->vqs[i].mutex);
 891			d->vqs[i].log_ctx = d->log_ctx;
 892			mutex_unlock(&d->vqs[i].mutex);
 893		}
 894		if (ctx)
 895			eventfd_ctx_put(ctx);
 896		if (filep)
 897			fput(filep);
 898		break;
 899	default:
 900		r = vhost_set_vring(d, ioctl, argp);
 901		break;
 902	}
 903done:
 904	return r;
 905}
 906
 907static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 908						     __u64 addr, __u32 len)
 909{
 910	struct vhost_memory_region *reg;
 911	int i;
 912
 913	/* linear search is not brilliant, but we really have on the order of 6
 914	 * regions in practice */
 915	for (i = 0; i < mem->nregions; ++i) {
 916		reg = mem->regions + i;
 917		if (reg->guest_phys_addr <= addr &&
 918		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
 919			return reg;
 920	}
 921	return NULL;
 922}
 923
 924/* TODO: This is really inefficient.  We need something like get_user()
 925 * (instruction directly accesses the data, with an exception table entry
 926 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 927 */
 928static int set_bit_to_user(int nr, void __user *addr)
 929{
 930	unsigned long log = (unsigned long)addr;
 931	struct page *page;
 932	void *base;
 933	int bit = nr + (log % PAGE_SIZE) * 8;
 934	int r;
 935
 936	r = get_user_pages_fast(log, 1, 1, &page);
 937	if (r < 0)
 938		return r;
 939	BUG_ON(r != 1);
 940	base = kmap_atomic(page, KM_USER0);
 941	set_bit(bit, base);
 942	kunmap_atomic(base, KM_USER0);
 943	set_page_dirty_lock(page);
 944	put_page(page);
 945	return 0;
 946}
 947
 948static int log_write(void __user *log_base,
 949		     u64 write_address, u64 write_length)
 950{
 951	u64 write_page = write_address / VHOST_PAGE_SIZE;
 952	int r;
 953
 954	if (!write_length)
 955		return 0;
 956	write_length += write_address % VHOST_PAGE_SIZE;
 957	for (;;) {
 958		u64 base = (u64)(unsigned long)log_base;
 959		u64 log = base + write_page / 8;
 960		int bit = write_page % 8;
 961		if ((u64)(unsigned long)log != log)
 962			return -EFAULT;
 963		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
 964		if (r < 0)
 965			return r;
 966		if (write_length <= VHOST_PAGE_SIZE)
 967			break;
 968		write_length -= VHOST_PAGE_SIZE;
 969		write_page += 1;
 970	}
 971	return r;
 972}
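/* Worked example for log_write() (illustrative numbers, assuming
 * VHOST_PAGE_SIZE == 4096): logging a 5000 byte write at guest address 0x3000
 * starts at dirty-log page 3, so bit 3 of log byte 0 is set; the write spills
 * into page 4, so bit 4 is set as well before the loop stops. */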
 973
 974int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 975		    unsigned int log_num, u64 len)
 976{
 977	int i, r;
 978
 979	/* Make sure data written is seen before log. */
 980	smp_wmb();
 981	for (i = 0; i < log_num; ++i) {
 982		u64 l = min(log[i].len, len);
 983		r = log_write(vq->log_base, log[i].addr, l);
 984		if (r < 0)
 985			return r;
 986		len -= l;
 987		if (!len) {
 988			if (vq->log_ctx)
 989				eventfd_signal(vq->log_ctx, 1);
 990			return 0;
 991		}
 992	}
 993	/* Length written exceeds what we have stored. This is a bug. */
 994	BUG();
 995	return 0;
 996}
 
 997
 998static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 999{
1000	void __user *used;
1001	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
1002		return -EFAULT;
1003	if (unlikely(vq->log_used)) {
1004		/* Make sure the flag is seen before log. */
1005		smp_wmb();
1006		/* Log used flag write. */
1007		used = &vq->used->flags;
1008		log_write(vq->log_base, vq->log_addr +
1009			  (used - (void __user *)vq->used),
1010			  sizeof vq->used->flags);
1011		if (vq->log_ctx)
1012			eventfd_signal(vq->log_ctx, 1);
1013	}
1014	return 0;
1015}
1016
1017static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1018{
1019	if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
1020		return -EFAULT;
1021	if (unlikely(vq->log_used)) {
1022		void __user *used;
1023		/* Make sure the event is seen before log. */
1024		smp_wmb();
1025		/* Log avail event write */
1026		used = vhost_avail_event(vq);
1027		log_write(vq->log_base, vq->log_addr +
1028			  (used - (void __user *)vq->used),
1029			  sizeof *vhost_avail_event(vq));
1030		if (vq->log_ctx)
1031			eventfd_signal(vq->log_ctx, 1);
1032	}
1033	return 0;
1034}
1035
1036int vhost_init_used(struct vhost_virtqueue *vq)
1037{
 
1038	int r;
 
 
1039	if (!vq->private_data)
1040		return 0;
1041
 
 
1042	r = vhost_update_used_flags(vq);
1043	if (r)
1044		return r;
1045	vq->signalled_used_valid = false;
1046	return get_user(vq->last_used_idx, &vq->used->idx);
1047}
 
1048
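/* Translate a guest-physical range into host-userspace iovecs using the
 * memory table installed by VHOST_SET_MEM_TABLE.  A range that straddles
 * several regions is split across several iovec entries; the return value is
 * the number of entries filled in, or a negative errno (-EFAULT for an
 * unmapped address, -ENOBUFS when iov_size entries are not enough). */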
1049static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
1050			  struct iovec iov[], int iov_size)
1051{
1052	const struct vhost_memory_region *reg;
1053	struct vhost_memory *mem;
 
1054	struct iovec *_iov;
1055	u64 s = 0;
1056	int ret = 0;
1057
1058	rcu_read_lock();
1059
1060	mem = rcu_dereference(dev->memory);
1061	while ((u64)len > s) {
1062		u64 size;
1063		if (unlikely(ret >= iov_size)) {
1064			ret = -ENOBUFS;
1065			break;
1066		}
1067		reg = find_region(mem, addr, len);
1068		if (unlikely(!reg)) {
1069			ret = -EFAULT;
1070			break;
1071		}
 
1072		_iov = iov + ret;
1073		size = reg->memory_size - addr + reg->guest_phys_addr;
1074		_iov->iov_len = min((u64)len, size);
1075		_iov->iov_base = (void __user *)(unsigned long)
1076			(reg->userspace_addr + addr - reg->guest_phys_addr);
1077		s += size;
1078		addr += size;
1079		++ret;
1080	}
1081
1082	rcu_read_unlock();
 
1083	return ret;
1084}
1085
1086/* Each buffer in the virtqueues is actually a chain of descriptors.  This
1087 * function returns the next descriptor in the chain,
1088 * or -1U if we're at the end. */
1089static unsigned next_desc(struct vring_desc *desc)
1090{
1091	unsigned int next;
1092
1093	/* If this descriptor says it doesn't chain, we're done. */
1094	if (!(desc->flags & VRING_DESC_F_NEXT))
1095		return -1U;
1096
1097	/* Check they're not leading us off end of descriptors. */
1098	next = desc->next;
1099	/* Make sure compiler knows to grab that: we don't want it changing! */
1100	/* We will use the result as an index in an array, so most
1101	 * architectures only need a compiler barrier here. */
1102	read_barrier_depends();
1103
1104	return next;
1105}
1106
1107static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1108			struct iovec iov[], unsigned int iov_size,
1109			unsigned int *out_num, unsigned int *in_num,
1110			struct vhost_log *log, unsigned int *log_num,
1111			struct vring_desc *indirect)
1112{
1113	struct vring_desc desc;
1114	unsigned int i = 0, count, found = 0;
1115	int ret;
 
 
1116
1117	/* Sanity check */
1118	if (unlikely(indirect->len % sizeof desc)) {
1119		vq_err(vq, "Invalid length in indirect descriptor: "
1120		       "len 0x%llx not multiple of 0x%zx\n",
1121		       (unsigned long long)indirect->len,
1122		       sizeof desc);
1123		return -EINVAL;
1124	}
1125
1126	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
1127			     UIO_MAXIOV);
1128	if (unlikely(ret < 0)) {
1129		vq_err(vq, "Translation failure %d in indirect.\n", ret);
 
1130		return ret;
1131	}
 
1132
1133	/* We will use the result as an address to read from, so most
1134	 * architectures only need a compiler barrier here. */
1135	read_barrier_depends();
1136
1137	count = indirect->len / sizeof desc;
1138	/* Buffers are chained via a 16 bit next field, so
1139	 * we can have at most 2^16 of these. */
1140	if (unlikely(count > USHRT_MAX + 1)) {
1141		vq_err(vq, "Indirect buffer length too big: %d\n",
1142		       indirect->len);
1143		return -E2BIG;
1144	}
1145
1146	do {
1147		unsigned iov_count = *in_num + *out_num;
1148		if (unlikely(++found > count)) {
1149			vq_err(vq, "Loop detected: last one at %u "
1150			       "indirect size %u\n",
1151			       i, count);
1152			return -EINVAL;
1153		}
1154		if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1155					      vq->indirect, sizeof desc))) {
1156			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1157			       i, (size_t)indirect->addr + i * sizeof desc);
1158			return -EINVAL;
1159		}
1160		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
1161			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1162			       i, (size_t)indirect->addr + i * sizeof desc);
1163			return -EINVAL;
1164		}
1165
1166		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1167				     iov_size - iov_count);
1168		if (unlikely(ret < 0)) {
1169			vq_err(vq, "Translation failure %d indirect idx %d\n",
1170			       ret, i);
 
1171			return ret;
1172		}
1173		/* If this is an input descriptor, increment that count. */
1174		if (desc.flags & VRING_DESC_F_WRITE) {
1175			*in_num += ret;
1176			if (unlikely(log)) {
1177				log[*log_num].addr = desc.addr;
1178				log[*log_num].len = desc.len;
1179				++*log_num;
1180			}
1181		} else {
1182			/* If it's an output descriptor, they're all supposed
1183			 * to come before any input descriptors. */
1184			if (unlikely(*in_num)) {
1185				vq_err(vq, "Indirect descriptor "
1186				       "has out after in: idx %d\n", i);
1187				return -EINVAL;
1188			}
1189			*out_num += ret;
1190		}
1191	} while ((i = next_desc(&desc)) != -1);
1192	return 0;
1193}
1194
1195/* This looks in the virtqueue and for the first available buffer, and converts
1196 * it to an iovec for convenient access.  Since descriptors consist of some
1197 * number of output then some number of input descriptors, it's actually two
1198 * iovecs, but we pack them into one and note how many of each there were.
1199 *
1200 * This function returns the descriptor number found, or vq->num (which is
1201 * never a valid descriptor number) if none was found.  A negative code is
1202 * returned on error. */
1203int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1204		      struct iovec iov[], unsigned int iov_size,
1205		      unsigned int *out_num, unsigned int *in_num,
1206		      struct vhost_log *log, unsigned int *log_num)
1207{
1208	struct vring_desc desc;
1209	unsigned int i, head, found = 0;
1210	u16 last_avail_idx;
1211	int ret;
 
 
1212
1213	/* Check it isn't doing very strange things with descriptor numbers. */
1214	last_avail_idx = vq->last_avail_idx;
1215	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
1216		vq_err(vq, "Failed to access avail idx at %p\n",
1217		       &vq->avail->idx);
1218		return -EFAULT;
1219	}
1220
1221	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1222		vq_err(vq, "Guest moved used index from %u to %u",
1223		       last_avail_idx, vq->avail_idx);
1224		return -EFAULT;
1225	}
 
 
1226
1227	/* If there's nothing new since last we looked, return invalid. */
1228	if (vq->avail_idx == last_avail_idx)
1229		return vq->num;
 
 
1230
1231	/* Only get avail ring entries after they have been exposed by guest. */
1232	smp_rmb();
1233
1234	/* Grab the next descriptor number they're advertising, and increment
1235	 * the index we've seen. */
1236	if (unlikely(__get_user(head,
1237				&vq->avail->ring[last_avail_idx % vq->num]))) {
1238		vq_err(vq, "Failed to read head: idx %d address %p\n",
1239		       last_avail_idx,
1240		       &vq->avail->ring[last_avail_idx % vq->num]);
1241		return -EFAULT;
1242	}
1243
 
 
1244	/* If their number is silly, that's an error. */
1245	if (unlikely(head >= vq->num)) {
1246		vq_err(vq, "Guest says index %u > %u is available",
1247		       head, vq->num);
1248		return -EINVAL;
1249	}
1250
1251	/* When we start there are none of either input nor output. */
1252	*out_num = *in_num = 0;
1253	if (unlikely(log))
1254		*log_num = 0;
1255
1256	i = head;
1257	do {
1258		unsigned iov_count = *in_num + *out_num;
1259		if (unlikely(i >= vq->num)) {
1260			vq_err(vq, "Desc index is %u > %u, head = %u",
1261			       i, vq->num, head);
1262			return -EINVAL;
1263		}
1264		if (unlikely(++found > vq->num)) {
1265			vq_err(vq, "Loop detected: last one at %u "
1266			       "vq size %u head %u\n",
1267			       i, vq->num, head);
1268			return -EINVAL;
1269		}
1270		ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
1271		if (unlikely(ret)) {
1272			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1273			       i, vq->desc + i);
1274			return -EFAULT;
1275		}
1276		if (desc.flags & VRING_DESC_F_INDIRECT) {
1277			ret = get_indirect(dev, vq, iov, iov_size,
1278					   out_num, in_num,
1279					   log, log_num, &desc);
1280			if (unlikely(ret < 0)) {
1281				vq_err(vq, "Failure detected "
1282				       "in indirect descriptor at idx %d\n", i);
 
1283				return ret;
1284			}
1285			continue;
1286		}
1287
1288		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1289				     iov_size - iov_count);
1290		if (unlikely(ret < 0)) {
1291			vq_err(vq, "Translation failure %d descriptor idx %d\n",
1292			       ret, i);
 
1293			return ret;
1294		}
1295		if (desc.flags & VRING_DESC_F_WRITE) {
1296			/* If this is an input descriptor,
1297			 * increment that count. */
1298			*in_num += ret;
1299			if (unlikely(log)) {
1300				log[*log_num].addr = desc.addr;
1301				log[*log_num].len = desc.len;
1302				++*log_num;
1303			}
1304		} else {
1305			/* If it's an output descriptor, they're all supposed
1306			 * to come before any input descriptors. */
1307			if (unlikely(*in_num)) {
1308				vq_err(vq, "Descriptor has out after in: "
1309				       "idx %d\n", i);
1310				return -EINVAL;
1311			}
1312			*out_num += ret;
1313		}
1314	} while ((i = next_desc(&desc)) != -1);
1315
1316	/* On success, increment avail index. */
1317	vq->last_avail_idx++;
1318
1319	/* Assume notifications from guest are disabled at this point,
1320	 * if they aren't we would need to update avail_event index. */
1321	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
1322	return head;
1323}
 
1324
1325/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
1326void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
1327{
1328	vq->last_avail_idx -= n;
1329}
 
1330
1331/* After we've used one of their buffers, we tell them about it.  We'll then
1332 * want to notify the guest, using eventfd. */
1333int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1334{
1335	struct vring_used_elem __user *used;
1336
1337	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
1338	 * next entry in that used ring. */
1339	used = &vq->used->ring[vq->last_used_idx % vq->num];
1340	if (__put_user(head, &used->id)) {
1341		vq_err(vq, "Failed to write used id");
1342		return -EFAULT;
1343	}
1344	if (__put_user(len, &used->len)) {
1345		vq_err(vq, "Failed to write used len");
1346		return -EFAULT;
1347	}
1348	/* Make sure buffer is written before we update index. */
1349	smp_wmb();
1350	if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
1351		vq_err(vq, "Failed to increment used idx");
1352		return -EFAULT;
1353	}
1354	if (unlikely(vq->log_used)) {
1355		/* Make sure data is seen before log. */
1356		smp_wmb();
1357		/* Log used ring entry write. */
1358		log_write(vq->log_base,
1359			  vq->log_addr +
1360			   ((void __user *)used - (void __user *)vq->used),
1361			  sizeof *used);
1362		/* Log used index update. */
1363		log_write(vq->log_base,
1364			  vq->log_addr + offsetof(struct vring_used, idx),
1365			  sizeof vq->used->idx);
1366		if (vq->log_ctx)
1367			eventfd_signal(vq->log_ctx, 1);
1368	}
1369	vq->last_used_idx++;
1370	/* If the driver never bothers to signal in a very long while,
1371	 * used index might wrap around. If that happens, invalidate
1372	 * signalled_used index we stored. TODO: make sure driver
1373	 * signals at least once in 2^16 and remove this. */
1374	if (unlikely(vq->last_used_idx == vq->signalled_used))
1375		vq->signalled_used_valid = false;
1376	return 0;
1377}
 
1378
1379static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1380			    struct vring_used_elem *heads,
1381			    unsigned count)
1382{
1383	struct vring_used_elem __user *used;
1384	u16 old, new;
1385	int start;
1386
1387	start = vq->last_used_idx % vq->num;
1388	used = vq->used->ring + start;
1389	if (__copy_to_user(used, heads, count * sizeof *used)) {
1390		vq_err(vq, "Failed to write used");
1391		return -EFAULT;
1392	}
1393	if (unlikely(vq->log_used)) {
1394		/* Make sure data is seen before log. */
1395		smp_wmb();
1396		/* Log used ring entry write. */
1397		log_write(vq->log_base,
1398			  vq->log_addr +
1399			   ((void __user *)used - (void __user *)vq->used),
1400			  count * sizeof *used);
1401	}
1402	old = vq->last_used_idx;
1403	new = (vq->last_used_idx += count);
1404	/* If the driver never bothers to signal in a very long while,
1405	 * used index might wrap around. If that happens, invalidate
1406	 * signalled_used index we stored. TODO: make sure driver
1407	 * signals at least once in 2^16 and remove this. */
1408	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1409		vq->signalled_used_valid = false;
1410	return 0;
1411}
1412
1413/* After we've used one of their buffers, we tell them about it.  We'll then
1414 * want to notify the guest, using eventfd. */
1415int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1416		     unsigned count)
1417{
1418	int start, n, r;
1419
1420	start = vq->last_used_idx % vq->num;
1421	n = vq->num - start;
1422	if (n < count) {
1423		r = __vhost_add_used_n(vq, heads, n);
1424		if (r < 0)
1425			return r;
1426		heads += n;
1427		count -= n;
1428	}
1429	r = __vhost_add_used_n(vq, heads, count);
1430
1431	/* Make sure buffer is written before we update index. */
1432	smp_wmb();
1433	if (put_user(vq->last_used_idx, &vq->used->idx)) {
1434		vq_err(vq, "Failed to increment used idx");
1435		return -EFAULT;
1436	}
1437	if (unlikely(vq->log_used)) {
 
 
1438		/* Log used index update. */
1439		log_write(vq->log_base,
1440			  vq->log_addr + offsetof(struct vring_used, idx),
1441			  sizeof vq->used->idx);
1442		if (vq->log_ctx)
1443			eventfd_signal(vq->log_ctx, 1);
1444	}
1445	return r;
1446}
 
1447
1448static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1449{
1450	__u16 old, new, event;
 
1451	bool v;
1452	/* Flush out used index updates. This is paired
1453	 * with the barrier that the Guest executes when enabling
1454	 * interrupts. */
1455	smp_mb();
1456
1457	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1458	    unlikely(vq->avail_idx == vq->last_avail_idx))
1459		return true;
1460
1461	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1462		__u16 flags;
1463		if (__get_user(flags, &vq->avail->flags)) {
1464			vq_err(vq, "Failed to get flags");
1465			return true;
1466		}
1467		return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
1468	}
1469	old = vq->signalled_used;
1470	v = vq->signalled_used_valid;
1471	new = vq->signalled_used = vq->last_used_idx;
1472	vq->signalled_used_valid = true;
1473
1474	if (unlikely(!v))
1475		return true;
1476
1477	if (get_user(event, vhost_used_event(vq))) {
1478		vq_err(vq, "Failed to get used event idx");
1479		return true;
1480	}
1481	return vring_need_event(event, new, old);
1482}
1483
1484/* This actually signals the guest, using eventfd. */
1485void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1486{
1487	/* Signal the Guest tell them we used something up. */
1488	if (vq->call_ctx && vhost_notify(dev, vq))
1489		eventfd_signal(vq->call_ctx, 1);
1490}
 
1491
1492/* And here's the combo meal deal.  Supersize me! */
1493void vhost_add_used_and_signal(struct vhost_dev *dev,
1494			       struct vhost_virtqueue *vq,
1495			       unsigned int head, int len)
1496{
1497	vhost_add_used(vq, head, len);
1498	vhost_signal(dev, vq);
1499}
 
1500
1501/* multi-buffer version of vhost_add_used_and_signal */
1502void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1503				 struct vhost_virtqueue *vq,
1504				 struct vring_used_elem *heads, unsigned count)
1505{
1506	vhost_add_used_n(vq, heads, count);
1507	vhost_signal(dev, vq);
1508}
1509
1510/* OK, now we need to know about added descriptors. */
1511bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1512{
1513	u16 avail_idx;
1514	int r;
1515
1516	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1517		return false;
1518	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1519	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1520		r = vhost_update_used_flags(vq);
1521		if (r) {
1522			vq_err(vq, "Failed to enable notification at %p: %d\n",
1523			       &vq->used->flags, r);
1524			return false;
1525		}
1526	} else {
1527		r = vhost_update_avail_event(vq, vq->avail_idx);
1528		if (r) {
1529			vq_err(vq, "Failed to update avail event index at %p: %d\n",
1530			       vhost_avail_event(vq), r);
1531			return false;
1532		}
1533	}
1534	/* They could have slipped one in as we were doing that: make
1535	 * sure it's written, then check again. */
1536	smp_mb();
1537	r = __get_user(avail_idx, &vq->avail->idx);
1538	if (r) {
1539		vq_err(vq, "Failed to check avail idx at %p: %d\n",
1540		       &vq->avail->idx, r);
1541		return false;
1542	}
1543
1544	return avail_idx != vq->avail_idx;
1545}
 
1546
1547/* We don't need to be notified again. */
1548void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1549{
1550	int r;
1551
1552	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1553		return;
1554	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1555	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1556		r = vhost_update_used_flags(vq);
1557		if (r)
1558			vq_err(vq, "Failed to enable notification at %p: %d\n",
1559			       &vq->used->flags, r);
1560	}
1561}
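
/* Illustrative service loop (a simplified sketch, not part of the original
 * file; service_vq_sample() is a placeholder modelled on how a backend such
 * as drivers/vhost/net.c drives a ring with the helpers above): */

static void service_vq_sample(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned out, in;
	int head;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;
		if (head == vq->num) {
			/* Ring empty: re-enable guest kicks, then re-check. */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		/* ... consume the out/in iovecs in vq->iov here ... */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}

	mutex_unlock(&vq->mutex);
}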
 
1562
1563static void vhost_zerocopy_done_signal(struct kref *kref)
 
1564{
1565	struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
1566						    kref);
1567	wake_up(&ubufs->wait);
1568}
 
1569
1570struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
1571					bool zcopy)
1572{
1573	struct vhost_ubuf_ref *ubufs;
1574	/* No zero copy backend? Nothing to count. */
1575	if (!zcopy)
1576		return NULL;
1577	ubufs = kmalloc(sizeof *ubufs, GFP_KERNEL);
1578	if (!ubufs)
1579		return ERR_PTR(-ENOMEM);
1580	kref_init(&ubufs->kref);
1581	init_waitqueue_head(&ubufs->wait);
1582	ubufs->vq = vq;
1583	return ubufs;
1584}
 
1585
1586void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
 
1587{
1588	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1589}
 
1590
1591void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
 
1592{
1593	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1594	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
1595	kfree(ubufs);
1596}
1597
1598void vhost_zerocopy_callback(void *arg)
1599{
1600	struct ubuf_info *ubuf = arg;
1601	struct vhost_ubuf_ref *ubufs = ubuf->arg;
1602	struct vhost_virtqueue *vq = ubufs->vq;
1603
1604	/* set len = 1 to mark this desc buffers done DMA */
1605	vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
1606	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1607}
drivers/vhost/vhost.c (v5.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (C) 2009 Red Hat, Inc.
   3 * Copyright (C) 2006 Rusty Russell IBM Corporation
   4 *
   5 * Author: Michael S. Tsirkin <mst@redhat.com>
   6 *
   7 * Inspiration, some code, and most witty comments come from
   8 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   9 *
 
 
  10 * Generic code for virtio server in host kernel.
  11 */
  12
  13#include <linux/eventfd.h>
  14#include <linux/vhost.h>
  15#include <linux/uio.h>
  16#include <linux/mm.h>
  17#include <linux/mmu_context.h>
  18#include <linux/miscdevice.h>
  19#include <linux/mutex.h>
 
  20#include <linux/poll.h>
  21#include <linux/file.h>
  22#include <linux/highmem.h>
  23#include <linux/slab.h>
  24#include <linux/vmalloc.h>
  25#include <linux/kthread.h>
  26#include <linux/cgroup.h>
  27#include <linux/module.h>
  28#include <linux/sort.h>
  29#include <linux/sched/mm.h>
  30#include <linux/sched/signal.h>
  31#include <linux/interval_tree_generic.h>
  32#include <linux/nospec.h>
  33
  34#include "vhost.h"
  35
  36static ushort max_mem_regions = 64;
  37module_param(max_mem_regions, ushort, 0444);
  38MODULE_PARM_DESC(max_mem_regions,
  39	"Maximum number of memory regions in memory map. (default: 64)");
  40static int max_iotlb_entries = 2048;
  41module_param(max_iotlb_entries, int, 0444);
  42MODULE_PARM_DESC(max_iotlb_entries,
  43	"Maximum number of iotlb entries. (default: 2048)");
  44
  45enum {
 
  46	VHOST_MEMORY_F_LOG = 0x1,
  47};
  48
  49#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
  50#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
  51
  52INTERVAL_TREE_DEFINE(struct vhost_umem_node,
  53		     rb, __u64, __subtree_last,
  54		     START, LAST, static inline, vhost_umem_interval_tree);
  55
  56#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
  57static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
  58{
  59	vq->user_be = !virtio_legacy_is_little_endian();
  60}
  61
  62static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
  63{
  64	vq->user_be = true;
  65}
  66
  67static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
  68{
  69	vq->user_be = false;
  70}
  71
  72static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
  73{
  74	struct vhost_vring_state s;
  75
  76	if (vq->private_data)
  77		return -EBUSY;
  78
  79	if (copy_from_user(&s, argp, sizeof(s)))
  80		return -EFAULT;
  81
  82	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
  83	    s.num != VHOST_VRING_BIG_ENDIAN)
  84		return -EINVAL;
  85
  86	if (s.num == VHOST_VRING_BIG_ENDIAN)
  87		vhost_enable_cross_endian_big(vq);
  88	else
  89		vhost_enable_cross_endian_little(vq);
  90
  91	return 0;
  92}
  93
  94static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
  95				   int __user *argp)
  96{
  97	struct vhost_vring_state s = {
  98		.index = idx,
  99		.num = vq->user_be
 100	};
 101
 102	if (copy_to_user(argp, &s, sizeof(s)))
 103		return -EFAULT;
 104
 105	return 0;
 106}
 107
 108static void vhost_init_is_le(struct vhost_virtqueue *vq)
 109{
 110	/* Note for legacy virtio: user_be is initialized at reset time
 111	 * according to the host endianness. If userspace does not set an
 112	 * explicit endianness, the default behavior is native endian, as
 113	 * expected by legacy virtio.
 114	 */
 115	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
 116}
 117#else
 118static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
 119{
 120}
 121
 122static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
 123{
 124	return -ENOIOCTLCMD;
 125}
 126
 127static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 128				   int __user *argp)
 129{
 130	return -ENOIOCTLCMD;
 131}
 132
 133static void vhost_init_is_le(struct vhost_virtqueue *vq)
 134{
 135	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
 136		|| virtio_legacy_is_little_endian();
 137}
 138#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
 139
 140static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 141{
 142	vhost_init_is_le(vq);
 143}
 144
 145struct vhost_flush_struct {
 146	struct vhost_work work;
 147	struct completion wait_event;
 148};
 149
 150static void vhost_flush_work(struct vhost_work *work)
 151{
 152	struct vhost_flush_struct *s;
 153
 154	s = container_of(work, struct vhost_flush_struct, work);
 155	complete(&s->wait_event);
 156}
 157
 158static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 159			    poll_table *pt)
 160{
 161	struct vhost_poll *poll;
 162
 163	poll = container_of(pt, struct vhost_poll, table);
 164	poll->wqh = wqh;
 165	add_wait_queue(wqh, &poll->wait);
 166}
 167
 168static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
 169			     void *key)
 170{
 171	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
 172
 173	if (!(key_to_poll(key) & poll->mask))
 174		return 0;
 175
 176	vhost_poll_queue(poll);
 177	return 0;
 178}
 179
 180void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 181{
 182	clear_bit(VHOST_WORK_QUEUED, &work->flags);
 183	work->fn = fn;
 184}
 185EXPORT_SYMBOL_GPL(vhost_work_init);
 186
 187/* Init poll structure */
 188void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 189		     __poll_t mask, struct vhost_dev *dev)
 190{
 191	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 192	init_poll_funcptr(&poll->table, vhost_poll_func);
 193	poll->mask = mask;
 194	poll->dev = dev;
 195	poll->wqh = NULL;
 196
 197	vhost_work_init(&poll->work, fn);
 198}
 199EXPORT_SYMBOL_GPL(vhost_poll_init);
 200
 201/* Start polling a file. We add ourselves to file's wait queue. The caller must
 202 * keep a reference to a file until after vhost_poll_stop is called. */
 203int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 204{
 205	__poll_t mask;
 206
 207	if (poll->wqh)
 208		return 0;
 209
 210	mask = vfs_poll(file, &poll->table);
 211	if (mask)
 212		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
 213	if (mask & EPOLLERR) {
 214		vhost_poll_stop(poll);
 215		return -EINVAL;
 216	}
 217
 218	return 0;
 219}
 220EXPORT_SYMBOL_GPL(vhost_poll_start);
 221
 222/* Stop polling a file. After this function returns, it becomes safe to drop the
 223 * file reference. You must also flush afterwards. */
 224void vhost_poll_stop(struct vhost_poll *poll)
 225{
 226	if (poll->wqh) {
 227		remove_wait_queue(poll->wqh, &poll->wait);
 228		poll->wqh = NULL;
 229	}
 230}
 231EXPORT_SYMBOL_GPL(vhost_poll_stop);
 232
 233void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 234{
 235	struct vhost_flush_struct flush;
 236
 237	if (dev->worker) {
 238		init_completion(&flush.wait_event);
 239		vhost_work_init(&flush.work, vhost_flush_work);
 240
 241		vhost_work_queue(dev, &flush.work);
 242		wait_for_completion(&flush.wait_event);
 243	}
 244}
 245EXPORT_SYMBOL_GPL(vhost_work_flush);
 246
 247/* Flush any work that has been scheduled. When calling this, don't hold any
 248 * locks that are also used by the callback. */
 249void vhost_poll_flush(struct vhost_poll *poll)
 250{
 251	vhost_work_flush(poll->dev, &poll->work);
 252}
 253EXPORT_SYMBOL_GPL(vhost_poll_flush);
 254
 255void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 256{
 257	if (!dev->worker)
 258		return;
 259
 260	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
 261		/* We can only add the work to the list after we're
 262		 * sure it was not in the list.
 263		 * test_and_set_bit() implies a memory barrier.
 264		 */
 265		llist_add(&work->node, &dev->work_list);
 266		wake_up_process(dev->worker);
 267	}
 268}
 269EXPORT_SYMBOL_GPL(vhost_work_queue);
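/*
 * Illustrative sketch (not part of this file): a typical user embeds a
 * struct vhost_work, initializes it with a callback and queues it on the
 * device; vhost_work_flush() then queues a dedicated flush work and waits
 * for it, which - because the worker runs items in FIFO order - also waits
 * for everything queued before it (the same pattern is used by
 * vhost_attach_cgroups() below).  The my_handler callback and ctx are
 * hypothetical.
 *
 *	vhost_work_init(&ctx->work, my_handler);
 *	vhost_work_queue(dev, &ctx->work);
 *	vhost_work_flush(dev, &ctx->work);
 */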
 270
 271/* A lockless hint for busy polling code to exit the loop */
 272bool vhost_has_work(struct vhost_dev *dev)
 273{
 274	return !llist_empty(&dev->work_list);
 275}
 276EXPORT_SYMBOL_GPL(vhost_has_work);
 277
 278void vhost_poll_queue(struct vhost_poll *poll)
 279{
 280	vhost_work_queue(poll->dev, &poll->work);
 281}
 282EXPORT_SYMBOL_GPL(vhost_poll_queue);
 283
 284static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
 285{
 286	int j;
 287
 288	for (j = 0; j < VHOST_NUM_ADDRS; j++)
 289		vq->meta_iotlb[j] = NULL;
 290}
 291
 292static void vhost_vq_meta_reset(struct vhost_dev *d)
 293{
 294	int i;
 295
 296	for (i = 0; i < d->nvqs; ++i)
 297		__vhost_vq_meta_reset(d->vqs[i]);
 298}
 299
 300static void vhost_vq_reset(struct vhost_dev *dev,
 301			   struct vhost_virtqueue *vq)
 302{
 303	vq->num = 1;
 304	vq->desc = NULL;
 305	vq->avail = NULL;
 306	vq->used = NULL;
 307	vq->last_avail_idx = 0;
 308	vq->avail_idx = 0;
 309	vq->last_used_idx = 0;
 310	vq->signalled_used = 0;
 311	vq->signalled_used_valid = false;
 312	vq->used_flags = 0;
 313	vq->log_used = false;
 314	vq->log_addr = -1ull;
 315	vq->private_data = NULL;
 316	vq->acked_features = 0;
 317	vq->acked_backend_features = 0;
 318	vq->log_base = NULL;
 319	vq->error_ctx = NULL;
 320	vq->kick = NULL;
 321	vq->call_ctx = NULL;
 322	vq->log_ctx = NULL;
 323	vhost_reset_is_le(vq);
 324	vhost_disable_cross_endian(vq);
 325	vq->busyloop_timeout = 0;
 326	vq->umem = NULL;
 327	vq->iotlb = NULL;
 328	__vhost_vq_meta_reset(vq);
 329}
 330
 331static int vhost_worker(void *data)
 332{
 333	struct vhost_dev *dev = data;
 334	struct vhost_work *work, *work_next;
 335	struct llist_node *node;
 336	mm_segment_t oldfs = get_fs();
 337
 338	set_fs(USER_DS);
 339	use_mm(dev->mm);
 340
 341	for (;;) {
 342		/* mb paired w/ kthread_stop */
 343		set_current_state(TASK_INTERRUPTIBLE);
 344
 345		if (kthread_should_stop()) {
 346			__set_current_state(TASK_RUNNING);
 347			break;
 348		}
 349
 350		node = llist_del_all(&dev->work_list);
 351		if (!node)
 352			schedule();
 353
 354		node = llist_reverse_order(node);
 355		/* make sure flag is seen after deletion */
 356		smp_wmb();
 357		llist_for_each_entry_safe(work, work_next, node, node) {
 358			clear_bit(VHOST_WORK_QUEUED, &work->flags);
 359			__set_current_state(TASK_RUNNING);
 360			work->fn(work);
 361			if (need_resched())
 362				schedule();
 363		}
 364	}
 365	unuse_mm(dev->mm);
 366	set_fs(oldfs);
 367	return 0;
 368}
 369
 370static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 371{
 372	kfree(vq->indirect);
 373	vq->indirect = NULL;
 374	kfree(vq->log);
 375	vq->log = NULL;
 376	kfree(vq->heads);
 377	vq->heads = NULL;
 378}
 379
 380/* Helper to allocate iovec buffers for all vqs. */
 381static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 382{
 383	struct vhost_virtqueue *vq;
 384	int i;
 385
 386	for (i = 0; i < dev->nvqs; ++i) {
 387		vq = dev->vqs[i];
 388		vq->indirect = kmalloc_array(UIO_MAXIOV,
 389					     sizeof(*vq->indirect),
 390					     GFP_KERNEL);
 391		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
 392					GFP_KERNEL);
 393		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
 394					  GFP_KERNEL);
 395		if (!vq->indirect || !vq->log || !vq->heads)
 396			goto err_nomem;
 397	}
 398	return 0;
 399
 400err_nomem:
 401	for (; i >= 0; --i)
 402		vhost_vq_free_iovecs(dev->vqs[i]);
 403	return -ENOMEM;
 404}
 405
 406static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 407{
 408	int i;
 409
 410	for (i = 0; i < dev->nvqs; ++i)
 411		vhost_vq_free_iovecs(dev->vqs[i]);
 412}
 413
 414bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
 415			  int pkts, int total_len)
 416{
 417	struct vhost_dev *dev = vq->dev;
 418
 419	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
 420	    pkts >= dev->weight) {
 421		vhost_poll_queue(&vq->poll);
 422		return true;
 423	}
 424
 425	return false;
 426}
 427EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
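/*
 * Illustrative sketch (not part of this file): a backend's kick handler is
 * expected to call vhost_exceeds_weight() from its service loop so that a
 * single virtqueue cannot monopolize the worker; when the weight is
 * exceeded the poll work is requeued and the handler simply returns.
 * handle_one() and the local counters are hypothetical.
 *
 *	pkts = 0;
 *	total_len = 0;
 *	do {
 *		len = handle_one(vq);
 *		if (len <= 0)
 *			break;
 *		total_len += len;
 *	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 */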
 428
 429static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
 430				   unsigned int num)
 431{
 432	size_t event __maybe_unused =
 433	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 434
 435	return sizeof(*vq->avail) +
 436	       sizeof(*vq->avail->ring) * num + event;
 437}
 438
 439static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
 440				  unsigned int num)
 441{
 442	size_t event __maybe_unused =
 443	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 444
 445	return sizeof(*vq->used) +
 446	       sizeof(*vq->used->ring) * num + event;
 447}
 448
 449static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
 450				  unsigned int num)
 451{
 452	return sizeof(*vq->desc) * num;
 453}
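/*
 * Worked example (assuming the standard split-ring layout from
 * include/uapi/linux/virtio_ring.h): with num = 256 and
 * VIRTIO_RING_F_EVENT_IDX negotiated, the helpers above yield
 *
 *	descriptor table: 16 * 256         = 4096 bytes
 *	avail ring:        4 + 2 * 256 + 2 =  518 bytes
 *	used ring:         4 + 8 * 256 + 2 = 2054 bytes
 *
 * where the trailing 2 bytes are the used_event/avail_event slots that only
 * exist when the event index feature has been acked.
 */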
 454
 455void vhost_dev_init(struct vhost_dev *dev,
 456		    struct vhost_virtqueue **vqs, int nvqs,
 457		    int iov_limit, int weight, int byte_weight)
 458{
 459	struct vhost_virtqueue *vq;
 460	int i;
 461
 462	dev->vqs = vqs;
 463	dev->nvqs = nvqs;
 464	mutex_init(&dev->mutex);
 465	dev->log_ctx = NULL;
 466	dev->umem = NULL;
 467	dev->iotlb = NULL;
 468	dev->mm = NULL;
 469	dev->worker = NULL;
 470	dev->iov_limit = iov_limit;
 471	dev->weight = weight;
 472	dev->byte_weight = byte_weight;
 473	init_llist_head(&dev->work_list);
 474	init_waitqueue_head(&dev->wait);
 475	INIT_LIST_HEAD(&dev->read_list);
 476	INIT_LIST_HEAD(&dev->pending_list);
 477	spin_lock_init(&dev->iotlb_lock);
 478
 479
 480	for (i = 0; i < dev->nvqs; ++i) {
 481		vq = dev->vqs[i];
 482		vq->log = NULL;
 483		vq->indirect = NULL;
 484		vq->heads = NULL;
 485		vq->dev = dev;
 486		mutex_init(&vq->mutex);
 487		vhost_vq_reset(dev, vq);
 488		if (vq->handle_kick)
 489			vhost_poll_init(&vq->poll, vq->handle_kick,
 490					EPOLLIN, dev);
 491	}
 492}
 493EXPORT_SYMBOL_GPL(vhost_dev_init);
 494
 495/* Caller should have device mutex */
 496long vhost_dev_check_owner(struct vhost_dev *dev)
 497{
 498	/* Are you the owner? If not, I don't think you mean to do that */
 499	return dev->mm == current->mm ? 0 : -EPERM;
 500}
 501EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 502
 503struct vhost_attach_cgroups_struct {
 504	struct vhost_work work;
 505	struct task_struct *owner;
 506	int ret;
 507};
 508
 509static void vhost_attach_cgroups_work(struct vhost_work *work)
 510{
 511	struct vhost_attach_cgroups_struct *s;
 512
 513	s = container_of(work, struct vhost_attach_cgroups_struct, work);
 514	s->ret = cgroup_attach_task_all(s->owner, current);
 515}
 516
 517static int vhost_attach_cgroups(struct vhost_dev *dev)
 518{
 519	struct vhost_attach_cgroups_struct attach;
 520
 521	attach.owner = current;
 522	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 523	vhost_work_queue(dev, &attach.work);
 524	vhost_work_flush(dev, &attach.work);
 525	return attach.ret;
 526}
 527
 528/* Caller should have device mutex */
 529bool vhost_dev_has_owner(struct vhost_dev *dev)
 530{
 531	return dev->mm;
 532}
 533EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 534
 535/* Caller should have device mutex */
 536long vhost_dev_set_owner(struct vhost_dev *dev)
 537{
 538	struct task_struct *worker;
 539	int err;
 540
 541	/* Is there an owner already? */
 542	if (vhost_dev_has_owner(dev)) {
 543		err = -EBUSY;
 544		goto err_mm;
 545	}
 546
 547	/* No owner, become one */
 548	dev->mm = get_task_mm(current);
 549	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 550	if (IS_ERR(worker)) {
 551		err = PTR_ERR(worker);
 552		goto err_worker;
 553	}
 554
 555	dev->worker = worker;
 556	wake_up_process(worker);	/* avoid contributing to loadavg */
 557
 558	err = vhost_attach_cgroups(dev);
 559	if (err)
 560		goto err_cgroup;
 561
 562	err = vhost_dev_alloc_iovecs(dev);
 563	if (err)
 564		goto err_cgroup;
 565
 566	return 0;
 567err_cgroup:
 568	kthread_stop(worker);
 569	dev->worker = NULL;
 570err_worker:
 571	if (dev->mm)
 572		mmput(dev->mm);
 573	dev->mm = NULL;
 574err_mm:
 575	return err;
 576}
 577EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 578
 579struct vhost_umem *vhost_dev_reset_owner_prepare(void)
 580{
 581	return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
 582}
 583EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 584
 585/* Caller should have device mutex */
 586void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
 587{
 588	int i;
 589
 590	vhost_dev_cleanup(dev);
 591
 592	/* Restore memory to default empty mapping. */
 593	INIT_LIST_HEAD(&umem->umem_list);
 594	dev->umem = umem;
 595	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
 596	 * VQs aren't running.
 597	 */
 598	for (i = 0; i < dev->nvqs; ++i)
 599		dev->vqs[i]->umem = umem;
 600}
 601EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 602
 603void vhost_dev_stop(struct vhost_dev *dev)
 604{
 605	int i;
 606
 607	for (i = 0; i < dev->nvqs; ++i) {
 608		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 609			vhost_poll_stop(&dev->vqs[i]->poll);
 610			vhost_poll_flush(&dev->vqs[i]->poll);
 611		}
 612	}
 613}
 614EXPORT_SYMBOL_GPL(vhost_dev_stop);
 615
 616static void vhost_umem_free(struct vhost_umem *umem,
 617			    struct vhost_umem_node *node)
 618{
 619	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
 620	list_del(&node->link);
 621	kfree(node);
 622	umem->numem--;
 623}
 624
 625static void vhost_umem_clean(struct vhost_umem *umem)
 626{
 627	struct vhost_umem_node *node, *tmp;
 628
 629	if (!umem)
 630		return;
 631
 632	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
 633		vhost_umem_free(umem, node);
 634
 635	kvfree(umem);
 636}
 637
 638static void vhost_clear_msg(struct vhost_dev *dev)
 639{
 640	struct vhost_msg_node *node, *n;
 641
 642	spin_lock(&dev->iotlb_lock);
 643
 644	list_for_each_entry_safe(node, n, &dev->read_list, node) {
 645		list_del(&node->node);
 646		kfree(node);
 647	}
 648
 649	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
 650		list_del(&node->node);
 651		kfree(node);
 652	}
 653
 654	spin_unlock(&dev->iotlb_lock);
 655}
 656
 
 657void vhost_dev_cleanup(struct vhost_dev *dev)
 658{
 659	int i;
 660
 661	for (i = 0; i < dev->nvqs; ++i) {
 662		if (dev->vqs[i]->error_ctx)
 663			eventfd_ctx_put(dev->vqs[i]->error_ctx);
 664		if (dev->vqs[i]->kick)
 665			fput(dev->vqs[i]->kick);
 666		if (dev->vqs[i]->call_ctx)
 667			eventfd_ctx_put(dev->vqs[i]->call_ctx);
 668		vhost_vq_reset(dev, dev->vqs[i]);
 669	}
 670	vhost_dev_free_iovecs(dev);
 671	if (dev->log_ctx)
 672		eventfd_ctx_put(dev->log_ctx);
 673	dev->log_ctx = NULL;
 674	/* No one will access memory at this point */
 675	vhost_umem_clean(dev->umem);
 676	dev->umem = NULL;
 677	vhost_umem_clean(dev->iotlb);
 678	dev->iotlb = NULL;
 679	vhost_clear_msg(dev);
 680	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
 681	WARN_ON(!llist_empty(&dev->work_list));
 682	if (dev->worker) {
 683		kthread_stop(dev->worker);
 684		dev->worker = NULL;
 685	}
 686	if (dev->mm)
 687		mmput(dev->mm);
 688	dev->mm = NULL;
 689}
 690EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 691
 692static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 693{
 694	u64 a = addr / VHOST_PAGE_SIZE / 8;
 695
 696	/* Make sure 64 bit math will not overflow. */
 697	if (a > ULONG_MAX - (unsigned long)log_base ||
 698	    a + (unsigned long)log_base > ULONG_MAX)
 699		return false;
 700
 701	return access_ok(log_base + a,
 702			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 703}
 704
 705static bool vhost_overflow(u64 uaddr, u64 size)
 706{
 707	/* Make sure 64 bit math will not overflow. */
 708	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
 709}
 710
 711/* Caller should have vq mutex and device mutex. */
 712static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
 713				int log_all)
 714{
 715	struct vhost_umem_node *node;
 716
 717	if (!umem)
 718		return false;
 719
 720	list_for_each_entry(node, &umem->umem_list, link) {
 721		unsigned long a = node->userspace_addr;
 722
 723		if (vhost_overflow(node->userspace_addr, node->size))
 724			return false;
 725
 726
 727		if (!access_ok((void __user *)a,
 728				    node->size))
 729			return false;
 730		else if (log_all && !log_access_ok(log_base,
 731						   node->start,
 732						   node->size))
 733			return false;
 734	}
 735	return true;
 736}
 737
 738static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
 739					       u64 addr, unsigned int size,
 740					       int type)
 741{
 742	const struct vhost_umem_node *node = vq->meta_iotlb[type];
 743
 744	if (!node)
 745		return NULL;
 746
 747	return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);
 748}
 749
 750/* Can we switch to this memory table? */
 751/* Caller should have device mutex but not vq mutex */
 752static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
 753			     int log_all)
 754{
 755	int i;
 756
 757	for (i = 0; i < d->nvqs; ++i) {
 758		bool ok;
 759		bool log;
 760
 761		mutex_lock(&d->vqs[i]->mutex);
 762		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
 763		/* If ring is inactive, will check when it's enabled. */
 764		if (d->vqs[i]->private_data)
 765			ok = vq_memory_access_ok(d->vqs[i]->log_base,
 766						 umem, log);
 767		else
 768			ok = true;
 769		mutex_unlock(&d->vqs[i]->mutex);
 770		if (!ok)
 771			return false;
 772	}
 773	return true;
 774}
 775
 776static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 777			  struct iovec iov[], int iov_size, int access);
 778
 779static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
 780			      const void *from, unsigned size)
 781{
 782	int ret;
 783
 784	if (!vq->iotlb)
 785		return __copy_to_user(to, from, size);
 786	else {
 787		/* This function should be called after iotlb
 788		 * prefetch, which means we're sure that all of the vq
 789		 * memory can be accessed through the iotlb, so -EAGAIN should
 790		 * not happen in this case.
 791		 */
 792		struct iov_iter t;
 793		void __user *uaddr = vhost_vq_meta_fetch(vq,
 794				     (u64)(uintptr_t)to, size,
 795				     VHOST_ADDR_USED);
 796
 797		if (uaddr)
 798			return __copy_to_user(uaddr, from, size);
 799
 800		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
 801				     ARRAY_SIZE(vq->iotlb_iov),
 802				     VHOST_ACCESS_WO);
 803		if (ret < 0)
 804			goto out;
 805		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
 806		ret = copy_to_iter(from, size, &t);
 807		if (ret == size)
 808			ret = 0;
 809	}
 810out:
 811	return ret;
 812}
 813
 814static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
 815				void __user *from, unsigned size)
 816{
 817	int ret;
 818
 819	if (!vq->iotlb)
 820		return __copy_from_user(to, from, size);
 821	else {
 822		/* This function should be called after iotlb
 823		 * prefetch, which means we're sure that the vq
 824		 * memory can be accessed through the iotlb, so -EAGAIN should
 825		 * not happen in this case.
 826		 */
 827		void __user *uaddr = vhost_vq_meta_fetch(vq,
 828				     (u64)(uintptr_t)from, size,
 829				     VHOST_ADDR_DESC);
 830		struct iov_iter f;
 831
 832		if (uaddr)
 833			return __copy_from_user(to, uaddr, size);
 834
 835		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
 836				     ARRAY_SIZE(vq->iotlb_iov),
 837				     VHOST_ACCESS_RO);
 838		if (ret < 0) {
 839			vq_err(vq, "IOTLB translation failure: uaddr "
 840			       "%p size 0x%llx\n", from,
 841			       (unsigned long long) size);
 842			goto out;
 843		}
 844		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
 845		ret = copy_from_iter(to, size, &f);
 846		if (ret == size)
 847			ret = 0;
 848	}
 849
 850out:
 851	return ret;
 852}
 853
 854static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
 855					  void __user *addr, unsigned int size,
 856					  int type)
 857{
 858	int ret;
 859
 860	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
 861			     ARRAY_SIZE(vq->iotlb_iov),
 862			     VHOST_ACCESS_RO);
 863	if (ret < 0) {
 864		vq_err(vq, "IOTLB translation failure: uaddr "
 865			"%p size 0x%llx\n", addr,
 866			(unsigned long long) size);
 867		return NULL;
 868	}
 869
 870	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
 871		vq_err(vq, "Non atomic userspace memory access: uaddr "
 872			"%p size 0x%llx\n", addr,
 873			(unsigned long long) size);
 874		return NULL;
 875	}
 876
 877	return vq->iotlb_iov[0].iov_base;
 878}
 879
 880/* This function should be called after iotlb
 881 * prefetch, which means we're sure that the vq
 882 * memory can be accessed through the iotlb, so -EAGAIN should
 883 * not happen in this case.
 884 */
 885static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 886					    void *addr, unsigned int size,
 887					    int type)
 888{
 889	void __user *uaddr = vhost_vq_meta_fetch(vq,
 890			     (u64)(uintptr_t)addr, size, type);
 891	if (uaddr)
 892		return uaddr;
 893
 894	return __vhost_get_user_slow(vq, addr, size, type);
 895}
 896
 897#define vhost_put_user(vq, x, ptr)		\
 898({ \
 899	int ret = -EFAULT; \
 900	if (!vq->iotlb) { \
 901		ret = __put_user(x, ptr); \
 902	} else { \
 903		__typeof__(ptr) to = \
 904			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
 905					  sizeof(*ptr), VHOST_ADDR_USED); \
 906		if (to != NULL) \
 907			ret = __put_user(x, to); \
 908		else \
 909			ret = -EFAULT;	\
 910	} \
 911	ret; \
 912})
 913
 914static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
 915{
 916	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
 917			      vhost_avail_event(vq));
 918}
 919
 920static inline int vhost_put_used(struct vhost_virtqueue *vq,
 921				 struct vring_used_elem *head, int idx,
 922				 int count)
 923{
 924	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
 925				  count * sizeof(*head));
 926}
 927
 928static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
 930{
 931	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
 932			      &vq->used->flags);
 933}
 934
 935static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
 937{
 938	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
 939			      &vq->used->idx);
 940}
 941
 942#define vhost_get_user(vq, x, ptr, type)		\
 943({ \
 944	int ret; \
 945	if (!vq->iotlb) { \
 946		ret = __get_user(x, ptr); \
 947	} else { \
 948		__typeof__(ptr) from = \
 949			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
 950							   sizeof(*ptr), \
 951							   type); \
 952		if (from != NULL) \
 953			ret = __get_user(x, from); \
 954		else \
 955			ret = -EFAULT; \
 956	} \
 957	ret; \
 958})
 959
 960#define vhost_get_avail(vq, x, ptr) \
 961	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
 962
 963#define vhost_get_used(vq, x, ptr) \
 964	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
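/*
 * Illustrative note (sketch): with an IOTLB these accessors first try the
 * per-virtqueue meta_iotlb cache that iotlb_access_ok()/vq_meta_prefetch()
 * populate, and only fall back to translate_desc() via
 * __vhost_get_user_slow() on a miss.  A caller therefore looks the same
 * with or without an IOTLB, e.g.
 *
 *	__virtio16 flags;
 *
 *	if (vhost_get_avail(vq, flags, &vq->avail->flags))
 *		return -EFAULT;
 */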
 965
 966static void vhost_dev_lock_vqs(struct vhost_dev *d)
 967{
 968	int i = 0;
 969	for (i = 0; i < d->nvqs; ++i)
 970		mutex_lock_nested(&d->vqs[i]->mutex, i);
 971}
 972
 973static void vhost_dev_unlock_vqs(struct vhost_dev *d)
 974{
 975	int i = 0;
 976	for (i = 0; i < d->nvqs; ++i)
 977		mutex_unlock(&d->vqs[i]->mutex);
 978}
 979
 980static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
 981				      __virtio16 *idx)
 982{
 983	return vhost_get_avail(vq, *idx, &vq->avail->idx);
 984}
 985
 986static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
 987				       __virtio16 *head, int idx)
 988{
 989	return vhost_get_avail(vq, *head,
 990			       &vq->avail->ring[idx & (vq->num - 1)]);
 991}
 992
 993static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
 994					__virtio16 *flags)
 995{
 996	return vhost_get_avail(vq, *flags, &vq->avail->flags);
 997}
 998
 999static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
1000				       __virtio16 *event)
1001{
1002	return vhost_get_avail(vq, *event, vhost_used_event(vq));
1003}
1004
1005static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
1006				     __virtio16 *idx)
1007{
1008	return vhost_get_used(vq, *idx, &vq->used->idx);
1009}
1010
1011static inline int vhost_get_desc(struct vhost_virtqueue *vq,
1012				 struct vring_desc *desc, int idx)
1013{
1014	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
1015}
1016
1017static int vhost_new_umem_range(struct vhost_umem *umem,
1018				u64 start, u64 size, u64 end,
1019				u64 userspace_addr, int perm)
1020{
1021	struct vhost_umem_node *tmp, *node;
1022
1023	if (!size)
1024		return -EFAULT;
1025
1026	node = kmalloc(sizeof(*node), GFP_ATOMIC);
1027	if (!node)
1028		return -ENOMEM;
1029
1030	if (umem->numem == max_iotlb_entries) {
1031		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
1032		vhost_umem_free(umem, tmp);
1033	}
1034
1035	node->start = start;
1036	node->size = size;
1037	node->last = end;
1038	node->userspace_addr = userspace_addr;
1039	node->perm = perm;
1040	INIT_LIST_HEAD(&node->link);
1041	list_add_tail(&node->link, &umem->umem_list);
1042	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
1043	umem->numem++;
1044
1045	return 0;
1046}
1047
1048static void vhost_del_umem_range(struct vhost_umem *umem,
1049				 u64 start, u64 end)
1050{
1051	struct vhost_umem_node *node;
1052
1053	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1054							   start, end)))
1055		vhost_umem_free(umem, node);
1056}
1057
1058static void vhost_iotlb_notify_vq(struct vhost_dev *d,
1059				  struct vhost_iotlb_msg *msg)
1060{
1061	struct vhost_msg_node *node, *n;
1062
1063	spin_lock(&d->iotlb_lock);
1064
1065	list_for_each_entry_safe(node, n, &d->pending_list, node) {
1066		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
1067		if (msg->iova <= vq_msg->iova &&
1068		    msg->iova + msg->size - 1 >= vq_msg->iova &&
1069		    vq_msg->type == VHOST_IOTLB_MISS) {
1070			vhost_poll_queue(&node->vq->poll);
1071			list_del(&node->node);
1072			kfree(node);
1073		}
1074	}
1075
1076	spin_unlock(&d->iotlb_lock);
1077}
1078
1079static bool umem_access_ok(u64 uaddr, u64 size, int access)
1080{
1081	unsigned long a = uaddr;
1082
1083	/* Make sure 64 bit math will not overflow. */
1084	if (vhost_overflow(uaddr, size))
1085		return false;
1086
1087	if ((access & VHOST_ACCESS_RO) &&
1088	    !access_ok((void __user *)a, size))
1089		return false;
1090	if ((access & VHOST_ACCESS_WO) &&
1091	    !access_ok((void __user *)a, size))
1092		return false;
1093	return true;
1094}
1095
1096static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1097				   struct vhost_iotlb_msg *msg)
1098{
1099	int ret = 0;
1100
1101	mutex_lock(&dev->mutex);
1102	vhost_dev_lock_vqs(dev);
1103	switch (msg->type) {
1104	case VHOST_IOTLB_UPDATE:
1105		if (!dev->iotlb) {
1106			ret = -EFAULT;
1107			break;
1108		}
1109		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
1110			ret = -EFAULT;
1111			break;
1112		}
1113		vhost_vq_meta_reset(dev);
1114		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
1115					 msg->iova + msg->size - 1,
1116					 msg->uaddr, msg->perm)) {
1117			ret = -ENOMEM;
1118			break;
1119		}
1120		vhost_iotlb_notify_vq(dev, msg);
1121		break;
1122	case VHOST_IOTLB_INVALIDATE:
1123		if (!dev->iotlb) {
1124			ret = -EFAULT;
1125			break;
1126		}
1127		vhost_vq_meta_reset(dev);
1128		vhost_del_umem_range(dev->iotlb, msg->iova,
1129				     msg->iova + msg->size - 1);
1130		break;
1131	default:
1132		ret = -EINVAL;
1133		break;
1134	}
1135
1136	vhost_dev_unlock_vqs(dev);
1137	mutex_unlock(&dev->mutex);
1138
1139	return ret;
1140}
1141ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1142			     struct iov_iter *from)
1143{
1144	struct vhost_iotlb_msg msg;
1145	size_t offset;
1146	int type, ret;
1147
1148	ret = copy_from_iter(&type, sizeof(type), from);
1149	if (ret != sizeof(type)) {
1150		ret = -EINVAL;
1151		goto done;
1152	}
1153
1154	switch (type) {
1155	case VHOST_IOTLB_MSG:
 1156		/* There may be a hole after the type field in the V1 message format,
1157		 * so skip it here.
1158		 */
1159		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
1160		break;
1161	case VHOST_IOTLB_MSG_V2:
1162		offset = sizeof(__u32);
1163		break;
1164	default:
1165		ret = -EINVAL;
1166		goto done;
1167	}
1168
1169	iov_iter_advance(from, offset);
1170	ret = copy_from_iter(&msg, sizeof(msg), from);
1171	if (ret != sizeof(msg)) {
1172		ret = -EINVAL;
1173		goto done;
1174	}
1175	if (vhost_process_iotlb_msg(dev, &msg)) {
1176		ret = -EFAULT;
1177		goto done;
1178	}
1179
1180	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
1181	      sizeof(struct vhost_msg_v2);
1182done:
1183	return ret;
1184}
1185EXPORT_SYMBOL(vhost_chr_write_iter);
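/*
 * Layout note (a sketch, based on the offsets used above and on
 * include/uapi/linux/vhost_types.h): both message formats start with a type
 * word followed by the IOTLB payload, roughly
 *
 *	struct vhost_msg    { int   type; <alignment pad> struct vhost_iotlb_msg iotlb; ... };
 *	struct vhost_msg_v2 { __u32 type; __u32 reserved;  struct vhost_iotlb_msg iotlb; ... };
 *
 * which is why the V1 path skips offsetof(struct vhost_msg, iotlb) -
 * sizeof(int) bytes while the V2 path always skips exactly sizeof(__u32).
 */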
1186
1187__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1188			    poll_table *wait)
1189{
1190	__poll_t mask = 0;
1191
1192	poll_wait(file, &dev->wait, wait);
1193
1194	if (!list_empty(&dev->read_list))
1195		mask |= EPOLLIN | EPOLLRDNORM;
1196
1197	return mask;
1198}
1199EXPORT_SYMBOL(vhost_chr_poll);
1200
1201ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1202			    int noblock)
1203{
1204	DEFINE_WAIT(wait);
1205	struct vhost_msg_node *node;
1206	ssize_t ret = 0;
1207	unsigned size = sizeof(struct vhost_msg);
1208
1209	if (iov_iter_count(to) < size)
1210		return 0;
1211
1212	while (1) {
1213		if (!noblock)
1214			prepare_to_wait(&dev->wait, &wait,
1215					TASK_INTERRUPTIBLE);
1216
1217		node = vhost_dequeue_msg(dev, &dev->read_list);
1218		if (node)
1219			break;
1220		if (noblock) {
1221			ret = -EAGAIN;
1222			break;
1223		}
1224		if (signal_pending(current)) {
1225			ret = -ERESTARTSYS;
1226			break;
1227		}
1228		if (!dev->iotlb) {
1229			ret = -EBADFD;
1230			break;
1231		}
1232
1233		schedule();
1234	}
1235
1236	if (!noblock)
1237		finish_wait(&dev->wait, &wait);
1238
1239	if (node) {
1240		struct vhost_iotlb_msg *msg;
1241		void *start = &node->msg;
1242
1243		switch (node->msg.type) {
1244		case VHOST_IOTLB_MSG:
1245			size = sizeof(node->msg);
1246			msg = &node->msg.iotlb;
1247			break;
1248		case VHOST_IOTLB_MSG_V2:
1249			size = sizeof(node->msg_v2);
1250			msg = &node->msg_v2.iotlb;
1251			break;
1252		default:
1253			BUG();
1254			break;
1255		}
1256
1257		ret = copy_to_iter(start, size, to);
1258		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
1259			kfree(node);
1260			return ret;
1261		}
1262		vhost_enqueue_msg(dev, &dev->pending_list, node);
1263	}
1264
1265	return ret;
1266}
1267EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
1268
1269static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1270{
1271	struct vhost_dev *dev = vq->dev;
1272	struct vhost_msg_node *node;
1273	struct vhost_iotlb_msg *msg;
1274	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
1275
1276	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
1277	if (!node)
1278		return -ENOMEM;
1279
1280	if (v2) {
1281		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
1282		msg = &node->msg_v2.iotlb;
1283	} else {
1284		msg = &node->msg.iotlb;
1285	}
1286
1287	msg->type = VHOST_IOTLB_MISS;
1288	msg->iova = iova;
1289	msg->perm = access;
1290
1291	vhost_enqueue_msg(dev, &dev->read_list, node);
1292
1293	return 0;
1294}
1295
1296static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1297			 struct vring_desc __user *desc,
1298			 struct vring_avail __user *avail,
1299			 struct vring_used __user *used)
1301{
1302	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
1303	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
1304	       access_ok(used, vhost_get_used_size(vq, num));
1305}
1306
1307static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1308				 const struct vhost_umem_node *node,
1309				 int type)
1310{
1311	int access = (type == VHOST_ADDR_USED) ?
1312		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;
1313
1314	if (likely(node->perm & access))
1315		vq->meta_iotlb[type] = node;
1316}
1317
1318static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1319			    int access, u64 addr, u64 len, int type)
1320{
1321	const struct vhost_umem_node *node;
1322	struct vhost_umem *umem = vq->iotlb;
1323	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
1324
1325	if (vhost_vq_meta_fetch(vq, addr, len, type))
1326		return true;
1327
1328	while (len > s) {
1329		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1330							   addr,
1331							   last);
1332		if (node == NULL || node->start > addr) {
1333			vhost_iotlb_miss(vq, addr, access);
1334			return false;
1335		} else if (!(node->perm & access)) {
1336			/* Report the possible access violation by
 1337			 * requesting another translation from userspace.
1338			 */
1339			return false;
1340		}
1341
1342		size = node->size - addr + node->start;
1343
1344		if (orig_addr == addr && size >= len)
1345			vhost_vq_meta_update(vq, node, type);
1346
1347		s += size;
1348		addr += size;
1349	}
1350
1351	return true;
1352}
1353
1354int vq_meta_prefetch(struct vhost_virtqueue *vq)
1355{
1356	unsigned int num = vq->num;
1357
1358	if (!vq->iotlb)
1359		return 1;
1360
1361	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
1362			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
1363	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
1364			       vhost_get_avail_size(vq, num),
1365			       VHOST_ADDR_AVAIL) &&
1366	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
1367			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
1368}
1369EXPORT_SYMBOL_GPL(vq_meta_prefetch);
1370
1371/* Can we log writes? */
1372/* Caller should have device mutex but not vq mutex */
1373bool vhost_log_access_ok(struct vhost_dev *dev)
1374{
1375	return memory_access_ok(dev, dev->umem, 1);
1376}
1377EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1378
1379/* Verify access for write logging. */
1380/* Caller should have vq mutex and device mutex */
1381static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1382			     void __user *log_base)
1383{
1384	return vq_memory_access_ok(log_base, vq->umem,
1385				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1386		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
1387				  vhost_get_used_size(vq, vq->num)));
1388}
1389
1390/* Can we start vq? */
1391/* Caller should have vq mutex and device mutex */
1392bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
1393{
1394	if (!vq_log_access_ok(vq, vq->log_base))
1395		return false;
1396
1397	/* Access validation occurs at prefetch time with IOTLB */
1398	if (vq->iotlb)
1399		return true;
1400
1401	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1402}
1403EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1404
1405static struct vhost_umem *vhost_umem_alloc(void)
1406{
1407	struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);
1408
1409	if (!umem)
1410		return NULL;
1411
1412	umem->umem_tree = RB_ROOT_CACHED;
1413	umem->numem = 0;
1414	INIT_LIST_HEAD(&umem->umem_list);
1415
1416	return umem;
1417}
1418
1419static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1420{
1421	struct vhost_memory mem, *newmem;
1422	struct vhost_memory_region *region;
1423	struct vhost_umem *newumem, *oldumem;
1424	unsigned long size = offsetof(struct vhost_memory, regions);
1425	int i;
1426
1427	if (copy_from_user(&mem, m, size))
1428		return -EFAULT;
1429	if (mem.padding)
1430		return -EOPNOTSUPP;
1431	if (mem.nregions > max_mem_regions)
1432		return -E2BIG;
1433	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1434			GFP_KERNEL);
1435	if (!newmem)
1436		return -ENOMEM;
1437
1438	memcpy(newmem, &mem, size);
1439	if (copy_from_user(newmem->regions, m->regions,
1440			   mem.nregions * sizeof *m->regions)) {
1441		kvfree(newmem);
1442		return -EFAULT;
1443	}
1444
1445	newumem = vhost_umem_alloc();
1446	if (!newumem) {
1447		kvfree(newmem);
1448		return -ENOMEM;
1449	}
1450
1451	for (region = newmem->regions;
1452	     region < newmem->regions + mem.nregions;
1453	     region++) {
1454		if (vhost_new_umem_range(newumem,
1455					 region->guest_phys_addr,
1456					 region->memory_size,
1457					 region->guest_phys_addr +
1458					 region->memory_size - 1,
1459					 region->userspace_addr,
1460					 VHOST_ACCESS_RW))
1461			goto err;
1462	}
1463
1464	if (!memory_access_ok(d, newumem, 0))
1465		goto err;
1466
1467	oldumem = d->umem;
1468	d->umem = newumem;
1469
1470	/* All memory accesses are done under some VQ mutex. */
1471	for (i = 0; i < d->nvqs; ++i) {
1472		mutex_lock(&d->vqs[i]->mutex);
1473		d->vqs[i]->umem = newumem;
1474		mutex_unlock(&d->vqs[i]->mutex);
1475	}
1476
1477	kvfree(newmem);
1478	vhost_umem_clean(oldumem);
1479	return 0;
1480
1481err:
1482	vhost_umem_clean(newumem);
1483	kvfree(newmem);
1484	return -EFAULT;
1485}
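/*
 * Illustrative sketch (not part of this file): userspace installs the guest
 * memory map with the VHOST_SET_MEM_TABLE ioctl, one region per contiguous
 * chunk of guest physical memory.  vhost_fd, ram_ptr and ram_size are
 * hypothetical; error handling is omitted.
 *
 *	struct {
 *		struct vhost_memory m;
 *		struct vhost_memory_region r[1];
 *	} mem = {
 *		.m.nregions = 1,
 *		.r[0] = {
 *			.guest_phys_addr = 0,
 *			.memory_size	 = ram_size,
 *			.userspace_addr	 = (__u64)(uintptr_t)ram_ptr,
 *		},
 *	};
 *
 *	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, &mem.m);
 */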
1486
1487static long vhost_vring_set_num(struct vhost_dev *d,
1488				struct vhost_virtqueue *vq,
1489				void __user *argp)
1490{
1491	struct vhost_vring_state s;
1492
1493	/* Resizing ring with an active backend?
1494	 * You don't want to do that. */
1495	if (vq->private_data)
1496		return -EBUSY;
1497
1498	if (copy_from_user(&s, argp, sizeof s))
1499		return -EFAULT;
1500
1501	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
1502		return -EINVAL;
1503	vq->num = s.num;
1504
1505	return 0;
1506}
1507
1508static long vhost_vring_set_addr(struct vhost_dev *d,
1509				 struct vhost_virtqueue *vq,
1510				 void __user *argp)
1511{
1512	struct vhost_vring_addr a;
1513
1514	if (copy_from_user(&a, argp, sizeof a))
1515		return -EFAULT;
1516	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
1517		return -EOPNOTSUPP;
1518
1519	/* For 32bit, verify that the top 32bits of the user
1520	   data are set to zero. */
1521	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1522	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1523	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
1524		return -EFAULT;
1525
1526	/* Make sure it's safe to cast pointers to vring types. */
1527	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1528	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1529	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1530	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1531	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
1532		return -EINVAL;
1533
1534	/* We only verify access here if backend is configured.
 1535	 * If it is not, we don't, as the size might not have been set up yet.
1536	 * We will verify when backend is configured. */
1537	if (vq->private_data) {
1538		if (!vq_access_ok(vq, vq->num,
1539			(void __user *)(unsigned long)a.desc_user_addr,
1540			(void __user *)(unsigned long)a.avail_user_addr,
1541			(void __user *)(unsigned long)a.used_user_addr))
1542			return -EINVAL;
1543
1544		/* Also validate log access for used ring if enabled. */
1545		if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
1546			!log_access_ok(vq->log_base, a.log_guest_addr,
1547				sizeof *vq->used +
1548				vq->num * sizeof *vq->used->ring))
1549			return -EINVAL;
1550	}
1551
1552	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1553	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1554	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1555	vq->log_addr = a.log_guest_addr;
1556	vq->used = (void __user *)(unsigned long)a.used_user_addr;
1557
1558	return 0;
1559}
1560
1561static long vhost_vring_set_num_addr(struct vhost_dev *d,
1562				     struct vhost_virtqueue *vq,
1563				     unsigned int ioctl,
1564				     void __user *argp)
1565{
1566	long r;
1567
1568	mutex_lock(&vq->mutex);
1569
1570	switch (ioctl) {
1571	case VHOST_SET_VRING_NUM:
1572		r = vhost_vring_set_num(d, vq, argp);
1573		break;
1574	case VHOST_SET_VRING_ADDR:
1575		r = vhost_vring_set_addr(d, vq, argp);
1576		break;
1577	default:
1578		BUG();
1579	}
1580
1581	mutex_unlock(&vq->mutex);
1582
1583	return r;
1584}
1585long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1586{
1587	struct file *eventfp, *filep = NULL;
1588	bool pollstart = false, pollstop = false;
1589	struct eventfd_ctx *ctx = NULL;
1590	u32 __user *idxp = argp;
1591	struct vhost_virtqueue *vq;
1592	struct vhost_vring_state s;
1593	struct vhost_vring_file f;
1594	u32 idx;
1595	long r;
1596
1597	r = get_user(idx, idxp);
1598	if (r < 0)
1599		return r;
1600	if (idx >= d->nvqs)
1601		return -ENOBUFS;
1602
1603	idx = array_index_nospec(idx, d->nvqs);
1604	vq = d->vqs[idx];
1605
1606	if (ioctl == VHOST_SET_VRING_NUM ||
1607	    ioctl == VHOST_SET_VRING_ADDR) {
1608		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
1609	}
1610
1611	mutex_lock(&vq->mutex);
1612
1613	switch (ioctl) {
1614	case VHOST_SET_VRING_BASE:
1615		/* Moving base with an active backend?
1616		 * You don't want to do that. */
1617		if (vq->private_data) {
1618			r = -EBUSY;
1619			break;
1620		}
1621		if (copy_from_user(&s, argp, sizeof s)) {
1622			r = -EFAULT;
1623			break;
1624		}
1625		if (s.num > 0xffff) {
1626			r = -EINVAL;
1627			break;
1628		}
1629		vq->last_avail_idx = s.num;
1630		/* Forget the cached index value. */
1631		vq->avail_idx = vq->last_avail_idx;
1632		break;
1633	case VHOST_GET_VRING_BASE:
1634		s.index = idx;
1635		s.num = vq->last_avail_idx;
1636		if (copy_to_user(argp, &s, sizeof s))
1637			r = -EFAULT;
1638		break;
1639	case VHOST_SET_VRING_KICK:
1640		if (copy_from_user(&f, argp, sizeof f)) {
1641			r = -EFAULT;
1642			break;
1643		}
1644		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
1645		if (IS_ERR(eventfp)) {
1646			r = PTR_ERR(eventfp);
1647			break;
1648		}
1649		if (eventfp != vq->kick) {
1650			pollstop = (filep = vq->kick) != NULL;
1651			pollstart = (vq->kick = eventfp) != NULL;
1652		} else
1653			filep = eventfp;
1654		break;
1655	case VHOST_SET_VRING_CALL:
1656		if (copy_from_user(&f, argp, sizeof f)) {
1657			r = -EFAULT;
1658			break;
1659		}
1660		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
1661		if (IS_ERR(ctx)) {
1662			r = PTR_ERR(ctx);
1663			break;
1664		}
1665		swap(ctx, vq->call_ctx);
1666		break;
1667	case VHOST_SET_VRING_ERR:
1668		if (copy_from_user(&f, argp, sizeof f)) {
1669			r = -EFAULT;
1670			break;
1671		}
1672		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
1673		if (IS_ERR(ctx)) {
1674			r = PTR_ERR(ctx);
1675			break;
1676		}
1677		swap(ctx, vq->error_ctx);
1678		break;
1679	case VHOST_SET_VRING_ENDIAN:
1680		r = vhost_set_vring_endian(vq, argp);
1681		break;
1682	case VHOST_GET_VRING_ENDIAN:
1683		r = vhost_get_vring_endian(vq, idx, argp);
1684		break;
1685	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1686		if (copy_from_user(&s, argp, sizeof(s))) {
1687			r = -EFAULT;
1688			break;
1689		}
1690		vq->busyloop_timeout = s.num;
1691		break;
1692	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1693		s.index = idx;
1694		s.num = vq->busyloop_timeout;
1695		if (copy_to_user(argp, &s, sizeof(s)))
1696			r = -EFAULT;
1697		break;
1698	default:
1699		r = -ENOIOCTLCMD;
1700	}
1701
1702	if (pollstop && vq->handle_kick)
1703		vhost_poll_stop(&vq->poll);
1704
1705	if (!IS_ERR_OR_NULL(ctx))
1706		eventfd_ctx_put(ctx);
1707	if (filep)
1708		fput(filep);
1709
1710	if (pollstart && vq->handle_kick)
1711		r = vhost_poll_start(&vq->poll, vq->kick);
1712
1713	mutex_unlock(&vq->mutex);
1714
1715	if (pollstop && vq->handle_kick)
1716		vhost_poll_flush(&vq->poll);
1717	return r;
1718}
1719EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
1720
1721int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1722{
1723	struct vhost_umem *niotlb, *oiotlb;
1724	int i;
1725
1726	niotlb = vhost_umem_alloc();
1727	if (!niotlb)
1728		return -ENOMEM;
1729
1730	oiotlb = d->iotlb;
1731	d->iotlb = niotlb;
1732
1733	for (i = 0; i < d->nvqs; ++i) {
1734		struct vhost_virtqueue *vq = d->vqs[i];
1735
1736		mutex_lock(&vq->mutex);
1737		vq->iotlb = niotlb;
1738		__vhost_vq_meta_reset(vq);
1739		mutex_unlock(&vq->mutex);
1740	}
1741
1742	vhost_umem_clean(oiotlb);
1743
1744	return 0;
1745}
1746EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1747
1748/* Caller must have device mutex */
1749long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1750{
1751	struct eventfd_ctx *ctx;
1752	u64 p;
1753	long r;
1754	int i, fd;
1755
1756	/* If you are not the owner, you can become one */
1757	if (ioctl == VHOST_SET_OWNER) {
1758		r = vhost_dev_set_owner(d);
1759		goto done;
1760	}
1761
1762	/* You must be the owner to do anything else */
1763	r = vhost_dev_check_owner(d);
1764	if (r)
1765		goto done;
1766
1767	switch (ioctl) {
1768	case VHOST_SET_MEM_TABLE:
1769		r = vhost_set_memory(d, argp);
1770		break;
1771	case VHOST_SET_LOG_BASE:
1772		if (copy_from_user(&p, argp, sizeof p)) {
1773			r = -EFAULT;
1774			break;
1775		}
1776		if ((u64)(unsigned long)p != p) {
1777			r = -EFAULT;
1778			break;
1779		}
1780		for (i = 0; i < d->nvqs; ++i) {
1781			struct vhost_virtqueue *vq;
1782			void __user *base = (void __user *)(unsigned long)p;
1783			vq = d->vqs[i];
1784			mutex_lock(&vq->mutex);
1785			/* If ring is inactive, will check when it's enabled. */
1786			if (vq->private_data && !vq_log_access_ok(vq, base))
1787				r = -EFAULT;
1788			else
1789				vq->log_base = base;
1790			mutex_unlock(&vq->mutex);
1791		}
1792		break;
1793	case VHOST_SET_LOG_FD:
1794		r = get_user(fd, (int __user *)argp);
1795		if (r < 0)
1796			break;
1797		ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
1798		if (IS_ERR(ctx)) {
1799			r = PTR_ERR(ctx);
1800			break;
1801		}
1802		swap(ctx, d->log_ctx);
1803		for (i = 0; i < d->nvqs; ++i) {
1804			mutex_lock(&d->vqs[i]->mutex);
1805			d->vqs[i]->log_ctx = d->log_ctx;
1806			mutex_unlock(&d->vqs[i]->mutex);
1807		}
1808		if (ctx)
1809			eventfd_ctx_put(ctx);
1810		break;
1811	default:
1812		r = -ENOIOCTLCMD;
1813		break;
1814	}
1815done:
1816	return r;
1817}
1818EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
1819
1820/* TODO: This is really inefficient.  We need something like get_user()
1821 * (instruction directly accesses the data, with an exception table entry
1822 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
1823 */
1824static int set_bit_to_user(int nr, void __user *addr)
1825{
1826	unsigned long log = (unsigned long)addr;
1827	struct page *page;
1828	void *base;
1829	int bit = nr + (log % PAGE_SIZE) * 8;
1830	int r;
1831
1832	r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
1833	if (r < 0)
1834		return r;
1835	BUG_ON(r != 1);
1836	base = kmap_atomic(page);
1837	set_bit(bit, base);
1838	kunmap_atomic(base);
1839	set_page_dirty_lock(page);
1840	put_page(page);
1841	return 0;
1842}
1843
1844static int log_write(void __user *log_base,
1845		     u64 write_address, u64 write_length)
1846{
1847	u64 write_page = write_address / VHOST_PAGE_SIZE;
1848	int r;
1849
1850	if (!write_length)
1851		return 0;
1852	write_length += write_address % VHOST_PAGE_SIZE;
1853	for (;;) {
1854		u64 base = (u64)(unsigned long)log_base;
1855		u64 log = base + write_page / 8;
1856		int bit = write_page % 8;
1857		if ((u64)(unsigned long)log != log)
1858			return -EFAULT;
1859		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1860		if (r < 0)
1861			return r;
1862		if (write_length <= VHOST_PAGE_SIZE)
1863			break;
1864		write_length -= VHOST_PAGE_SIZE;
1865		write_page += 1;
1866	}
1867	return r;
1868}
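/*
 * Worked example (assuming VHOST_PAGE_SIZE is 4096): a write to guest
 * physical address 0x12345 dirties page 0x12, so log_write() sets bit
 * 0x12 % 8 == 2 in the byte at log_base + 0x12 / 8 == log_base + 2.  A
 * write that crosses page boundaries simply sets one bit per page touched.
 */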
1869
1870static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1871{
1872	struct vhost_umem *umem = vq->umem;
1873	struct vhost_umem_node *u;
1874	u64 start, end, l, min;
1875	int r;
1876	bool hit = false;
1877
1878	while (len) {
1879		min = len;
 1880		/* More than one GPA can be mapped into a single HVA, so
1881		 * iterate all possible umems here to be safe.
1882		 */
1883		list_for_each_entry(u, &umem->umem_list, link) {
1884			if (u->userspace_addr > hva - 1 + len ||
1885			    u->userspace_addr - 1 + u->size < hva)
1886				continue;
1887			start = max(u->userspace_addr, hva);
1888			end = min(u->userspace_addr - 1 + u->size,
1889				  hva - 1 + len);
1890			l = end - start + 1;
1891			r = log_write(vq->log_base,
1892				      u->start + start - u->userspace_addr,
1893				      l);
1894			if (r < 0)
1895				return r;
1896			hit = true;
1897			min = min(l, min);
1898		}
1899
1900		if (!hit)
1901			return -EFAULT;
1902
1903		len -= min;
1904		hva += min;
1905	}
1906
1907	return 0;
1908}
1909
1910static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1911{
1912	struct iovec iov[64];
1913	int i, ret;
1914
1915	if (!vq->iotlb)
1916		return log_write(vq->log_base, vq->log_addr + used_offset, len);
1917
1918	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1919			     len, iov, 64, VHOST_ACCESS_WO);
1920	if (ret < 0)
1921		return ret;
1922
1923	for (i = 0; i < ret; i++) {
1924		ret = log_write_hva(vq,	(uintptr_t)iov[i].iov_base,
1925				    iov[i].iov_len);
1926		if (ret)
1927			return ret;
1928	}
1929
1930	return 0;
1931}
1932
1933int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1934		    unsigned int log_num, u64 len, struct iovec *iov, int count)
1935{
1936	int i, r;
1937
1938	/* Make sure data written is seen before log. */
1939	smp_wmb();
1940
1941	if (vq->iotlb) {
1942		for (i = 0; i < count; i++) {
1943			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1944					  iov[i].iov_len);
1945			if (r < 0)
1946				return r;
1947		}
1948		return 0;
1949	}
1950
1951	for (i = 0; i < log_num; ++i) {
1952		u64 l = min(log[i].len, len);
1953		r = log_write(vq->log_base, log[i].addr, l);
1954		if (r < 0)
1955			return r;
1956		len -= l;
1957		if (!len) {
1958			if (vq->log_ctx)
1959				eventfd_signal(vq->log_ctx, 1);
1960			return 0;
1961		}
1962	}
1963	/* Length written exceeds what we have stored. This is a bug. */
1964	BUG();
1965	return 0;
1966}
1967EXPORT_SYMBOL_GPL(vhost_log_write);
1968
1969static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1970{
1971	void __user *used;
1972	if (vhost_put_used_flags(vq))
1973		return -EFAULT;
1974	if (unlikely(vq->log_used)) {
1975		/* Make sure the flag is seen before log. */
1976		smp_wmb();
1977		/* Log used flag write. */
1978		used = &vq->used->flags;
1979		log_used(vq, (used - (void __user *)vq->used),
1980			 sizeof vq->used->flags);
1981		if (vq->log_ctx)
1982			eventfd_signal(vq->log_ctx, 1);
1983	}
1984	return 0;
1985}
1986
1987static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1988{
1989	if (vhost_put_avail_event(vq))
1990		return -EFAULT;
1991	if (unlikely(vq->log_used)) {
1992		void __user *used;
1993		/* Make sure the event is seen before log. */
1994		smp_wmb();
1995		/* Log avail event write */
1996		used = vhost_avail_event(vq);
1997		log_used(vq, (used - (void __user *)vq->used),
1998			 sizeof *vhost_avail_event(vq));
1999		if (vq->log_ctx)
2000			eventfd_signal(vq->log_ctx, 1);
2001	}
2002	return 0;
2003}
2004
2005int vhost_vq_init_access(struct vhost_virtqueue *vq)
2006{
2007	__virtio16 last_used_idx;
2008	int r;
2009	bool is_le = vq->is_le;
2010
2011	if (!vq->private_data)
2012		return 0;
2013
2014	vhost_init_is_le(vq);
2015
2016	r = vhost_update_used_flags(vq);
2017	if (r)
2018		goto err;
2019	vq->signalled_used_valid = false;
2020	if (!vq->iotlb &&
2021	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
2022		r = -EFAULT;
2023		goto err;
2024	}
2025	r = vhost_get_used_idx(vq, &last_used_idx);
2026	if (r) {
2027		vq_err(vq, "Can't access used idx at %p\n",
2028		       &vq->used->idx);
2029		goto err;
2030	}
2031	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
2032	return 0;
2033
2034err:
2035	vq->is_le = is_le;
2036	return r;
2037}
2038EXPORT_SYMBOL_GPL(vhost_vq_init_access);
2039
2040static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
2041			  struct iovec iov[], int iov_size, int access)
2042{
2043	const struct vhost_umem_node *node;
2044	struct vhost_dev *dev = vq->dev;
2045	struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
2046	struct iovec *_iov;
2047	u64 s = 0;
2048	int ret = 0;
2049
2050	while ((u64)len > s) {
2051		u64 size;
2052		if (unlikely(ret >= iov_size)) {
2053			ret = -ENOBUFS;
2054			break;
2055		}
2056
2057		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
2058							addr, addr + len - 1);
2059		if (node == NULL || node->start > addr) {
2060			if (umem != dev->iotlb) {
2061				ret = -EFAULT;
2062				break;
2063			}
2064			ret = -EAGAIN;
2065			break;
2066		} else if (!(node->perm & access)) {
2067			ret = -EPERM;
2068			break;
2069		}
2070
2071		_iov = iov + ret;
2072		size = node->size - addr + node->start;
2073		_iov->iov_len = min((u64)len - s, size);
2074		_iov->iov_base = (void __user *)(unsigned long)
2075			(node->userspace_addr + addr - node->start);
2076		s += size;
2077		addr += size;
2078		++ret;
2079	}
2080
2081	if (ret == -EAGAIN)
2082		vhost_iotlb_miss(vq, addr, access);
2083	return ret;
2084}
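/*
 * Illustrative example (sketch): a single 8 KiB buffer that spans two
 * non-contiguous regions is translated into two iovec entries, e.g.
 *
 *	ret = translate_desc(vq, gpa, 0x2000, iov, 16, VHOST_ACCESS_RO);
 *
 * may set ret to 2 with iov[0]/iov[1] pointing at the two userspace
 * mappings.  Negative returns are -EFAULT (no mapping, no IOTLB), -EAGAIN
 * (IOTLB miss, a miss request has been queued), -EPERM (permission
 * mismatch) or -ENOBUFS (iov too small).  gpa and iov are hypothetical.
 */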
2085
2086/* Each buffer in the virtqueues is actually a chain of descriptors.  This
2087 * function returns the next descriptor in the chain,
2088 * or -1U if we're at the end. */
2089static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
2090{
2091	unsigned int next;
2092
2093	/* If this descriptor says it doesn't chain, we're done. */
2094	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
2095		return -1U;
2096
 2097	/* Check they're not leading us off the end of the descriptors. */
2098	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
2099	return next;
2100}
2101
2102static int get_indirect(struct vhost_virtqueue *vq,
2103			struct iovec iov[], unsigned int iov_size,
2104			unsigned int *out_num, unsigned int *in_num,
2105			struct vhost_log *log, unsigned int *log_num,
2106			struct vring_desc *indirect)
2107{
2108	struct vring_desc desc;
2109	unsigned int i = 0, count, found = 0;
2110	u32 len = vhost32_to_cpu(vq, indirect->len);
2111	struct iov_iter from;
2112	int ret, access;
2113
2114	/* Sanity check */
2115	if (unlikely(len % sizeof desc)) {
2116		vq_err(vq, "Invalid length in indirect descriptor: "
2117		       "len 0x%llx not multiple of 0x%zx\n",
2118		       (unsigned long long)len,
2119		       sizeof desc);
2120		return -EINVAL;
2121	}
2122
2123	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
2124			     UIO_MAXIOV, VHOST_ACCESS_RO);
2125	if (unlikely(ret < 0)) {
2126		if (ret != -EAGAIN)
2127			vq_err(vq, "Translation failure %d in indirect.\n", ret);
2128		return ret;
2129	}
2130	iov_iter_init(&from, READ, vq->indirect, ret, len);
2131
2132	/* We will use the result as an address to read from, so most
2133	 * architectures only need a compiler barrier here. */
2134	read_barrier_depends();
2135
2136	count = len / sizeof desc;
2137	/* Buffers are chained via a 16 bit next field, so
2138	 * we can have at most 2^16 of these. */
2139	if (unlikely(count > USHRT_MAX + 1)) {
2140		vq_err(vq, "Indirect buffer length too big: %d\n",
2141		       indirect->len);
2142		return -E2BIG;
2143	}
2144
2145	do {
2146		unsigned iov_count = *in_num + *out_num;
2147		if (unlikely(++found > count)) {
2148			vq_err(vq, "Loop detected: last one at %u "
2149			       "indirect size %u\n",
2150			       i, count);
2151			return -EINVAL;
2152		}
2153		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
2154			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
2155			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2156			return -EINVAL;
2157		}
2158		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
2159			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
2160			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2161			return -EINVAL;
2162		}
2163
2164		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2165			access = VHOST_ACCESS_WO;
2166		else
2167			access = VHOST_ACCESS_RO;
2168
2169		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2170				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2171				     iov_size - iov_count, access);
2172		if (unlikely(ret < 0)) {
2173			if (ret != -EAGAIN)
2174				vq_err(vq, "Translation failure %d indirect idx %d\n",
2175					ret, i);
2176			return ret;
2177		}
2178		/* If this is an input descriptor, increment that count. */
2179		if (access == VHOST_ACCESS_WO) {
2180			*in_num += ret;
2181			if (unlikely(log && ret)) {
2182				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2183				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2184				++*log_num;
2185			}
2186		} else {
2187			/* If it's an output descriptor, they're all supposed
2188			 * to come before any input descriptors. */
2189			if (unlikely(*in_num)) {
2190				vq_err(vq, "Indirect descriptor "
2191				       "has out after in: idx %d\n", i);
2192				return -EINVAL;
2193			}
2194			*out_num += ret;
2195		}
2196	} while ((i = next_desc(vq, &desc)) != -1);
2197	return 0;
2198}
2199
 2200/* This looks in the virtqueue for the first available buffer, and converts
2201 * it to an iovec for convenient access.  Since descriptors consist of some
2202 * number of output then some number of input descriptors, it's actually two
2203 * iovecs, but we pack them into one and note how many of each there were.
2204 *
2205 * This function returns the descriptor number found, or vq->num (which is
2206 * never a valid descriptor number) if none was found.  A negative code is
2207 * returned on error. */
2208int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2209		      struct iovec iov[], unsigned int iov_size,
2210		      unsigned int *out_num, unsigned int *in_num,
2211		      struct vhost_log *log, unsigned int *log_num)
2212{
2213	struct vring_desc desc;
2214	unsigned int i, head, found = 0;
2215	u16 last_avail_idx;
2216	__virtio16 avail_idx;
2217	__virtio16 ring_head;
2218	int ret, access;
2219
2220	/* Check it isn't doing very strange things with descriptor numbers. */
2221	last_avail_idx = vq->last_avail_idx;
2222
2223	if (vq->avail_idx == vq->last_avail_idx) {
2224		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
2225			vq_err(vq, "Failed to access avail idx at %p\n",
2226				&vq->avail->idx);
2227			return -EFAULT;
2228		}
2229		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2230
2231		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2232			vq_err(vq, "Guest moved used index from %u to %u",
2233				last_avail_idx, vq->avail_idx);
2234			return -EFAULT;
2235		}
2236
2237		/* If there's nothing new since last we looked, return
2238		 * invalid.
2239		 */
2240		if (vq->avail_idx == last_avail_idx)
2241			return vq->num;
2242
2243		/* Only get avail ring entries after they have been
2244		 * exposed by guest.
2245		 */
2246		smp_rmb();
2247	}
2248
2249	/* Grab the next descriptor number they're advertising, and increment
2250	 * the index we've seen. */
2251	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
2252		vq_err(vq, "Failed to read head: idx %d address %p\n",
2253		       last_avail_idx,
2254		       &vq->avail->ring[last_avail_idx % vq->num]);
2255		return -EFAULT;
2256	}
2257
2258	head = vhost16_to_cpu(vq, ring_head);
2259
2260	/* If their number is silly, that's an error. */
2261	if (unlikely(head >= vq->num)) {
2262		vq_err(vq, "Guest says index %u > %u is available",
2263		       head, vq->num);
2264		return -EINVAL;
2265	}
2266
2267	/* When we start there are neither input nor output descriptors. */
2268	*out_num = *in_num = 0;
2269	if (unlikely(log))
2270		*log_num = 0;
2271
2272	i = head;
2273	do {
2274		unsigned iov_count = *in_num + *out_num;
2275		if (unlikely(i >= vq->num)) {
2276			vq_err(vq, "Desc index is %u > %u, head = %u",
2277			       i, vq->num, head);
2278			return -EINVAL;
2279		}
2280		if (unlikely(++found > vq->num)) {
2281			vq_err(vq, "Loop detected: last one at %u "
2282			       "vq size %u head %u\n",
2283			       i, vq->num, head);
2284			return -EINVAL;
2285		}
2286		ret = vhost_get_desc(vq, &desc, i);
2287		if (unlikely(ret)) {
2288			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2289			       i, vq->desc + i);
2290			return -EFAULT;
2291		}
2292		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
2293			ret = get_indirect(vq, iov, iov_size,
2294					   out_num, in_num,
2295					   log, log_num, &desc);
2296			if (unlikely(ret < 0)) {
2297				if (ret != -EAGAIN)
2298					vq_err(vq, "Failure detected "
2299						"in indirect descriptor at idx %d\n", i);
2300				return ret;
2301			}
2302			continue;
2303		}
2304
2305		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2306			access = VHOST_ACCESS_WO;
2307		else
2308			access = VHOST_ACCESS_RO;
2309		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2310				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
2311				     iov_size - iov_count, access);
2312		if (unlikely(ret < 0)) {
2313			if (ret != -EAGAIN)
2314				vq_err(vq, "Translation failure %d descriptor idx %d\n",
2315					ret, i);
2316			return ret;
2317		}
2318		if (access == VHOST_ACCESS_WO) {
2319			/* If this is an input descriptor,
2320			 * increment that count. */
2321			*in_num += ret;
2322			if (unlikely(log && ret)) {
2323				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2324				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2325				++*log_num;
2326			}
2327		} else {
2328			/* If it's an output descriptor, they're all supposed
2329			 * to come before any input descriptors. */
2330			if (unlikely(*in_num)) {
2331				vq_err(vq, "Descriptor has out after in: "
2332				       "idx %d\n", i);
2333				return -EINVAL;
2334			}
2335			*out_num += ret;
2336		}
2337	} while ((i = next_desc(vq, &desc)) != -1);
2338
2339	/* On success, increment avail index. */
2340	vq->last_avail_idx++;
2341
2342	/* Assume notifications from guest are disabled at this point;
2343	 * if they aren't, we would need to update the avail_event index. */
2344	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2345	return head;
2346}
2347EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
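
/*
 * Illustrative only: a minimal sketch (not taken from any in-tree backend)
 * of how a vhost device might drive this API from its handler.  The names
 * iov, out, in, len and process() are hypothetical placeholders.
 * vhost_enable_notify() returning true means a buffer slipped in while
 * notifications were off, so the loop goes around once more:
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, iov, ARRAY_SIZE(iov),
 *					 &out, &in, NULL, NULL);
 *		if (unlikely(head < 0))
 *			break;
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		len = process(iov, out, in);
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */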
2348
2349/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
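/* Note: this only rewinds the shadow avail index, so it is only valid for
 * buffers that have not yet been added to the used ring. */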
2350void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2351{
2352	vq->last_avail_idx -= n;
2353}
2354EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2355
2356/* After we've used one of their buffers, we tell them about it.  We'll then
2357 * want to notify the guest, using eventfd. */
2358int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2359{
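	/* A used element is { id, len }: the head index of the descriptor
	 * chain we consumed and how many bytes we wrote into its writable
	 * (device-to-driver) buffers. */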
2360	struct vring_used_elem heads = {
2361		cpu_to_vhost32(vq, head),
2362		cpu_to_vhost32(vq, len)
2363	};
2364
2365	return vhost_add_used_n(vq, &heads, 1);
2366}
2367EXPORT_SYMBOL_GPL(vhost_add_used);
2368
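/* Write @count used elements into the used ring starting at last_used_idx.
 * vq->num is always a power of two, so the start slot is simply
 * last_used_idx & (vq->num - 1).  Only the shadow last_used_idx is advanced
 * here; the used index visible to the guest is pushed later by
 * vhost_add_used_n(). */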
2369static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2370			    struct vring_used_elem *heads,
2371			    unsigned count)
2372{
2373	struct vring_used_elem __user *used;
2374	u16 old, new;
2375	int start;
2376
2377	start = vq->last_used_idx & (vq->num - 1);
2378	used = vq->used->ring + start;
2379	if (vhost_put_used(vq, heads, start, count)) {
2380		vq_err(vq, "Failed to write used");
2381		return -EFAULT;
2382	}
2383	if (unlikely(vq->log_used)) {
2384		/* Make sure data is seen before log. */
2385		smp_wmb();
2386		/* Log used ring entry write. */
2387		log_used(vq, ((void __user *)used - (void __user *)vq->used),
2388			 count * sizeof *used);
2389	}
2390	old = vq->last_used_idx;
2391	new = (vq->last_used_idx += count);
2392	/* If the driver doesn't signal for a very long time, the used index
2393	 * might wrap around.  If that happens, invalidate the signalled_used
2394	 * index we stored.  TODO: make sure the driver signals at least once
2395	 * per 2^16 used entries and remove this. */
2396	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2397		vq->signalled_used_valid = false;
2398	return 0;
2399}
2400
2401/* After we've used one of their buffers, we tell them about it.  We'll then
2402 * want to notify the guest, using eventfd. */
2403int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2404		     unsigned count)
2405{
2406	int start, n, r;
2407
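	/* The used ring holds vq->num entries; if this batch would run past
	 * the end, write it in two chunks so each copy stays inside the
	 * ring. */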
2408	start = vq->last_used_idx & (vq->num - 1);
2409	n = vq->num - start;
2410	if (n < count) {
2411		r = __vhost_add_used_n(vq, heads, n);
2412		if (r < 0)
2413			return r;
2414		heads += n;
2415		count -= n;
2416	}
2417	r = __vhost_add_used_n(vq, heads, count);
2418
2419	/* Make sure buffer is written before we update index. */
2420	smp_wmb();
2421	if (vhost_put_used_idx(vq)) {
2422		vq_err(vq, "Failed to increment used idx");
2423		return -EFAULT;
2424	}
2425	if (unlikely(vq->log_used)) {
2426		/* Make sure used idx is seen before log. */
2427		smp_wmb();
2428		/* Log used index update. */
2429		log_used(vq, offsetof(struct vring_used, idx),
2430			 sizeof vq->used->idx);
2431		if (vq->log_ctx)
2432			eventfd_signal(vq->log_ctx, 1);
2433	}
2434	return r;
2435}
2436EXPORT_SYMBOL_GPL(vhost_add_used_n);
2437
2438static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2439{
2440	__u16 old, new;
2441	__virtio16 event;
2442	bool v;
2443	/* Flush out used index updates. This is paired
2444	 * with the barrier that the Guest executes when enabling
2445	 * interrupts. */
2446	smp_mb();
2447
2448	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2449	    unlikely(vq->avail_idx == vq->last_avail_idx))
2450		return true;
2451
2452	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2453		__virtio16 flags;
2454		if (vhost_get_avail_flags(vq, &flags)) {
2455			vq_err(vq, "Failed to get flags");
2456			return true;
2457		}
2458		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2459	}
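	/* Event index mode: the guest publishes a used_event index telling us
	 * after which used entry it wants an interrupt.  Remember the used
	 * index at the last signal (signalled_used) and signal only if
	 * used_event falls inside the window of entries added since then,
	 * i.e. vring_need_event(event, new, old) ==
	 *	(u16)(new - event - 1) < (u16)(new - old). */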
2460	old = vq->signalled_used;
2461	v = vq->signalled_used_valid;
2462	new = vq->signalled_used = vq->last_used_idx;
2463	vq->signalled_used_valid = true;
2464
2465	if (unlikely(!v))
2466		return true;
2467
2468	if (vhost_get_used_event(vq, &event)) {
2469		vq_err(vq, "Failed to get used event idx");
2470		return true;
2471	}
2472	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
2473}
2474
2475/* This actually signals the guest, using eventfd. */
2476void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2477{
2478	/* Signal the Guest, telling it we used something up. */
2479	if (vq->call_ctx && vhost_notify(dev, vq))
2480		eventfd_signal(vq->call_ctx, 1);
2481}
2482EXPORT_SYMBOL_GPL(vhost_signal);
2483
2484/* And here's the combo meal deal.  Supersize me! */
2485void vhost_add_used_and_signal(struct vhost_dev *dev,
2486			       struct vhost_virtqueue *vq,
2487			       unsigned int head, int len)
2488{
2489	vhost_add_used(vq, head, len);
2490	vhost_signal(dev, vq);
2491}
2492EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2493
2494/* multi-buffer version of vhost_add_used_and_signal */
2495void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2496				 struct vhost_virtqueue *vq,
2497				 struct vring_used_elem *heads, unsigned count)
2498{
2499	vhost_add_used_n(vq, heads, count);
2500	vhost_signal(dev, vq);
2501}
2502EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2503
2504/* Return true if we're sure that the available ring is empty */
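/* Note: may refresh the cached avail_idx from guest memory as a side effect. */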
2505bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2506{
2507	__virtio16 avail_idx;
2508	int r;
2509
2510	if (vq->avail_idx != vq->last_avail_idx)
2511		return false;
2512
2513	r = vhost_get_avail_idx(vq, &avail_idx);
2514	if (unlikely(r))
2515		return false;
2516	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2517
2518	return vq->avail_idx == vq->last_avail_idx;
2519}
2520EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2521
2522/* OK, now we need to know about added descriptors. */
2523bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2524{
2525	__virtio16 avail_idx;
2526	int r;
2527
2528	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2529		return false;
2530	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2531	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2532		r = vhost_update_used_flags(vq);
2533		if (r) {
2534			vq_err(vq, "Failed to enable notification at %p: %d\n",
2535			       &vq->used->flags, r);
2536			return false;
2537		}
2538	} else {
2539		r = vhost_update_avail_event(vq, vq->avail_idx);
2540		if (r) {
2541			vq_err(vq, "Failed to update avail event index at %p: %d\n",
2542			       vhost_avail_event(vq), r);
2543			return false;
2544		}
2545	}
2546	/* They could have slipped one in as we were doing that: make
2547	 * sure it's written, then check again. */
2548	smp_mb();
2549	r = vhost_get_avail_idx(vq, &avail_idx);
2550	if (r) {
2551		vq_err(vq, "Failed to check avail idx at %p: %d\n",
2552		       &vq->avail->idx, r);
2553		return false;
2554	}
2555
2556	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
2557}
2558EXPORT_SYMBOL_GPL(vhost_enable_notify);
2559
2560/* We don't need to be notified again. */
2561void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2562{
2563	int r;
2564
2565	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2566		return;
2567	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2568	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2569		r = vhost_update_used_flags(vq);
2570		if (r)
2571			vq_err(vq, "Failed to disable notification at %p: %d\n",
2572			       &vq->used->flags, r);
2573	}
2574}
2575EXPORT_SYMBOL_GPL(vhost_disable_notify);
2576
2577/* Create a new message. */
2578struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2579{
2580	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2581	if (!node)
2582		return NULL;
2583
2584	/* Make sure all padding within the structure is initialized. */
2585	memset(&node->msg, 0, sizeof node->msg);
2586	node->vq = vq;
2587	node->msg.type = type;
2588	return node;
2589}
2590EXPORT_SYMBOL_GPL(vhost_new_msg);
2591
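/* Queue a message (typically an IOTLB miss) for userspace and wake up
 * anything polling the vhost fd; userspace picks messages up by reading
 * the device and replies with IOTLB updates. */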
2592void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2593		       struct vhost_msg_node *node)
2594{
2595	spin_lock(&dev->iotlb_lock);
2596	list_add_tail(&node->node, head);
2597	spin_unlock(&dev->iotlb_lock);
2598
2599	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
2600}
2601EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2602
2603struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2604					 struct list_head *head)
2605{
2606	struct vhost_msg_node *node = NULL;
2607
2608	spin_lock(&dev->iotlb_lock);
2609	if (!list_empty(head)) {
2610		node = list_first_entry(head, struct vhost_msg_node,
2611					node);
2612		list_del(&node->node);
2613	}
2614	spin_unlock(&dev->iotlb_lock);
2615
2616	return node;
2617}
2618EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
2619
2620
2621static int __init vhost_init(void)
2622{
2623	return 0;
2624}
2625
2626static void __exit vhost_exit(void)
2627{
2628}
2629
2630module_init(vhost_init);
2631module_exit(vhost_exit);
2632
2633MODULE_VERSION("0.0.1");
2634MODULE_LICENSE("GPL v2");
2635MODULE_AUTHOR("Michael S. Tsirkin");
2636MODULE_DESCRIPTION("Host kernel accelerator for virtio");