   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Helpers for the host side of a virtio ring.
   4 *
   5 * Since these may be in userspace, we use (inline) accessors.
   6 */
   7#include <linux/compiler.h>
   8#include <linux/module.h>
   9#include <linux/vringh.h>
  10#include <linux/virtio_ring.h>
  11#include <linux/kernel.h>
  12#include <linux/ratelimit.h>
  13#include <linux/uaccess.h>
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
  17#include <linux/bvec.h>
  18#include <linux/highmem.h>
  19#include <linux/vhost_iotlb.h>
  20#endif
  21#include <uapi/linux/virtio_config.h>
  22
  23static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
  24{
  25	static DEFINE_RATELIMIT_STATE(vringh_rs,
  26				      DEFAULT_RATELIMIT_INTERVAL,
  27				      DEFAULT_RATELIMIT_BURST);
  28	if (__ratelimit(&vringh_rs)) {
  29		va_list ap;
  30		va_start(ap, fmt);
   31		printk(KERN_NOTICE "vringh: ");
  32		vprintk(fmt, ap);
  33		va_end(ap);
  34	}
  35}
  36
  37/* Returns vring->num if empty, -ve on error. */
  38static inline int __vringh_get_head(const struct vringh *vrh,
  39				    int (*getu16)(const struct vringh *vrh,
  40						  u16 *val, const __virtio16 *p),
  41				    u16 *last_avail_idx)
  42{
  43	u16 avail_idx, i, head;
  44	int err;
  45
  46	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
  47	if (err) {
  48		vringh_bad("Failed to access avail idx at %p",
  49			   &vrh->vring.avail->idx);
  50		return err;
  51	}
  52
  53	if (*last_avail_idx == avail_idx)
  54		return vrh->vring.num;
  55
  56	/* Only get avail ring entries after they have been exposed by guest. */
  57	virtio_rmb(vrh->weak_barriers);
  58
  59	i = *last_avail_idx & (vrh->vring.num - 1);
  60
  61	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
  62	if (err) {
  63		vringh_bad("Failed to read head: idx %d address %p",
  64			   *last_avail_idx, &vrh->vring.avail->ring[i]);
  65		return err;
  66	}
  67
  68	if (head >= vrh->vring.num) {
  69		vringh_bad("Guest says index %u > %u is available",
  70			   head, vrh->vring.num);
  71		return -EINVAL;
  72	}
  73
  74	(*last_avail_idx)++;
  75	return head;
  76}
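/*
 * Note on the index arithmetic above: vring.num is a power of 2 (enforced
 * in vringh_init_*), so "& (vrh->vring.num - 1)" is a cheap modulo.  The
 * avail index is a free-running u16: e.g. with num = 8 and
 * *last_avail_idx = 0xffff, the slot is 0xffff & 7 = 7, and the increment
 * wraps the counter to 0 in step with the guest's avail->idx.
 */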
  77
  78/**
  79 * vringh_kiov_advance - skip bytes from vring_kiov
  80 * @iov: an iov passed to vringh_getdesc_*() (updated as we consume)
  81 * @len: the maximum length to advance
  82 */
  83void vringh_kiov_advance(struct vringh_kiov *iov, size_t len)
  84{
  85	while (len && iov->i < iov->used) {
  86		size_t partlen = min(iov->iov[iov->i].iov_len, len);
  87
  88		iov->consumed += partlen;
  89		iov->iov[iov->i].iov_len -= partlen;
  90		iov->iov[iov->i].iov_base += partlen;
  91
  92		if (!iov->iov[iov->i].iov_len) {
  93			/* Fix up old iov element then increment. */
  94			iov->iov[iov->i].iov_len = iov->consumed;
  95			iov->iov[iov->i].iov_base -= iov->consumed;
  96
  97			iov->consumed = 0;
  98			iov->i++;
  99		}
 100
 101		len -= partlen;
 102	}
 103}
 104EXPORT_SYMBOL(vringh_kiov_advance);
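/*
 * Example (illustrative sketch, not from the kernel tree): skipping a
 * fixed-size header before copying the payload.  "struct my_hdr" and
 * "buf" are hypothetical.
 *
 *	struct vringh_kiov riov;	// filled in by vringh_getdesc_kern()
 *	vringh_kiov_advance(&riov, sizeof(struct my_hdr));
 *	err = vringh_iov_pull_kern(&riov, buf, buflen);
 *
 * The "fix up" above restores a fully-consumed element's original
 * iov_base/iov_len from ->consumed, so the kiov can later be rewound
 * (see vringh_kiov_reset() in <linux/vringh.h>) and reused.
 */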
 105
 106/* Copy some bytes to/from the iovec.  Returns num copied. */
 107static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
 108				      struct vringh_kiov *iov,
 109				      void *ptr, size_t len,
 110				      int (*xfer)(const struct vringh *vrh,
 111						  void *addr, void *ptr,
 112						  size_t len))
 113{
 114	int err, done = 0;
 115
 116	while (len && iov->i < iov->used) {
 117		size_t partlen;
 118
 119		partlen = min(iov->iov[iov->i].iov_len, len);
 120		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
 121		if (err)
 122			return err;
 123		done += partlen;
 124		len -= partlen;
 125		ptr += partlen;
 126		iov->consumed += partlen;
 127		iov->iov[iov->i].iov_len -= partlen;
 128		iov->iov[iov->i].iov_base += partlen;
 129
 130		if (!iov->iov[iov->i].iov_len) {
 131			/* Fix up old iov element then increment. */
 132			iov->iov[iov->i].iov_len = iov->consumed;
 133			iov->iov[iov->i].iov_base -= iov->consumed;
 134
 135			iov->consumed = 0;
 136			iov->i++;
 137		}
 138	}
 139	return done;
 140}
 141
 142/* May reduce *len if range is shorter. */
 143static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
 144			       struct vringh_range *range,
 145			       bool (*getrange)(struct vringh *,
 146						u64, struct vringh_range *))
 147{
 148	if (addr < range->start || addr > range->end_incl) {
 149		if (!getrange(vrh, addr, range))
 150			return false;
 151	}
 152	BUG_ON(addr < range->start || addr > range->end_incl);
 153
 154	/* To end of memory? */
 155	if (unlikely(addr + *len == 0)) {
 156		if (range->end_incl == -1ULL)
 157			return true;
 158		goto truncate;
 159	}
 160
 161	/* Otherwise, don't wrap. */
 162	if (addr + *len < addr) {
 163		vringh_bad("Wrapping descriptor %zu@0x%llx",
 164			   *len, (unsigned long long)addr);
 165		return false;
 166	}
 167
 168	if (unlikely(addr + *len - 1 > range->end_incl))
 169		goto truncate;
 170	return true;
 171
 172truncate:
 173	*len = range->end_incl + 1 - addr;
 174	return true;
 175}
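/*
 * Worked example of the truncate path: given a range covering
 * [0x1000, 0x1fff] (end_incl = 0x1fff), a descriptor at addr = 0x1f00
 * with *len = 0x400 runs past the end, so *len becomes
 * 0x1fff + 1 - 0x1f00 = 0x100; the caller (__vringh_iov) then loops to
 * re-check the remainder against the next range.
 */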
 176
 177static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
 178				  struct vringh_range *range,
 179				  bool (*getrange)(struct vringh *,
 180						   u64, struct vringh_range *))
 181{
 182	return true;
 183}
 184
 185/* No reason for this code to be inline. */
 186static int move_to_indirect(const struct vringh *vrh,
 187			    int *up_next, u16 *i, void *addr,
 188			    const struct vring_desc *desc,
 189			    struct vring_desc **descs, int *desc_max)
 190{
 191	u32 len;
 192
 193	/* Indirect tables can't have indirect. */
 194	if (*up_next != -1) {
 195		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
 196		return -EINVAL;
 197	}
 198
 199	len = vringh32_to_cpu(vrh, desc->len);
 200	if (unlikely(len % sizeof(struct vring_desc))) {
  201		vringh_bad("Strange indirect len %u", len);
 202		return -EINVAL;
 203	}
 204
 205	/* We will check this when we follow it! */
 206	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
 207		*up_next = vringh16_to_cpu(vrh, desc->next);
 208	else
 209		*up_next = -2;
 210	*descs = addr;
 211	*desc_max = len / sizeof(struct vring_desc);
 212
 213	/* Now, start at the first indirect. */
 214	*i = 0;
 215	return 0;
 216}
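/*
 * Layout reminder (sketch): a driver publishes an N-entry chain through a
 * single ring slot by writing N struct vring_desc entries into a separate
 * table and pointing one descriptor at it with VRING_DESC_F_INDIRECT and
 * len = N * sizeof(struct vring_desc); the walk above then restarts at
 * *i = 0 with desc_max = N inside that table.
 */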
 217
 218static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
 219{
 220	struct kvec *new;
 221	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;
 222
 223	if (new_num < 8)
 224		new_num = 8;
 225
 226	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
 227	if (flag)
 228		new = krealloc_array(iov->iov, new_num,
 229				     sizeof(struct iovec), gfp);
 230	else {
 231		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
 232		if (new) {
 233			memcpy(new, iov->iov,
 234			       iov->max_num * sizeof(struct iovec));
 235			flag = VRINGH_IOV_ALLOCATED;
 236		}
 237	}
 238	if (!new)
 239		return -ENOMEM;
 240	iov->iov = new;
 241	iov->max_num = (new_num | flag);
 242	return 0;
 243}
 244
 245static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
 246				       struct vring_desc **descs, int *desc_max)
 247{
 248	u16 i = *up_next;
 249
 250	*up_next = -1;
 251	*descs = vrh->vring.desc;
 252	*desc_max = vrh->vring.num;
 253	return i;
 254}
 255
 256static int slow_copy(struct vringh *vrh, void *dst, const void *src,
 257		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
 258				    struct vringh_range *range,
 259				    bool (*getrange)(struct vringh *vrh,
 260						     u64,
 261						     struct vringh_range *)),
 262		     bool (*getrange)(struct vringh *vrh,
 263				      u64 addr,
 264				      struct vringh_range *r),
 265		     struct vringh_range *range,
 266		     int (*copy)(const struct vringh *vrh,
 267				 void *dst, const void *src, size_t len))
 268{
 269	size_t part, len = sizeof(struct vring_desc);
 270
 271	do {
 272		u64 addr;
 273		int err;
 274
 275		part = len;
 276		addr = (u64)(unsigned long)src - range->offset;
 277
 278		if (!rcheck(vrh, addr, &part, range, getrange))
 279			return -EINVAL;
 280
 281		err = copy(vrh, dst, src, part);
 282		if (err)
 283			return err;
 284
 285		dst += part;
 286		src += part;
 287		len -= part;
 288	} while (len);
 289	return 0;
 290}
 291
 292static inline int
 293__vringh_iov(struct vringh *vrh, u16 i,
 294	     struct vringh_kiov *riov,
 295	     struct vringh_kiov *wiov,
 296	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
 297			    struct vringh_range *range,
 298			    bool (*getrange)(struct vringh *, u64,
 299					     struct vringh_range *)),
 300	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
 301	     gfp_t gfp,
 302	     int (*copy)(const struct vringh *vrh,
 303			 void *dst, const void *src, size_t len))
 304{
 305	int err, count = 0, indirect_count = 0, up_next, desc_max;
 306	struct vring_desc desc, *descs;
 307	struct vringh_range range = { -1ULL, 0 }, slowrange;
 308	bool slow = false;
 309
 310	/* We start traversing vring's descriptor table. */
 311	descs = vrh->vring.desc;
 312	desc_max = vrh->vring.num;
 313	up_next = -1;
 314
 315	/* You must want something! */
 316	if (WARN_ON(!riov && !wiov))
 317		return -EINVAL;
 318
 319	if (riov)
 320		riov->i = riov->used = riov->consumed = 0;
 321	if (wiov)
 322		wiov->i = wiov->used = wiov->consumed = 0;
 323
 324	for (;;) {
 325		void *addr;
 326		struct vringh_kiov *iov;
 327		size_t len;
 328
 329		if (unlikely(slow))
 330			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
 331					&slowrange, copy);
 332		else
 333			err = copy(vrh, &desc, &descs[i], sizeof(desc));
 334		if (unlikely(err))
 335			goto fail;
 336
 337		if (unlikely(desc.flags &
 338			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
 339			u64 a = vringh64_to_cpu(vrh, desc.addr);
 340
 341			/* Make sure it's OK, and get offset. */
 342			len = vringh32_to_cpu(vrh, desc.len);
 343			if (!rcheck(vrh, a, &len, &range, getrange)) {
 344				err = -EINVAL;
 345				goto fail;
 346			}
 347
 348			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
 349				slow = true;
 350				/* We need to save this range to use offset */
 351				slowrange = range;
 352			}
 353
 354			addr = (void *)(long)(a + range.offset);
 355			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
 356					       &descs, &desc_max);
 357			if (err)
 358				goto fail;
 359			continue;
 360		}
 361
 362		if (up_next == -1)
 363			count++;
 364		else
 365			indirect_count++;
 366
 367		if (count > vrh->vring.num || indirect_count > desc_max) {
 368			vringh_bad("Descriptor loop in %p", descs);
 369			err = -ELOOP;
 370			goto fail;
 371		}
 372
 373		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
 374			iov = wiov;
 375		else {
 376			iov = riov;
 377			if (unlikely(wiov && wiov->used)) {
 378				vringh_bad("Readable desc %p after writable",
 379					   &descs[i]);
 380				err = -EINVAL;
 381				goto fail;
 382			}
 383		}
 384
 385		if (!iov) {
 386			vringh_bad("Unexpected %s desc",
 387				   !wiov ? "writable" : "readable");
 388			err = -EPROTO;
 389			goto fail;
 390		}
 391
 392	again:
 393		/* Make sure it's OK, and get offset. */
 394		len = vringh32_to_cpu(vrh, desc.len);
 395		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
 396			    getrange)) {
 397			err = -EINVAL;
 398			goto fail;
 399		}
 400		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
 401					       range.offset);
 402
 403		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
 404			err = resize_iovec(iov, gfp);
 405			if (err)
 406				goto fail;
 407		}
 408
 409		iov->iov[iov->used].iov_base = addr;
 410		iov->iov[iov->used].iov_len = len;
 411		iov->used++;
 412
 413		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
 414			desc.len = cpu_to_vringh32(vrh,
 415				   vringh32_to_cpu(vrh, desc.len) - len);
 416			desc.addr = cpu_to_vringh64(vrh,
 417				    vringh64_to_cpu(vrh, desc.addr) + len);
 418			goto again;
 419		}
 420
 421		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
 422			i = vringh16_to_cpu(vrh, desc.next);
 423		} else {
 424			/* Just in case we need to finish traversing above. */
 425			if (unlikely(up_next > 0)) {
 426				i = return_from_indirect(vrh, &up_next,
 427							 &descs, &desc_max);
 428				slow = false;
 429				indirect_count = 0;
 430			} else
 431				break;
 432		}
 433
 434		if (i >= desc_max) {
 435			vringh_bad("Chained index %u > %u", i, desc_max);
 436			err = -EINVAL;
 437			goto fail;
 438		}
 439	}
 440
 441	return 0;
 442
 443fail:
 444	return err;
 445}
 446
 447static inline int __vringh_complete(struct vringh *vrh,
 448				    const struct vring_used_elem *used,
 449				    unsigned int num_used,
 450				    int (*putu16)(const struct vringh *vrh,
 451						  __virtio16 *p, u16 val),
 452				    int (*putused)(const struct vringh *vrh,
 453						   struct vring_used_elem *dst,
 454						   const struct vring_used_elem
 455						   *src, unsigned num))
 456{
 457	struct vring_used *used_ring;
 458	int err;
 459	u16 used_idx, off;
 460
 461	used_ring = vrh->vring.used;
 462	used_idx = vrh->last_used_idx + vrh->completed;
 463
 464	off = used_idx % vrh->vring.num;
 465
 466	/* Compiler knows num_used == 1 sometimes, hence extra check */
 467	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
 468		u16 part = vrh->vring.num - off;
 469		err = putused(vrh, &used_ring->ring[off], used, part);
 470		if (!err)
 471			err = putused(vrh, &used_ring->ring[0], used + part,
 472				      num_used - part);
 473	} else
 474		err = putused(vrh, &used_ring->ring[off], used, num_used);
 475
 476	if (err) {
 477		vringh_bad("Failed to write %u used entries %u at %p",
 478			   num_used, off, &used_ring->ring[off]);
 479		return err;
 480	}
 481
 482	/* Make sure buffer is written before we update index. */
 483	virtio_wmb(vrh->weak_barriers);
 484
 485	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
 486	if (err) {
 487		vringh_bad("Failed to update used index at %p",
 488			   &vrh->vring.used->idx);
 489		return err;
 490	}
 491
 492	vrh->completed += num_used;
 493	return 0;
 494}
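/*
 * Worked example of the split write above: with vring.num = 8,
 * used_idx = 14 (so off = 6) and num_used = 4, the first putused() fills
 * ring[6] and ring[7] (part = 2) and the second fills ring[0] and
 * ring[1]; used->idx then becomes 18, wrapping only as a u16, never
 * modulo num.
 */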
 495
 496
 497static inline int __vringh_need_notify(struct vringh *vrh,
 498				       int (*getu16)(const struct vringh *vrh,
 499						     u16 *val,
 500						     const __virtio16 *p))
 501{
 502	bool notify;
 503	u16 used_event;
 504	int err;
 505
 506	/* Flush out used index update. This is paired with the
 507	 * barrier that the Guest executes when enabling
 508	 * interrupts. */
 509	virtio_mb(vrh->weak_barriers);
 510
 511	/* Old-style, without event indices. */
 512	if (!vrh->event_indices) {
 513		u16 flags;
 514		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
 515		if (err) {
 516			vringh_bad("Failed to get flags at %p",
 517				   &vrh->vring.avail->flags);
 518			return err;
 519		}
 520		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
 521	}
 522
 523	/* Modern: we know when other side wants to know. */
 524	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
 525	if (err) {
 526		vringh_bad("Failed to get used event idx at %p",
 527			   &vring_used_event(&vrh->vring));
 528		return err;
 529	}
 530
 531	/* Just in case we added so many that we wrap. */
 532	if (unlikely(vrh->completed > 0xffff))
 533		notify = true;
 534	else
 535		notify = vring_need_event(used_event,
 536					  vrh->last_used_idx + vrh->completed,
 537					  vrh->last_used_idx);
 538
 539	vrh->last_used_idx += vrh->completed;
 540	vrh->completed = 0;
 541	return notify;
 542}
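/*
 * vring_need_event(event, new, old) tests (u16)(new - event - 1) <
 * (u16)(new - old).  Example: last_used_idx = 10, completed = 3 and
 * used_event = 11 gives (13 - 11 - 1) = 1 < (13 - 10) = 3, so the other
 * side asked for a notification within (10, 13] and we return 1.
 */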
 543
 544static inline bool __vringh_notify_enable(struct vringh *vrh,
 545					  int (*getu16)(const struct vringh *vrh,
 546							u16 *val, const __virtio16 *p),
 547					  int (*putu16)(const struct vringh *vrh,
 548							__virtio16 *p, u16 val))
 549{
 550	u16 avail;
 551
 552	if (!vrh->event_indices) {
 553		/* Old-school; update flags. */
 554		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
 555			vringh_bad("Clearing used flags %p",
 556				   &vrh->vring.used->flags);
 557			return true;
 558		}
 559	} else {
 560		if (putu16(vrh, &vring_avail_event(&vrh->vring),
 561			   vrh->last_avail_idx) != 0) {
 562			vringh_bad("Updating avail event index %p",
 563				   &vring_avail_event(&vrh->vring));
 564			return true;
 565		}
 566	}
 567
 568	/* They could have slipped one in as we were doing that: make
 569	 * sure it's written, then check again. */
 570	virtio_mb(vrh->weak_barriers);
 571
 572	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
 573		vringh_bad("Failed to check avail idx at %p",
 574			   &vrh->vring.avail->idx);
 575		return true;
 576	}
 577
 578	/* This is unlikely, so we just leave notifications enabled
 579	 * (if we're using event_indices, we'll only get one
 580	 * notification anyway). */
 581	return avail == vrh->last_avail_idx;
 582}
 583
 584static inline void __vringh_notify_disable(struct vringh *vrh,
 585					   int (*putu16)(const struct vringh *vrh,
 586							 __virtio16 *p, u16 val))
 587{
 588	if (!vrh->event_indices) {
 589		/* Old-school; update flags. */
 590		if (putu16(vrh, &vrh->vring.used->flags,
 591			   VRING_USED_F_NO_NOTIFY)) {
 592			vringh_bad("Setting used flags %p",
 593				   &vrh->vring.used->flags);
 594		}
 595	}
 596}
 597
 598/* Userspace access helpers: in this case, addresses are really userspace. */
 599static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
 600{
 601	__virtio16 v = 0;
 602	int rc = get_user(v, (__force __virtio16 __user *)p);
 603	*val = vringh16_to_cpu(vrh, v);
 604	return rc;
 605}
 606
 607static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
 608{
 609	__virtio16 v = cpu_to_vringh16(vrh, val);
 610	return put_user(v, (__force __virtio16 __user *)p);
 611}
 612
 613static inline int copydesc_user(const struct vringh *vrh,
 614				void *dst, const void *src, size_t len)
 615{
 616	return copy_from_user(dst, (__force void __user *)src, len) ?
 617		-EFAULT : 0;
 618}
 619
 620static inline int putused_user(const struct vringh *vrh,
 621			       struct vring_used_elem *dst,
 622			       const struct vring_used_elem *src,
 623			       unsigned int num)
 624{
 625	return copy_to_user((__force void __user *)dst, src,
 626			    sizeof(*dst) * num) ? -EFAULT : 0;
 627}
 628
 629static inline int xfer_from_user(const struct vringh *vrh, void *src,
 630				 void *dst, size_t len)
 631{
 632	return copy_from_user(dst, (__force void __user *)src, len) ?
 633		-EFAULT : 0;
 634}
 635
 636static inline int xfer_to_user(const struct vringh *vrh,
 637			       void *dst, void *src, size_t len)
 638{
 639	return copy_to_user((__force void __user *)dst, src, len) ?
 640		-EFAULT : 0;
 641}
 642
 643/**
 644 * vringh_init_user - initialize a vringh for a userspace vring.
 645 * @vrh: the vringh to initialize.
 646 * @features: the feature bits for this ring.
 647 * @num: the number of elements.
 648 * @weak_barriers: true if we only need memory barriers, not I/O.
 649 * @desc: the userspace descriptor pointer.
 650 * @avail: the userspace avail pointer.
 651 * @used: the userspace used pointer.
 652 *
 653 * Returns an error if num is invalid: you should check pointers
 654 * yourself!
 655 */
 656int vringh_init_user(struct vringh *vrh, u64 features,
 657		     unsigned int num, bool weak_barriers,
 658		     vring_desc_t __user *desc,
 659		     vring_avail_t __user *avail,
 660		     vring_used_t __user *used)
 661{
 662	/* Sane power of 2 please! */
 663	if (!num || num > 0xffff || (num & (num - 1))) {
 664		vringh_bad("Bad ring size %u", num);
 665		return -EINVAL;
 666	}
 667
 668	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
 669	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
 670	vrh->weak_barriers = weak_barriers;
 671	vrh->completed = 0;
 672	vrh->last_avail_idx = 0;
 673	vrh->last_used_idx = 0;
 674	vrh->vring.num = num;
 675	/* vring expects kernel addresses, but only used via accessors. */
 676	vrh->vring.desc = (__force struct vring_desc *)desc;
 677	vrh->vring.avail = (__force struct vring_avail *)avail;
 678	vrh->vring.used = (__force struct vring_used *)used;
 679	return 0;
 680}
 681EXPORT_SYMBOL(vringh_init_user);
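/*
 * Example (illustrative sketch, error handling trimmed): a host thread
 * serving a ring whose tables live in its own userspace mapping of guest
 * memory.  "guest_base"/"guest_size" and the table pointers are
 * hypothetical values the caller must provide.
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		r->start = 0;
 *		r->end_incl = guest_size - 1;
 *		r->offset = (u64)(unsigned long)guest_base;
 *		return addr <= r->end_incl;
 *	}
 *
 *	err = vringh_init_user(&vrh, features, num, true, desc, avail, used);
 */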
 682
 683/**
 684 * vringh_getdesc_user - get next available descriptor from userspace ring.
 685 * @vrh: the userspace vring.
 686 * @riov: where to put the readable descriptors (or NULL)
 687 * @wiov: where to put the writable descriptors (or NULL)
 688 * @getrange: function to call to check ranges.
 689 * @head: head index we received, for passing to vringh_complete_user().
 690 *
 691 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 692 *
 693 * Note that on error return, you can tell the difference between an
 694 * invalid ring and a single invalid descriptor: in the former case,
 695 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 696 * descriptor, but there's not much you can do with an invalid ring.
 697 *
 698 * Note that you can reuse riov and wiov with subsequent calls. Content is
 699 * overwritten and memory reallocated if more space is needed.
  700 * When you no longer need riov and wiov, you should clean them up by
  701 * calling vringh_iov_cleanup() to release the memory, even on error!
 702 */
 703int vringh_getdesc_user(struct vringh *vrh,
 704			struct vringh_iov *riov,
 705			struct vringh_iov *wiov,
 706			bool (*getrange)(struct vringh *vrh,
 707					 u64 addr, struct vringh_range *r),
 708			u16 *head)
 709{
 710	int err;
 711
 712	*head = vrh->vring.num;
 713	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
 714	if (err < 0)
 715		return err;
 716
 717	/* Empty... */
 718	if (err == vrh->vring.num)
 719		return 0;
 720
  721	/* We need the layouts to be identical for this to work */
 722	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
 723	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
 724		     offsetof(struct vringh_iov, iov));
 725	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
 726		     offsetof(struct vringh_iov, i));
 727	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
 728		     offsetof(struct vringh_iov, used));
 729	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
 730		     offsetof(struct vringh_iov, max_num));
 731	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
 732	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
 733		     offsetof(struct kvec, iov_base));
 734	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
 735		     offsetof(struct kvec, iov_len));
 736	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
 737		     != sizeof(((struct kvec *)NULL)->iov_base));
 738	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
 739		     != sizeof(((struct kvec *)NULL)->iov_len));
 740
 741	*head = err;
 742	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
 743			   (struct vringh_kiov *)wiov,
 744			   range_check, getrange, GFP_KERNEL, copydesc_user);
 745	if (err)
 746		return err;
 747
 748	return 1;
 749}
 750EXPORT_SYMBOL(vringh_getdesc_user);
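/*
 * Example (illustrative sketch, error handling trimmed): draining readable
 * buffers from the ring.  "my_getrange" is the caller's range callback as
 * above and "buf" a hypothetical bounce buffer.
 *
 *	struct vringh_iov riov;
 *	struct iovec stack_iov[8];
 *	u16 head;
 *
 *	vringh_iov_init(&riov, stack_iov, ARRAY_SIZE(stack_iov));
 *	while (vringh_getdesc_user(&vrh, &riov, NULL, my_getrange, &head) == 1) {
 *		ssize_t got = vringh_iov_pull_user(&riov, buf, sizeof(buf));
 *		if (got < 0)
 *			break;
 *		vringh_complete_user(&vrh, head, 0);
 *	}
 *	if (vringh_need_notify_user(&vrh) > 0)
 *		notify_other_side();	// hypothetical eventfd kick or similar
 *	vringh_iov_cleanup(&riov);
 */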
 751
 752/**
 753 * vringh_iov_pull_user - copy bytes from vring_iov.
 754 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 755 * @dst: the place to copy.
 756 * @len: the maximum length to copy.
 757 *
 758 * Returns the bytes copied <= len or a negative errno.
 759 */
 760ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
 761{
 762	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
 763			       dst, len, xfer_from_user);
 764}
 765EXPORT_SYMBOL(vringh_iov_pull_user);
 766
 767/**
 768 * vringh_iov_push_user - copy bytes into vring_iov.
 769 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 770 * @src: the place to copy from.
 771 * @len: the maximum length to copy.
 772 *
 773 * Returns the bytes copied <= len or a negative errno.
 774 */
 775ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
 776			     const void *src, size_t len)
 777{
 778	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
 779			       (void *)src, len, xfer_to_user);
 780}
 781EXPORT_SYMBOL(vringh_iov_push_user);
 782
 783/**
 784 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 785 * @vrh: the vring.
  786 * @num: the number of descriptors to put back (i.e. how many
  787 *	 vringh_getdesc_user() calls to undo).
  788 *
  789 * The next vringh_getdesc_user() will return the old descriptor(s) again.
 790 */
 791void vringh_abandon_user(struct vringh *vrh, unsigned int num)
 792{
 793	/* We only update vring_avail_event(vr) when we want to be notified,
 794	 * so we haven't changed that yet. */
 795	vrh->last_avail_idx -= num;
 796}
 797EXPORT_SYMBOL(vringh_abandon_user);
 798
 799/**
  800 * vringh_complete_user - we've finished with a descriptor: publish it.
 801 * @vrh: the vring.
 802 * @head: the head as filled in by vringh_getdesc_user.
 803 * @len: the length of data we have written.
 804 *
 805 * You should check vringh_need_notify_user() after one or more calls
 806 * to this function.
 807 */
 808int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
 809{
 810	struct vring_used_elem used;
 811
 812	used.id = cpu_to_vringh32(vrh, head);
 813	used.len = cpu_to_vringh32(vrh, len);
 814	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
 815}
 816EXPORT_SYMBOL(vringh_complete_user);
 817
 818/**
 819 * vringh_complete_multi_user - we've finished with many descriptors.
 820 * @vrh: the vring.
 821 * @used: the head, length pairs.
 822 * @num_used: the number of used elements.
 823 *
 824 * You should check vringh_need_notify_user() after one or more calls
 825 * to this function.
 826 */
 827int vringh_complete_multi_user(struct vringh *vrh,
 828			       const struct vring_used_elem used[],
 829			       unsigned num_used)
 830{
 831	return __vringh_complete(vrh, used, num_used,
 832				 putu16_user, putused_user);
 833}
 834EXPORT_SYMBOL(vringh_complete_multi_user);
 835
 836/**
 837 * vringh_notify_enable_user - we want to know if something changes.
 838 * @vrh: the vring.
 839 *
 840 * This always enables notifications, but returns false if there are
 841 * now more buffers available in the vring.
 842 */
 843bool vringh_notify_enable_user(struct vringh *vrh)
 844{
 845	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
 846}
 847EXPORT_SYMBOL(vringh_notify_enable_user);
 848
 849/**
 850 * vringh_notify_disable_user - don't tell us if something changes.
 851 * @vrh: the vring.
 852 *
 853 * This is our normal running state: we disable and then only enable when
 854 * we're going to sleep.
 855 */
 856void vringh_notify_disable_user(struct vringh *vrh)
 857{
 858	__vringh_notify_disable(vrh, putu16_user);
 859}
 860EXPORT_SYMBOL(vringh_notify_disable_user);
 861
 862/**
 863 * vringh_need_notify_user - must we tell the other side about used buffers?
 864 * @vrh: the vring we've called vringh_complete_user() on.
 865 *
 866 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 867 */
 868int vringh_need_notify_user(struct vringh *vrh)
 869{
 870	return __vringh_need_notify(vrh, getu16_user);
 871}
 872EXPORT_SYMBOL(vringh_need_notify_user);
 873
 874/* Kernelspace access helpers. */
 875static inline int getu16_kern(const struct vringh *vrh,
 876			      u16 *val, const __virtio16 *p)
 877{
 878	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
 879	return 0;
 880}
 881
 882static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
 883{
 884	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
 885	return 0;
 886}
 887
 888static inline int copydesc_kern(const struct vringh *vrh,
 889				void *dst, const void *src, size_t len)
 890{
 891	memcpy(dst, src, len);
 892	return 0;
 893}
 894
 895static inline int putused_kern(const struct vringh *vrh,
 896			       struct vring_used_elem *dst,
 897			       const struct vring_used_elem *src,
 898			       unsigned int num)
 899{
 900	memcpy(dst, src, num * sizeof(*dst));
 901	return 0;
 902}
 903
 904static inline int xfer_kern(const struct vringh *vrh, void *src,
 905			    void *dst, size_t len)
 906{
 907	memcpy(dst, src, len);
 908	return 0;
 909}
 910
 911static inline int kern_xfer(const struct vringh *vrh, void *dst,
 912			    void *src, size_t len)
 913{
 914	memcpy(dst, src, len);
 915	return 0;
 916}
 917
 918/**
 919 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 920 * @vrh: the vringh to initialize.
 921 * @features: the feature bits for this ring.
 922 * @num: the number of elements.
 923 * @weak_barriers: true if we only need memory barriers, not I/O.
  924 * @desc: the kernelspace descriptor pointer.
  925 * @avail: the kernelspace avail pointer.
  926 * @used: the kernelspace used pointer.
 927 *
 928 * Returns an error if num is invalid.
 929 */
 930int vringh_init_kern(struct vringh *vrh, u64 features,
 931		     unsigned int num, bool weak_barriers,
 932		     struct vring_desc *desc,
 933		     struct vring_avail *avail,
 934		     struct vring_used *used)
 935{
 936	/* Sane power of 2 please! */
 937	if (!num || num > 0xffff || (num & (num - 1))) {
 938		vringh_bad("Bad ring size %u", num);
 939		return -EINVAL;
 940	}
 941
 942	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
 943	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
 944	vrh->weak_barriers = weak_barriers;
 945	vrh->completed = 0;
 946	vrh->last_avail_idx = 0;
 947	vrh->last_used_idx = 0;
 948	vrh->vring.num = num;
 949	vrh->vring.desc = desc;
 950	vrh->vring.avail = avail;
 951	vrh->vring.used = used;
 952	return 0;
 953}
 954EXPORT_SYMBOL(vringh_init_kern);
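/*
 * Example (illustrative sketch): initializing against a vring laid out in
 * kernel memory with the standard helper from <uapi/linux/virtio_ring.h>;
 * "queue_size" and "pages" are hypothetical.
 *
 *	struct vring vring;
 *	vring_init(&vring, queue_size, pages, PAGE_SIZE);
 *	err = vringh_init_kern(&vrh, features, queue_size, true,
 *			       vring.desc, vring.avail, vring.used);
 */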
 955
 956/**
 957 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 958 * @vrh: the kernelspace vring.
 959 * @riov: where to put the readable descriptors (or NULL)
 960 * @wiov: where to put the writable descriptors (or NULL)
 961 * @head: head index we received, for passing to vringh_complete_kern().
 962 * @gfp: flags for allocating larger riov/wiov.
 963 *
 964 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 965 *
 966 * Note that on error return, you can tell the difference between an
 967 * invalid ring and a single invalid descriptor: in the former case,
 968 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 969 * descriptor, but there's not much you can do with an invalid ring.
 970 *
 971 * Note that you can reuse riov and wiov with subsequent calls. Content is
 972 * overwritten and memory reallocated if more space is needed.
  973 * When you no longer need riov and wiov, you should clean them up by
  974 * calling vringh_kiov_cleanup() to release the memory, even on error!
 975 */
 976int vringh_getdesc_kern(struct vringh *vrh,
 977			struct vringh_kiov *riov,
 978			struct vringh_kiov *wiov,
 979			u16 *head,
 980			gfp_t gfp)
 981{
 982	int err;
 983
 984	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
 985	if (err < 0)
 986		return err;
 987
 988	/* Empty... */
 989	if (err == vrh->vring.num)
 990		return 0;
 991
 992	*head = err;
 993	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
 994			   gfp, copydesc_kern);
 995	if (err)
 996		return err;
 997
 998	return 1;
 999}
1000EXPORT_SYMBOL(vringh_getdesc_kern);
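/*
 * Example (illustrative sketch, error handling trimmed), following the
 * pattern the comment above describes; "buf" is hypothetical:
 *
 *	struct vringh_kiov riov;
 *	struct kvec stack_kvec[8];
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, stack_kvec, ARRAY_SIZE(stack_kvec));
 *	while (vringh_getdesc_kern(&vrh, &riov, NULL, &head, GFP_KERNEL) == 1) {
 *		ssize_t got = vringh_iov_pull_kern(&riov, buf, sizeof(buf));
 *		if (got < 0)
 *			break;
 *		vringh_complete_kern(&vrh, head, 0);
 *	}
 *	vringh_kiov_cleanup(&riov);
 */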
1001
1002/**
1003 * vringh_iov_pull_kern - copy bytes from vring_iov.
1004 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
1005 * @dst: the place to copy.
1006 * @len: the maximum length to copy.
1007 *
1008 * Returns the bytes copied <= len or a negative errno.
1009 */
1010ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
1011{
1012	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
1013}
1014EXPORT_SYMBOL(vringh_iov_pull_kern);
1015
1016/**
1017 * vringh_iov_push_kern - copy bytes into vring_iov.
1018 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
1019 * @src: the place to copy from.
1020 * @len: the maximum length to copy.
1021 *
1022 * Returns the bytes copied <= len or a negative errno.
1023 */
1024ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
1025			     const void *src, size_t len)
1026{
1027	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
1028}
1029EXPORT_SYMBOL(vringh_iov_push_kern);
1030
1031/**
1032 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
1033 * @vrh: the vring.
 1034 * @num: the number of descriptors to put back (i.e. how many
 1035 *	 vringh_getdesc_kern() calls to undo).
 1036 *
 1037 * The next vringh_getdesc_kern() will return the old descriptor(s) again.
1038 */
1039void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
1040{
1041	/* We only update vring_avail_event(vr) when we want to be notified,
1042	 * so we haven't changed that yet. */
1043	vrh->last_avail_idx -= num;
1044}
1045EXPORT_SYMBOL(vringh_abandon_kern);
1046
1047/**
 1048 * vringh_complete_kern - we've finished with a descriptor: publish it.
1049 * @vrh: the vring.
1050 * @head: the head as filled in by vringh_getdesc_kern.
1051 * @len: the length of data we have written.
1052 *
1053 * You should check vringh_need_notify_kern() after one or more calls
1054 * to this function.
1055 */
1056int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
1057{
1058	struct vring_used_elem used;
1059
1060	used.id = cpu_to_vringh32(vrh, head);
1061	used.len = cpu_to_vringh32(vrh, len);
1062
1063	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
1064}
1065EXPORT_SYMBOL(vringh_complete_kern);
1066
1067/**
1068 * vringh_notify_enable_kern - we want to know if something changes.
1069 * @vrh: the vring.
1070 *
1071 * This always enables notifications, but returns false if there are
1072 * now more buffers available in the vring.
1073 */
1074bool vringh_notify_enable_kern(struct vringh *vrh)
1075{
1076	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
1077}
1078EXPORT_SYMBOL(vringh_notify_enable_kern);
1079
1080/**
1081 * vringh_notify_disable_kern - don't tell us if something changes.
1082 * @vrh: the vring.
1083 *
1084 * This is our normal running state: we disable and then only enable when
1085 * we're going to sleep.
1086 */
1087void vringh_notify_disable_kern(struct vringh *vrh)
1088{
1089	__vringh_notify_disable(vrh, putu16_kern);
1090}
1091EXPORT_SYMBOL(vringh_notify_disable_kern);
1092
1093/**
1094 * vringh_need_notify_kern - must we tell the other side about used buffers?
1095 * @vrh: the vring we've called vringh_complete_kern() on.
1096 *
1097 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
1098 */
1099int vringh_need_notify_kern(struct vringh *vrh)
1100{
1101	return __vringh_need_notify(vrh, getu16_kern);
1102}
1103EXPORT_SYMBOL(vringh_need_notify_kern);
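/*
 * Typical notification discipline (sketch): run with notifications
 * disabled and re-enable only before sleeping, re-checking afterwards to
 * close the race __vringh_notify_enable() describes:
 *
 *	process:
 *	vringh_notify_disable_kern(&vrh);
 *	... drain the ring ...
 *	if (!vringh_notify_enable_kern(&vrh)) {
 *		// more buffers arrived meanwhile; keep going
 *		goto process;
 *	}
 *	... sleep until the other side kicks us ...
 */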
1104
1105#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
1106
1107struct iotlb_vec {
1108	union {
1109		struct iovec *iovec;
1110		struct bio_vec *bvec;
1111	} iov;
1112	size_t count;
1113};
1114
1115static int iotlb_translate(const struct vringh *vrh,
1116			   u64 addr, u64 len, u64 *translated,
1117			   struct iotlb_vec *ivec, u32 perm)
1118{
1119	struct vhost_iotlb_map *map;
1120	struct vhost_iotlb *iotlb = vrh->iotlb;
1121	int ret = 0;
1122	u64 s = 0, last = addr + len - 1;
1123
1124	spin_lock(vrh->iotlb_lock);
1125
1126	while (len > s) {
1127		uintptr_t io_addr;
1128		size_t io_len;
1129		u64 size;
1130
1131		if (unlikely(ret >= ivec->count)) {
1132			ret = -ENOBUFS;
1133			break;
1134		}
1135
 1136		map = vhost_iotlb_itree_first(iotlb, addr, last);
 1137		if (!map || map->start > addr) {
1138			ret = -EINVAL;
1139			break;
1140		} else if (!(map->perm & perm)) {
1141			ret = -EPERM;
1142			break;
1143		}
1144
1145		size = map->size - addr + map->start;
1146		io_len = min(len - s, size);
1147		io_addr = map->addr - map->start + addr;
1148
1149		if (vrh->use_va) {
1150			struct iovec *iovec = ivec->iov.iovec;
1151
1152			iovec[ret].iov_len = io_len;
1153			iovec[ret].iov_base = (void __user *)io_addr;
1154		} else {
1155			u64 pfn = io_addr >> PAGE_SHIFT;
1156			struct bio_vec *bvec = ivec->iov.bvec;
1157
1158			bvec_set_page(&bvec[ret], pfn_to_page(pfn), io_len,
1159				      io_addr & (PAGE_SIZE - 1));
1160		}
1161
1162		s += size;
1163		addr += size;
1164		++ret;
1165	}
1166
1167	spin_unlock(vrh->iotlb_lock);
1168
1169	if (translated)
1170		*translated = min(len, s);
1171
1172	return ret;
1173}
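/*
 * Example: a 12 KiB access at IOVA 0x3000 backed by two maps,
 * [0x3000, 0x4fff] and [0x5000, 0x7fff], yields two entries: 8 KiB from
 * the first map and 4 KiB from the second.  If ivec->count is too small
 * for the whole span, -ENOBUFS is returned and *translated tells the
 * caller how far it got, so copy_{from,to}_iotlb() can loop.
 */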
1174
1175#define IOTLB_IOV_STRIDE 16
1176
1177static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
1178				  void *src, size_t len)
1179{
1180	struct iotlb_vec ivec;
1181	union {
1182		struct iovec iovec[IOTLB_IOV_STRIDE];
1183		struct bio_vec bvec[IOTLB_IOV_STRIDE];
1184	} iov;
1185	u64 total_translated = 0;
1186
1187	ivec.iov.iovec = iov.iovec;
1188	ivec.count = IOTLB_IOV_STRIDE;
1189
1190	while (total_translated < len) {
1191		struct iov_iter iter;
1192		u64 translated;
1193		int ret;
1194
1195		ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
1196				      len - total_translated, &translated,
1197				      &ivec, VHOST_MAP_RO);
1198		if (ret == -ENOBUFS)
1199			ret = IOTLB_IOV_STRIDE;
1200		else if (ret < 0)
1201			return ret;
1202
1203		if (vrh->use_va) {
1204			iov_iter_init(&iter, ITER_SOURCE, ivec.iov.iovec, ret,
1205				      translated);
1206		} else {
1207			iov_iter_bvec(&iter, ITER_SOURCE, ivec.iov.bvec, ret,
1208				      translated);
1209		}
1210
1211		ret = copy_from_iter(dst, translated, &iter);
1212		if (ret < 0)
1213			return ret;
1214
1215		src += translated;
1216		dst += translated;
1217		total_translated += translated;
1218	}
1219
 1220	return total_translated;
 1221}
1222
1223static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
1224				void *src, size_t len)
1225{
1226	struct iotlb_vec ivec;
1227	union {
1228		struct iovec iovec[IOTLB_IOV_STRIDE];
1229		struct bio_vec bvec[IOTLB_IOV_STRIDE];
1230	} iov;
1231	u64 total_translated = 0;
1232
1233	ivec.iov.iovec = iov.iovec;
1234	ivec.count = IOTLB_IOV_STRIDE;
1235
1236	while (total_translated < len) {
1237		struct iov_iter iter;
1238		u64 translated;
1239		int ret;
1240
1241		ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
1242				      len - total_translated, &translated,
1243				      &ivec, VHOST_MAP_WO);
1244		if (ret == -ENOBUFS)
1245			ret = IOTLB_IOV_STRIDE;
1246		else if (ret < 0)
1247			return ret;
1248
1249		if (vrh->use_va) {
1250			iov_iter_init(&iter, ITER_DEST, ivec.iov.iovec, ret,
1251				      translated);
1252		} else {
1253			iov_iter_bvec(&iter, ITER_DEST, ivec.iov.bvec, ret,
1254				      translated);
1255		}
1256
1257		ret = copy_to_iter(src, translated, &iter);
1258		if (ret < 0)
1259			return ret;
1260
1261		src += translated;
1262		dst += translated;
1263		total_translated += translated;
1264	}
1265
 1266	return total_translated;
 1267}
1268
1269static inline int getu16_iotlb(const struct vringh *vrh,
1270			       u16 *val, const __virtio16 *p)
1271{
1272	struct iotlb_vec ivec;
1273	union {
1274		struct iovec iovec[1];
1275		struct bio_vec bvec[1];
1276	} iov;
1277	__virtio16 tmp;
1278	int ret;
1279
1280	ivec.iov.iovec = iov.iovec;
1281	ivec.count = 1;
1282
1283	/* Atomic read is needed for getu16 */
1284	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
1285			      NULL, &ivec, VHOST_MAP_RO);
1286	if (ret < 0)
1287		return ret;
1288
1289	if (vrh->use_va) {
1290		ret = __get_user(tmp, (__virtio16 __user *)ivec.iov.iovec[0].iov_base);
1291		if (ret)
1292			return ret;
1293	} else {
1294		void *kaddr = kmap_local_page(ivec.iov.bvec[0].bv_page);
1295		void *from = kaddr + ivec.iov.bvec[0].bv_offset;
1296
1297		tmp = READ_ONCE(*(__virtio16 *)from);
1298		kunmap_local(kaddr);
1299	}
1300
1301	*val = vringh16_to_cpu(vrh, tmp);
1302
1303	return 0;
1304}
1305
1306static inline int putu16_iotlb(const struct vringh *vrh,
1307			       __virtio16 *p, u16 val)
1308{
1309	struct iotlb_vec ivec;
1310	union {
1311		struct iovec iovec;
1312		struct bio_vec bvec;
1313	} iov;
1314	__virtio16 tmp;
1315	int ret;
1316
1317	ivec.iov.iovec = &iov.iovec;
1318	ivec.count = 1;
1319
1320	/* Atomic write is needed for putu16 */
1321	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
 1322			      NULL, &ivec, VHOST_MAP_WO);
1323	if (ret < 0)
1324		return ret;
1325
1326	tmp = cpu_to_vringh16(vrh, val);
1327
1328	if (vrh->use_va) {
1329		ret = __put_user(tmp, (__virtio16 __user *)ivec.iov.iovec[0].iov_base);
1330		if (ret)
1331			return ret;
1332	} else {
1333		void *kaddr = kmap_local_page(ivec.iov.bvec[0].bv_page);
1334		void *to = kaddr + ivec.iov.bvec[0].bv_offset;
1335
1336		WRITE_ONCE(*(__virtio16 *)to, tmp);
1337		kunmap_local(kaddr);
1338	}
1339
1340	return 0;
1341}
1342
1343static inline int copydesc_iotlb(const struct vringh *vrh,
1344				 void *dst, const void *src, size_t len)
1345{
1346	int ret;
1347
1348	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
1349	if (ret != len)
1350		return -EFAULT;
1351
1352	return 0;
1353}
1354
1355static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
1356				  void *dst, size_t len)
1357{
1358	int ret;
1359
1360	ret = copy_from_iotlb(vrh, dst, src, len);
1361	if (ret != len)
1362		return -EFAULT;
1363
1364	return 0;
1365}
1366
1367static inline int xfer_to_iotlb(const struct vringh *vrh,
1368			       void *dst, void *src, size_t len)
1369{
1370	int ret;
1371
1372	ret = copy_to_iotlb(vrh, dst, src, len);
1373	if (ret != len)
1374		return -EFAULT;
1375
1376	return 0;
1377}
1378
1379static inline int putused_iotlb(const struct vringh *vrh,
1380				struct vring_used_elem *dst,
1381				const struct vring_used_elem *src,
1382				unsigned int num)
1383{
1384	int size = num * sizeof(*dst);
1385	int ret;
1386
1387	ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
1388	if (ret != size)
1389		return -EFAULT;
1390
1391	return 0;
1392}
1393
1394/**
1395 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
1396 * @vrh: the vringh to initialize.
1397 * @features: the feature bits for this ring.
1398 * @num: the number of elements.
1399 * @weak_barriers: true if we only need memory barriers, not I/O.
 1400 * @desc: the descriptor pointer (an IOVA, translated through the IOTLB).
 1401 * @avail: the avail pointer (an IOVA, translated through the IOTLB).
 1402 * @used: the used pointer (an IOVA, translated through the IOTLB).
1403 *
1404 * Returns an error if num is invalid.
1405 */
1406int vringh_init_iotlb(struct vringh *vrh, u64 features,
1407		      unsigned int num, bool weak_barriers,
1408		      struct vring_desc *desc,
1409		      struct vring_avail *avail,
1410		      struct vring_used *used)
1411{
1412	vrh->use_va = false;
1413
1414	return vringh_init_kern(vrh, features, num, weak_barriers,
1415				desc, avail, used);
1416}
1417EXPORT_SYMBOL(vringh_init_iotlb);
1418
1419/**
1420 * vringh_init_iotlb_va - initialize a vringh for a ring with IOTLB containing
1421 *                        user VA.
1422 * @vrh: the vringh to initialize.
1423 * @features: the feature bits for this ring.
1424 * @num: the number of elements.
1425 * @weak_barriers: true if we only need memory barriers, not I/O.
 1426 * @desc: the descriptor pointer (an IOVA, translated through the IOTLB).
 1427 * @avail: the avail pointer (an IOVA, translated through the IOTLB).
 1428 * @used: the used pointer (an IOVA, translated through the IOTLB).
1429 *
1430 * Returns an error if num is invalid.
1431 */
1432int vringh_init_iotlb_va(struct vringh *vrh, u64 features,
1433			 unsigned int num, bool weak_barriers,
1434			 struct vring_desc *desc,
1435			 struct vring_avail *avail,
1436			 struct vring_used *used)
1437{
1438	vrh->use_va = true;
1439
1440	return vringh_init_kern(vrh, features, num, weak_barriers,
1441				desc, avail, used);
1442}
1443EXPORT_SYMBOL(vringh_init_iotlb_va);
1444
1445/**
1446 * vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
1447 * @vrh: the vring
1448 * @iotlb: iotlb associated with this vring
1449 * @iotlb_lock: spinlock to synchronize the iotlb accesses
1450 */
1451void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
1452		      spinlock_t *iotlb_lock)
1453{
1454	vrh->iotlb = iotlb;
1455	vrh->iotlb_lock = iotlb_lock;
1456}
1457EXPORT_SYMBOL(vringh_set_iotlb);
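/*
 * Example (illustrative sketch): a vDPA-simulator-style device wiring the
 * IOTLB in before serving descriptors; the "dev" fields and the limit of
 * 2048 entries are hypothetical.
 *
 *	spin_lock_init(&dev->iommu_lock);
 *	dev->iommu = vhost_iotlb_alloc(2048, 0);
 *	err = vringh_init_iotlb(&vrh, features, num, false,
 *				desc, avail, used);
 *	vringh_set_iotlb(&vrh, dev->iommu, &dev->iommu_lock);
 */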
1458
1459/**
1460 * vringh_getdesc_iotlb - get next available descriptor from ring with
1461 * IOTLB.
1462 * @vrh: the kernelspace vring.
1463 * @riov: where to put the readable descriptors (or NULL)
1464 * @wiov: where to put the writable descriptors (or NULL)
1465 * @head: head index we received, for passing to vringh_complete_iotlb().
1466 * @gfp: flags for allocating larger riov/wiov.
1467 *
1468 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
1469 *
1470 * Note that on error return, you can tell the difference between an
1471 * invalid ring and a single invalid descriptor: in the former case,
1472 * *head will be vrh->vring.num.  You may be able to ignore an invalid
1473 * descriptor, but there's not much you can do with an invalid ring.
1474 *
1475 * Note that you can reuse riov and wiov with subsequent calls. Content is
1476 * overwritten and memory reallocated if more space is needed.
 1477 * When you no longer need riov and wiov, you should clean them up by
 1478 * calling vringh_kiov_cleanup() to release the memory, even on error!
1479 */
1480int vringh_getdesc_iotlb(struct vringh *vrh,
1481			 struct vringh_kiov *riov,
1482			 struct vringh_kiov *wiov,
1483			 u16 *head,
1484			 gfp_t gfp)
1485{
1486	int err;
1487
1488	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
1489	if (err < 0)
1490		return err;
1491
1492	/* Empty... */
1493	if (err == vrh->vring.num)
1494		return 0;
1495
1496	*head = err;
1497	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
1498			   gfp, copydesc_iotlb);
1499	if (err)
1500		return err;
1501
1502	return 1;
1503}
1504EXPORT_SYMBOL(vringh_getdesc_iotlb);
1505
1506/**
1507 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
1508 * @vrh: the vring.
1509 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
1510 * @dst: the place to copy.
1511 * @len: the maximum length to copy.
1512 *
1513 * Returns the bytes copied <= len or a negative errno.
1514 */
1515ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
1516			      struct vringh_kiov *riov,
1517			      void *dst, size_t len)
1518{
1519	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
1520}
1521EXPORT_SYMBOL(vringh_iov_pull_iotlb);
1522
1523/**
1524 * vringh_iov_push_iotlb - copy bytes into vring_iov.
1525 * @vrh: the vring.
1526 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
1527 * @src: the place to copy from.
1528 * @len: the maximum length to copy.
1529 *
1530 * Returns the bytes copied <= len or a negative errno.
1531 */
1532ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
1533			      struct vringh_kiov *wiov,
1534			      const void *src, size_t len)
1535{
1536	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
1537}
1538EXPORT_SYMBOL(vringh_iov_push_iotlb);
1539
1540/**
1541 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
1542 * @vrh: the vring.
 1543 * @num: the number of descriptors to put back (i.e. how many
 1544 *	 vringh_getdesc_iotlb() calls to undo).
 1545 *
 1546 * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
1547 */
1548void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
1549{
1550	/* We only update vring_avail_event(vr) when we want to be notified,
1551	 * so we haven't changed that yet.
1552	 */
1553	vrh->last_avail_idx -= num;
1554}
1555EXPORT_SYMBOL(vringh_abandon_iotlb);
1556
1557/**
 1558 * vringh_complete_iotlb - we've finished with a descriptor: publish it.
1559 * @vrh: the vring.
1560 * @head: the head as filled in by vringh_getdesc_iotlb.
1561 * @len: the length of data we have written.
1562 *
1563 * You should check vringh_need_notify_iotlb() after one or more calls
1564 * to this function.
1565 */
1566int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
1567{
1568	struct vring_used_elem used;
1569
1570	used.id = cpu_to_vringh32(vrh, head);
1571	used.len = cpu_to_vringh32(vrh, len);
1572
1573	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
1574}
1575EXPORT_SYMBOL(vringh_complete_iotlb);
1576
1577/**
1578 * vringh_notify_enable_iotlb - we want to know if something changes.
1579 * @vrh: the vring.
1580 *
1581 * This always enables notifications, but returns false if there are
1582 * now more buffers available in the vring.
1583 */
1584bool vringh_notify_enable_iotlb(struct vringh *vrh)
1585{
1586	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
1587}
1588EXPORT_SYMBOL(vringh_notify_enable_iotlb);
1589
1590/**
1591 * vringh_notify_disable_iotlb - don't tell us if something changes.
1592 * @vrh: the vring.
1593 *
1594 * This is our normal running state: we disable and then only enable when
1595 * we're going to sleep.
1596 */
1597void vringh_notify_disable_iotlb(struct vringh *vrh)
1598{
1599	__vringh_notify_disable(vrh, putu16_iotlb);
1600}
1601EXPORT_SYMBOL(vringh_notify_disable_iotlb);
1602
1603/**
1604 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
1605 * @vrh: the vring we've called vringh_complete_iotlb() on.
1606 *
1607 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
1608 */
1609int vringh_need_notify_iotlb(struct vringh *vrh)
1610{
1611	return __vringh_need_notify(vrh, getu16_iotlb);
1612}
1613EXPORT_SYMBOL(vringh_need_notify_iotlb);
1614
1615#endif
1616
1617MODULE_DESCRIPTION("host side of a virtio ring");
1618MODULE_LICENSE("GPL");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Helpers for the host side of a virtio ring.
   4 *
   5 * Since these may be in userspace, we use (inline) accessors.
   6 */
   7#include <linux/compiler.h>
   8#include <linux/module.h>
   9#include <linux/vringh.h>
  10#include <linux/virtio_ring.h>
  11#include <linux/kernel.h>
  12#include <linux/ratelimit.h>
  13#include <linux/uaccess.h>
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
  17#include <linux/bvec.h>
  18#include <linux/highmem.h>
  19#include <linux/vhost_iotlb.h>
  20#endif
  21#include <uapi/linux/virtio_config.h>
  22
  23static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
  24{
  25	static DEFINE_RATELIMIT_STATE(vringh_rs,
  26				      DEFAULT_RATELIMIT_INTERVAL,
  27				      DEFAULT_RATELIMIT_BURST);
  28	if (__ratelimit(&vringh_rs)) {
  29		va_list ap;
  30		va_start(ap, fmt);
  31		printk(KERN_NOTICE "vringh:");
  32		vprintk(fmt, ap);
  33		va_end(ap);
  34	}
  35}
  36
  37/* Returns vring->num if empty, -ve on error. */
  38static inline int __vringh_get_head(const struct vringh *vrh,
  39				    int (*getu16)(const struct vringh *vrh,
  40						  u16 *val, const __virtio16 *p),
  41				    u16 *last_avail_idx)
  42{
  43	u16 avail_idx, i, head;
  44	int err;
  45
  46	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
  47	if (err) {
  48		vringh_bad("Failed to access avail idx at %p",
  49			   &vrh->vring.avail->idx);
  50		return err;
  51	}
  52
  53	if (*last_avail_idx == avail_idx)
  54		return vrh->vring.num;
  55
  56	/* Only get avail ring entries after they have been exposed by guest. */
  57	virtio_rmb(vrh->weak_barriers);
  58
  59	i = *last_avail_idx & (vrh->vring.num - 1);
  60
  61	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
  62	if (err) {
  63		vringh_bad("Failed to read head: idx %d address %p",
  64			   *last_avail_idx, &vrh->vring.avail->ring[i]);
  65		return err;
  66	}
  67
  68	if (head >= vrh->vring.num) {
  69		vringh_bad("Guest says index %u > %u is available",
  70			   head, vrh->vring.num);
  71		return -EINVAL;
  72	}
  73
  74	(*last_avail_idx)++;
  75	return head;
  76}
  77
  78/**
  79 * vringh_kiov_advance - skip bytes from vring_kiov
  80 * @iov: an iov passed to vringh_getdesc_*() (updated as we consume)
  81 * @len: the maximum length to advance
  82 */
  83void vringh_kiov_advance(struct vringh_kiov *iov, size_t len)
  84{
  85	while (len && iov->i < iov->used) {
  86		size_t partlen = min(iov->iov[iov->i].iov_len, len);
  87
  88		iov->consumed += partlen;
  89		iov->iov[iov->i].iov_len -= partlen;
  90		iov->iov[iov->i].iov_base += partlen;
  91
  92		if (!iov->iov[iov->i].iov_len) {
  93			/* Fix up old iov element then increment. */
  94			iov->iov[iov->i].iov_len = iov->consumed;
  95			iov->iov[iov->i].iov_base -= iov->consumed;
  96
  97			iov->consumed = 0;
  98			iov->i++;
  99		}
 100
 101		len -= partlen;
 102	}
 103}
 104EXPORT_SYMBOL(vringh_kiov_advance);
 105
 106/* Copy some bytes to/from the iovec.  Returns num copied. */
 107static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
 108				      struct vringh_kiov *iov,
 109				      void *ptr, size_t len,
 110				      int (*xfer)(const struct vringh *vrh,
 111						  void *addr, void *ptr,
 112						  size_t len))
 113{
 114	int err, done = 0;
 115
 116	while (len && iov->i < iov->used) {
 117		size_t partlen;
 118
 119		partlen = min(iov->iov[iov->i].iov_len, len);
 120		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
 121		if (err)
 122			return err;
 123		done += partlen;
 124		len -= partlen;
 125		ptr += partlen;
 
 
 
 
 
 
 
 
 126
 127		vringh_kiov_advance(iov, partlen);
 
 
 128	}
 129	return done;
 130}
 131
 132/* May reduce *len if range is shorter. */
 133static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
 134			       struct vringh_range *range,
 135			       bool (*getrange)(struct vringh *,
 136						u64, struct vringh_range *))
 137{
 138	if (addr < range->start || addr > range->end_incl) {
 139		if (!getrange(vrh, addr, range))
 140			return false;
 141	}
 142	BUG_ON(addr < range->start || addr > range->end_incl);
 143
 144	/* To end of memory? */
 145	if (unlikely(addr + *len == 0)) {
 146		if (range->end_incl == -1ULL)
 147			return true;
 148		goto truncate;
 149	}
 150
 151	/* Otherwise, don't wrap. */
 152	if (addr + *len < addr) {
 153		vringh_bad("Wrapping descriptor %zu@0x%llx",
 154			   *len, (unsigned long long)addr);
 155		return false;
 156	}
 157
 158	if (unlikely(addr + *len - 1 > range->end_incl))
 159		goto truncate;
 160	return true;
 161
 162truncate:
 163	*len = range->end_incl + 1 - addr;
 164	return true;
 165}
 166
 167static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
 168				  struct vringh_range *range,
 169				  bool (*getrange)(struct vringh *,
 170						   u64, struct vringh_range *))
 171{
 172	return true;
 173}
 174
 175/* No reason for this code to be inline. */
 176static int move_to_indirect(const struct vringh *vrh,
 177			    int *up_next, u16 *i, void *addr,
 178			    const struct vring_desc *desc,
 179			    struct vring_desc **descs, int *desc_max)
 180{
 181	u32 len;
 182
 183	/* Indirect tables can't have indirect. */
 184	if (*up_next != -1) {
 185		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
 186		return -EINVAL;
 187	}
 188
 189	len = vringh32_to_cpu(vrh, desc->len);
 190	if (unlikely(len % sizeof(struct vring_desc))) {
 191		vringh_bad("Strange indirect len %u", desc->len);
 192		return -EINVAL;
 193	}
 194
 195	/* We will check this when we follow it! */
 196	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
 197		*up_next = vringh16_to_cpu(vrh, desc->next);
 198	else
 199		*up_next = -2;
 200	*descs = addr;
 201	*desc_max = len / sizeof(struct vring_desc);
 202
 203	/* Now, start at the first indirect. */
 204	*i = 0;
 205	return 0;
 206}
 207
 208static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
 209{
 210	struct kvec *new;
 211	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;
 212
 213	if (new_num < 8)
 214		new_num = 8;
 215
 216	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
 217	if (flag)
 218		new = krealloc_array(iov->iov, new_num,
 219				     sizeof(struct iovec), gfp);
 220	else {
 221		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
 222		if (new) {
 223			memcpy(new, iov->iov,
 224			       iov->max_num * sizeof(struct iovec));
 225			flag = VRINGH_IOV_ALLOCATED;
 226		}
 227	}
 228	if (!new)
 229		return -ENOMEM;
 230	iov->iov = new;
 231	iov->max_num = (new_num | flag);
 232	return 0;
 233}
 234
 235static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
 236				       struct vring_desc **descs, int *desc_max)
 237{
 238	u16 i = *up_next;
 239
 240	*up_next = -1;
 241	*descs = vrh->vring.desc;
 242	*desc_max = vrh->vring.num;
 243	return i;
 244}
 245
 246static int slow_copy(struct vringh *vrh, void *dst, const void *src,
 247		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
 248				    struct vringh_range *range,
 249				    bool (*getrange)(struct vringh *vrh,
 250						     u64,
 251						     struct vringh_range *)),
 252		     bool (*getrange)(struct vringh *vrh,
 253				      u64 addr,
 254				      struct vringh_range *r),
 255		     struct vringh_range *range,
 256		     int (*copy)(const struct vringh *vrh,
 257				 void *dst, const void *src, size_t len))
 258{
 259	size_t part, len = sizeof(struct vring_desc);
 260
 261	do {
 262		u64 addr;
 263		int err;
 264
 265		part = len;
 266		addr = (u64)(unsigned long)src - range->offset;
 267
 268		if (!rcheck(vrh, addr, &part, range, getrange))
 269			return -EINVAL;
 270
 271		err = copy(vrh, dst, src, part);
 272		if (err)
 273			return err;
 274
 275		dst += part;
 276		src += part;
 277		len -= part;
 278	} while (len);
 279	return 0;
 280}
 281
 282static inline int
 283__vringh_iov(struct vringh *vrh, u16 i,
 284	     struct vringh_kiov *riov,
 285	     struct vringh_kiov *wiov,
 286	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
 287			    struct vringh_range *range,
 288			    bool (*getrange)(struct vringh *, u64,
 289					     struct vringh_range *)),
 290	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
 291	     gfp_t gfp,
 292	     int (*copy)(const struct vringh *vrh,
 293			 void *dst, const void *src, size_t len))
 294{
 295	int err, count = 0, up_next, desc_max;
 296	struct vring_desc desc, *descs;
 297	struct vringh_range range = { -1ULL, 0 }, slowrange;
 298	bool slow = false;
 299
 300	/* We start traversing vring's descriptor table. */
 301	descs = vrh->vring.desc;
 302	desc_max = vrh->vring.num;
 303	up_next = -1;
 304
 305	/* You must want something! */
 306	if (WARN_ON(!riov && !wiov))
 307		return -EINVAL;
 308
 309	if (riov)
 310		riov->i = riov->used = riov->consumed = 0;
 311	if (wiov)
 312		wiov->i = wiov->used = wiov->consumed = 0;
 313
 314	for (;;) {
 315		void *addr;
 316		struct vringh_kiov *iov;
 317		size_t len;
 318
 319		if (unlikely(slow))
 320			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
 321					&slowrange, copy);
 322		else
 323			err = copy(vrh, &desc, &descs[i], sizeof(desc));
 324		if (unlikely(err))
 325			goto fail;
 326
 327		if (unlikely(desc.flags &
 328			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
 329			u64 a = vringh64_to_cpu(vrh, desc.addr);
 330
 331			/* Make sure it's OK, and get offset. */
 332			len = vringh32_to_cpu(vrh, desc.len);
 333			if (!rcheck(vrh, a, &len, &range, getrange)) {
 334				err = -EINVAL;
 335				goto fail;
 336			}
 337
 338			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
 339				slow = true;
 340				/* We need to save this range to use offset */
 341				slowrange = range;
 342			}
 343
 344			addr = (void *)(long)(a + range.offset);
 345			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
 346					       &descs, &desc_max);
 347			if (err)
 348				goto fail;
 349			continue;
 350		}
 351
 352		if (count++ == vrh->vring.num) {
 353			vringh_bad("Descriptor loop in %p", descs);
 354			err = -ELOOP;
 355			goto fail;
 356		}
 357
 358		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
 359			iov = wiov;
 360		else {
 361			iov = riov;
 362			if (unlikely(wiov && wiov->used)) {
 363				vringh_bad("Readable desc %p after writable",
 364					   &descs[i]);
 365				err = -EINVAL;
 366				goto fail;
 367			}
 368		}
 369
 370		if (!iov) {
 371			vringh_bad("Unexpected %s desc",
 372				   !wiov ? "writable" : "readable");
 373			err = -EPROTO;
 374			goto fail;
 375		}
 376
 377	again:
 378		/* Make sure it's OK, and get offset. */
 379		len = vringh32_to_cpu(vrh, desc.len);
 380		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
 381			    getrange)) {
 382			err = -EINVAL;
 383			goto fail;
 384		}
 385		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
 386					       range.offset);
 387
 388		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
 389			err = resize_iovec(iov, gfp);
 390			if (err)
 391				goto fail;
 392		}
 393
 394		iov->iov[iov->used].iov_base = addr;
 395		iov->iov[iov->used].iov_len = len;
 396		iov->used++;
 397
 398		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
 399			desc.len = cpu_to_vringh32(vrh,
 400				   vringh32_to_cpu(vrh, desc.len) - len);
 401			desc.addr = cpu_to_vringh64(vrh,
 402				    vringh64_to_cpu(vrh, desc.addr) + len);
 403			goto again;
 404		}
 405
 406		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
 407			i = vringh16_to_cpu(vrh, desc.next);
 408		} else {
 409			/* Just in case we need to finish traversing above. */
 410			if (unlikely(up_next > 0)) {
 411				i = return_from_indirect(vrh, &up_next,
 412							 &descs, &desc_max);
 413				slow = false;
 414			} else
 415				break;
 416		}
 417
 418		if (i >= desc_max) {
 419			vringh_bad("Chained index %u > %u", i, desc_max);
 420			err = -EINVAL;
 421			goto fail;
 422		}
 423	}
 424
 425	return 0;
 426
 427fail:
 428	return err;
 429}
 430
 431static inline int __vringh_complete(struct vringh *vrh,
 432				    const struct vring_used_elem *used,
 433				    unsigned int num_used,
 434				    int (*putu16)(const struct vringh *vrh,
 435						  __virtio16 *p, u16 val),
 436				    int (*putused)(const struct vringh *vrh,
 437						   struct vring_used_elem *dst,
 438						   const struct vring_used_elem
 439						   *src, unsigned num))
 440{
 441	struct vring_used *used_ring;
 442	int err;
 443	u16 used_idx, off;
 444
 445	used_ring = vrh->vring.used;
 446	used_idx = vrh->last_used_idx + vrh->completed;
 447
 448	off = used_idx % vrh->vring.num;
 449
 450	/* Compiler knows num_used == 1 sometimes, hence extra check */
 451	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
 452		u16 part = vrh->vring.num - off;
 453		err = putused(vrh, &used_ring->ring[off], used, part);
 454		if (!err)
 455			err = putused(vrh, &used_ring->ring[0], used + part,
 456				      num_used - part);
 457	} else
 458		err = putused(vrh, &used_ring->ring[off], used, num_used);
 459
 460	if (err) {
 461		vringh_bad("Failed to write %u used entries %u at %p",
 462			   num_used, off, &used_ring->ring[off]);
 463		return err;
 464	}
 465
 466	/* Make sure buffer is written before we update index. */
 467	virtio_wmb(vrh->weak_barriers);
 468
 469	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
 470	if (err) {
 471		vringh_bad("Failed to update used index at %p",
 472			   &vrh->vring.used->idx);
 473		return err;
 474	}
 475
 476	vrh->completed += num_used;
 477	return 0;
 478}
 479
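/*
 * Editor's note: a worked example of the wrap split above: with
 * vring.num = 8, off = 6 and num_used = 4, the first putused() writes
 * entries 0..1 of @used at ring[6..7], and the second writes entries
 * 2..3 at ring[0..1].
 */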
 480
 481static inline int __vringh_need_notify(struct vringh *vrh,
 482				       int (*getu16)(const struct vringh *vrh,
 483						     u16 *val,
 484						     const __virtio16 *p))
 485{
 486	bool notify;
 487	u16 used_event;
 488	int err;
 489
 490	/* Flush out used index update. This is paired with the
 491	 * barrier that the Guest executes when enabling
 492	 * interrupts. */
 493	virtio_mb(vrh->weak_barriers);
 494
 495	/* Old-style, without event indices. */
 496	if (!vrh->event_indices) {
 497		u16 flags;
 498		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
 499		if (err) {
 500			vringh_bad("Failed to get flags at %p",
 501				   &vrh->vring.avail->flags);
 502			return err;
 503		}
 504		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
 505	}
 506
 507	/* Modern: we know when other side wants to know. */
 508	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
 509	if (err) {
 510		vringh_bad("Failed to get used event idx at %p",
 511			   &vring_used_event(&vrh->vring));
 512		return err;
 513	}
 514
 515	/* Just in case we added so many that we wrap. */
 516	if (unlikely(vrh->completed > 0xffff))
 517		notify = true;
 518	else
 519		notify = vring_need_event(used_event,
 520					  vrh->last_used_idx + vrh->completed,
 521					  vrh->last_used_idx);
 522
 523	vrh->last_used_idx += vrh->completed;
 524	vrh->completed = 0;
 525	return notify;
 526}
 527
 528static inline bool __vringh_notify_enable(struct vringh *vrh,
 529					  int (*getu16)(const struct vringh *vrh,
 530							u16 *val, const __virtio16 *p),
 531					  int (*putu16)(const struct vringh *vrh,
 532							__virtio16 *p, u16 val))
 533{
 534	u16 avail;
 535
 536	if (!vrh->event_indices) {
 537		/* Old-school; update flags. */
 538		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
 539			vringh_bad("Clearing used flags %p",
 540				   &vrh->vring.used->flags);
 541			return true;
 542		}
 543	} else {
 544		if (putu16(vrh, &vring_avail_event(&vrh->vring),
 545			   vrh->last_avail_idx) != 0) {
 546			vringh_bad("Updating avail event index %p",
 547				   &vring_avail_event(&vrh->vring));
 548			return true;
 549		}
 550	}
 551
 552	/* They could have slipped one in as we were doing that: make
 553	 * sure it's written, then check again. */
 554	virtio_mb(vrh->weak_barriers);
 555
 556	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
 557		vringh_bad("Failed to check avail idx at %p",
 558			   &vrh->vring.avail->idx);
 559		return true;
 560	}
 561
 562	/* This is unlikely, so we just leave notifications enabled
 563	 * (if we're using event_indices, we'll only get one
 564	 * notification anyway). */
 565	return avail == vrh->last_avail_idx;
 566}
 567
 568static inline void __vringh_notify_disable(struct vringh *vrh,
 569					   int (*putu16)(const struct vringh *vrh,
 570							 __virtio16 *p, u16 val))
 571{
 572	if (!vrh->event_indices) {
 573		/* Old-school; update flags. */
 574		if (putu16(vrh, &vrh->vring.used->flags,
 575			   VRING_USED_F_NO_NOTIFY)) {
 576			vringh_bad("Setting used flags %p",
 577				   &vrh->vring.used->flags);
 578		}
 579	}
 580}
 581
 582/* Userspace access helpers: in this case, addresses are really userspace. */
 583static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
 584{
 585	__virtio16 v = 0;
 586	int rc = get_user(v, (__force __virtio16 __user *)p);
 587	*val = vringh16_to_cpu(vrh, v);
 588	return rc;
 589}
 590
 591static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
 592{
 593	__virtio16 v = cpu_to_vringh16(vrh, val);
 594	return put_user(v, (__force __virtio16 __user *)p);
 595}
 596
 597static inline int copydesc_user(const struct vringh *vrh,
 598				void *dst, const void *src, size_t len)
 599{
 600	return copy_from_user(dst, (__force void __user *)src, len) ?
 601		-EFAULT : 0;
 602}
 603
 604static inline int putused_user(const struct vringh *vrh,
 605			       struct vring_used_elem *dst,
 606			       const struct vring_used_elem *src,
 607			       unsigned int num)
 608{
 609	return copy_to_user((__force void __user *)dst, src,
 610			    sizeof(*dst) * num) ? -EFAULT : 0;
 611}
 612
 613static inline int xfer_from_user(const struct vringh *vrh, void *src,
 614				 void *dst, size_t len)
 615{
 616	return copy_from_user(dst, (__force void __user *)src, len) ?
 617		-EFAULT : 0;
 618}
 619
 620static inline int xfer_to_user(const struct vringh *vrh,
 621			       void *dst, void *src, size_t len)
 622{
 623	return copy_to_user((__force void __user *)dst, src, len) ?
 624		-EFAULT : 0;
 625}
 626
 627/**
 628 * vringh_init_user - initialize a vringh for a userspace vring.
 629 * @vrh: the vringh to initialize.
 630 * @features: the feature bits for this ring.
 631 * @num: the number of elements.
 632 * @weak_barriers: true if we only need memory barriers, not I/O.
 633 * @desc: the userspace descriptor pointer.
 634 * @avail: the userspace avail pointer.
 635 * @used: the userspace used pointer.
 636 *
 637 * Returns an error if num is invalid: you should check pointers
 638 * yourself!
 639 */
 640int vringh_init_user(struct vringh *vrh, u64 features,
 641		     unsigned int num, bool weak_barriers,
 642		     vring_desc_t __user *desc,
 643		     vring_avail_t __user *avail,
 644		     vring_used_t __user *used)
 645{
 646	/* Sane power of 2 please! */
 647	if (!num || num > 0xffff || (num & (num - 1))) {
 648		vringh_bad("Bad ring size %u", num);
 649		return -EINVAL;
 650	}
 651
 652	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
 653	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
 654	vrh->weak_barriers = weak_barriers;
 655	vrh->completed = 0;
 656	vrh->last_avail_idx = 0;
 657	vrh->last_used_idx = 0;
 658	vrh->vring.num = num;
 659	/* vring expects kernel addresses, but only used via accessors. */
 660	vrh->vring.desc = (__force struct vring_desc *)desc;
 661	vrh->vring.avail = (__force struct vring_avail *)avail;
 662	vrh->vring.used = (__force struct vring_used *)used;
 663	return 0;
 664}
 665EXPORT_SYMBOL(vringh_init_user);
 666
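/*
 * Editor's sketch: typical initialization, e.g. in a vhost-style
 * backend once userspace has handed over the three ring pointers.
 * The feature mask below is illustrative; use whatever was negotiated
 * with the driver.
 */
static int example_init(struct vringh *vrh, unsigned int num,
			vring_desc_t __user *desc,
			vring_avail_t __user *avail,
			vring_used_t __user *used)
{
	u64 features = (1ULL << VIRTIO_F_VERSION_1) |
		       (1ULL << VIRTIO_RING_F_EVENT_IDX);

	/* Only num is validated here; the pointers are checked lazily
	 * by the get_user()/copy_*_user() accessors above. */
	return vringh_init_user(vrh, features, num, true,
				desc, avail, used);
}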
 667/**
 668 * vringh_getdesc_user - get next available descriptor from userspace ring.
 669 * @vrh: the userspace vring.
 670 * @riov: where to put the readable descriptors (or NULL)
 671 * @wiov: where to put the writable descriptors (or NULL)
 672 * @getrange: function to call to check ranges.
 673 * @head: head index we received, for passing to vringh_complete_user().
 674 *
 675 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 676 *
 677 * Note that on error return, you can tell the difference between an
 678 * invalid ring and a single invalid descriptor: in the former case,
 679 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 680 * descriptor, but there's not much you can do with an invalid ring.
 681 *
 682 * Note that you can reuse riov and wiov with subsequent calls. Content is
 683 * overwritten and memory reallocated if more space is needed.
 684 * When you no longer need riov and wiov, you should clean them up by
 685 * calling vringh_iov_cleanup() to release the memory, even on error!
 686 */
 687int vringh_getdesc_user(struct vringh *vrh,
 688			struct vringh_iov *riov,
 689			struct vringh_iov *wiov,
 690			bool (*getrange)(struct vringh *vrh,
 691					 u64 addr, struct vringh_range *r),
 692			u16 *head)
 693{
 694	int err;
 695
 696	*head = vrh->vring.num;
 697	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
 698	if (err < 0)
 699		return err;
 700
 701	/* Empty... */
 702	if (err == vrh->vring.num)
 703		return 0;
 704
 705	/* We need the layouts to be identical for this to work */
 706	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
 707	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
 708		     offsetof(struct vringh_iov, iov));
 709	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
 710		     offsetof(struct vringh_iov, i));
 711	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
 712		     offsetof(struct vringh_iov, used));
 713	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
 714		     offsetof(struct vringh_iov, max_num));
 715	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
 716	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
 717		     offsetof(struct kvec, iov_base));
 718	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
 719		     offsetof(struct kvec, iov_len));
 720	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
 721		     != sizeof(((struct kvec *)NULL)->iov_base));
 722	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
 723		     != sizeof(((struct kvec *)NULL)->iov_len));
 724
 725	*head = err;
 726	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
 727			   (struct vringh_kiov *)wiov,
 728			   range_check, getrange, GFP_KERNEL, copydesc_user);
 729	if (err)
 730		return err;
 731
 732	return 1;
 733}
 734EXPORT_SYMBOL(vringh_getdesc_user);
 735
 736/**
 737 * vringh_iov_pull_user - copy bytes from the vringh_iov.
 738 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 739 * @dst: the place to copy.
 740 * @len: the maximum length to copy.
 741 *
 742 * Returns the bytes copied <= len or a negative errno.
 743 */
 744ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
 745{
 746	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
 747			       dst, len, xfer_from_user);
 748}
 749EXPORT_SYMBOL(vringh_iov_pull_user);
 750
 751/**
 752 * vringh_iov_push_user - copy bytes into the vringh_iov.
 753 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 754 * @src: the place to copy from.
 755 * @len: the maximum length to copy.
 756 *
 757 * Returns the bytes copied <= len or a negative errno.
 758 */
 759ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
 760			     const void *src, size_t len)
 761{
 762	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
 763			       (void *)src, len, xfer_to_user);
 764}
 765EXPORT_SYMBOL(vringh_iov_push_user);
 766
 767/**
 768 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 769 * @vrh: the vring.
 770 * @num: the number of descriptors to put back (i.e., the number of
 771 *	 vringh_getdesc_user() calls to undo).
 772 *
 773 * The next vringh_getdesc_user() will return the old descriptor(s) again.
 774 */
 775void vringh_abandon_user(struct vringh *vrh, unsigned int num)
 776{
 777	/* We only update vring_avail_event(vr) when we want to be notified,
 778	 * so we haven't changed that yet. */
 779	vrh->last_avail_idx -= num;
 780}
 781EXPORT_SYMBOL(vringh_abandon_user);
 782
 783/**
 784 * vringh_complete_user - we've finished with a descriptor: publish it.
 785 * @vrh: the vring.
 786 * @head: the head as filled in by vringh_getdesc_user.
 787 * @len: the length of data we have written.
 788 *
 789 * You should check vringh_need_notify_user() after one or more calls
 790 * to this function.
 791 */
 792int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
 793{
 794	struct vring_used_elem used;
 795
 796	used.id = cpu_to_vringh32(vrh, head);
 797	used.len = cpu_to_vringh32(vrh, len);
 798	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
 799}
 800EXPORT_SYMBOL(vringh_complete_user);
 801
 802/**
 803 * vringh_complete_multi_user - we've finished with many descriptors.
 804 * @vrh: the vring.
 805 * @used: the head, length pairs.
 806 * @num_used: the number of used elements.
 807 *
 808 * You should check vringh_need_notify_user() after one or more calls
 809 * to this function.
 810 */
 811int vringh_complete_multi_user(struct vringh *vrh,
 812			       const struct vring_used_elem used[],
 813			       unsigned num_used)
 814{
 815	return __vringh_complete(vrh, used, num_used,
 816				 putu16_user, putused_user);
 817}
 818EXPORT_SYMBOL(vringh_complete_multi_user);
 819
 820/**
 821 * vringh_notify_enable_user - we want to know if something changes.
 822 * @vrh: the vring.
 823 *
 824 * This always enables notifications, but returns false if there are
 825 * now more buffers available in the vring.
 826 */
 827bool vringh_notify_enable_user(struct vringh *vrh)
 828{
 829	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
 830}
 831EXPORT_SYMBOL(vringh_notify_enable_user);
 832
 833/**
 834 * vringh_notify_disable_user - don't tell us if something changes.
 835 * @vrh: the vring.
 836 *
 837 * This is our normal running state: we disable and then only enable when
 838 * we're going to sleep.
 839 */
 840void vringh_notify_disable_user(struct vringh *vrh)
 841{
 842	__vringh_notify_disable(vrh, putu16_user);
 843}
 844EXPORT_SYMBOL(vringh_notify_disable_user);
 845
 846/**
 847 * vringh_need_notify_user - must we tell the other side about used buffers?
 848 * @vrh: the vring we've called vringh_complete_user() on.
 849 *
 850 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 851 */
 852int vringh_need_notify_user(struct vringh *vrh)
 853{
 854	return __vringh_need_notify(vrh, getu16_user);
 855}
 856EXPORT_SYMBOL(vringh_need_notify_user);
 857
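/*
 * Editor's sketch: how the _user API above fits together.  getrange
 * and example_signal_guest() (the interrupt/eventfd path) are
 * hypothetical, and error handling is elided for brevity.
 */
static void example_signal_guest(void) { /* e.g. signal an eventfd */ }

static void example_user_service(struct vringh *vrh,
				 bool (*getrange)(struct vringh *, u64,
						  struct vringh_range *))
{
	struct vringh_iov riov, wiov;
	u8 buf[64];
	u16 head;

	vringh_iov_init(&riov, NULL, 0);	/* grown on demand */
	vringh_iov_init(&wiov, NULL, 0);

	while (vringh_getdesc_user(vrh, &riov, &wiov, getrange, &head) == 1) {
		ssize_t n = vringh_iov_pull_user(&riov, buf, sizeof(buf));

		if (n < 0)
			break;
		/* ... act on buf, maybe vringh_iov_push_user(&wiov, ...) ... */
		vringh_complete_user(vrh, head, 0);
	}

	if (vringh_need_notify_user(vrh) > 0)
		example_signal_guest();

	vringh_iov_cleanup(&riov);	/* frees only what was kmalloc'ed */
	vringh_iov_cleanup(&wiov);
}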
 858/* Kernelspace access helpers. */
 859static inline int getu16_kern(const struct vringh *vrh,
 860			      u16 *val, const __virtio16 *p)
 861{
 862	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
 863	return 0;
 864}
 865
 866static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
 867{
 868	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
 869	return 0;
 870}
 871
 872static inline int copydesc_kern(const struct vringh *vrh,
 873				void *dst, const void *src, size_t len)
 874{
 875	memcpy(dst, src, len);
 876	return 0;
 877}
 878
 879static inline int putused_kern(const struct vringh *vrh,
 880			       struct vring_used_elem *dst,
 881			       const struct vring_used_elem *src,
 882			       unsigned int num)
 883{
 884	memcpy(dst, src, num * sizeof(*dst));
 885	return 0;
 886}
 887
 888static inline int xfer_kern(const struct vringh *vrh, void *src,
 889			    void *dst, size_t len)
 890{
 891	memcpy(dst, src, len);
 892	return 0;
 893}
 894
 895static inline int kern_xfer(const struct vringh *vrh, void *dst,
 896			    void *src, size_t len)
 897{
 898	memcpy(dst, src, len);
 899	return 0;
 900}
 901
 902/**
 903 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 904 * @vrh: the vringh to initialize.
 905 * @features: the feature bits for this ring.
 906 * @num: the number of elements.
 907 * @weak_barriers: true if we only need memory barriers, not I/O.
 908 * @desc: the kernelspace descriptor pointer.
 909 * @avail: the kernelspace avail pointer.
 910 * @used: the kernelspace used pointer.
 911 *
 912 * Returns an error if num is invalid.
 913 */
 914int vringh_init_kern(struct vringh *vrh, u64 features,
 915		     unsigned int num, bool weak_barriers,
 916		     struct vring_desc *desc,
 917		     struct vring_avail *avail,
 918		     struct vring_used *used)
 919{
 920	/* Sane power of 2 please! */
 921	if (!num || num > 0xffff || (num & (num - 1))) {
 922		vringh_bad("Bad ring size %u", num);
 923		return -EINVAL;
 924	}
 925
 926	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
 927	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
 928	vrh->weak_barriers = weak_barriers;
 929	vrh->completed = 0;
 930	vrh->last_avail_idx = 0;
 931	vrh->last_used_idx = 0;
 932	vrh->vring.num = num;
 933	vrh->vring.desc = desc;
 934	vrh->vring.avail = avail;
 935	vrh->vring.used = used;
 936	return 0;
 937}
 938EXPORT_SYMBOL(vringh_init_kern);
 939
 940/**
 941 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 942 * @vrh: the kernelspace vring.
 943 * @riov: where to put the readable descriptors (or NULL)
 944 * @wiov: where to put the writable descriptors (or NULL)
 945 * @head: head index we received, for passing to vringh_complete_kern().
 946 * @gfp: flags for allocating larger riov/wiov.
 947 *
 948 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 949 *
 950 * Note that on error return, you can tell the difference between an
 951 * invalid ring and a single invalid descriptor: in the former case,
 952 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 953 * descriptor, but there's not much you can do with an invalid ring.
 954 *
 955 * Note that you can reuse riov and wiov with subsequent calls. Content is
 956 * overwritten and memory reallocated if more space is needed.
 957 * When you no longer need riov and wiov, you should clean them up by
 958 * calling vringh_kiov_cleanup() to release the memory, even on error!
 959 */
 960int vringh_getdesc_kern(struct vringh *vrh,
 961			struct vringh_kiov *riov,
 962			struct vringh_kiov *wiov,
 963			u16 *head,
 964			gfp_t gfp)
 965{
 966	int err;
 967
 968	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
 969	if (err < 0)
 970		return err;
 971
 972	/* Empty... */
 973	if (err == vrh->vring.num)
 974		return 0;
 975
 976	*head = err;
 977	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
 978			   gfp, copydesc_kern);
 979	if (err)
 980		return err;
 981
 982	return 1;
 983}
 984EXPORT_SYMBOL(vringh_getdesc_kern);
 985
 986/**
 987 * vringh_iov_pull_kern - copy bytes from the vringh_kiov.
 988 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 989 * @dst: the place to copy.
 990 * @len: the maximum length to copy.
 991 *
 992 * Returns the bytes copied <= len or a negative errno.
 993 */
 994ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
 995{
 996	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
 997}
 998EXPORT_SYMBOL(vringh_iov_pull_kern);
 999
1000/**
1001 * vringh_iov_push_kern - copy bytes into the vringh_kiov.
1002 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
1003 * @src: the place to copy from.
1004 * @len: the maximum length to copy.
1005 *
1006 * Returns the bytes copied <= len or a negative errno.
1007 */
1008ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
1009			     const void *src, size_t len)
1010{
1011	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
1012}
1013EXPORT_SYMBOL(vringh_iov_push_kern);
1014
1015/**
1016 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
1017 * @vrh: the vring.
1018 * @num: the number of descriptors to put back (i.e., the number of
1019 *	 vringh_getdesc_kern() calls to undo).
1020 *
1021 * The next vringh_getdesc_kern() will return the old descriptor(s) again.
1022 */
1023void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
1024{
1025	/* We only update vring_avail_event(vr) when we want to be notified,
1026	 * so we haven't changed that yet. */
1027	vrh->last_avail_idx -= num;
1028}
1029EXPORT_SYMBOL(vringh_abandon_kern);
1030
1031/**
1032 * vringh_complete_kern - we've finished with a descriptor: publish it.
1033 * @vrh: the vring.
1034 * @head: the head as filled in by vringh_getdesc_kern.
1035 * @len: the length of data we have written.
1036 *
1037 * You should check vringh_need_notify_kern() after one or more calls
1038 * to this function.
1039 */
1040int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
1041{
1042	struct vring_used_elem used;
1043
1044	used.id = cpu_to_vringh32(vrh, head);
1045	used.len = cpu_to_vringh32(vrh, len);
1046
1047	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
1048}
1049EXPORT_SYMBOL(vringh_complete_kern);
1050
1051/**
1052 * vringh_notify_enable_kern - we want to know if something changes.
1053 * @vrh: the vring.
1054 *
1055 * This always enables notifications, but returns false if there are
1056 * now more buffers available in the vring.
1057 */
1058bool vringh_notify_enable_kern(struct vringh *vrh)
1059{
1060	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
1061}
1062EXPORT_SYMBOL(vringh_notify_enable_kern);
1063
1064/**
1065 * vringh_notify_disable_kern - don't tell us if something changes.
1066 * @vrh: the vring.
1067 *
1068 * This is our normal running state: we disable and then only enable when
1069 * we're going to sleep.
1070 */
1071void vringh_notify_disable_kern(struct vringh *vrh)
1072{
1073	__vringh_notify_disable(vrh, putu16_kern);
1074}
1075EXPORT_SYMBOL(vringh_notify_disable_kern);
1076
1077/**
1078 * vringh_need_notify_kern - must we tell the other side about used buffers?
1079 * @vrh: the vring we've called vringh_complete_kern() on.
1080 *
1081 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
1082 */
1083int vringh_need_notify_kern(struct vringh *vrh)
1084{
1085	return __vringh_need_notify(vrh, getu16_kern);
1086}
1087EXPORT_SYMBOL(vringh_need_notify_kern);
1088
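/*
 * Editor's sketch: the kernelspace flavour end-to-end, including the
 * "enable notifications, then re-check" idiom that
 * __vringh_notify_enable() implements.  example_kick() is
 * hypothetical.
 */
static void example_kick(void) { /* e.g. notify the driver side */ }

static void example_kern_poll(struct vringh *vrh)
{
	struct kvec kvec[8];
	struct vringh_kiov riov;
	u8 buf[64];
	u16 head;

	vringh_kiov_init(&riov, kvec, ARRAY_SIZE(kvec));

	for (;;) {
		int err = vringh_getdesc_kern(vrh, &riov, NULL, &head,
					      GFP_KERNEL);

		if (err < 0)
			break;
		if (err == 0) {
			/* Empty: stop only if still empty once
			 * notifications are back on. */
			if (vringh_notify_enable_kern(vrh))
				break;
			vringh_notify_disable_kern(vrh);
			continue;
		}

		if (vringh_iov_pull_kern(&riov, buf, sizeof(buf)) < 0)
			break;
		vringh_complete_kern(vrh, head, 0);
	}

	if (vringh_need_notify_kern(vrh) > 0)
		example_kick();

	vringh_kiov_cleanup(&riov);	/* frees iov only if it was resized */
}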
1089#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
1090
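/*
 * Editor's note: iotlb_translate() fills @iov with up to @iov_size
 * bio_vecs covering the I/O virtual range [@addr, @addr + @len).  It
 * returns the number of bio_vecs used, -ENOBUFS if @iov is too small,
 * -EINVAL if part of the range has no mapping, or -EPERM if a mapping
 * lacks the requested @perm.
 */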
1091static int iotlb_translate(const struct vringh *vrh,
1092			   u64 addr, u64 len, struct bio_vec iov[],
1093			   int iov_size, u32 perm)
1094{
1095	struct vhost_iotlb_map *map;
1096	struct vhost_iotlb *iotlb = vrh->iotlb;
1097	int ret = 0;
1098	u64 s = 0;
1099
1100	spin_lock(vrh->iotlb_lock);
1101
1102	while (len > s) {
1103		u64 size, pa, pfn;
1104
1105		if (unlikely(ret >= iov_size)) {
1106			ret = -ENOBUFS;
1107			break;
1108		}
1109
1110		map = vhost_iotlb_itree_first(iotlb, addr,
1111					      addr + len - 1);
1112		if (!map || map->start > addr) {
1113			ret = -EINVAL;
1114			break;
1115		} else if (!(map->perm & perm)) {
1116			ret = -EPERM;
1117			break;
1118		}
1119
1120		size = map->size - addr + map->start;
1121		pa = map->addr + addr - map->start;
1122		pfn = pa >> PAGE_SHIFT;
1123		iov[ret].bv_page = pfn_to_page(pfn);
1124		iov[ret].bv_len = min(len - s, size);
1125		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
1126		s += size;
1127		addr += size;
1128		++ret;
1129	}
1130
1131	spin_unlock(vrh->iotlb_lock);
1132
1133	return ret;
1134}
1135
1136static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
1137				  void *src, size_t len)
1138{
1139	struct iov_iter iter;
1140	struct bio_vec iov[16];
1141	int ret;
1142
1143	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
1144			      len, iov, 16, VHOST_MAP_RO);
1145	if (ret < 0)
1146		return ret;
1147
1148	iov_iter_bvec(&iter, READ, iov, ret, len);
1149
1150	ret = copy_from_iter(dst, len, &iter);
1151
1152	return ret;
1153}
1154
1155static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
1156				void *src, size_t len)
1157{
1158	struct iov_iter iter;
1159	struct bio_vec iov[16];
1160	int ret;
1161
1162	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
1163			      len, iov, 16, VHOST_MAP_WO);
1164	if (ret < 0)
1165		return ret;
1166
1167	iov_iter_bvec(&iter, WRITE, iov, ret, len);
1168
1169	return copy_to_iter(src, len, &iter);
1170}
1171
1172static inline int getu16_iotlb(const struct vringh *vrh,
1173			       u16 *val, const __virtio16 *p)
1174{
1175	struct bio_vec iov;
1176	void *kaddr, *from;
1177	int ret;
1178
1179	/* Atomic read is needed for getu16 */
1180	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
1181			      &iov, 1, VHOST_MAP_RO);
1182	if (ret < 0)
1183		return ret;
1184
1185	kaddr = kmap_atomic(iov.bv_page);
1186	from = kaddr + iov.bv_offset;
1187	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
1188	kunmap_atomic(kaddr);
1189
1190	return 0;
1191}
1192
1193static inline int putu16_iotlb(const struct vringh *vrh,
1194			       __virtio16 *p, u16 val)
1195{
1196	struct bio_vec iov;
1197	void *kaddr, *to;
1198	int ret;
1199
1200	/* Atomic write is needed for putu16 */
1201	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
1202			      &iov, 1, VHOST_MAP_WO);
1203	if (ret < 0)
1204		return ret;
1205
1206	kaddr = kmap_atomic(iov.bv_page);
1207	to = kaddr + iov.bv_offset;
1208	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
1209	kunmap_atomic(kaddr);
1210
1211	return 0;
1212}
1213
1214static inline int copydesc_iotlb(const struct vringh *vrh,
1215				 void *dst, const void *src, size_t len)
1216{
1217	int ret;
1218
1219	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
1220	if (ret != len)
1221		return -EFAULT;
1222
1223	return 0;
1224}
1225
1226static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
1227				  void *dst, size_t len)
1228{
1229	int ret;
1230
1231	ret = copy_from_iotlb(vrh, dst, src, len);
1232	if (ret != len)
1233		return -EFAULT;
1234
1235	return 0;
1236}
1237
1238static inline int xfer_to_iotlb(const struct vringh *vrh,
1239			       void *dst, void *src, size_t len)
1240{
1241	int ret;
1242
1243	ret = copy_to_iotlb(vrh, dst, src, len);
1244	if (ret != len)
1245		return -EFAULT;
1246
1247	return 0;
1248}
1249
1250static inline int putused_iotlb(const struct vringh *vrh,
1251				struct vring_used_elem *dst,
1252				const struct vring_used_elem *src,
1253				unsigned int num)
1254{
1255	int size = num * sizeof(*dst);
1256	int ret;
1257
1258	ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
1259	if (ret != size)
1260		return -EFAULT;
1261
1262	return 0;
1263}
1264
1265/**
1266 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
1267 * @vrh: the vringh to initialize.
1268 * @features: the feature bits for this ring.
1269 * @num: the number of elements.
1270 * @weak_barriers: true if we only need memory barriers, not I/O.
1271 * @desc: the descriptor pointer (accessed via the IOTLB).
1272 * @avail: the avail pointer (accessed via the IOTLB).
1273 * @used: the used pointer (accessed via the IOTLB).
1274 *
1275 * Returns an error if num is invalid.
1276 */
1277int vringh_init_iotlb(struct vringh *vrh, u64 features,
1278		      unsigned int num, bool weak_barriers,
1279		      struct vring_desc *desc,
1280		      struct vring_avail *avail,
1281		      struct vring_used *used)
1282{
1283	return vringh_init_kern(vrh, features, num, weak_barriers,
1284				desc, avail, used);
1285}
1286EXPORT_SYMBOL(vringh_init_iotlb);
1287
1288/**
1289 * vringh_set_iotlb - attach an IOTLB (and its lock) to a vringh.
1290 * @vrh: the vring
1291 * @iotlb: iotlb associated with this vring
1292 * @iotlb_lock: spinlock to synchronize the iotlb accesses
1293 */
1294void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
1295		      spinlock_t *iotlb_lock)
1296{
1297	vrh->iotlb = iotlb;
1298	vrh->iotlb_lock = iotlb_lock;
1299}
1300EXPORT_SYMBOL(vringh_set_iotlb);
1301
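/*
 * Editor's sketch: wiring a vringh to an IOTLB.  The limit passed to
 * vhost_iotlb_alloc() and the externally held spinlock are arbitrary
 * illustrative choices; mappings are added later, e.g. with
 * vhost_iotlb_add_range().
 */
static int example_attach_iotlb(struct vringh *vrh, u64 features,
				unsigned int num,
				struct vring_desc *desc,
				struct vring_avail *avail,
				struct vring_used *used,
				spinlock_t *lock)
{
	struct vhost_iotlb *iotlb;
	int err;

	err = vringh_init_iotlb(vrh, features, num, true, desc, avail, used);
	if (err)
		return err;

	iotlb = vhost_iotlb_alloc(2048, 0);
	if (!iotlb)
		return -ENOMEM;

	vringh_set_iotlb(vrh, iotlb, lock);
	return 0;
}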
1302/**
1303 * vringh_getdesc_iotlb - get next available descriptor from ring with
1304 * IOTLB.
1305 * @vrh: the kernelspace vring.
1306 * @riov: where to put the readable descriptors (or NULL)
1307 * @wiov: where to put the writable descriptors (or NULL)
1308 * @head: head index we received, for passing to vringh_complete_iotlb().
1309 * @gfp: flags for allocating larger riov/wiov.
1310 *
1311 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
1312 *
1313 * Note that on error return, you can tell the difference between an
1314 * invalid ring and a single invalid descriptor: in the former case,
1315 * *head will be vrh->vring.num.  You may be able to ignore an invalid
1316 * descriptor, but there's not much you can do with an invalid ring.
1317 *
1318 * Note that you can reuse riov and wiov with subsequent calls. Content is
1319 * overwritten and memory reallocated if more space is needed.
1320 * When you no longer need riov and wiov, you should clean them up by
1321 * calling vringh_kiov_cleanup() to release the memory, even on error!
1322 */
1323int vringh_getdesc_iotlb(struct vringh *vrh,
1324			 struct vringh_kiov *riov,
1325			 struct vringh_kiov *wiov,
1326			 u16 *head,
1327			 gfp_t gfp)
1328{
1329	int err;
1330
1331	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
1332	if (err < 0)
1333		return err;
1334
1335	/* Empty... */
1336	if (err == vrh->vring.num)
1337		return 0;
1338
1339	*head = err;
1340	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
1341			   gfp, copydesc_iotlb);
1342	if (err)
1343		return err;
1344
1345	return 1;
1346}
1347EXPORT_SYMBOL(vringh_getdesc_iotlb);
1348
1349/**
1350 * vringh_iov_pull_iotlb - copy bytes from the vringh_kiov.
1351 * @vrh: the vring.
1352 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
1353 * @dst: the place to copy.
1354 * @len: the maximum length to copy.
1355 *
1356 * Returns the bytes copied <= len or a negative errno.
1357 */
1358ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
1359			      struct vringh_kiov *riov,
1360			      void *dst, size_t len)
1361{
1362	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
1363}
1364EXPORT_SYMBOL(vringh_iov_pull_iotlb);
1365
1366/**
1367 * vringh_iov_push_iotlb - copy bytes into the vringh_kiov.
1368 * @vrh: the vring.
1369 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
1370 * @src: the place to copy from.
1371 * @len: the maximum length to copy.
1372 *
1373 * Returns the bytes copied <= len or a negative errno.
1374 */
1375ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
1376			      struct vringh_kiov *wiov,
1377			      const void *src, size_t len)
1378{
1379	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
1380}
1381EXPORT_SYMBOL(vringh_iov_push_iotlb);
1382
1383/**
1384 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
1385 * @vrh: the vring.
1386 * @num: the number of descriptors to put back (i.e., the number of
1387 *	 vringh_getdesc_iotlb() calls to undo).
1388 *
1389 * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
1390 */
1391void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
1392{
1393	/* We only update vring_avail_event(vr) when we want to be notified,
1394	 * so we haven't changed that yet.
1395	 */
1396	vrh->last_avail_idx -= num;
1397}
1398EXPORT_SYMBOL(vringh_abandon_iotlb);
1399
1400/**
1401 * vringh_complete_iotlb - we've finished with a descriptor: publish it.
1402 * @vrh: the vring.
1403 * @head: the head as filled in by vringh_getdesc_iotlb.
1404 * @len: the length of data we have written.
1405 *
1406 * You should check vringh_need_notify_iotlb() after one or more calls
1407 * to this function.
1408 */
1409int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
1410{
1411	struct vring_used_elem used;
1412
1413	used.id = cpu_to_vringh32(vrh, head);
1414	used.len = cpu_to_vringh32(vrh, len);
1415
1416	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
1417}
1418EXPORT_SYMBOL(vringh_complete_iotlb);
1419
1420/**
1421 * vringh_notify_enable_iotlb - we want to know if something changes.
1422 * @vrh: the vring.
1423 *
1424 * This always enables notifications, but returns false if there are
1425 * now more buffers available in the vring.
1426 */
1427bool vringh_notify_enable_iotlb(struct vringh *vrh)
1428{
1429	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
1430}
1431EXPORT_SYMBOL(vringh_notify_enable_iotlb);
1432
1433/**
1434 * vringh_notify_disable_iotlb - don't tell us if something changes.
1435 * @vrh: the vring.
1436 *
1437 * This is our normal running state: we disable and then only enable when
1438 * we're going to sleep.
1439 */
1440void vringh_notify_disable_iotlb(struct vringh *vrh)
1441{
1442	__vringh_notify_disable(vrh, putu16_iotlb);
1443}
1444EXPORT_SYMBOL(vringh_notify_disable_iotlb);
1445
1446/**
1447 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
1448 * @vrh: the vring we've called vringh_complete_iotlb() on.
1449 *
1450 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
1451 */
1452int vringh_need_notify_iotlb(struct vringh *vrh)
1453{
1454	return __vringh_need_notify(vrh, getu16_iotlb);
1455}
1456EXPORT_SYMBOL(vringh_need_notify_iotlb);
1457
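/*
 * Editor's sketch: a device-side receive path with the _iotlb flavour,
 * pushing a buffer into the driver's writable descriptors.
 * example_signal() is hypothetical.
 */
static void example_signal(void) { /* e.g. raise the device interrupt */ }

static void example_iotlb_rx(struct vringh *vrh, const void *data, size_t len)
{
	struct kvec kvec[8];
	struct vringh_kiov wiov;
	u16 head;

	vringh_kiov_init(&wiov, kvec, ARRAY_SIZE(kvec));

	if (vringh_getdesc_iotlb(vrh, NULL, &wiov, &head, GFP_KERNEL) == 1) {
		ssize_t n = vringh_iov_push_iotlb(vrh, &wiov, data, len);

		if (n >= 0)
			vringh_complete_iotlb(vrh, head, n);
	}

	if (vringh_need_notify_iotlb(vrh) > 0)
		example_signal();

	vringh_kiov_cleanup(&wiov);
}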
1458#endif
1459
1460MODULE_LICENSE("GPL");