v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2018-2020 Intel Corporation.
   4 * Copyright (C) 2020 Red Hat, Inc.
   5 *
   6 * Author: Tiwei Bie <tiwei.bie@intel.com>
   7 *         Jason Wang <jasowang@redhat.com>
   8 *
   9 * Thanks to Michael S. Tsirkin for the valuable comments and
  10 * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
  11 * their support.
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/cdev.h>
  17#include <linux/device.h>
  18#include <linux/mm.h>
  19#include <linux/slab.h>
  20#include <linux/iommu.h>
  21#include <linux/uuid.h>
  22#include <linux/vdpa.h>
  23#include <linux/nospec.h>
  24#include <linux/vhost.h>
  25
  26#include "vhost.h"
  27
  28enum {
  29	VHOST_VDPA_BACKEND_FEATURES =
  30	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
  31	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
  32	(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
  33};
  34
  35#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
  36
  37#define VHOST_VDPA_IOTLB_BUCKETS 16
  38
  39struct vhost_vdpa_as {
  40	struct hlist_node hash_link;
  41	struct vhost_iotlb iotlb;
  42	u32 id;
  43};
  44
  45struct vhost_vdpa {
  46	struct vhost_dev vdev;
  47	struct iommu_domain *domain;
  48	struct vhost_virtqueue *vqs;
  49	struct completion completion;
  50	struct vdpa_device *vdpa;
  51	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
  52	struct device dev;
  53	struct cdev cdev;
  54	atomic_t opened;
  55	u32 nvqs;
  56	int virtio_id;
  57	int minor;
  58	struct eventfd_ctx *config_ctx;
  59	int in_batch;
  60	struct vdpa_iova_range range;
  61	u32 batch_asid;
  62	bool suspended;
  63};
  64
  65static DEFINE_IDA(vhost_vdpa_ida);
  66
  67static dev_t vhost_vdpa_major;
  68
  69static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
  70				   struct vhost_iotlb *iotlb, u64 start,
  71				   u64 last, u32 asid);
  72
  73static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
  74{
  75	struct vhost_vdpa_as *as = container_of(iotlb, struct
  76						vhost_vdpa_as, iotlb);
  77	return as->id;
  78}
  79
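/*
 * Address space IDs (ASIDs) are hashed into VHOST_VDPA_IOTLB_BUCKETS
 * buckets; each bucket chains the vhost_vdpa_as structures whose id maps
 * to it, so an ASID lookup is a short hlist walk.
 */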
  80static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
  81{
  82	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
  83	struct vhost_vdpa_as *as;
  84
  85	hlist_for_each_entry(as, head, hash_link)
  86		if (as->id == asid)
  87			return as;
  88
  89	return NULL;
  90}
  91
  92static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
  93{
  94	struct vhost_vdpa_as *as = asid_to_as(v, asid);
  95
  96	if (!as)
  97		return NULL;
  98
  99	return &as->iotlb;
 100}
 101
 102static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
 103{
 104	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
 105	struct vhost_vdpa_as *as;
 106
 107	if (asid_to_as(v, asid))
 108		return NULL;
 109
 110	if (asid >= v->vdpa->nas)
 111		return NULL;
 112
 113	as = kmalloc(sizeof(*as), GFP_KERNEL);
 114	if (!as)
 115		return NULL;
 116
 117	vhost_iotlb_init(&as->iotlb, 0, 0);
 118	as->id = asid;
 119	hlist_add_head(&as->hash_link, head);
 120
 121	return as;
 122}
 123
 124static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
 125						      u32 asid)
 126{
 127	struct vhost_vdpa_as *as = asid_to_as(v, asid);
 128
 129	if (as)
 130		return as;
 131
 132	return vhost_vdpa_alloc_as(v, asid);
 133}
 134
 135static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
 136{
 137	struct vdpa_device *vdpa = v->vdpa;
 138	const struct vdpa_config_ops *ops = vdpa->config;
 139
 140	if (ops->reset_map)
 141		ops->reset_map(vdpa, asid);
 142}
 143
 144static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
 145{
 146	struct vhost_vdpa_as *as = asid_to_as(v, asid);
 147
 148	if (!as)
 149		return -EINVAL;
 150
 151	hlist_del(&as->hash_link);
 152	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
 153	/*
  154	 * Devices with a vendor-specific IOMMU may need to restore the
  155	 * iotlb to its initial or default state, which cannot be
  156	 * done by the full-range unmap call above. Give them
 157	 * a chance to clean up or reset the map to the desired
 158	 * state.
 159	 */
 160	vhost_vdpa_reset_map(v, asid);
 161	kfree(as);
 162
 163	return 0;
 164}
 165
 166static void handle_vq_kick(struct vhost_work *work)
 167{
 168	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
 169						  poll.work);
 170	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
 171	const struct vdpa_config_ops *ops = v->vdpa->config;
 172
 173	ops->kick_vq(v->vdpa, vq - v->vqs);
 174}
 175
 176static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
 177{
 178	struct vhost_virtqueue *vq = private;
 179	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
 180
 181	if (call_ctx)
 182		eventfd_signal(call_ctx);
 183
 184	return IRQ_HANDLED;
 185}
 186
 187static irqreturn_t vhost_vdpa_config_cb(void *private)
 188{
 189	struct vhost_vdpa *v = private;
 190	struct eventfd_ctx *config_ctx = v->config_ctx;
 191
 192	if (config_ctx)
 193		eventfd_signal(config_ctx);
 194
 195	return IRQ_HANDLED;
 196}
 197
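/*
 * Register the virtqueue's call eventfd as an IRQ bypass producer for the
 * device vector returned by get_vq_irq(), so that a matching consumer
 * (e.g. KVM) can deliver the interrupt to the guest without a trip through
 * the host eventfd path.
 */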
 198static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
 199{
 200	struct vhost_virtqueue *vq = &v->vqs[qid];
 201	const struct vdpa_config_ops *ops = v->vdpa->config;
 202	struct vdpa_device *vdpa = v->vdpa;
 203	int ret, irq;
 204
 205	if (!ops->get_vq_irq)
 206		return;
 207
 208	irq = ops->get_vq_irq(vdpa, qid);
 209	if (irq < 0)
 210		return;
 211
 212	irq_bypass_unregister_producer(&vq->call_ctx.producer);
 213	if (!vq->call_ctx.ctx)
 214		return;
 215
 216	vq->call_ctx.producer.token = vq->call_ctx.ctx;
 217	vq->call_ctx.producer.irq = irq;
 218	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
 219	if (unlikely(ret))
  220		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
 221			 qid, vq->call_ctx.producer.token, ret);
 222}
 223
 224static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
 225{
 226	struct vhost_virtqueue *vq = &v->vqs[qid];
 227
 228	irq_bypass_unregister_producer(&vq->call_ctx.producer);
 229}
 230
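/*
 * Compatibility reset: if userspace never acked VHOST_BACKEND_F_IOTLB_PERSIST,
 * pass VDPA_RESET_F_CLEAN_MAP so the parent also clears its mappings,
 * preserving the historical "reset clears the IOTLB" behaviour.
 */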
 231static int _compat_vdpa_reset(struct vhost_vdpa *v)
 232{
 233	struct vdpa_device *vdpa = v->vdpa;
 234	u32 flags = 0;
 235
 236	v->suspended = false;
 237
 238	if (v->vdev.vqs) {
 239		flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
 240						    VHOST_BACKEND_F_IOTLB_PERSIST) ?
 241			 VDPA_RESET_F_CLEAN_MAP : 0;
 242	}
 243
 244	return vdpa_reset(vdpa, flags);
 245}
 246
 247static int vhost_vdpa_reset(struct vhost_vdpa *v)
 248{
 249	v->in_batch = 0;
 250	return _compat_vdpa_reset(v);
 251}
 252
 253static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
 254{
 255	struct vdpa_device *vdpa = v->vdpa;
 256	const struct vdpa_config_ops *ops = vdpa->config;
 257
 258	if (!vdpa->use_va || !ops->bind_mm)
 259		return 0;
 260
 261	return ops->bind_mm(vdpa, v->vdev.mm);
 262}
 263
 264static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
 265{
 266	struct vdpa_device *vdpa = v->vdpa;
 267	const struct vdpa_config_ops *ops = vdpa->config;
 268
 269	if (!vdpa->use_va || !ops->unbind_mm)
 270		return;
 271
 272	ops->unbind_mm(vdpa);
 273}
 274
 275static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
 276{
 277	struct vdpa_device *vdpa = v->vdpa;
 278	const struct vdpa_config_ops *ops = vdpa->config;
 279	u32 device_id;
 280
 281	device_id = ops->get_device_id(vdpa);
 282
 283	if (copy_to_user(argp, &device_id, sizeof(device_id)))
 284		return -EFAULT;
 285
 286	return 0;
 287}
 288
 289static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
 290{
 291	struct vdpa_device *vdpa = v->vdpa;
 292	const struct vdpa_config_ops *ops = vdpa->config;
 293	u8 status;
 294
 295	status = ops->get_status(vdpa);
 296
 297	if (copy_to_user(statusp, &status, sizeof(status)))
 298		return -EFAULT;
 299
 300	return 0;
 301}
 302
 303static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 304{
 305	struct vdpa_device *vdpa = v->vdpa;
 306	const struct vdpa_config_ops *ops = vdpa->config;
 307	u8 status, status_old;
 308	u32 nvqs = v->nvqs;
 309	int ret;
 310	u16 i;
 311
 312	if (copy_from_user(&status, statusp, sizeof(status)))
 313		return -EFAULT;
 314
 315	status_old = ops->get_status(vdpa);
 316
 317	/*
  318	 * Userspace shouldn't remove status bits except by resetting
  319	 * the status to 0.
 320	 */
 321	if (status != 0 && (status_old & ~status) != 0)
 322		return -EINVAL;
 323
 324	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
 325		for (i = 0; i < nvqs; i++)
 326			vhost_vdpa_unsetup_vq_irq(v, i);
 327
 328	if (status == 0) {
 329		ret = _compat_vdpa_reset(v);
 330		if (ret)
 331			return ret;
 332	} else
 333		vdpa_set_status(vdpa, status);
 334
 335	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
 336		for (i = 0; i < nvqs; i++)
 337			vhost_vdpa_setup_vq_irq(v, i);
 338
 339	return 0;
 340}
 341
 342static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
 343				      struct vhost_vdpa_config *c)
 344{
 345	struct vdpa_device *vdpa = v->vdpa;
 346	size_t size = vdpa->config->get_config_size(vdpa);
 347
 348	if (c->len == 0 || c->off > size)
 349		return -EINVAL;
 350
 351	if (c->len > size - c->off)
 352		return -E2BIG;
 353
 354	return 0;
 355}
 356
 357static long vhost_vdpa_get_config(struct vhost_vdpa *v,
 358				  struct vhost_vdpa_config __user *c)
 359{
 360	struct vdpa_device *vdpa = v->vdpa;
 361	struct vhost_vdpa_config config;
 362	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
 363	u8 *buf;
 364
 365	if (copy_from_user(&config, c, size))
 366		return -EFAULT;
 367	if (vhost_vdpa_config_validate(v, &config))
 368		return -EINVAL;
 369	buf = kvzalloc(config.len, GFP_KERNEL);
 370	if (!buf)
 371		return -ENOMEM;
 372
 373	vdpa_get_config(vdpa, config.off, buf, config.len);
 374
 375	if (copy_to_user(c->buf, buf, config.len)) {
 376		kvfree(buf);
 377		return -EFAULT;
 378	}
 379
 380	kvfree(buf);
 381	return 0;
 382}
 383
 384static long vhost_vdpa_set_config(struct vhost_vdpa *v,
 385				  struct vhost_vdpa_config __user *c)
 386{
 387	struct vdpa_device *vdpa = v->vdpa;
 388	struct vhost_vdpa_config config;
 389	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
 390	u8 *buf;
 391
 392	if (copy_from_user(&config, c, size))
 393		return -EFAULT;
 394	if (vhost_vdpa_config_validate(v, &config))
 395		return -EINVAL;
 396
 397	buf = vmemdup_user(c->buf, config.len);
 398	if (IS_ERR(buf))
 399		return PTR_ERR(buf);
 400
 401	vdpa_set_config(vdpa, config.off, buf, config.len);
 402
 403	kvfree(buf);
 404	return 0;
 405}
 406
 407static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
 408{
 409	struct vdpa_device *vdpa = v->vdpa;
 410	const struct vdpa_config_ops *ops = vdpa->config;
 411
 412	return ops->suspend;
 413}
 414
 415static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
 416{
 417	struct vdpa_device *vdpa = v->vdpa;
 418	const struct vdpa_config_ops *ops = vdpa->config;
 419
 420	return ops->resume;
 421}
 422
 423static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
 424{
 425	struct vdpa_device *vdpa = v->vdpa;
 426	const struct vdpa_config_ops *ops = vdpa->config;
 427
 428	return ops->get_vq_desc_group;
 429}
 430
 431static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
 432{
 433	struct vdpa_device *vdpa = v->vdpa;
 434	const struct vdpa_config_ops *ops = vdpa->config;
 435	u64 features;
 436
 437	features = ops->get_device_features(vdpa);
 438
 439	if (copy_to_user(featurep, &features, sizeof(features)))
 440		return -EFAULT;
 441
 442	return 0;
 443}
 444
 445static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
 446{
 447	struct vdpa_device *vdpa = v->vdpa;
 448	const struct vdpa_config_ops *ops = vdpa->config;
 449
 450	if (!ops->get_backend_features)
 451		return 0;
 452	else
 453		return ops->get_backend_features(vdpa);
 454}
 455
 456static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
 457{
 458	struct vdpa_device *vdpa = v->vdpa;
 459	const struct vdpa_config_ops *ops = vdpa->config;
 460
 461	return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
 462	       vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
 463}
 464
 465static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
 466{
 467	struct vdpa_device *vdpa = v->vdpa;
 468	const struct vdpa_config_ops *ops = vdpa->config;
 469	struct vhost_dev *d = &v->vdev;
 470	u64 actual_features;
 471	u64 features;
 472	int i;
 473
 474	/*
 475	 * It's not allowed to change the features after they have
 476	 * been negotiated.
 477	 */
 478	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
 479		return -EBUSY;
 480
 481	if (copy_from_user(&features, featurep, sizeof(features)))
 482		return -EFAULT;
 483
 484	if (vdpa_set_features(vdpa, features))
 485		return -EINVAL;
 486
 487	/* let the vqs know what has been configured */
 488	actual_features = ops->get_driver_features(vdpa);
 489	for (i = 0; i < d->nvqs; ++i) {
 490		struct vhost_virtqueue *vq = d->vqs[i];
 491
 492		mutex_lock(&vq->mutex);
 493		vq->acked_features = actual_features;
 494		mutex_unlock(&vq->mutex);
 495	}
 496
 497	return 0;
 498}
 499
 500static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
 501{
 502	struct vdpa_device *vdpa = v->vdpa;
 503	const struct vdpa_config_ops *ops = vdpa->config;
 504	u16 num;
 505
 506	num = ops->get_vq_num_max(vdpa);
 507
 508	if (copy_to_user(argp, &num, sizeof(num)))
 509		return -EFAULT;
 510
 511	return 0;
 512}
 513
 514static void vhost_vdpa_config_put(struct vhost_vdpa *v)
 515{
 516	if (v->config_ctx) {
 517		eventfd_ctx_put(v->config_ctx);
 518		v->config_ctx = NULL;
 519	}
 520}
 521
 522static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
 523{
 524	struct vdpa_callback cb;
 525	int fd;
 526	struct eventfd_ctx *ctx;
 527
 528	cb.callback = vhost_vdpa_config_cb;
 529	cb.private = v;
 530	if (copy_from_user(&fd, argp, sizeof(fd)))
 531		return  -EFAULT;
 532
 533	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
 534	swap(ctx, v->config_ctx);
 535
 536	if (!IS_ERR_OR_NULL(ctx))
 537		eventfd_ctx_put(ctx);
 538
 539	if (IS_ERR(v->config_ctx)) {
 540		long ret = PTR_ERR(v->config_ctx);
 541
 542		v->config_ctx = NULL;
 543		return ret;
 544	}
 545
 546	v->vdpa->config->set_config_cb(v->vdpa, &cb);
 547
 548	return 0;
 549}
 550
 551static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
 552{
 553	struct vhost_vdpa_iova_range range = {
 554		.first = v->range.first,
 555		.last = v->range.last,
 556	};
 557
 558	if (copy_to_user(argp, &range, sizeof(range)))
 559		return -EFAULT;
 560	return 0;
 561}
 562
 563static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
 564{
 565	struct vdpa_device *vdpa = v->vdpa;
 566	const struct vdpa_config_ops *ops = vdpa->config;
 567	u32 size;
 568
 569	size = ops->get_config_size(vdpa);
 570
 571	if (copy_to_user(argp, &size, sizeof(size)))
 572		return -EFAULT;
 573
 574	return 0;
 575}
 576
 577static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
 578{
 579	struct vdpa_device *vdpa = v->vdpa;
 580
 581	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
 582		return -EFAULT;
 583
 584	return 0;
 585}
 586
  587/* After a successful return of this ioctl the device must not process more
  588 * virtqueue descriptors. The device can still answer reads and writes of config
 589 * fields as if it were not suspended. In particular, writing to "queue_enable"
 590 * with a value of 1 will not make the device start processing buffers.
 591 */
 592static long vhost_vdpa_suspend(struct vhost_vdpa *v)
 593{
 594	struct vdpa_device *vdpa = v->vdpa;
 595	const struct vdpa_config_ops *ops = vdpa->config;
 596	int ret;
 597
 598	if (!ops->suspend)
 599		return -EOPNOTSUPP;
 600
 601	ret = ops->suspend(vdpa);
 602	if (!ret)
 603		v->suspended = true;
 604
 605	return ret;
 606}
 607
 608/* After a successful return of this ioctl the device resumes processing
 609 * virtqueue descriptors. The device becomes fully operational the same way it
 610 * was before it was suspended.
 611 */
 612static long vhost_vdpa_resume(struct vhost_vdpa *v)
 613{
 614	struct vdpa_device *vdpa = v->vdpa;
 615	const struct vdpa_config_ops *ops = vdpa->config;
 616	int ret;
 617
 618	if (!ops->resume)
 619		return -EOPNOTSUPP;
 620
 621	ret = ops->resume(vdpa);
 622	if (!ret)
 623		v->suspended = false;
 624
 625	return ret;
 626}
 627
 628static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 629				   void __user *argp)
 630{
 631	struct vdpa_device *vdpa = v->vdpa;
 632	const struct vdpa_config_ops *ops = vdpa->config;
 633	struct vdpa_vq_state vq_state;
 634	struct vdpa_callback cb;
 635	struct vhost_virtqueue *vq;
 636	struct vhost_vring_state s;
 637	u32 idx;
 638	long r;
 639
 640	r = get_user(idx, (u32 __user *)argp);
 641	if (r < 0)
 642		return r;
 643
 644	if (idx >= v->nvqs)
 645		return -ENOBUFS;
 646
 647	idx = array_index_nospec(idx, v->nvqs);
 648	vq = &v->vqs[idx];
 649
 650	switch (cmd) {
 651	case VHOST_VDPA_SET_VRING_ENABLE:
 652		if (copy_from_user(&s, argp, sizeof(s)))
 653			return -EFAULT;
 654		ops->set_vq_ready(vdpa, idx, s.num);
 655		return 0;
 656	case VHOST_VDPA_GET_VRING_GROUP:
 657		if (!ops->get_vq_group)
 658			return -EOPNOTSUPP;
 659		s.index = idx;
 660		s.num = ops->get_vq_group(vdpa, idx);
 661		if (s.num >= vdpa->ngroups)
 662			return -EIO;
 663		else if (copy_to_user(argp, &s, sizeof(s)))
 664			return -EFAULT;
 665		return 0;
 666	case VHOST_VDPA_GET_VRING_DESC_GROUP:
 667		if (!vhost_vdpa_has_desc_group(v))
 668			return -EOPNOTSUPP;
 669		s.index = idx;
 670		s.num = ops->get_vq_desc_group(vdpa, idx);
 671		if (s.num >= vdpa->ngroups)
 672			return -EIO;
 673		else if (copy_to_user(argp, &s, sizeof(s)))
 674			return -EFAULT;
 675		return 0;
 676	case VHOST_VDPA_SET_GROUP_ASID:
 677		if (copy_from_user(&s, argp, sizeof(s)))
 678			return -EFAULT;
 679		if (s.num >= vdpa->nas)
 680			return -EINVAL;
 681		if (!ops->set_group_asid)
 682			return -EOPNOTSUPP;
 683		return ops->set_group_asid(vdpa, idx, s.num);
 684	case VHOST_GET_VRING_BASE:
 685		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
 686		if (r)
 687			return r;
 688
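		/*
		 * For packed virtqueues, vhost folds the 15-bit ring index
		 * into bits 0..14 of last_avail_idx/last_used_idx and keeps
		 * the wrap counter in bit 15, as decoded below.
		 */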
 689		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
 690			vq->last_avail_idx = vq_state.packed.last_avail_idx |
 691					     (vq_state.packed.last_avail_counter << 15);
 692			vq->last_used_idx = vq_state.packed.last_used_idx |
 693					    (vq_state.packed.last_used_counter << 15);
 694		} else {
 695			vq->last_avail_idx = vq_state.split.avail_index;
 696		}
 697		break;
 698	}
 699
 700	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
 701	if (r)
 702		return r;
 703
 704	switch (cmd) {
 705	case VHOST_SET_VRING_ADDR:
 706		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
 707			return -EINVAL;
 708
 709		if (ops->set_vq_address(vdpa, idx,
 710					(u64)(uintptr_t)vq->desc,
 711					(u64)(uintptr_t)vq->avail,
 712					(u64)(uintptr_t)vq->used))
 713			r = -EINVAL;
 714		break;
 715
 716	case VHOST_SET_VRING_BASE:
 717		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
 718			return -EINVAL;
 719
 720		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
 721			vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
 722			vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
 723			vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
 724			vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
 725		} else {
 726			vq_state.split.avail_index = vq->last_avail_idx;
 727		}
 728		r = ops->set_vq_state(vdpa, idx, &vq_state);
 729		break;
 730
 731	case VHOST_SET_VRING_CALL:
 732		if (vq->call_ctx.ctx) {
 733			cb.callback = vhost_vdpa_virtqueue_cb;
 734			cb.private = vq;
 735			cb.trigger = vq->call_ctx.ctx;
 736		} else {
 737			cb.callback = NULL;
 738			cb.private = NULL;
 739			cb.trigger = NULL;
 740		}
 741		ops->set_vq_cb(vdpa, idx, &cb);
 742		vhost_vdpa_setup_vq_irq(v, idx);
 743		break;
 744
 745	case VHOST_SET_VRING_NUM:
 746		ops->set_vq_num(vdpa, idx, vq->num);
 747		break;
 748	}
 749
 750	return r;
 751}
 752
 753static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 754				      unsigned int cmd, unsigned long arg)
 755{
 756	struct vhost_vdpa *v = filep->private_data;
 757	struct vhost_dev *d = &v->vdev;
 758	void __user *argp = (void __user *)arg;
 759	u64 __user *featurep = argp;
 760	u64 features;
 761	long r = 0;
 762
 763	if (cmd == VHOST_SET_BACKEND_FEATURES) {
 764		if (copy_from_user(&features, featurep, sizeof(features)))
 765			return -EFAULT;
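		/*
		 * Userspace may only ack backend features this file knows
		 * about, and the optional ones additionally require the
		 * matching vdpa capability (suspend/resume, descriptor
		 * group, persistent IOTLB), as checked below.
		 */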
 766		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
 767				 BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
 768				 BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
 769				 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
 770				 BIT_ULL(VHOST_BACKEND_F_RESUME) |
 771				 BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
 772			return -EOPNOTSUPP;
 773		if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
 774		     !vhost_vdpa_can_suspend(v))
 775			return -EOPNOTSUPP;
 776		if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
 777		     !vhost_vdpa_can_resume(v))
 778			return -EOPNOTSUPP;
 779		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
 780		    !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
 781			return -EINVAL;
 782		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
 783		     !vhost_vdpa_has_desc_group(v))
 784			return -EOPNOTSUPP;
 785		if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
 786		     !vhost_vdpa_has_persistent_map(v))
 787			return -EOPNOTSUPP;
 788		vhost_set_backend_features(&v->vdev, features);
 789		return 0;
 790	}
 791
 792	mutex_lock(&d->mutex);
 793
 794	switch (cmd) {
 795	case VHOST_VDPA_GET_DEVICE_ID:
 796		r = vhost_vdpa_get_device_id(v, argp);
 797		break;
 798	case VHOST_VDPA_GET_STATUS:
 799		r = vhost_vdpa_get_status(v, argp);
 800		break;
 801	case VHOST_VDPA_SET_STATUS:
 802		r = vhost_vdpa_set_status(v, argp);
 803		break;
 804	case VHOST_VDPA_GET_CONFIG:
 805		r = vhost_vdpa_get_config(v, argp);
 806		break;
 807	case VHOST_VDPA_SET_CONFIG:
 808		r = vhost_vdpa_set_config(v, argp);
 809		break;
 810	case VHOST_GET_FEATURES:
 811		r = vhost_vdpa_get_features(v, argp);
 812		break;
 813	case VHOST_SET_FEATURES:
 814		r = vhost_vdpa_set_features(v, argp);
 815		break;
 816	case VHOST_VDPA_GET_VRING_NUM:
 817		r = vhost_vdpa_get_vring_num(v, argp);
 818		break;
 819	case VHOST_VDPA_GET_GROUP_NUM:
 820		if (copy_to_user(argp, &v->vdpa->ngroups,
 821				 sizeof(v->vdpa->ngroups)))
 822			r = -EFAULT;
 823		break;
 824	case VHOST_VDPA_GET_AS_NUM:
 825		if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
 826			r = -EFAULT;
 827		break;
 828	case VHOST_SET_LOG_BASE:
 829	case VHOST_SET_LOG_FD:
 830		r = -ENOIOCTLCMD;
 831		break;
 832	case VHOST_VDPA_SET_CONFIG_CALL:
 833		r = vhost_vdpa_set_config_call(v, argp);
 834		break;
 835	case VHOST_GET_BACKEND_FEATURES:
 836		features = VHOST_VDPA_BACKEND_FEATURES;
 837		if (vhost_vdpa_can_suspend(v))
 838			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
 839		if (vhost_vdpa_can_resume(v))
 840			features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
 841		if (vhost_vdpa_has_desc_group(v))
 842			features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
 843		if (vhost_vdpa_has_persistent_map(v))
 844			features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
 845		features |= vhost_vdpa_get_backend_features(v);
 846		if (copy_to_user(featurep, &features, sizeof(features)))
 847			r = -EFAULT;
 848		break;
 849	case VHOST_VDPA_GET_IOVA_RANGE:
 850		r = vhost_vdpa_get_iova_range(v, argp);
 851		break;
 852	case VHOST_VDPA_GET_CONFIG_SIZE:
 853		r = vhost_vdpa_get_config_size(v, argp);
 854		break;
 855	case VHOST_VDPA_GET_VQS_COUNT:
 856		r = vhost_vdpa_get_vqs_count(v, argp);
 857		break;
 858	case VHOST_VDPA_SUSPEND:
 859		r = vhost_vdpa_suspend(v);
 860		break;
 861	case VHOST_VDPA_RESUME:
 862		r = vhost_vdpa_resume(v);
 863		break;
 864	default:
 865		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
 866		if (r == -ENOIOCTLCMD)
 867			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
 868		break;
 869	}
 870
 871	if (r)
 872		goto out;
 873
 874	switch (cmd) {
 875	case VHOST_SET_OWNER:
 876		r = vhost_vdpa_bind_mm(v);
 877		if (r)
 878			vhost_dev_reset_owner(d, NULL);
 879		break;
 880	}
 881out:
 882	mutex_unlock(&d->mutex);
 883	return r;
 884}
 885static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
 886				     struct vhost_iotlb_map *map, u32 asid)
 887{
 888	struct vdpa_device *vdpa = v->vdpa;
 889	const struct vdpa_config_ops *ops = vdpa->config;
 890	if (ops->dma_map) {
 891		ops->dma_unmap(vdpa, asid, map->start, map->size);
 892	} else if (ops->set_map == NULL) {
 893		iommu_unmap(v->domain, map->start, map->size);
 894	}
 895}
 896
 897static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
 898				u64 start, u64 last, u32 asid)
 899{
 900	struct vhost_dev *dev = &v->vdev;
 901	struct vhost_iotlb_map *map;
 902	struct page *page;
 903	unsigned long pfn, pinned;
 904
 905	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
 906		pinned = PFN_DOWN(map->size);
 907		for (pfn = PFN_DOWN(map->addr);
 908		     pinned > 0; pfn++, pinned--) {
 909			page = pfn_to_page(pfn);
 910			if (map->perm & VHOST_ACCESS_WO)
 911				set_page_dirty_lock(page);
 912			unpin_user_page(page);
 913		}
 914		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
 915		vhost_vdpa_general_unmap(v, map, asid);
 916		vhost_iotlb_map_free(iotlb, map);
 917	}
 918}
 919
 920static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
 921				u64 start, u64 last, u32 asid)
 922{
 923	struct vhost_iotlb_map *map;
 924	struct vdpa_map_file *map_file;
 925
 926	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
 927		map_file = (struct vdpa_map_file *)map->opaque;
 928		fput(map_file->file);
 929		kfree(map_file);
 930		vhost_vdpa_general_unmap(v, map, asid);
 931		vhost_iotlb_map_free(iotlb, map);
 932	}
 933}
 934
 935static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
 936				   struct vhost_iotlb *iotlb, u64 start,
 937				   u64 last, u32 asid)
 938{
 939	struct vdpa_device *vdpa = v->vdpa;
 940
 941	if (vdpa->use_va)
 942		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
 943
 944	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
 945}
 946
 947static int perm_to_iommu_flags(u32 perm)
 948{
 949	int flags = 0;
 950
 951	switch (perm) {
 952	case VHOST_ACCESS_WO:
 953		flags |= IOMMU_WRITE;
 954		break;
 955	case VHOST_ACCESS_RO:
 956		flags |= IOMMU_READ;
 957		break;
 958	case VHOST_ACCESS_RW:
 959		flags |= (IOMMU_WRITE | IOMMU_READ);
 960		break;
 961	default:
  962		WARN(1, "invalid vhost IOTLB permission\n");
 963		break;
 964	}
 965
 966	return flags | IOMMU_CACHE;
 967}
 968
 969static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
 970			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
 971{
 972	struct vhost_dev *dev = &v->vdev;
 973	struct vdpa_device *vdpa = v->vdpa;
 974	const struct vdpa_config_ops *ops = vdpa->config;
 975	u32 asid = iotlb_to_asid(iotlb);
 976	int r = 0;
 977
 978	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
 979				      pa, perm, opaque);
 980	if (r)
 981		return r;
 982
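	/*
	 * Three mapping back-ends: parents with their own DMA translation
	 * use dma_map(), parents that want the whole table at once use
	 * set_map() (deferred while a batch is in progress), and everything
	 * else goes through the platform IOMMU domain.
	 */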
 983	if (ops->dma_map) {
 984		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
 985	} else if (ops->set_map) {
 986		if (!v->in_batch)
 987			r = ops->set_map(vdpa, asid, iotlb);
 988	} else {
 989		r = iommu_map(v->domain, iova, pa, size,
 990			      perm_to_iommu_flags(perm),
 991			      GFP_KERNEL_ACCOUNT);
 992	}
 993	if (r) {
 994		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
 995		return r;
 996	}
 997
 998	if (!vdpa->use_va)
 999		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
1000
1001	return 0;
1002}
1003
1004static void vhost_vdpa_unmap(struct vhost_vdpa *v,
1005			     struct vhost_iotlb *iotlb,
1006			     u64 iova, u64 size)
1007{
1008	struct vdpa_device *vdpa = v->vdpa;
1009	const struct vdpa_config_ops *ops = vdpa->config;
1010	u32 asid = iotlb_to_asid(iotlb);
1011
1012	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
1013
1014	if (ops->set_map) {
1015		if (!v->in_batch)
1016			ops->set_map(vdpa, asid, iotlb);
1017	}
1018
1019}
1020
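/*
 * For parents that use virtual addresses (vdpa->use_va): walk the userspace
 * VMAs covering [uaddr, uaddr + size), take a reference on each backing file
 * and record its offset, so the parent can translate or pin the memory later.
 * Only shared, file-backed VMAs without VM_IO/VM_PFNMAP are mapped.
 */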
1021static int vhost_vdpa_va_map(struct vhost_vdpa *v,
1022			     struct vhost_iotlb *iotlb,
1023			     u64 iova, u64 size, u64 uaddr, u32 perm)
1024{
1025	struct vhost_dev *dev = &v->vdev;
1026	u64 offset, map_size, map_iova = iova;
1027	struct vdpa_map_file *map_file;
1028	struct vm_area_struct *vma;
1029	int ret = 0;
1030
1031	mmap_read_lock(dev->mm);
1032
1033	while (size) {
1034		vma = find_vma(dev->mm, uaddr);
1035		if (!vma) {
1036			ret = -EINVAL;
1037			break;
1038		}
1039		map_size = min(size, vma->vm_end - uaddr);
1040		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
1041			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
1042			goto next;
1043
1044		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
1045		if (!map_file) {
1046			ret = -ENOMEM;
1047			break;
1048		}
1049		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
1050		map_file->offset = offset;
1051		map_file->file = get_file(vma->vm_file);
1052		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
1053				     perm, map_file);
1054		if (ret) {
1055			fput(map_file->file);
1056			kfree(map_file);
1057			break;
1058		}
1059next:
1060		size -= map_size;
1061		uaddr += map_size;
1062		map_iova += map_size;
1063	}
1064	if (ret)
1065		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
1066
1067	mmap_read_unlock(dev->mm);
1068
1069	return ret;
1070}
1071
1072static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
1073			     struct vhost_iotlb *iotlb,
1074			     u64 iova, u64 size, u64 uaddr, u32 perm)
1075{
1076	struct vhost_dev *dev = &v->vdev;
1077	struct page **page_list;
1078	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
1079	unsigned int gup_flags = FOLL_LONGTERM;
1080	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
1081	unsigned long lock_limit, sz2pin, nchunks, i;
1082	u64 start = iova;
1083	long pinned;
1084	int ret = 0;
1085
1086	/* Limit the use of memory for bookkeeping */
1087	page_list = (struct page **) __get_free_page(GFP_KERNEL);
1088	if (!page_list)
1089		return -ENOMEM;
1090
1091	if (perm & VHOST_ACCESS_WO)
1092		gup_flags |= FOLL_WRITE;
1093
1094	npages = PFN_UP(size + (iova & ~PAGE_MASK));
1095	if (!npages) {
1096		ret = -EINVAL;
1097		goto free;
1098	}
1099
1100	mmap_read_lock(dev->mm);
1101
1102	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
1103	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
1104		ret = -ENOMEM;
1105		goto unlock;
1106	}
1107
1108	cur_base = uaddr & PAGE_MASK;
1109	iova &= PAGE_MASK;
1110	nchunks = 0;
1111
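	/*
	 * Pin the user memory in page_list-sized batches and coalesce runs
	 * of physically contiguous pages into single vhost_vdpa_map() calls,
	 * so the IOTLB ends up with one entry per contiguous chunk.
	 */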
1112	while (npages) {
1113		sz2pin = min_t(unsigned long, npages, list_size);
1114		pinned = pin_user_pages(cur_base, sz2pin,
1115					gup_flags, page_list);
1116		if (sz2pin != pinned) {
1117			if (pinned < 0) {
1118				ret = pinned;
1119			} else {
1120				unpin_user_pages(page_list, pinned);
1121				ret = -ENOMEM;
1122			}
1123			goto out;
1124		}
1125		nchunks++;
1126
1127		if (!last_pfn)
1128			map_pfn = page_to_pfn(page_list[0]);
1129
1130		for (i = 0; i < pinned; i++) {
1131			unsigned long this_pfn = page_to_pfn(page_list[i]);
1132			u64 csize;
1133
1134			if (last_pfn && (this_pfn != last_pfn + 1)) {
 1135				/* Map a contiguous chunk of pinned memory */
1136				csize = PFN_PHYS(last_pfn - map_pfn + 1);
1137				ret = vhost_vdpa_map(v, iotlb, iova, csize,
1138						     PFN_PHYS(map_pfn),
1139						     perm, NULL);
1140				if (ret) {
1141					/*
1142					 * Unpin the pages that are left unmapped
1143					 * from this point on in the current
1144					 * page_list. The remaining outstanding
1145					 * ones which may stride across several
1146					 * chunks will be covered in the common
1147					 * error path subsequently.
1148					 */
1149					unpin_user_pages(&page_list[i],
1150							 pinned - i);
1151					goto out;
1152				}
1153
1154				map_pfn = this_pfn;
1155				iova += csize;
1156				nchunks = 0;
1157			}
1158
1159			last_pfn = this_pfn;
1160		}
1161
1162		cur_base += PFN_PHYS(pinned);
1163		npages -= pinned;
1164	}
1165
 1166	/* Map the remaining contiguous chunk */
1167	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
1168			     PFN_PHYS(map_pfn), perm, NULL);
1169out:
1170	if (ret) {
1171		if (nchunks) {
1172			unsigned long pfn;
1173
1174			/*
 1175			 * Unpin the outstanding pages that were pinned but
 1176			 * not yet mapped, because vdpa_map() or
 1177			 * pin_user_pages() failed.
1178			 *
1179			 * Mapped pages are accounted in vdpa_map(), hence
1180			 * the corresponding unpinning will be handled by
1181			 * vdpa_unmap().
1182			 */
1183			WARN_ON(!last_pfn);
1184			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
1185				unpin_user_page(pfn_to_page(pfn));
1186		}
1187		vhost_vdpa_unmap(v, iotlb, start, size);
1188	}
1189unlock:
1190	mmap_read_unlock(dev->mm);
1191free:
1192	free_page((unsigned long)page_list);
1193	return ret;
1194
1195}
1196
1197static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
1198					   struct vhost_iotlb *iotlb,
1199					   struct vhost_iotlb_msg *msg)
1200{
1201	struct vdpa_device *vdpa = v->vdpa;
1202
1203	if (msg->iova < v->range.first || !msg->size ||
1204	    msg->iova > U64_MAX - msg->size + 1 ||
1205	    msg->iova + msg->size - 1 > v->range.last)
1206		return -EINVAL;
1207
1208	if (vhost_iotlb_itree_first(iotlb, msg->iova,
1209				    msg->iova + msg->size - 1))
1210		return -EEXIST;
1211
1212	if (vdpa->use_va)
1213		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
1214					 msg->uaddr, msg->perm);
1215
1216	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
1217				 msg->perm);
1218}
1219
1220static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
1221					struct vhost_iotlb_msg *msg)
1222{
1223	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
1224	struct vdpa_device *vdpa = v->vdpa;
1225	const struct vdpa_config_ops *ops = vdpa->config;
1226	struct vhost_iotlb *iotlb = NULL;
1227	struct vhost_vdpa_as *as = NULL;
1228	int r = 0;
1229
1230	mutex_lock(&dev->mutex);
1231
1232	r = vhost_dev_check_owner(dev);
1233	if (r)
1234		goto unlock;
1235
1236	if (msg->type == VHOST_IOTLB_UPDATE ||
1237	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
1238		as = vhost_vdpa_find_alloc_as(v, asid);
1239		if (!as) {
1240			dev_err(&v->dev, "can't find and alloc asid %d\n",
1241				asid);
1242			r = -EINVAL;
1243			goto unlock;
1244		}
1245		iotlb = &as->iotlb;
1246	} else
1247		iotlb = asid_to_iotlb(v, asid);
1248
1249	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
1250		if (v->in_batch && v->batch_asid != asid) {
1251			dev_info(&v->dev, "batch id %d asid %d\n",
1252				 v->batch_asid, asid);
1253		}
1254		if (!iotlb)
1255			dev_err(&v->dev, "no iotlb for asid %d\n", asid);
1256		r = -EINVAL;
1257		goto unlock;
1258	}
1259
1260	switch (msg->type) {
1261	case VHOST_IOTLB_UPDATE:
1262		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
1263		break;
1264	case VHOST_IOTLB_INVALIDATE:
1265		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
1266		break;
1267	case VHOST_IOTLB_BATCH_BEGIN:
1268		v->batch_asid = asid;
1269		v->in_batch = true;
1270		break;
1271	case VHOST_IOTLB_BATCH_END:
1272		if (v->in_batch && ops->set_map)
1273			ops->set_map(vdpa, asid, iotlb);
1274		v->in_batch = false;
1275		break;
1276	default:
1277		r = -EINVAL;
1278		break;
1279	}
1280unlock:
1281	mutex_unlock(&dev->mutex);
1282
1283	return r;
1284}
1285
1286static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
1287					 struct iov_iter *from)
1288{
1289	struct file *file = iocb->ki_filp;
1290	struct vhost_vdpa *v = file->private_data;
1291	struct vhost_dev *dev = &v->vdev;
1292
1293	return vhost_chr_write_iter(dev, from);
1294}
1295
1296static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
1297{
1298	struct vdpa_device *vdpa = v->vdpa;
1299	const struct vdpa_config_ops *ops = vdpa->config;
1300	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
1301	const struct bus_type *bus;
1302	int ret;
1303
 1304	/* Device wants to do DMA by itself */
1305	if (ops->set_map || ops->dma_map)
1306		return 0;
1307
1308	bus = dma_dev->bus;
1309	if (!bus)
1310		return -EFAULT;
1311
1312	if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
1313		dev_warn_once(&v->dev,
1314			      "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
1315		return -ENOTSUPP;
1316	}
1317
1318	v->domain = iommu_domain_alloc(bus);
1319	if (!v->domain)
1320		return -EIO;
1321
1322	ret = iommu_attach_device(v->domain, dma_dev);
1323	if (ret)
1324		goto err_attach;
1325
1326	return 0;
1327
1328err_attach:
1329	iommu_domain_free(v->domain);
1330	v->domain = NULL;
1331	return ret;
1332}
1333
1334static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
1335{
1336	struct vdpa_device *vdpa = v->vdpa;
1337	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
1338
1339	if (v->domain) {
1340		iommu_detach_device(v->domain, dma_dev);
1341		iommu_domain_free(v->domain);
1342	}
1343
1344	v->domain = NULL;
1345}
1346
1347static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
1348{
1349	struct vdpa_iova_range *range = &v->range;
1350	struct vdpa_device *vdpa = v->vdpa;
1351	const struct vdpa_config_ops *ops = vdpa->config;
1352
1353	if (ops->get_iova_range) {
1354		*range = ops->get_iova_range(vdpa);
1355	} else if (v->domain && v->domain->geometry.force_aperture) {
1356		range->first = v->domain->geometry.aperture_start;
1357		range->last = v->domain->geometry.aperture_end;
1358	} else {
1359		range->first = 0;
1360		range->last = ULLONG_MAX;
1361	}
1362}
1363
1364static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
1365{
1366	struct vhost_vdpa_as *as;
1367	u32 asid;
1368
1369	for (asid = 0; asid < v->vdpa->nas; asid++) {
1370		as = asid_to_as(v, asid);
1371		if (as)
1372			vhost_vdpa_remove_as(v, asid);
1373	}
1374
1375	vhost_vdpa_free_domain(v);
1376	vhost_dev_cleanup(&v->vdev);
1377	kfree(v->vdev.vqs);
1378	v->vdev.vqs = NULL;
1379}
1380
1381static int vhost_vdpa_open(struct inode *inode, struct file *filep)
1382{
1383	struct vhost_vdpa *v;
1384	struct vhost_dev *dev;
1385	struct vhost_virtqueue **vqs;
1386	int r, opened;
1387	u32 i, nvqs;
1388
1389	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
1390
1391	opened = atomic_cmpxchg(&v->opened, 0, 1);
1392	if (opened)
1393		return -EBUSY;
1394
1395	nvqs = v->nvqs;
1396	r = vhost_vdpa_reset(v);
1397	if (r)
1398		goto err;
1399
1400	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
1401	if (!vqs) {
1402		r = -ENOMEM;
1403		goto err;
1404	}
1405
1406	dev = &v->vdev;
1407	for (i = 0; i < nvqs; i++) {
1408		vqs[i] = &v->vqs[i];
1409		vqs[i]->handle_kick = handle_vq_kick;
1410	}
1411	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
1412		       vhost_vdpa_process_iotlb_msg);
1413
1414	r = vhost_vdpa_alloc_domain(v);
1415	if (r)
1416		goto err_alloc_domain;
1417
1418	vhost_vdpa_set_iova_range(v);
1419
1420	filep->private_data = v;
1421
1422	return 0;
1423
1424err_alloc_domain:
1425	vhost_vdpa_cleanup(v);
1426err:
1427	atomic_dec(&v->opened);
1428	return r;
1429}
1430
1431static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
1432{
1433	u32 i;
1434
1435	for (i = 0; i < v->nvqs; i++)
1436		vhost_vdpa_unsetup_vq_irq(v, i);
1437}
1438
1439static int vhost_vdpa_release(struct inode *inode, struct file *filep)
1440{
1441	struct vhost_vdpa *v = filep->private_data;
1442	struct vhost_dev *d = &v->vdev;
1443
1444	mutex_lock(&d->mutex);
1445	filep->private_data = NULL;
1446	vhost_vdpa_clean_irq(v);
1447	vhost_vdpa_reset(v);
1448	vhost_dev_stop(&v->vdev);
1449	vhost_vdpa_unbind_mm(v);
1450	vhost_vdpa_config_put(v);
1451	vhost_vdpa_cleanup(v);
1452	mutex_unlock(&d->mutex);
1453
1454	atomic_dec(&v->opened);
1455	complete(&v->completion);
1456
1457	return 0;
1458}
1459
1460#ifdef CONFIG_MMU
1461static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
1462{
1463	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
1464	struct vdpa_device *vdpa = v->vdpa;
1465	const struct vdpa_config_ops *ops = vdpa->config;
1466	struct vdpa_notification_area notify;
1467	struct vm_area_struct *vma = vmf->vma;
1468	u16 index = vma->vm_pgoff;
1469
1470	notify = ops->get_vq_notification(vdpa, index);
1471
1472	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1473	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
1474			    PFN_DOWN(notify.addr), PAGE_SIZE,
1475			    vma->vm_page_prot))
1476		return VM_FAULT_SIGBUS;
1477
1478	return VM_FAULT_NOPAGE;
1479}
1480
1481static const struct vm_operations_struct vhost_vdpa_vm_ops = {
1482	.fault = vhost_vdpa_fault,
1483};
1484
1485static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
1486{
1487	struct vhost_vdpa *v = vma->vm_file->private_data;
1488	struct vdpa_device *vdpa = v->vdpa;
1489	const struct vdpa_config_ops *ops = vdpa->config;
1490	struct vdpa_notification_area notify;
1491	unsigned long index = vma->vm_pgoff;
1492
1493	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1494		return -EINVAL;
1495	if ((vma->vm_flags & VM_SHARED) == 0)
1496		return -EINVAL;
1497	if (vma->vm_flags & VM_READ)
1498		return -EINVAL;
1499	if (index > 65535)
1500		return -EINVAL;
1501	if (!ops->get_vq_notification)
1502		return -ENOTSUPP;
1503
 1504	/* To be safe and easily modelled by userspace, we only
 1505	 * support doorbells that sit on a page boundary and
 1506	 * do not share the page with other registers.
1507	 */
1508	notify = ops->get_vq_notification(vdpa, index);
1509	if (notify.addr & (PAGE_SIZE - 1))
1510		return -EINVAL;
1511	if (vma->vm_end - vma->vm_start != notify.size)
1512		return -ENOTSUPP;
1513
1514	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1515	vma->vm_ops = &vhost_vdpa_vm_ops;
1516	return 0;
1517}
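/*
 * A minimal userspace sketch (illustrative only, not part of this file) of
 * mapping the doorbell of virtqueue 'idx' through this mmap handler:
 *
 *	int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	void *db = mmap(NULL, getpagesize(), PROT_WRITE, MAP_SHARED,
 *			fd, (off_t)idx * getpagesize());
 *
 * The file offset selects the virtqueue (vma->vm_pgoff above), and the
 * mapping must be exactly one page, shared and write-only, matching the
 * checks in vhost_vdpa_mmap().
 */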
1518#endif /* CONFIG_MMU */
1519
1520static const struct file_operations vhost_vdpa_fops = {
1521	.owner		= THIS_MODULE,
1522	.open		= vhost_vdpa_open,
1523	.release	= vhost_vdpa_release,
1524	.write_iter	= vhost_vdpa_chr_write_iter,
1525	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
1526#ifdef CONFIG_MMU
1527	.mmap		= vhost_vdpa_mmap,
1528#endif /* CONFIG_MMU */
1529	.compat_ioctl	= compat_ptr_ioctl,
1530};
1531
1532static void vhost_vdpa_release_dev(struct device *device)
1533{
1534	struct vhost_vdpa *v =
1535	       container_of(device, struct vhost_vdpa, dev);
1536
1537	ida_simple_remove(&vhost_vdpa_ida, v->minor);
1538	kfree(v->vqs);
1539	kfree(v);
1540}
1541
1542static int vhost_vdpa_probe(struct vdpa_device *vdpa)
1543{
1544	const struct vdpa_config_ops *ops = vdpa->config;
1545	struct vhost_vdpa *v;
1546	int minor;
1547	int i, r;
1548
 1549	/* We can't support a platform-IOMMU device with more than one
 1550	 * group or address space
1551	 */
1552	if (!ops->set_map && !ops->dma_map &&
1553	    (vdpa->ngroups > 1 || vdpa->nas > 1))
1554		return -EOPNOTSUPP;
1555
1556	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1557	if (!v)
1558		return -ENOMEM;
1559
1560	minor = ida_simple_get(&vhost_vdpa_ida, 0,
1561			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
1562	if (minor < 0) {
1563		kfree(v);
1564		return minor;
1565	}
1566
1567	atomic_set(&v->opened, 0);
1568	v->minor = minor;
1569	v->vdpa = vdpa;
1570	v->nvqs = vdpa->nvqs;
1571	v->virtio_id = ops->get_device_id(vdpa);
1572
1573	device_initialize(&v->dev);
1574	v->dev.release = vhost_vdpa_release_dev;
1575	v->dev.parent = &vdpa->dev;
1576	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
1577	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
1578			       GFP_KERNEL);
1579	if (!v->vqs) {
1580		r = -ENOMEM;
1581		goto err;
1582	}
1583
1584	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
1585	if (r)
1586		goto err;
1587
1588	cdev_init(&v->cdev, &vhost_vdpa_fops);
1589	v->cdev.owner = THIS_MODULE;
1590
1591	r = cdev_device_add(&v->cdev, &v->dev);
1592	if (r)
1593		goto err;
1594
1595	init_completion(&v->completion);
1596	vdpa_set_drvdata(vdpa, v);
1597
1598	for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
1599		INIT_HLIST_HEAD(&v->as[i]);
1600
1601	return 0;
1602
1603err:
1604	put_device(&v->dev);
1605	return r;
1606}
1607
1608static void vhost_vdpa_remove(struct vdpa_device *vdpa)
1609{
1610	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
1611	int opened;
1612
1613	cdev_device_del(&v->cdev, &v->dev);
1614
1615	do {
1616		opened = atomic_cmpxchg(&v->opened, 0, 1);
1617		if (!opened)
1618			break;
1619		wait_for_completion(&v->completion);
1620	} while (1);
1621
1622	put_device(&v->dev);
1623}
1624
1625static struct vdpa_driver vhost_vdpa_driver = {
1626	.driver = {
1627		.name	= "vhost_vdpa",
1628	},
1629	.probe	= vhost_vdpa_probe,
1630	.remove	= vhost_vdpa_remove,
1631};
1632
1633static int __init vhost_vdpa_init(void)
1634{
1635	int r;
1636
1637	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
1638				"vhost-vdpa");
1639	if (r)
1640		goto err_alloc_chrdev;
1641
1642	r = vdpa_register_driver(&vhost_vdpa_driver);
1643	if (r)
1644		goto err_vdpa_register_driver;
1645
1646	return 0;
1647
1648err_vdpa_register_driver:
1649	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
1650err_alloc_chrdev:
1651	return r;
1652}
1653module_init(vhost_vdpa_init);
1654
1655static void __exit vhost_vdpa_exit(void)
1656{
1657	vdpa_unregister_driver(&vhost_vdpa_driver);
1658	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
1659}
1660module_exit(vhost_vdpa_exit);
1661
1662MODULE_VERSION("0.0.1");
1663MODULE_LICENSE("GPL v2");
1664MODULE_AUTHOR("Intel Corporation");
1665MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2018-2020 Intel Corporation.
   4 * Copyright (C) 2020 Red Hat, Inc.
   5 *
   6 * Author: Tiwei Bie <tiwei.bie@intel.com>
   7 *         Jason Wang <jasowang@redhat.com>
   8 *
   9 * Thanks Michael S. Tsirkin for the valuable comments and
  10 * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
  11 * their supports.
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/cdev.h>
  17#include <linux/device.h>
  18#include <linux/mm.h>
 
  19#include <linux/iommu.h>
  20#include <linux/uuid.h>
  21#include <linux/vdpa.h>
  22#include <linux/nospec.h>
  23#include <linux/vhost.h>
  24#include <linux/virtio_net.h>
  25#include <linux/kernel.h>
  26
  27#include "vhost.h"
  28
  29enum {
  30	VHOST_VDPA_BACKEND_FEATURES =
  31	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
  32	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
 
  33};
  34
  35#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
  36
 
 
 
 
 
 
 
 
  37struct vhost_vdpa {
  38	struct vhost_dev vdev;
  39	struct iommu_domain *domain;
  40	struct vhost_virtqueue *vqs;
  41	struct completion completion;
  42	struct vdpa_device *vdpa;
 
  43	struct device dev;
  44	struct cdev cdev;
  45	atomic_t opened;
  46	int nvqs;
  47	int virtio_id;
  48	int minor;
  49	struct eventfd_ctx *config_ctx;
  50	int in_batch;
 
 
 
  51};
  52
  53static DEFINE_IDA(vhost_vdpa_ida);
  54
  55static dev_t vhost_vdpa_major;
  56
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  57static void handle_vq_kick(struct vhost_work *work)
  58{
  59	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
  60						  poll.work);
  61	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
  62	const struct vdpa_config_ops *ops = v->vdpa->config;
  63
  64	ops->kick_vq(v->vdpa, vq - v->vqs);
  65}
  66
  67static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
  68{
  69	struct vhost_virtqueue *vq = private;
  70	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
  71
  72	if (call_ctx)
  73		eventfd_signal(call_ctx, 1);
  74
  75	return IRQ_HANDLED;
  76}
  77
  78static irqreturn_t vhost_vdpa_config_cb(void *private)
  79{
  80	struct vhost_vdpa *v = private;
  81	struct eventfd_ctx *config_ctx = v->config_ctx;
  82
  83	if (config_ctx)
  84		eventfd_signal(config_ctx, 1);
  85
  86	return IRQ_HANDLED;
  87}
  88
  89static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
  90{
  91	struct vhost_virtqueue *vq = &v->vqs[qid];
  92	const struct vdpa_config_ops *ops = v->vdpa->config;
  93	struct vdpa_device *vdpa = v->vdpa;
  94	int ret, irq;
  95
  96	if (!ops->get_vq_irq)
  97		return;
  98
  99	irq = ops->get_vq_irq(vdpa, qid);
 100	spin_lock(&vq->call_ctx.ctx_lock);
 
 
 101	irq_bypass_unregister_producer(&vq->call_ctx.producer);
 102	if (!vq->call_ctx.ctx || irq < 0) {
 103		spin_unlock(&vq->call_ctx.ctx_lock);
 104		return;
 105	}
 106
 107	vq->call_ctx.producer.token = vq->call_ctx.ctx;
 108	vq->call_ctx.producer.irq = irq;
 109	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
 110	spin_unlock(&vq->call_ctx.ctx_lock);
 
 
 111}
 112
 113static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
 114{
 115	struct vhost_virtqueue *vq = &v->vqs[qid];
 116
 117	spin_lock(&vq->call_ctx.ctx_lock);
 118	irq_bypass_unregister_producer(&vq->call_ctx.producer);
 119	spin_unlock(&vq->call_ctx.ctx_lock);
 120}
 121
 122static void vhost_vdpa_reset(struct vhost_vdpa *v)
 123{
 124	struct vdpa_device *vdpa = v->vdpa;
 
 
 
 
 
 
 
 
 
 
 
 
 125
 126	vdpa_reset(vdpa);
 
 127	v->in_batch = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 128}
 129
 130static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
 131{
 132	struct vdpa_device *vdpa = v->vdpa;
 133	const struct vdpa_config_ops *ops = vdpa->config;
 134	u32 device_id;
 135
 136	device_id = ops->get_device_id(vdpa);
 137
 138	if (copy_to_user(argp, &device_id, sizeof(device_id)))
 139		return -EFAULT;
 140
 141	return 0;
 142}
 143
 144static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
 145{
 146	struct vdpa_device *vdpa = v->vdpa;
 147	const struct vdpa_config_ops *ops = vdpa->config;
 148	u8 status;
 149
 150	status = ops->get_status(vdpa);
 151
 152	if (copy_to_user(statusp, &status, sizeof(status)))
 153		return -EFAULT;
 154
 155	return 0;
 156}
 157
 158static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 159{
 160	struct vdpa_device *vdpa = v->vdpa;
 161	const struct vdpa_config_ops *ops = vdpa->config;
 162	u8 status, status_old;
 163	int nvqs = v->nvqs;
 
 164	u16 i;
 165
 166	if (copy_from_user(&status, statusp, sizeof(status)))
 167		return -EFAULT;
 168
 169	status_old = ops->get_status(vdpa);
 170
 171	/*
 172	 * Userspace shouldn't remove status bits unless reset the
 173	 * status to 0.
 174	 */
 175	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
 176		return -EINVAL;
 177
 178	ops->set_status(vdpa, status);
 
 
 
 
 
 
 
 
 
 179
 180	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
 181		for (i = 0; i < nvqs; i++)
 182			vhost_vdpa_setup_vq_irq(v, i);
 183
 184	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
 185		for (i = 0; i < nvqs; i++)
 186			vhost_vdpa_unsetup_vq_irq(v, i);
 187
 188	return 0;
 189}
 190
 191static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
 192				      struct vhost_vdpa_config *c)
 193{
 194	long size = 0;
 
 195
 196	switch (v->virtio_id) {
 197	case VIRTIO_ID_NET:
 198		size = sizeof(struct virtio_net_config);
 199		break;
 200	}
 201
 202	if (c->len == 0)
 203		return -EINVAL;
 204
 205	if (c->len > size - c->off)
 206		return -E2BIG;
 207
 208	return 0;
 209}
 210
 211static long vhost_vdpa_get_config(struct vhost_vdpa *v,
 212				  struct vhost_vdpa_config __user *c)
 213{
 214	struct vdpa_device *vdpa = v->vdpa;
 215	struct vhost_vdpa_config config;
 216	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
 217	u8 *buf;
 218
 219	if (copy_from_user(&config, c, size))
 220		return -EFAULT;
 221	if (vhost_vdpa_config_validate(v, &config))
 222		return -EINVAL;
 223	buf = kvzalloc(config.len, GFP_KERNEL);
 224	if (!buf)
 225		return -ENOMEM;
 226
 227	vdpa_get_config(vdpa, config.off, buf, config.len);
 228
 229	if (copy_to_user(c->buf, buf, config.len)) {
 230		kvfree(buf);
 231		return -EFAULT;
 232	}
 233
 234	kvfree(buf);
 235	return 0;
 236}
 237
 238static long vhost_vdpa_set_config(struct vhost_vdpa *v,
 239				  struct vhost_vdpa_config __user *c)
 240{
 241	struct vdpa_device *vdpa = v->vdpa;
 242	const struct vdpa_config_ops *ops = vdpa->config;
 243	struct vhost_vdpa_config config;
 244	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
 245	u8 *buf;
 246
 247	if (copy_from_user(&config, c, size))
 248		return -EFAULT;
 249	if (vhost_vdpa_config_validate(v, &config))
 250		return -EINVAL;
 251	buf = kvzalloc(config.len, GFP_KERNEL);
 252	if (!buf)
 253		return -ENOMEM;
 254
 255	if (copy_from_user(buf, c->buf, config.len)) {
 256		kvfree(buf);
 257		return -EFAULT;
 258	}
 259
 260	ops->set_config(vdpa, config.off, buf, config.len);
 261
 262	kvfree(buf);
 263	return 0;
 264}
 265
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 266static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
 267{
 268	struct vdpa_device *vdpa = v->vdpa;
 269	const struct vdpa_config_ops *ops = vdpa->config;
 270	u64 features;
 271
 272	features = ops->get_features(vdpa);
 273
 274	if (copy_to_user(featurep, &features, sizeof(features)))
 275		return -EFAULT;
 276
 277	return 0;
 278}
 279
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 280static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
 281{
 282	struct vdpa_device *vdpa = v->vdpa;
 283	const struct vdpa_config_ops *ops = vdpa->config;
 
 
 284	u64 features;
 
 285
 286	/*
 287	 * It's not allowed to change the features after they have
 288	 * been negotiated.
 289	 */
 290	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
 291		return -EBUSY;
 292
 293	if (copy_from_user(&features, featurep, sizeof(features)))
 294		return -EFAULT;
 295
 296	if (vdpa_set_features(vdpa, features))
 297		return -EINVAL;
 298
 
 
 
 
 
 
 
 
 
 
 299	return 0;
 300}
 301
 302static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
 303{
 304	struct vdpa_device *vdpa = v->vdpa;
 305	const struct vdpa_config_ops *ops = vdpa->config;
 306	u16 num;
 307
 308	num = ops->get_vq_num_max(vdpa);
 309
 310	if (copy_to_user(argp, &num, sizeof(num)))
 311		return -EFAULT;
 312
 313	return 0;
 314}
 315
 316static void vhost_vdpa_config_put(struct vhost_vdpa *v)
 317{
 318	if (v->config_ctx)
 319		eventfd_ctx_put(v->config_ctx);
 
 
 320}
 321
 322static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
 323{
 324	struct vdpa_callback cb;
 325	int fd;
 326	struct eventfd_ctx *ctx;
 327
 328	cb.callback = vhost_vdpa_config_cb;
 329	cb.private = v->vdpa;
 330	if (copy_from_user(&fd, argp, sizeof(fd)))
 331		return  -EFAULT;
 332
 333	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
 334	swap(ctx, v->config_ctx);
 335
 336	if (!IS_ERR_OR_NULL(ctx))
 337		eventfd_ctx_put(ctx);
 338
 339	if (IS_ERR(v->config_ctx))
 340		return PTR_ERR(v->config_ctx);
 
 
 
 
 341
 342	v->vdpa->config->set_config_cb(v->vdpa, &cb);
 343
 344	return 0;
 345}
 346
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 347static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 348				   void __user *argp)
 349{
 350	struct vdpa_device *vdpa = v->vdpa;
 351	const struct vdpa_config_ops *ops = vdpa->config;
 352	struct vdpa_vq_state vq_state;
 353	struct vdpa_callback cb;
 354	struct vhost_virtqueue *vq;
 355	struct vhost_vring_state s;
 356	u32 idx;
 357	long r;
 358
 359	r = get_user(idx, (u32 __user *)argp);
 360	if (r < 0)
 361		return r;
 362
 363	if (idx >= v->nvqs)
 364		return -ENOBUFS;
 365
 366	idx = array_index_nospec(idx, v->nvqs);
 367	vq = &v->vqs[idx];
 368
 369	switch (cmd) {
 370	case VHOST_VDPA_SET_VRING_ENABLE:
 371		if (copy_from_user(&s, argp, sizeof(s)))
 372			return -EFAULT;
 373		ops->set_vq_ready(vdpa, idx, s.num);
 374		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 375	case VHOST_GET_VRING_BASE:
 376		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
 377		if (r)
 378			return r;
 379
 380		vq->last_avail_idx = vq_state.avail_index;
 381		break;
 382	}
 383
 384	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
 385	if (r)
 386		return r;
 387
 388	switch (cmd) {
 389	case VHOST_SET_VRING_ADDR:
 390		if (ops->set_vq_address(vdpa, idx,
 391					(u64)(uintptr_t)vq->desc,
 392					(u64)(uintptr_t)vq->avail,
 393					(u64)(uintptr_t)vq->used))
 394			r = -EINVAL;
 395		break;
 396
 397	case VHOST_SET_VRING_BASE:
 398		vq_state.avail_index = vq->last_avail_idx;
 399		if (ops->set_vq_state(vdpa, idx, &vq_state))
 400			r = -EINVAL;
 401		break;
 402
 403	case VHOST_SET_VRING_CALL:
 404		if (vq->call_ctx.ctx) {
 405			cb.callback = vhost_vdpa_virtqueue_cb;
 406			cb.private = vq;
 407		} else {
 408			cb.callback = NULL;
 409			cb.private = NULL;
 410		}
 411		ops->set_vq_cb(vdpa, idx, &cb);
 412		vhost_vdpa_setup_vq_irq(v, idx);
 413		break;
 414
 415	case VHOST_SET_VRING_NUM:
 416		ops->set_vq_num(vdpa, idx, vq->num);
 417		break;
 418	}
 419
 420	return r;
 421}
 422
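/*
 * Main ioctl entry point.  Backend-feature negotiation is handled
 * without the device mutex; everything else is dispatched under
 * d->mutex, falling back to the generic vhost device and vring
 * ioctl handlers for commands not handled here.
 */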
 423static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 424				      unsigned int cmd, unsigned long arg)
 425{
 426	struct vhost_vdpa *v = filep->private_data;
 427	struct vhost_dev *d = &v->vdev;
 428	void __user *argp = (void __user *)arg;
 429	u64 __user *featurep = argp;
 430	u64 features;
 431	long r;
 432
 433	if (cmd == VHOST_SET_BACKEND_FEATURES) {
 434		r = copy_from_user(&features, featurep, sizeof(features));
 435		if (r)
 436			return -EFAULT;
 437		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
 438			return -EOPNOTSUPP;
 439		vhost_set_backend_features(&v->vdev, features);
 440		return 0;
 441	}
 442
 443	mutex_lock(&d->mutex);
 444
 445	switch (cmd) {
 446	case VHOST_VDPA_GET_DEVICE_ID:
 447		r = vhost_vdpa_get_device_id(v, argp);
 448		break;
 449	case VHOST_VDPA_GET_STATUS:
 450		r = vhost_vdpa_get_status(v, argp);
 451		break;
 452	case VHOST_VDPA_SET_STATUS:
 453		r = vhost_vdpa_set_status(v, argp);
 454		break;
 455	case VHOST_VDPA_GET_CONFIG:
 456		r = vhost_vdpa_get_config(v, argp);
 457		break;
 458	case VHOST_VDPA_SET_CONFIG:
 459		r = vhost_vdpa_set_config(v, argp);
 460		break;
 461	case VHOST_GET_FEATURES:
 462		r = vhost_vdpa_get_features(v, argp);
 463		break;
 464	case VHOST_SET_FEATURES:
 465		r = vhost_vdpa_set_features(v, argp);
 466		break;
 467	case VHOST_VDPA_GET_VRING_NUM:
 468		r = vhost_vdpa_get_vring_num(v, argp);
 469		break;
 470	case VHOST_SET_LOG_BASE:
 471	case VHOST_SET_LOG_FD:
 472		r = -ENOIOCTLCMD;
 473		break;
 474	case VHOST_VDPA_SET_CONFIG_CALL:
 475		r = vhost_vdpa_set_config_call(v, argp);
 476		break;
 477	case VHOST_GET_BACKEND_FEATURES:
 478		features = VHOST_VDPA_BACKEND_FEATURES;
 479		r = copy_to_user(featurep, &features, sizeof(features)) ? -EFAULT : 0;
 480		break;
 481	default:
 482		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
 483		if (r == -ENOIOCTLCMD)
 484			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
 485		break;
 486	}
 487
 488	mutex_unlock(&d->mutex);
 489	return r;
 490}
 491
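/*
 * Drop all IOTLB entries that intersect [start, last]: mark writable
 * pages dirty, unpin them, fix up the pinned-page accounting and free
 * the corresponding mappings.
 */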
 492static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 493{
 494	struct vhost_dev *dev = &v->vdev;
 495	struct vhost_iotlb *iotlb = dev->iotlb;
 496	struct vhost_iotlb_map *map;
 497	struct page *page;
 498	unsigned long pfn, pinned;
 499
 500	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
 501		pinned = map->size >> PAGE_SHIFT;
 502		for (pfn = map->addr >> PAGE_SHIFT;
 503		     pinned > 0; pfn++, pinned--) {
 504			page = pfn_to_page(pfn);
 505			if (map->perm & VHOST_ACCESS_WO)
 506				set_page_dirty_lock(page);
 507			unpin_user_page(page);
 508		}
 509		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
 510		vhost_iotlb_map_free(iotlb, map);
 511	}
 512}
 513
 514static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
 515{
 516	struct vhost_dev *dev = &v->vdev;
 517
 518	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
 519	kfree(dev->iotlb);
 520	dev->iotlb = NULL;
 521}
 522
 523static int perm_to_iommu_flags(u32 perm)
 524{
 525	int flags = 0;
 526
 527	switch (perm) {
 528	case VHOST_ACCESS_WO:
 529		flags |= IOMMU_WRITE;
 530		break;
 531	case VHOST_ACCESS_RO:
 532		flags |= IOMMU_READ;
 533		break;
 534	case VHOST_ACCESS_RW:
 535		flags |= (IOMMU_WRITE | IOMMU_READ);
 536		break;
 537	default:
 538		WARN(1, "invalid vhost IOTLB permission\n");
 539		break;
 540	}
 541
 542	return flags | IOMMU_CACHE;
 543}
 544
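/*
 * Record an IOVA->PA mapping in the vhost IOTLB and push it to the
 * device: via ->dma_map() if available, via a full ->set_map() update
 * (deferred while a batch is in progress), or directly into the host
 * IOMMU domain otherwise.  The IOTLB entry is rolled back on failure.
 */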
 545static int vhost_vdpa_map(struct vhost_vdpa *v,
 546			  u64 iova, u64 size, u64 pa, u32 perm)
 547{
 548	struct vhost_dev *dev = &v->vdev;
 549	struct vdpa_device *vdpa = v->vdpa;
 550	const struct vdpa_config_ops *ops = vdpa->config;
 551	int r = 0;
 552
 553	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
 554				  pa, perm);
 555	if (r)
 556		return r;
 557
 558	if (ops->dma_map) {
 559		r = ops->dma_map(vdpa, iova, size, pa, perm);
 560	} else if (ops->set_map) {
 561		if (!v->in_batch)
 562			r = ops->set_map(vdpa, dev->iotlb);
 563	} else {
 564		r = iommu_map(v->domain, iova, pa, size,
 565			      perm_to_iommu_flags(perm));
 566	}
 567
 568	if (r)
 569		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
 570
 571	return r;
 572}
 573
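/*
 * Reverse of vhost_vdpa_map(): unpin the backing pages and remove the
 * translation from the device or the host IOMMU domain.
 */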
 574static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
 575{
 576	struct vhost_dev *dev = &v->vdev;
 577	struct vdpa_device *vdpa = v->vdpa;
 578	const struct vdpa_config_ops *ops = vdpa->config;
 579
 580	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
 581
 582	if (ops->dma_map) {
 583		ops->dma_unmap(vdpa, iova, size);
 584	} else if (ops->set_map) {
 585		if (!v->in_batch)
 586			ops->set_map(vdpa, dev->iotlb);
 587	} else {
 588		iommu_unmap(v->domain, iova, size);
 589	}
 590}
 591
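/*
 * Handle a VHOST_IOTLB_UPDATE message: pin the userspace pages backing
 * the range (respecting RLIMIT_MEMLOCK), then map each physically
 * contiguous run of pages with vhost_vdpa_map(), rolling everything
 * back on failure.
 */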
 592static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 593					   struct vhost_iotlb_msg *msg)
 594{
 595	struct vhost_dev *dev = &v->vdev;
 596	struct vhost_iotlb *iotlb = dev->iotlb;
 597	struct page **page_list;
 598	struct vm_area_struct **vmas;
 599	unsigned int gup_flags = FOLL_LONGTERM;
 600	unsigned long map_pfn, last_pfn = 0;
 601	unsigned long npages, lock_limit;
 602	unsigned long i, nmap = 0;
 603	u64 iova = msg->iova;
 604	long pinned;
 605	int ret = 0;
 606
 607	if (vhost_iotlb_itree_first(iotlb, msg->iova,
 608				    msg->iova + msg->size - 1))
 609		return -EEXIST;
 610
 611	if (msg->perm & VHOST_ACCESS_WO)
 612		gup_flags |= FOLL_WRITE;
 613
 614	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
 615	if (!npages)
 616		return -EINVAL;
 617
 618	page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 619	vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
 620			      GFP_KERNEL);
 621	if (!page_list || !vmas) {
 622		ret = -ENOMEM;
 623		goto free;
 624	}
 625
 626	mmap_read_lock(dev->mm);
 627
 628	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 629	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
 630		ret = -ENOMEM;
 631		goto unlock;
 632	}
 633
 634	pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
 635				page_list, vmas);
 636	if (npages != pinned) {
 637		if (pinned < 0) {
 638			ret = pinned;
 639		} else {
 640			unpin_user_pages(page_list, pinned);
 641			ret = -ENOMEM;
 642		}
 643		goto unlock;
 644	}
 645
 646	iova &= PAGE_MASK;
 647	map_pfn = page_to_pfn(page_list[0]);
 648
 649	/* One extra iteration so the final chunk is mapped inside the loop. */
 650	for (i = 0; i <= npages; i++) {
 651		unsigned long this_pfn;
 652		u64 csize;
 653
 654		/* The last chunk may have no valid PFN next to it */
 655		this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;
 656
 657		if (last_pfn && (this_pfn == -1UL ||
 658				 this_pfn != last_pfn + 1)) {
 659			/* Map a contiguous run of the pinned pages */
 660			csize = last_pfn - map_pfn + 1;
 661			ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
 662					     map_pfn << PAGE_SHIFT,
 663					     msg->perm);
 664			if (ret) {
 665				/*
 666				 * Unpin the remaining chunks still in
 667				 * flight, i.e. those for which no
 668				 * corresponding vdpa_map() call has been
 669				 * made yet.  vdpa_unmap() in the failure
 670				 * path takes care of the accounting for
 671				 * the pages that were already mapped.
 672				 * This asymmetric accounting lets us pin
 673				 * all pages at once, since the call above
 674				 * is the only callsite of vdpa_map() for
 675				 * this update.
 676				 */
 677				unpin_user_pages(&page_list[nmap],
 678						 npages - nmap);
 679				goto out;
 680			}
 681			atomic64_add(csize, &dev->mm->pinned_vm);
 682			nmap += csize;
 683			iova += csize << PAGE_SHIFT;
 684			map_pfn = this_pfn;
 685		}
 686		last_pfn = this_pfn;
 687	}
 688
 689	WARN_ON(nmap != npages);
 690out:
 691	if (ret)
 692		vhost_vdpa_unmap(v, msg->iova, msg->size);
 693unlock:
 694	mmap_read_unlock(dev->mm);
 695free:
 696	kvfree(vmas);
 697	kvfree(page_list);
 698	return ret;
 699}
 700
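/*
 * Entry point for IOTLB messages written by userspace: dispatches
 * update/invalidate requests and implements map batching via the
 * BATCH_BEGIN/BATCH_END message types.
 */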
 701static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
 702					struct vhost_iotlb_msg *msg)
 703{
 704	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
 705	struct vdpa_device *vdpa = v->vdpa;
 706	const struct vdpa_config_ops *ops = vdpa->config;
 707	int r = 0;
 708
 709	r = vhost_dev_check_owner(dev);
 710	if (r)
 711		return r;
 712
 713	switch (msg->type) {
 714	case VHOST_IOTLB_UPDATE:
 715		r = vhost_vdpa_process_iotlb_update(v, msg);
 716		break;
 717	case VHOST_IOTLB_INVALIDATE:
 718		vhost_vdpa_unmap(v, msg->iova, msg->size);
 719		break;
 720	case VHOST_IOTLB_BATCH_BEGIN:
 721		v->in_batch = true;
 722		break;
 723	case VHOST_IOTLB_BATCH_END:
 724		if (v->in_batch && ops->set_map)
 725			ops->set_map(vdpa, dev->iotlb);
 726		v->in_batch = false;
 727		break;
 728	default:
 729		r = -EINVAL;
 730		break;
 731	}
 732
 733	return r;
 734}
 735
 736static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
 737					 struct iov_iter *from)
 738{
 739	struct file *file = iocb->ki_filp;
 740	struct vhost_vdpa *v = file->private_data;
 741	struct vhost_dev *dev = &v->vdev;
 742
 743	return vhost_chr_write_iter(dev, from);
 744}
 745
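/*
 * If the device does not translate DMA by itself (no ->dma_map or
 * ->set_map op), allocate an IOMMU domain on the DMA device's bus and
 * attach it so the host IOMMU can be programmed directly.
 */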
 746static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
 747{
 748	struct vdpa_device *vdpa = v->vdpa;
 749	const struct vdpa_config_ops *ops = vdpa->config;
 750	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
 751	struct bus_type *bus;
 752	int ret;
 753
 754	/* Device wants to handle the DMA mappings by itself */
 755	if (ops->set_map || ops->dma_map)
 756		return 0;
 757
 758	bus = dma_dev->bus;
 759	if (!bus)
 760		return -EFAULT;
 761
 762	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
 763		return -ENOTSUPP;
 764
 765	v->domain = iommu_domain_alloc(bus);
 766	if (!v->domain)
 767		return -EIO;
 768
 769	ret = iommu_attach_device(v->domain, dma_dev);
 770	if (ret)
 771		goto err_attach;
 772
 773	return 0;
 774
 775err_attach:
 776	iommu_domain_free(v->domain);
 777	return ret;
 778}
 779
 780static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
 781{
 782	struct vdpa_device *vdpa = v->vdpa;
 783	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
 784
 785	if (v->domain) {
 786		iommu_detach_device(v->domain, dma_dev);
 787		iommu_domain_free(v->domain);
 788	}
 789
 790	v->domain = NULL;
 791}
 792
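/*
 * Only one opener is allowed at a time.  Opening resets the device,
 * allocates the virtqueue array and the IOTLB, initializes the vhost
 * device and sets up the IOMMU domain if the device needs one.
 */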
 793static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 794{
 795	struct vhost_vdpa *v;
 796	struct vhost_dev *dev;
 797	struct vhost_virtqueue **vqs;
 798	int nvqs, i, r, opened;
 799
 800	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
 801
 802	opened = atomic_cmpxchg(&v->opened, 0, 1);
 803	if (opened)
 804		return -EBUSY;
 805
 806	nvqs = v->nvqs;
 807	vhost_vdpa_reset(v);
 808
 809	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
 810	if (!vqs) {
 811		r = -ENOMEM;
 812		goto err;
 813	}
 814
 815	dev = &v->vdev;
 816	for (i = 0; i < nvqs; i++) {
 817		vqs[i] = &v->vqs[i];
 818		vqs[i]->handle_kick = handle_vq_kick;
 819	}
 820	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
 821		       vhost_vdpa_process_iotlb_msg);
 822
 823	dev->iotlb = vhost_iotlb_alloc(0, 0);
 824	if (!dev->iotlb) {
 825		r = -ENOMEM;
 826		goto err_init_iotlb;
 827	}
 828
 829	r = vhost_vdpa_alloc_domain(v);
 830	if (r)
 831		goto err_init_iotlb;
 832
 833	filep->private_data = v;
 834
 835	return 0;
 836
 837err_init_iotlb:
 838	vhost_dev_cleanup(&v->vdev);
 839	kfree(vqs);
 840err:
 841	atomic_dec(&v->opened);
 842	return r;
 843}
 844
 845static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
 846{
 847	struct vhost_virtqueue *vq;
 848	int i;
 849
 850	for (i = 0; i < v->nvqs; i++) {
 851		vq = &v->vqs[i];
 852		if (vq->call_ctx.producer.irq)
 853			irq_bypass_unregister_producer(&vq->call_ctx.producer);
 854	}
 855}
 856
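/*
 * Tear down everything set up by open() and signal the completion so
 * that a concurrent vhost_vdpa_remove() can make progress.
 */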
 857static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 858{
 859	struct vhost_vdpa *v = filep->private_data;
 860	struct vhost_dev *d = &v->vdev;
 861
 862	mutex_lock(&d->mutex);
 863	filep->private_data = NULL;
 864	vhost_vdpa_reset(v);
 865	vhost_dev_stop(&v->vdev);
 866	vhost_vdpa_iotlb_free(v);
 867	vhost_vdpa_free_domain(v);
 868	vhost_vdpa_config_put(v);
 869	vhost_vdpa_clean_irq(v);
 870	vhost_dev_cleanup(&v->vdev);
 871	kfree(v->vdev.vqs);
 872	mutex_unlock(&d->mutex);
 873
 874	atomic_dec(&v->opened);
 875	complete(&v->completion);
 876
 877	return 0;
 878}
 879
 880#ifdef CONFIG_MMU
 881static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
 882{
 883	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
 884	struct vdpa_device *vdpa = v->vdpa;
 885	const struct vdpa_config_ops *ops = vdpa->config;
 886	struct vdpa_notification_area notify;
 887	struct vm_area_struct *vma = vmf->vma;
 888	u16 index = vma->vm_pgoff;
 889
 890	notify = ops->get_vq_notification(vdpa, index);
 891
 892	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 893	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
 894			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
 895			    vma->vm_page_prot))
 896		return VM_FAULT_SIGBUS;
 897
 898	return VM_FAULT_NOPAGE;
 899}
 900
 901static const struct vm_operations_struct vhost_vdpa_vm_ops = {
 902	.fault = vhost_vdpa_fault,
 903};
 904
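/*
 * Map a virtqueue doorbell (notification area) into userspace.  Only
 * page-sized, page-aligned, shared, write-only mappings of a doorbell
 * that owns its page are accepted; the PFN is inserted lazily by
 * vhost_vdpa_fault().
 */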
 905static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
 906{
 907	struct vhost_vdpa *v = vma->vm_file->private_data;
 908	struct vdpa_device *vdpa = v->vdpa;
 909	const struct vdpa_config_ops *ops = vdpa->config;
 910	struct vdpa_notification_area notify;
 911	unsigned long index = vma->vm_pgoff;
 912
 913	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 914		return -EINVAL;
 915	if ((vma->vm_flags & VM_SHARED) == 0)
 916		return -EINVAL;
 917	if (vma->vm_flags & VM_READ)
 918		return -EINVAL;
 919	if (index > 65535)
 920		return -EINVAL;
 921	if (!ops->get_vq_notification)
 922		return -ENOTSUPP;
 923
 924	/* To be safe and easily modelled by userspace, we only
 925	 * support a doorbell that sits on a page boundary and
 926	 * does not share its page with other registers.
 927	 */
 928	notify = ops->get_vq_notification(vdpa, index);
 929	if (notify.addr & (PAGE_SIZE - 1))
 930		return -EINVAL;
 931	if (vma->vm_end - vma->vm_start != notify.size)
 932		return -ENOTSUPP;
 933
 934	vma->vm_ops = &vhost_vdpa_vm_ops;
 935	return 0;
 936}
 937#endif /* CONFIG_MMU */
 938
 939static const struct file_operations vhost_vdpa_fops = {
 940	.owner		= THIS_MODULE,
 941	.open		= vhost_vdpa_open,
 942	.release	= vhost_vdpa_release,
 943	.write_iter	= vhost_vdpa_chr_write_iter,
 944	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
 945#ifdef CONFIG_MMU
 946	.mmap		= vhost_vdpa_mmap,
 947#endif /* CONFIG_MMU */
 948	.compat_ioctl	= compat_ptr_ioctl,
 949};
 950
 951static void vhost_vdpa_release_dev(struct device *device)
 952{
 953	struct vhost_vdpa *v =
 954	       container_of(device, struct vhost_vdpa, dev);
 955
 956	ida_simple_remove(&vhost_vdpa_ida, v->minor);
 957	kfree(v->vqs);
 958	kfree(v);
 959}
 960
 961static int vhost_vdpa_probe(struct vdpa_device *vdpa)
 962{
 963	const struct vdpa_config_ops *ops = vdpa->config;
 964	struct vhost_vdpa *v;
 965	int minor;
 966	int r;
 967
 968	/* Currently, we only accept network devices. */
 969	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
 970		return -ENOTSUPP;
 971
 972	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 973	if (!v)
 974		return -ENOMEM;
 975
 976	minor = ida_simple_get(&vhost_vdpa_ida, 0,
 977			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
 978	if (minor < 0) {
 979		kfree(v);
 980		return minor;
 981	}
 982
 983	atomic_set(&v->opened, 0);
 984	v->minor = minor;
 985	v->vdpa = vdpa;
 986	v->nvqs = vdpa->nvqs;
 987	v->virtio_id = ops->get_device_id(vdpa);
 988
 989	device_initialize(&v->dev);
 990	v->dev.release = vhost_vdpa_release_dev;
 991	v->dev.parent = &vdpa->dev;
 992	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
 993	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
 994			       GFP_KERNEL);
 995	if (!v->vqs) {
 996		r = -ENOMEM;
 997		goto err;
 998	}
 999
1000	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
1001	if (r)
1002		goto err;
1003
1004	cdev_init(&v->cdev, &vhost_vdpa_fops);
1005	v->cdev.owner = THIS_MODULE;
1006
1007	r = cdev_device_add(&v->cdev, &v->dev);
1008	if (r)
1009		goto err;
1010
1011	init_completion(&v->completion);
1012	vdpa_set_drvdata(vdpa, v);
1013
1014	return 0;
1015
1016err:
1017	put_device(&v->dev);
1018	return r;
1019}
1020
1021static void vhost_vdpa_remove(struct vdpa_device *vdpa)
1022{
1023	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
1024	int opened;
1025
1026	cdev_device_del(&v->cdev, &v->dev);
1027
1028	do {
1029		opened = atomic_cmpxchg(&v->opened, 0, 1);
1030		if (!opened)
1031			break;
1032		wait_for_completion(&v->completion);
1033	} while (1);
1034
1035	put_device(&v->dev);
1036}
1037
1038static struct vdpa_driver vhost_vdpa_driver = {
1039	.driver = {
1040		.name	= "vhost_vdpa",
1041	},
1042	.probe	= vhost_vdpa_probe,
1043	.remove	= vhost_vdpa_remove,
1044};
1045
1046static int __init vhost_vdpa_init(void)
1047{
1048	int r;
1049
1050	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
1051				"vhost-vdpa");
1052	if (r)
1053		goto err_alloc_chrdev;
1054
1055	r = vdpa_register_driver(&vhost_vdpa_driver);
1056	if (r)
1057		goto err_vdpa_register_driver;
1058
1059	return 0;
1060
1061err_vdpa_register_driver:
1062	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
1063err_alloc_chrdev:
1064	return r;
1065}
1066module_init(vhost_vdpa_init);
1067
1068static void __exit vhost_vdpa_exit(void)
1069{
1070	vdpa_unregister_driver(&vhost_vdpa_driver);
1071	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
1072}
1073module_exit(vhost_vdpa_exit);
1074
1075MODULE_VERSION("0.0.1");
1076MODULE_LICENSE("GPL v2");
1077MODULE_AUTHOR("Intel Corporation");
1078MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");