   1/*
   2 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
   3 *
   4 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the next
  14 * paragraph) shall be included in all copies or substantial portions of the
  15 * Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23 * SOFTWARE.
  24 *
  25 * Authors:
  26 *    Kevin Tian <kevin.tian@intel.com>
  27 *    Jike Song <jike.song@intel.com>
  28 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
  29 */
  30
  31#include <linux/init.h>
  32#include <linux/device.h>
  33#include <linux/mm.h>
  34#include <linux/kthread.h>
  35#include <linux/sched/mm.h>
  36#include <linux/types.h>
  37#include <linux/list.h>
  38#include <linux/rbtree.h>
  39#include <linux/spinlock.h>
  40#include <linux/eventfd.h>
  41#include <linux/uuid.h>
  42#include <linux/kvm_host.h>
  43#include <linux/vfio.h>
  44#include <linux/mdev.h>
  45#include <linux/debugfs.h>
  46
  47#include <linux/nospec.h>
  48
  49#include "i915_drv.h"
  50#include "gvt.h"
  51
  52static const struct intel_gvt_ops *intel_gvt_ops;
  53
  54/* helper macros copied from vfio-pci */
  55#define VFIO_PCI_OFFSET_SHIFT   40
  56#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
  57#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
  58#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
  59
  60#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
  61
  62#define OPREGION_SIGNATURE "IntelGraphicsMem"
  63
  64struct vfio_region;
  65struct intel_vgpu_regops {
  66	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
  67			size_t count, loff_t *ppos, bool iswrite);
  68	void (*release)(struct intel_vgpu *vgpu,
  69			struct vfio_region *region);
  70};
  71
  72struct vfio_region {
  73	u32				type;
  74	u32				subtype;
  75	size_t				size;
  76	u32				flags;
  77	const struct intel_vgpu_regops	*ops;
  78	void				*data;
  79};
  80
  81struct vfio_edid_region {
  82	struct vfio_region_gfx_edid vfio_edid_regs;
  83	void *edid_blob;
  84};
  85
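     /* A guest page frame write-protected via KVM page tracking. */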
  86struct kvmgt_pgfn {
  87	gfn_t gfn;
  88	struct hlist_node hnode;
  89};
  90
  91struct kvmgt_guest_info {
  92	struct kvm *kvm;
  93	struct intel_vgpu *vgpu;
  94	struct kvm_page_track_notifier_node track_node;
  95#define NR_BKT (1 << 18)
  96	struct hlist_head ptable[NR_BKT];
  97#undef NR_BKT
  98	struct dentry *debugfs_cache_entries;
  99};
 100
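     /*
      * A pinned guest page range and its DMA mapping. Each entry is linked
      * into both the gfn and dma_addr rb-tree caches of the owning vGPU and
      * is refcounted so repeated mappings of the same gfn share one entry.
      */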
 101struct gvt_dma {
 102	struct intel_vgpu *vgpu;
 103	struct rb_node gfn_node;
 104	struct rb_node dma_addr_node;
 105	gfn_t gfn;
 106	dma_addr_t dma_addr;
 107	unsigned long size;
 108	struct kref ref;
 109};
 110
 111struct kvmgt_vdev {
 112	struct intel_vgpu *vgpu;
 113	struct mdev_device *mdev;
 114	struct vfio_region *region;
 115	int num_regions;
 116	struct eventfd_ctx *intx_trigger;
 117	struct eventfd_ctx *msi_trigger;
 118
 119	/*
  120	 * Two caches are used to avoid mapping duplicated pages (e.g.
  121	 * scratch pages). This helps to reduce DMA setup overhead.
 122	 */
 123	struct rb_root gfn_cache;
 124	struct rb_root dma_addr_cache;
 125	unsigned long nr_cache_entries;
 126	struct mutex cache_lock;
 127
 128	struct notifier_block iommu_notifier;
 129	struct notifier_block group_notifier;
 130	struct kvm *kvm;
 131	struct work_struct release_work;
 132	atomic_t released;
 133	struct vfio_device *vfio_device;
 134	struct vfio_group *vfio_group;
 135};
 136
 137static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
 138{
 139	return intel_vgpu_vdev(vgpu);
 140}
 141
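     /*
      * vgpu->handle stores a pointer to a kvmgt_guest_info once the guest is
      * initialized, so any valid handle is well above 0xff.
      */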
 142static inline bool handle_valid(unsigned long handle)
 143{
 144	return !!(handle & ~0xff);
 145}
 146
 147static int kvmgt_guest_init(struct mdev_device *mdev);
 148static void intel_vgpu_release_work(struct work_struct *work);
 149static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 150
 151static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 152		unsigned long size)
 153{
 154	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 155	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 156	int total_pages;
 157	int npage;
 158	int ret;
 159
 160	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
 161
 162	for (npage = 0; npage < total_pages; npage++) {
 163		unsigned long cur_gfn = gfn + npage;
 164
 165		ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
 166		drm_WARN_ON(&i915->drm, ret != 1);
 167	}
 168}
 169
 170/* Pin a normal or compound guest page for dma. */
 171static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 172		unsigned long size, struct page **page)
 173{
 174	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 175	unsigned long base_pfn = 0;
 176	int total_pages;
 177	int npage;
 178	int ret;
 179
 180	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
 181	/*
  182	 * We pin the pages one-by-one to avoid allocating a big array
  183	 * on the stack to hold pfns.
 184	 */
 185	for (npage = 0; npage < total_pages; npage++) {
 186		unsigned long cur_gfn = gfn + npage;
 187		unsigned long pfn;
 188
 189		ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
 190					   IOMMU_READ | IOMMU_WRITE, &pfn);
 191		if (ret != 1) {
 192			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
 193				     cur_gfn, ret);
 194			goto err;
 195		}
 196
 197		if (!pfn_valid(pfn)) {
 198			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
 199			npage++;
 200			ret = -EFAULT;
 201			goto err;
 202		}
 203
 204		if (npage == 0)
 205			base_pfn = pfn;
 206		else if (base_pfn + npage != pfn) {
  207			gvt_vgpu_err("The pages are not contiguous\n");
 208			ret = -EINVAL;
 209			npage++;
 210			goto err;
 211		}
 212	}
 213
 214	*page = pfn_to_page(base_pfn);
 215	return 0;
 216err:
 217	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
 218	return ret;
 219}
 220
 221static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 222		dma_addr_t *dma_addr, unsigned long size)
 223{
 224	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
 225	struct page *page = NULL;
 226	int ret;
 227
 228	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
 229	if (ret)
 230		return ret;
 231
 232	/* Setup DMA mapping. */
 233	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
 234	if (dma_mapping_error(dev, *dma_addr)) {
 235		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
 236			     page_to_pfn(page), ret);
 237		gvt_unpin_guest_page(vgpu, gfn, size);
 238		return -ENOMEM;
 239	}
 240
 241	return 0;
 242}
 243
 244static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
 245		dma_addr_t dma_addr, unsigned long size)
 246{
 247	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
 248
 249	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
 250	gvt_unpin_guest_page(vgpu, gfn, size);
 251}
 252
 253static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
 254		dma_addr_t dma_addr)
 255{
 256	struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
 257	struct gvt_dma *itr;
 258
 259	while (node) {
 260		itr = rb_entry(node, struct gvt_dma, dma_addr_node);
 261
 262		if (dma_addr < itr->dma_addr)
 263			node = node->rb_left;
 264		else if (dma_addr > itr->dma_addr)
 265			node = node->rb_right;
 266		else
 267			return itr;
 268	}
 269	return NULL;
 270}
 271
 272static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
 273{
 274	struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
 275	struct gvt_dma *itr;
 276
 277	while (node) {
 278		itr = rb_entry(node, struct gvt_dma, gfn_node);
 279
 280		if (gfn < itr->gfn)
 281			node = node->rb_left;
 282		else if (gfn > itr->gfn)
 283			node = node->rb_right;
 284		else
 285			return itr;
 286	}
 287	return NULL;
 288}
 289
 290static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 291		dma_addr_t dma_addr, unsigned long size)
 292{
 293	struct gvt_dma *new, *itr;
 294	struct rb_node **link, *parent = NULL;
 295	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 296
 297	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
 298	if (!new)
 299		return -ENOMEM;
 300
 301	new->vgpu = vgpu;
 302	new->gfn = gfn;
 303	new->dma_addr = dma_addr;
 304	new->size = size;
 305	kref_init(&new->ref);
 306
 307	/* gfn_cache maps gfn to struct gvt_dma. */
 308	link = &vdev->gfn_cache.rb_node;
 309	while (*link) {
 310		parent = *link;
 311		itr = rb_entry(parent, struct gvt_dma, gfn_node);
 312
 313		if (gfn < itr->gfn)
 314			link = &parent->rb_left;
 315		else
 316			link = &parent->rb_right;
 317	}
 318	rb_link_node(&new->gfn_node, parent, link);
 319	rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
 320
 321	/* dma_addr_cache maps dma addr to struct gvt_dma. */
 322	parent = NULL;
 323	link = &vdev->dma_addr_cache.rb_node;
 324	while (*link) {
 325		parent = *link;
 326		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
 327
 328		if (dma_addr < itr->dma_addr)
 329			link = &parent->rb_left;
 330		else
 331			link = &parent->rb_right;
 332	}
 333	rb_link_node(&new->dma_addr_node, parent, link);
 334	rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
 335
 336	vdev->nr_cache_entries++;
 337	return 0;
 338}
 339
 340static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 341				struct gvt_dma *entry)
 342{
 343	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 344
 345	rb_erase(&entry->gfn_node, &vdev->gfn_cache);
 346	rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
 347	kfree(entry);
 348	vdev->nr_cache_entries--;
 349}
 350
 351static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 352{
 353	struct gvt_dma *dma;
 354	struct rb_node *node = NULL;
 355	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 356
 357	for (;;) {
 358		mutex_lock(&vdev->cache_lock);
 359		node = rb_first(&vdev->gfn_cache);
 360		if (!node) {
 361			mutex_unlock(&vdev->cache_lock);
 362			break;
 363		}
 364		dma = rb_entry(node, struct gvt_dma, gfn_node);
 365		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
 366		__gvt_cache_remove_entry(vgpu, dma);
 367		mutex_unlock(&vdev->cache_lock);
 368	}
 369}
 370
 371static void gvt_cache_init(struct intel_vgpu *vgpu)
 372{
 373	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 374
 375	vdev->gfn_cache = RB_ROOT;
 376	vdev->dma_addr_cache = RB_ROOT;
 377	vdev->nr_cache_entries = 0;
 378	mutex_init(&vdev->cache_lock);
 379}
 380
 381static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
 382{
 383	hash_init(info->ptable);
 384}
 385
 386static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
 387{
 388	struct kvmgt_pgfn *p;
 389	struct hlist_node *tmp;
 390	int i;
 391
 392	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
 393		hash_del(&p->hnode);
 394		kfree(p);
 395	}
 396}
 397
 398static struct kvmgt_pgfn *
 399__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
 400{
 401	struct kvmgt_pgfn *p, *res = NULL;
 402
 403	hash_for_each_possible(info->ptable, p, hnode, gfn) {
 404		if (gfn == p->gfn) {
 405			res = p;
 406			break;
 407		}
 408	}
 409
 410	return res;
 411}
 412
 413static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
 414				gfn_t gfn)
 415{
 416	struct kvmgt_pgfn *p;
 417
 418	p = __kvmgt_protect_table_find(info, gfn);
 419	return !!p;
 420}
 421
 422static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
 423{
 424	struct kvmgt_pgfn *p;
 425
 426	if (kvmgt_gfn_is_write_protected(info, gfn))
 427		return;
 428
 429	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
 430	if (WARN(!p, "gfn: 0x%llx\n", gfn))
 431		return;
 432
 433	p->gfn = gfn;
 434	hash_add(info->ptable, &p->hnode, gfn);
 435}
 436
 437static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
 438				gfn_t gfn)
 439{
 440	struct kvmgt_pgfn *p;
 441
 442	p = __kvmgt_protect_table_find(info, gfn);
 443	if (p) {
 444		hash_del(&p->hnode);
 445		kfree(p);
 446	}
 447}
 448
 449static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
 450		size_t count, loff_t *ppos, bool iswrite)
 451{
 452	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 453	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
 454			VFIO_PCI_NUM_REGIONS;
 455	void *base = vdev->region[i].data;
 456	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
 457
 458
 459	if (pos >= vdev->region[i].size || iswrite) {
 460		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
 461		return -EINVAL;
 462	}
 463	count = min(count, (size_t)(vdev->region[i].size - pos));
 464	memcpy(buf, base + pos, count);
 465
 466	return count;
 467}
 468
 469static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
 470		struct vfio_region *region)
 471{
 472}
 473
 474static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
 475	.rw = intel_vgpu_reg_rw_opregion,
 476	.release = intel_vgpu_reg_release_opregion,
 477};
 478
 479static int handle_edid_regs(struct intel_vgpu *vgpu,
 480			struct vfio_edid_region *region, char *buf,
 481			size_t count, u16 offset, bool is_write)
 482{
 483	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
 484	unsigned int data;
 485
 486	if (offset + count > sizeof(*regs))
 487		return -EINVAL;
 488
 489	if (count != 4)
 490		return -EINVAL;
 491
 492	if (is_write) {
 493		data = *((unsigned int *)buf);
 494		switch (offset) {
 495		case offsetof(struct vfio_region_gfx_edid, link_state):
 496			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
 497				if (!drm_edid_block_valid(
 498					(u8 *)region->edid_blob,
 499					0,
 500					true,
 501					NULL)) {
 502					gvt_vgpu_err("invalid EDID blob\n");
 503					return -EINVAL;
 504				}
 505				intel_gvt_ops->emulate_hotplug(vgpu, true);
 506			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
 507				intel_gvt_ops->emulate_hotplug(vgpu, false);
 508			else {
 509				gvt_vgpu_err("invalid EDID link state %d\n",
 510					regs->link_state);
 511				return -EINVAL;
 512			}
 513			regs->link_state = data;
 514			break;
 515		case offsetof(struct vfio_region_gfx_edid, edid_size):
 516			if (data > regs->edid_max_size) {
 517				gvt_vgpu_err("EDID size is bigger than %d!\n",
 518					regs->edid_max_size);
 519				return -EINVAL;
 520			}
 521			regs->edid_size = data;
 522			break;
 523		default:
 524			/* read-only regs */
 525			gvt_vgpu_err("write read-only EDID region at offset %d\n",
 526				offset);
 527			return -EPERM;
 528		}
 529	} else {
 530		memcpy(buf, (char *)regs + offset, count);
 531	}
 532
 533	return count;
 534}
 535
 536static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
 537			size_t count, u16 offset, bool is_write)
 538{
 539	if (offset + count > region->vfio_edid_regs.edid_size)
 540		return -EINVAL;
 541
 542	if (is_write)
 543		memcpy(region->edid_blob + offset, buf, count);
 544	else
 545		memcpy(buf, region->edid_blob + offset, count);
 546
 547	return count;
 548}
 549
 550static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
 551		size_t count, loff_t *ppos, bool iswrite)
 552{
 553	int ret;
 554	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
 555			VFIO_PCI_NUM_REGIONS;
 556	struct vfio_edid_region *region =
 557		(struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
 558	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
 559
 560	if (pos < region->vfio_edid_regs.edid_offset) {
 561		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
 562	} else {
 563		pos -= EDID_BLOB_OFFSET;
 564		ret = handle_edid_blob(region, buf, count, pos, iswrite);
 565	}
 566
 567	if (ret < 0)
 568		gvt_vgpu_err("failed to access EDID region\n");
 569
 570	return ret;
 571}
 572
 573static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
 574					struct vfio_region *region)
 575{
 576	kfree(region->data);
 577}
 578
 579static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
 580	.rw = intel_vgpu_reg_rw_edid,
 581	.release = intel_vgpu_reg_release_edid,
 582};
 583
 584static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
 585		unsigned int type, unsigned int subtype,
 586		const struct intel_vgpu_regops *ops,
 587		size_t size, u32 flags, void *data)
 588{
 589	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 590	struct vfio_region *region;
 591
 592	region = krealloc(vdev->region,
 593			(vdev->num_regions + 1) * sizeof(*region),
 594			GFP_KERNEL);
 595	if (!region)
 596		return -ENOMEM;
 597
 598	vdev->region = region;
 599	vdev->region[vdev->num_regions].type = type;
 600	vdev->region[vdev->num_regions].subtype = subtype;
 601	vdev->region[vdev->num_regions].ops = ops;
 602	vdev->region[vdev->num_regions].size = size;
 603	vdev->region[vdev->num_regions].flags = flags;
 604	vdev->region[vdev->num_regions].data = data;
 605	vdev->num_regions++;
 606	return 0;
 607}
 608
 609static int kvmgt_get_vfio_device(void *p_vgpu)
 610{
 611	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
 612	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 613
 614	vdev->vfio_device = vfio_device_get_from_dev(
 615		mdev_dev(vdev->mdev));
 616	if (!vdev->vfio_device) {
 617		gvt_vgpu_err("failed to get vfio device\n");
 618		return -ENODEV;
 619	}
 620	return 0;
 621}
 622
 623
 624static int kvmgt_set_opregion(void *p_vgpu)
 625{
 626	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
 627	void *base;
 628	int ret;
 629
  630	/* Each vgpu has its own opregion, although VFIO would create another
  631	 * one later. This one is used to expose the opregion to VFIO, while
  632	 * the one created later by VFIO is the one actually used by the guest.
  633	 */
 634	base = vgpu_opregion(vgpu)->va;
 635	if (!base)
 636		return -ENOMEM;
 637
 638	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
 639		memunmap(base);
 640		return -EINVAL;
 641	}
 642
 643	ret = intel_vgpu_register_reg(vgpu,
 644			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
 645			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
 646			&intel_vgpu_regops_opregion, OPREGION_SIZE,
 647			VFIO_REGION_INFO_FLAG_READ, base);
 648
 649	return ret;
 650}
 651
 652static int kvmgt_set_edid(void *p_vgpu, int port_num)
 653{
 654	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
 655	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 656	struct vfio_edid_region *base;
 657	int ret;
 658
 659	base = kzalloc(sizeof(*base), GFP_KERNEL);
 660	if (!base)
 661		return -ENOMEM;
 662
 663	/* TODO: Add multi-port and EDID extension block support */
 664	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
 665	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
 666	base->vfio_edid_regs.edid_size = EDID_SIZE;
 667	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
 668	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
 669	base->edid_blob = port->edid->edid_block;
 670
 671	ret = intel_vgpu_register_reg(vgpu,
 672			VFIO_REGION_TYPE_GFX,
 673			VFIO_REGION_SUBTYPE_GFX_EDID,
 674			&intel_vgpu_regops_edid, EDID_SIZE,
 675			VFIO_REGION_INFO_FLAG_READ |
 676			VFIO_REGION_INFO_FLAG_WRITE |
 677			VFIO_REGION_INFO_FLAG_CAPS, base);
 678
 679	return ret;
 680}
 681
 682static void kvmgt_put_vfio_device(void *vgpu)
 683{
 684	struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
 685
 686	if (WARN_ON(!vdev->vfio_device))
 687		return;
 688
 689	vfio_device_put(vdev->vfio_device);
 690}
 691
 692static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 693{
 694	struct intel_vgpu *vgpu = NULL;
 695	struct intel_vgpu_type *type;
 696	struct device *pdev;
 697	void *gvt;
 698	int ret;
 699
 700	pdev = mdev_parent_dev(mdev);
 701	gvt = kdev_to_i915(pdev)->gvt;
 702
 703	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
 704	if (!type) {
 705		gvt_vgpu_err("failed to find type %s to create\n",
 706						kobject_name(kobj));
 707		ret = -EINVAL;
 708		goto out;
 709	}
 710
 711	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
 712	if (IS_ERR_OR_NULL(vgpu)) {
 713		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
 714		gvt_err("failed to create intel vgpu: %d\n", ret);
 715		goto out;
 716	}
 717
 718	INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
 719
 720	kvmgt_vdev(vgpu)->mdev = mdev;
 721	mdev_set_drvdata(mdev, vgpu);
 722
 723	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
 724		     dev_name(mdev_dev(mdev)));
 725	ret = 0;
 726
 727out:
 728	return ret;
 729}
 730
 731static int intel_vgpu_remove(struct mdev_device *mdev)
 732{
 733	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
 734
 735	if (handle_valid(vgpu->handle))
 736		return -EBUSY;
 737
 738	intel_gvt_ops->vgpu_destroy(vgpu);
 739	return 0;
 740}
 741
 742static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 743				     unsigned long action, void *data)
 744{
 745	struct kvmgt_vdev *vdev = container_of(nb,
 746					       struct kvmgt_vdev,
 747					       iommu_notifier);
 748	struct intel_vgpu *vgpu = vdev->vgpu;
 749
 750	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 751		struct vfio_iommu_type1_dma_unmap *unmap = data;
 752		struct gvt_dma *entry;
 753		unsigned long iov_pfn, end_iov_pfn;
 754
 755		iov_pfn = unmap->iova >> PAGE_SHIFT;
 756		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
 757
 758		mutex_lock(&vdev->cache_lock);
 759		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
 760			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
 761			if (!entry)
 762				continue;
 763
 764			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
 765					   entry->size);
 766			__gvt_cache_remove_entry(vgpu, entry);
 767		}
 768		mutex_unlock(&vdev->cache_lock);
 769	}
 770
 771	return NOTIFY_OK;
 772}
 773
 774static int intel_vgpu_group_notifier(struct notifier_block *nb,
 775				     unsigned long action, void *data)
 776{
 777	struct kvmgt_vdev *vdev = container_of(nb,
 778					       struct kvmgt_vdev,
 779					       group_notifier);
 780
 781	/* the only action we care about */
 782	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
 783		vdev->kvm = data;
 784
 785		if (!data)
 786			schedule_work(&vdev->release_work);
 787	}
 788
 789	return NOTIFY_OK;
 790}
 791
 792static int intel_vgpu_open(struct mdev_device *mdev)
 793{
 794	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
 795	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 796	unsigned long events;
 797	int ret;
 798	struct vfio_group *vfio_group;
 799
 800	vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
 801	vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
 802
 803	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
 804	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
 805				&vdev->iommu_notifier);
 806	if (ret != 0) {
 807		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
 808			ret);
 809		goto out;
 810	}
 811
 812	events = VFIO_GROUP_NOTIFY_SET_KVM;
 813	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
 814				&vdev->group_notifier);
 815	if (ret != 0) {
 816		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
 817			ret);
 818		goto undo_iommu;
 819	}
 820
 821	vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
 822	if (IS_ERR_OR_NULL(vfio_group)) {
 823		ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
 824		gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
 825		goto undo_register;
 826	}
 827	vdev->vfio_group = vfio_group;
 828
  829	/* Take a module reference, as the mdev core doesn't take
  830	 * one for the vendor driver.
 831	 */
 832	if (!try_module_get(THIS_MODULE))
 833		goto undo_group;
 834
 835	ret = kvmgt_guest_init(mdev);
 836	if (ret)
 837		goto undo_group;
 838
 839	intel_gvt_ops->vgpu_activate(vgpu);
 840
 841	atomic_set(&vdev->released, 0);
 842	return ret;
 843
 844undo_group:
 845	vfio_group_put_external_user(vdev->vfio_group);
 846	vdev->vfio_group = NULL;
 847
 848undo_register:
 849	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
 850					&vdev->group_notifier);
 851
 852undo_iommu:
 853	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
 854					&vdev->iommu_notifier);
 855out:
 856	return ret;
 857}
 858
 859static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
 860{
 861	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 862	struct eventfd_ctx *trigger;
 863
 864	trigger = vdev->msi_trigger;
 865	if (trigger) {
 866		eventfd_ctx_put(trigger);
 867		vdev->msi_trigger = NULL;
 868	}
 869}
 870
 871static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 872{
 873	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 874	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 875	struct kvmgt_guest_info *info;
 876	int ret;
 877
 878	if (!handle_valid(vgpu->handle))
 879		return;
 880
 881	if (atomic_cmpxchg(&vdev->released, 0, 1))
 882		return;
 883
 884	intel_gvt_ops->vgpu_release(vgpu);
 885
 886	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
 887					&vdev->iommu_notifier);
 888	drm_WARN(&i915->drm, ret,
 889		 "vfio_unregister_notifier for iommu failed: %d\n", ret);
 890
 891	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
 892					&vdev->group_notifier);
 893	drm_WARN(&i915->drm, ret,
 894		 "vfio_unregister_notifier for group failed: %d\n", ret);
 895
  896	/* drop the module reference taken at open */
 897	module_put(THIS_MODULE);
 898
 899	info = (struct kvmgt_guest_info *)vgpu->handle;
 900	kvmgt_guest_exit(info);
 901
 902	intel_vgpu_release_msi_eventfd_ctx(vgpu);
 903	vfio_group_put_external_user(vdev->vfio_group);
 904
 905	vdev->kvm = NULL;
 906	vgpu->handle = 0;
 907}
 908
 909static void intel_vgpu_release(struct mdev_device *mdev)
 910{
 911	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
 912
 913	__intel_vgpu_release(vgpu);
 914}
 915
 916static void intel_vgpu_release_work(struct work_struct *work)
 917{
 918	struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
 919					       release_work);
 920
 921	__intel_vgpu_release(vdev->vgpu);
 922}
 923
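     /*
      * Decode the guest-programmed BAR base address from the vGPU's virtual
      * PCI config space, handling both 32-bit and 64-bit memory BARs.
      */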
 924static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
 925{
 926	u32 start_lo, start_hi;
 927	u32 mem_type;
 928
 929	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
 930			PCI_BASE_ADDRESS_MEM_MASK;
 931	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
 932			PCI_BASE_ADDRESS_MEM_TYPE_MASK;
 933
 934	switch (mem_type) {
 935	case PCI_BASE_ADDRESS_MEM_TYPE_64:
 936		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
 937						+ bar + 4));
 938		break;
 939	case PCI_BASE_ADDRESS_MEM_TYPE_32:
 940	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
 941		/* 1M mem BAR treated as 32-bit BAR */
 942	default:
  943		/* unknown mem type treated as 32-bit BAR */
 944		start_hi = 0;
 945		break;
 946	}
 947
 948	return ((u64)start_hi << 32) | start_lo;
 949}
 950
 951static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
 952			     void *buf, unsigned int count, bool is_write)
 953{
 954	u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
 955	int ret;
 956
 957	if (is_write)
 958		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
 959					bar_start + off, buf, count);
 960	else
 961		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
 962					bar_start + off, buf, count);
 963	return ret;
 964}
 965
 966static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
 967{
 968	return off >= vgpu_aperture_offset(vgpu) &&
 969	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
 970}
 971
 972static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
 973		void *buf, unsigned long count, bool is_write)
 974{
 975	void __iomem *aperture_va;
 976
 977	if (!intel_vgpu_in_aperture(vgpu, off) ||
 978	    !intel_vgpu_in_aperture(vgpu, off + count)) {
 979		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
 980		return -EINVAL;
 981	}
 982
 983	aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
 984					ALIGN_DOWN(off, PAGE_SIZE),
 985					count + offset_in_page(off));
 986	if (!aperture_va)
 987		return -EIO;
 988
 989	if (is_write)
 990		memcpy_toio(aperture_va + offset_in_page(off), buf, count);
 991	else
 992		memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
 993
 994	io_mapping_unmap(aperture_va);
 995
 996	return 0;
 997}
 998
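     /*
      * Route an access at *ppos to config space emulation, BAR0 MMIO
      * emulation, the BAR2 aperture, or a device-specific region.
      */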
 999static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
1000			size_t count, loff_t *ppos, bool is_write)
1001{
1002	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1003	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1004	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
1005	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
1006	int ret = -EINVAL;
1007
1008
1009	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
1010		gvt_vgpu_err("invalid index: %u\n", index);
1011		return -EINVAL;
1012	}
1013
1014	switch (index) {
1015	case VFIO_PCI_CONFIG_REGION_INDEX:
1016		if (is_write)
1017			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
1018						buf, count);
1019		else
1020			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
1021						buf, count);
1022		break;
1023	case VFIO_PCI_BAR0_REGION_INDEX:
1024		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
1025					buf, count, is_write);
1026		break;
1027	case VFIO_PCI_BAR2_REGION_INDEX:
1028		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
1029		break;
1030	case VFIO_PCI_BAR1_REGION_INDEX:
1031	case VFIO_PCI_BAR3_REGION_INDEX:
1032	case VFIO_PCI_BAR4_REGION_INDEX:
1033	case VFIO_PCI_BAR5_REGION_INDEX:
1034	case VFIO_PCI_VGA_REGION_INDEX:
1035	case VFIO_PCI_ROM_REGION_INDEX:
1036		break;
1037	default:
1038		if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
1039			return -EINVAL;
1040
1041		index -= VFIO_PCI_NUM_REGIONS;
1042		return vdev->region[index].ops->rw(vgpu, buf, count,
1043				ppos, is_write);
1044	}
1045
1046	return ret == 0 ? count : ret;
1047}
1048
1049static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
1050{
1051	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1052	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
1053	struct intel_gvt *gvt = vgpu->gvt;
1054	int offset;
1055
1056	/* Only allow MMIO GGTT entry access */
1057	if (index != PCI_BASE_ADDRESS_0)
1058		return false;
1059
1060	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
1061		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
1062
1063	return (offset >= gvt->device_info.gtt_start_offset &&
1064		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
1065			true : false;
1066}
1067
1068static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
1069			size_t count, loff_t *ppos)
1070{
1071	unsigned int done = 0;
1072	int ret;
1073
1074	while (count) {
1075		size_t filled;
1076
 1077		/* Only 8-byte reads of GGTT entries are supported */
1078		if (count >= 8 && !(*ppos % 8) &&
1079			gtt_entry(mdev, ppos)) {
1080			u64 val;
1081
1082			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1083					ppos, false);
1084			if (ret <= 0)
1085				goto read_err;
1086
1087			if (copy_to_user(buf, &val, sizeof(val)))
1088				goto read_err;
1089
1090			filled = 8;
1091		} else if (count >= 4 && !(*ppos % 4)) {
1092			u32 val;
1093
1094			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1095					ppos, false);
1096			if (ret <= 0)
1097				goto read_err;
1098
1099			if (copy_to_user(buf, &val, sizeof(val)))
1100				goto read_err;
1101
1102			filled = 4;
1103		} else if (count >= 2 && !(*ppos % 2)) {
1104			u16 val;
1105
1106			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1107					ppos, false);
1108			if (ret <= 0)
1109				goto read_err;
1110
1111			if (copy_to_user(buf, &val, sizeof(val)))
1112				goto read_err;
1113
1114			filled = 2;
1115		} else {
1116			u8 val;
1117
1118			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
1119					false);
1120			if (ret <= 0)
1121				goto read_err;
1122
1123			if (copy_to_user(buf, &val, sizeof(val)))
1124				goto read_err;
1125
1126			filled = 1;
1127		}
1128
1129		count -= filled;
1130		done += filled;
1131		*ppos += filled;
1132		buf += filled;
1133	}
1134
1135	return done;
1136
1137read_err:
1138	return -EFAULT;
1139}
1140
1141static ssize_t intel_vgpu_write(struct mdev_device *mdev,
1142				const char __user *buf,
1143				size_t count, loff_t *ppos)
1144{
1145	unsigned int done = 0;
1146	int ret;
1147
1148	while (count) {
1149		size_t filled;
1150
 1151		/* Only 8-byte writes of GGTT entries are supported */
1152		if (count >= 8 && !(*ppos % 8) &&
1153			gtt_entry(mdev, ppos)) {
1154			u64 val;
1155
1156			if (copy_from_user(&val, buf, sizeof(val)))
1157				goto write_err;
1158
1159			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1160					ppos, true);
1161			if (ret <= 0)
1162				goto write_err;
1163
1164			filled = 8;
1165		} else if (count >= 4 && !(*ppos % 4)) {
1166			u32 val;
1167
1168			if (copy_from_user(&val, buf, sizeof(val)))
1169				goto write_err;
1170
1171			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1172					ppos, true);
1173			if (ret <= 0)
1174				goto write_err;
1175
1176			filled = 4;
1177		} else if (count >= 2 && !(*ppos % 2)) {
1178			u16 val;
1179
1180			if (copy_from_user(&val, buf, sizeof(val)))
1181				goto write_err;
1182
1183			ret = intel_vgpu_rw(mdev, (char *)&val,
1184					sizeof(val), ppos, true);
1185			if (ret <= 0)
1186				goto write_err;
1187
1188			filled = 2;
1189		} else {
1190			u8 val;
1191
1192			if (copy_from_user(&val, buf, sizeof(val)))
1193				goto write_err;
1194
1195			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
1196					ppos, true);
1197			if (ret <= 0)
1198				goto write_err;
1199
1200			filled = 1;
1201		}
1202
1203		count -= filled;
1204		done += filled;
1205		*ppos += filled;
1206		buf += filled;
1207	}
1208
1209	return done;
1210write_err:
1211	return -EFAULT;
1212}
1213
1214static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1215{
1216	unsigned int index;
1217	u64 virtaddr;
1218	unsigned long req_size, pgoff, req_start;
1219	pgprot_t pg_prot;
1220	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1221
1222	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
1223	if (index >= VFIO_PCI_ROM_REGION_INDEX)
1224		return -EINVAL;
1225
1226	if (vma->vm_end < vma->vm_start)
1227		return -EINVAL;
1228	if ((vma->vm_flags & VM_SHARED) == 0)
1229		return -EINVAL;
1230	if (index != VFIO_PCI_BAR2_REGION_INDEX)
1231		return -EINVAL;
1232
1233	pg_prot = vma->vm_page_prot;
1234	virtaddr = vma->vm_start;
1235	req_size = vma->vm_end - vma->vm_start;
1236	pgoff = vma->vm_pgoff &
1237		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1238	req_start = pgoff << PAGE_SHIFT;
1239
1240	if (!intel_vgpu_in_aperture(vgpu, req_start))
1241		return -EINVAL;
1242	if (req_start + req_size >
1243	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1244		return -EINVAL;
1245
1246	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1247
1248	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
1249}
1250
1251static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
1252{
1253	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
1254		return 1;
1255
1256	return 0;
1257}
1258
1259static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
1260			unsigned int index, unsigned int start,
1261			unsigned int count, u32 flags,
1262			void *data)
1263{
1264	return 0;
1265}
1266
1267static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
1268			unsigned int index, unsigned int start,
1269			unsigned int count, u32 flags, void *data)
1270{
1271	return 0;
1272}
1273
1274static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
1275		unsigned int index, unsigned int start, unsigned int count,
1276		u32 flags, void *data)
1277{
1278	return 0;
1279}
1280
1281static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
1282		unsigned int index, unsigned int start, unsigned int count,
1283		u32 flags, void *data)
1284{
1285	struct eventfd_ctx *trigger;
1286
1287	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
1288		int fd = *(int *)data;
1289
1290		trigger = eventfd_ctx_fdget(fd);
1291		if (IS_ERR(trigger)) {
1292			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
1293			return PTR_ERR(trigger);
1294		}
1295		kvmgt_vdev(vgpu)->msi_trigger = trigger;
1296	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
1297		intel_vgpu_release_msi_eventfd_ctx(vgpu);
1298
1299	return 0;
1300}
1301
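     /*
      * Dispatch a VFIO_DEVICE_SET_IRQS request to the matching INTx or MSI
      * handler, selected by the irq index and action flags.
      */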
1302static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
1303		unsigned int index, unsigned int start, unsigned int count,
1304		void *data)
1305{
1306	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
1307			unsigned int start, unsigned int count, u32 flags,
1308			void *data) = NULL;
1309
1310	switch (index) {
1311	case VFIO_PCI_INTX_IRQ_INDEX:
1312		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1313		case VFIO_IRQ_SET_ACTION_MASK:
1314			func = intel_vgpu_set_intx_mask;
1315			break;
1316		case VFIO_IRQ_SET_ACTION_UNMASK:
1317			func = intel_vgpu_set_intx_unmask;
1318			break;
1319		case VFIO_IRQ_SET_ACTION_TRIGGER:
1320			func = intel_vgpu_set_intx_trigger;
1321			break;
1322		}
1323		break;
1324	case VFIO_PCI_MSI_IRQ_INDEX:
1325		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1326		case VFIO_IRQ_SET_ACTION_MASK:
1327		case VFIO_IRQ_SET_ACTION_UNMASK:
1328			/* XXX Need masking support exported */
1329			break;
1330		case VFIO_IRQ_SET_ACTION_TRIGGER:
1331			func = intel_vgpu_set_msi_trigger;
1332			break;
1333		}
1334		break;
1335	}
1336
1337	if (!func)
1338		return -ENOTTY;
1339
1340	return func(vgpu, index, start, count, flags, data);
1341}
1342
1343static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1344			     unsigned long arg)
1345{
1346	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1347	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1348	unsigned long minsz;
1349
1350	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1351
1352	if (cmd == VFIO_DEVICE_GET_INFO) {
1353		struct vfio_device_info info;
1354
1355		minsz = offsetofend(struct vfio_device_info, num_irqs);
1356
1357		if (copy_from_user(&info, (void __user *)arg, minsz))
1358			return -EFAULT;
1359
1360		if (info.argsz < minsz)
1361			return -EINVAL;
1362
1363		info.flags = VFIO_DEVICE_FLAGS_PCI;
1364		info.flags |= VFIO_DEVICE_FLAGS_RESET;
1365		info.num_regions = VFIO_PCI_NUM_REGIONS +
1366				vdev->num_regions;
1367		info.num_irqs = VFIO_PCI_NUM_IRQS;
1368
1369		return copy_to_user((void __user *)arg, &info, minsz) ?
1370			-EFAULT : 0;
1371
1372	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1373		struct vfio_region_info info;
1374		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
1375		unsigned int i;
1376		int ret;
1377		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
1378		int nr_areas = 1;
1379		int cap_type_id;
1380
1381		minsz = offsetofend(struct vfio_region_info, offset);
1382
1383		if (copy_from_user(&info, (void __user *)arg, minsz))
1384			return -EFAULT;
1385
1386		if (info.argsz < minsz)
1387			return -EINVAL;
1388
1389		switch (info.index) {
1390		case VFIO_PCI_CONFIG_REGION_INDEX:
1391			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1392			info.size = vgpu->gvt->device_info.cfg_space_size;
1393			info.flags = VFIO_REGION_INFO_FLAG_READ |
1394				     VFIO_REGION_INFO_FLAG_WRITE;
1395			break;
1396		case VFIO_PCI_BAR0_REGION_INDEX:
1397			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1398			info.size = vgpu->cfg_space.bar[info.index].size;
1399			if (!info.size) {
1400				info.flags = 0;
1401				break;
1402			}
1403
1404			info.flags = VFIO_REGION_INFO_FLAG_READ |
1405				     VFIO_REGION_INFO_FLAG_WRITE;
1406			break;
1407		case VFIO_PCI_BAR1_REGION_INDEX:
1408			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1409			info.size = 0;
1410			info.flags = 0;
1411			break;
1412		case VFIO_PCI_BAR2_REGION_INDEX:
1413			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1414			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
1415					VFIO_REGION_INFO_FLAG_MMAP |
1416					VFIO_REGION_INFO_FLAG_READ |
1417					VFIO_REGION_INFO_FLAG_WRITE;
1418			info.size = gvt_aperture_sz(vgpu->gvt);
1419
1420			sparse = kzalloc(struct_size(sparse, areas, nr_areas),
1421					 GFP_KERNEL);
1422			if (!sparse)
1423				return -ENOMEM;
1424
1425			sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1426			sparse->header.version = 1;
1427			sparse->nr_areas = nr_areas;
1428			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1429			sparse->areas[0].offset =
1430					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1431			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1432			break;
1433
1434		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1435			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1436			info.size = 0;
1437			info.flags = 0;
1438
1439			gvt_dbg_core("get region info bar:%d\n", info.index);
1440			break;
1441
1442		case VFIO_PCI_ROM_REGION_INDEX:
1443		case VFIO_PCI_VGA_REGION_INDEX:
1444			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1445			info.size = 0;
1446			info.flags = 0;
1447
1448			gvt_dbg_core("get region info index:%d\n", info.index);
1449			break;
1450		default:
1451			{
1452				struct vfio_region_info_cap_type cap_type = {
1453					.header.id = VFIO_REGION_INFO_CAP_TYPE,
1454					.header.version = 1 };
1455
1456				if (info.index >= VFIO_PCI_NUM_REGIONS +
1457						vdev->num_regions)
1458					return -EINVAL;
1459				info.index =
1460					array_index_nospec(info.index,
1461							VFIO_PCI_NUM_REGIONS +
1462							vdev->num_regions);
1463
1464				i = info.index - VFIO_PCI_NUM_REGIONS;
1465
1466				info.offset =
1467					VFIO_PCI_INDEX_TO_OFFSET(info.index);
1468				info.size = vdev->region[i].size;
1469				info.flags = vdev->region[i].flags;
1470
1471				cap_type.type = vdev->region[i].type;
1472				cap_type.subtype = vdev->region[i].subtype;
1473
1474				ret = vfio_info_add_capability(&caps,
1475							&cap_type.header,
1476							sizeof(cap_type));
1477				if (ret)
1478					return ret;
1479			}
1480		}
1481
1482		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
1483			switch (cap_type_id) {
1484			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
1485				ret = vfio_info_add_capability(&caps,
1486					&sparse->header,
1487					struct_size(sparse, areas,
1488						    sparse->nr_areas));
1489				if (ret) {
1490					kfree(sparse);
1491					return ret;
1492				}
1493				break;
1494			default:
1495				kfree(sparse);
1496				return -EINVAL;
1497			}
1498		}
1499
1500		if (caps.size) {
1501			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
1502			if (info.argsz < sizeof(info) + caps.size) {
1503				info.argsz = sizeof(info) + caps.size;
1504				info.cap_offset = 0;
1505			} else {
1506				vfio_info_cap_shift(&caps, sizeof(info));
1507				if (copy_to_user((void __user *)arg +
1508						  sizeof(info), caps.buf,
1509						  caps.size)) {
1510					kfree(caps.buf);
1511					kfree(sparse);
1512					return -EFAULT;
1513				}
1514				info.cap_offset = sizeof(info);
1515			}
1516
1517			kfree(caps.buf);
1518		}
1519
1520		kfree(sparse);
1521		return copy_to_user((void __user *)arg, &info, minsz) ?
1522			-EFAULT : 0;
1523	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1524		struct vfio_irq_info info;
1525
1526		minsz = offsetofend(struct vfio_irq_info, count);
1527
1528		if (copy_from_user(&info, (void __user *)arg, minsz))
1529			return -EFAULT;
1530
1531		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1532			return -EINVAL;
1533
1534		switch (info.index) {
1535		case VFIO_PCI_INTX_IRQ_INDEX:
1536		case VFIO_PCI_MSI_IRQ_INDEX:
1537			break;
1538		default:
1539			return -EINVAL;
1540		}
1541
1542		info.flags = VFIO_IRQ_INFO_EVENTFD;
1543
1544		info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1545
1546		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1547			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
1548				       VFIO_IRQ_INFO_AUTOMASKED);
1549		else
1550			info.flags |= VFIO_IRQ_INFO_NORESIZE;
1551
1552		return copy_to_user((void __user *)arg, &info, minsz) ?
1553			-EFAULT : 0;
1554	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
1555		struct vfio_irq_set hdr;
1556		u8 *data = NULL;
1557		int ret = 0;
1558		size_t data_size = 0;
1559
1560		minsz = offsetofend(struct vfio_irq_set, count);
1561
1562		if (copy_from_user(&hdr, (void __user *)arg, minsz))
1563			return -EFAULT;
1564
1565		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
1566			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1567
1568			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1569						VFIO_PCI_NUM_IRQS, &data_size);
1570			if (ret) {
1571				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1572				return -EINVAL;
1573			}
1574			if (data_size) {
1575				data = memdup_user((void __user *)(arg + minsz),
1576						   data_size);
1577				if (IS_ERR(data))
1578					return PTR_ERR(data);
1579			}
1580		}
1581
1582		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1583					hdr.start, hdr.count, data);
1584		kfree(data);
1585
1586		return ret;
1587	} else if (cmd == VFIO_DEVICE_RESET) {
1588		intel_gvt_ops->vgpu_reset(vgpu);
1589		return 0;
1590	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
1591		struct vfio_device_gfx_plane_info dmabuf;
1592		int ret = 0;
1593
1594		minsz = offsetofend(struct vfio_device_gfx_plane_info,
1595				    dmabuf_id);
1596		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
1597			return -EFAULT;
1598		if (dmabuf.argsz < minsz)
1599			return -EINVAL;
1600
1601		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
1602		if (ret != 0)
1603			return ret;
1604
1605		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
1606								-EFAULT : 0;
1607	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
1608		__u32 dmabuf_id;
1609		__s32 dmabuf_fd;
1610
1611		if (get_user(dmabuf_id, (__u32 __user *)arg))
1612			return -EFAULT;
1613
1614		dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
1615		return dmabuf_fd;
1616
1617	}
1618
1619	return -ENOTTY;
1620}
1621
1622static ssize_t
1623vgpu_id_show(struct device *dev, struct device_attribute *attr,
1624	     char *buf)
1625{
1626	struct mdev_device *mdev = mdev_from_dev(dev);
1627
1628	if (mdev) {
1629		struct intel_vgpu *vgpu = (struct intel_vgpu *)
1630			mdev_get_drvdata(mdev);
1631		return sprintf(buf, "%d\n", vgpu->id);
1632	}
1633	return sprintf(buf, "\n");
1634}
1635
1636static DEVICE_ATTR_RO(vgpu_id);
1637
1638static struct attribute *intel_vgpu_attrs[] = {
1639	&dev_attr_vgpu_id.attr,
1640	NULL
1641};
1642
1643static const struct attribute_group intel_vgpu_group = {
1644	.name = "intel_vgpu",
1645	.attrs = intel_vgpu_attrs,
1646};
1647
1648static const struct attribute_group *intel_vgpu_groups[] = {
1649	&intel_vgpu_group,
1650	NULL,
1651};
1652
1653static struct mdev_parent_ops intel_vgpu_ops = {
1654	.mdev_attr_groups       = intel_vgpu_groups,
1655	.create			= intel_vgpu_create,
1656	.remove			= intel_vgpu_remove,
1657
1658	.open			= intel_vgpu_open,
1659	.release		= intel_vgpu_release,
1660
1661	.read			= intel_vgpu_read,
1662	.write			= intel_vgpu_write,
1663	.mmap			= intel_vgpu_mmap,
1664	.ioctl			= intel_vgpu_ioctl,
1665};
1666
1667static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
1668{
1669	struct attribute_group **kvm_vgpu_type_groups;
1670
1671	intel_gvt_ops = ops;
1672	if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
1673		return -EFAULT;
1674	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
1675
1676	return mdev_register_device(dev, &intel_vgpu_ops);
1677}
1678
1679static void kvmgt_host_exit(struct device *dev)
1680{
1681	mdev_unregister_device(dev);
1682}
1683
1684static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
1685{
1686	struct kvmgt_guest_info *info;
1687	struct kvm *kvm;
1688	struct kvm_memory_slot *slot;
1689	int idx;
1690
1691	if (!handle_valid(handle))
1692		return -ESRCH;
1693
1694	info = (struct kvmgt_guest_info *)handle;
1695	kvm = info->kvm;
1696
1697	idx = srcu_read_lock(&kvm->srcu);
1698	slot = gfn_to_memslot(kvm, gfn);
1699	if (!slot) {
1700		srcu_read_unlock(&kvm->srcu, idx);
1701		return -EINVAL;
1702	}
1703
1704	spin_lock(&kvm->mmu_lock);
1705
1706	if (kvmgt_gfn_is_write_protected(info, gfn))
1707		goto out;
1708
1709	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1710	kvmgt_protect_table_add(info, gfn);
1711
1712out:
1713	spin_unlock(&kvm->mmu_lock);
1714	srcu_read_unlock(&kvm->srcu, idx);
1715	return 0;
1716}
1717
1718static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
1719{
1720	struct kvmgt_guest_info *info;
1721	struct kvm *kvm;
1722	struct kvm_memory_slot *slot;
1723	int idx;
1724
1725	if (!handle_valid(handle))
1726		return 0;
1727
1728	info = (struct kvmgt_guest_info *)handle;
1729	kvm = info->kvm;
1730
1731	idx = srcu_read_lock(&kvm->srcu);
1732	slot = gfn_to_memslot(kvm, gfn);
1733	if (!slot) {
1734		srcu_read_unlock(&kvm->srcu, idx);
1735		return -EINVAL;
1736	}
1737
1738	spin_lock(&kvm->mmu_lock);
1739
1740	if (!kvmgt_gfn_is_write_protected(info, gfn))
1741		goto out;
1742
1743	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1744	kvmgt_protect_table_del(info, gfn);
1745
1746out:
1747	spin_unlock(&kvm->mmu_lock);
1748	srcu_read_unlock(&kvm->srcu, idx);
1749	return 0;
1750}
1751
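     /*
      * KVM page-track callback: forward writes that hit a write-protected
      * gfn to the GVT write-protect handler.
      */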
1752static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1753		const u8 *val, int len,
1754		struct kvm_page_track_notifier_node *node)
1755{
1756	struct kvmgt_guest_info *info = container_of(node,
1757					struct kvmgt_guest_info, track_node);
1758
1759	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
1760		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
1761						     (void *)val, len);
1762}
1763
1764static void kvmgt_page_track_flush_slot(struct kvm *kvm,
1765		struct kvm_memory_slot *slot,
1766		struct kvm_page_track_notifier_node *node)
1767{
1768	int i;
1769	gfn_t gfn;
1770	struct kvmgt_guest_info *info = container_of(node,
1771					struct kvmgt_guest_info, track_node);
1772
1773	spin_lock(&kvm->mmu_lock);
1774	for (i = 0; i < slot->npages; i++) {
1775		gfn = slot->base_gfn + i;
1776		if (kvmgt_gfn_is_write_protected(info, gfn)) {
1777			kvm_slot_page_track_remove_page(kvm, slot, gfn,
1778						KVM_PAGE_TRACK_WRITE);
1779			kvmgt_protect_table_del(info, gfn);
1780		}
1781	}
1782	spin_unlock(&kvm->mmu_lock);
1783}
1784
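     /* Check whether an active vGPU is already bound to this KVM instance. */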
1785static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
1786{
1787	struct intel_vgpu *itr;
1788	struct kvmgt_guest_info *info;
1789	int id;
1790	bool ret = false;
1791
1792	mutex_lock(&vgpu->gvt->lock);
1793	for_each_active_vgpu(vgpu->gvt, itr, id) {
1794		if (!handle_valid(itr->handle))
1795			continue;
1796
1797		info = (struct kvmgt_guest_info *)itr->handle;
1798		if (kvm && kvm == info->kvm) {
1799			ret = true;
1800			goto out;
1801		}
1802	}
1803out:
1804	mutex_unlock(&vgpu->gvt->lock);
1805	return ret;
1806}
1807
1808static int kvmgt_guest_init(struct mdev_device *mdev)
1809{
1810	struct kvmgt_guest_info *info;
1811	struct intel_vgpu *vgpu;
1812	struct kvmgt_vdev *vdev;
1813	struct kvm *kvm;
1814
1815	vgpu = mdev_get_drvdata(mdev);
1816	if (handle_valid(vgpu->handle))
1817		return -EEXIST;
1818
1819	vdev = kvmgt_vdev(vgpu);
1820	kvm = vdev->kvm;
1821	if (!kvm || kvm->mm != current->mm) {
1822		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1823		return -ESRCH;
1824	}
1825
1826	if (__kvmgt_vgpu_exist(vgpu, kvm))
1827		return -EEXIST;
1828
1829	info = vzalloc(sizeof(struct kvmgt_guest_info));
1830	if (!info)
1831		return -ENOMEM;
1832
1833	vgpu->handle = (unsigned long)info;
1834	info->vgpu = vgpu;
1835	info->kvm = kvm;
1836	kvm_get_kvm(info->kvm);
1837
1838	kvmgt_protect_table_init(info);
1839	gvt_cache_init(vgpu);
1840
1841	info->track_node.track_write = kvmgt_page_track_write;
1842	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
1843	kvm_page_track_register_notifier(kvm, &info->track_node);
1844
1845	info->debugfs_cache_entries = debugfs_create_ulong(
1846						"kvmgt_nr_cache_entries",
1847						0444, vgpu->debugfs,
1848						&vdev->nr_cache_entries);
1849	return 0;
1850}
1851
1852static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1853{
1854	debugfs_remove(info->debugfs_cache_entries);
1855
1856	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1857	kvm_put_kvm(info->kvm);
1858	kvmgt_protect_table_destroy(info);
1859	gvt_cache_destroy(info->vgpu);
1860	vfree(info);
1861
1862	return true;
1863}
1864
1865static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
1866{
1867	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1868
1869	vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
1870
1871	if (!vgpu->vdev)
1872		return -ENOMEM;
1873
1874	kvmgt_vdev(vgpu)->vgpu = vgpu;
1875
1876	return 0;
1877}
1878
1879static void kvmgt_detach_vgpu(void *p_vgpu)
1880{
1881	int i;
1882	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1883	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1884
1885	if (!vdev->region)
1886		return;
1887
1888	for (i = 0; i < vdev->num_regions; i++)
1889		if (vdev->region[i].ops->release)
1890			vdev->region[i].ops->release(vgpu,
1891					&vdev->region[i]);
1892	vdev->num_regions = 0;
1893	kfree(vdev->region);
1894	vdev->region = NULL;
1895
1896	kfree(vdev);
1897}
1898
1899static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
1900{
1901	struct kvmgt_guest_info *info;
1902	struct intel_vgpu *vgpu;
1903	struct kvmgt_vdev *vdev;
1904
1905	if (!handle_valid(handle))
1906		return -ESRCH;
1907
1908	info = (struct kvmgt_guest_info *)handle;
1909	vgpu = info->vgpu;
1910	vdev = kvmgt_vdev(vgpu);
1911
1912	/*
 1913	 * When the guest is powered off, msi_trigger is set to NULL, but the
 1914	 * vgpu's config and MMIO registers aren't restored to their defaults.
 1915	 * If this vgpu is reused by the next VM, its pipes may still be
 1916	 * enabled, so once the vgpu becomes active again it will receive
 1917	 * vblank interrupt requests. However, msi_trigger stays NULL until MSI
 1918	 * is enabled by the guest, so if msi_trigger is NULL, return success
 1919	 * without injecting an interrupt into the guest.
1920	 */
1921	if (vdev->msi_trigger == NULL)
1922		return 0;
1923
1924	if (eventfd_signal(vdev->msi_trigger, 1) == 1)
1925		return 0;
1926
1927	return -EFAULT;
1928}
1929
1930static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1931{
1932	struct kvmgt_guest_info *info;
1933	kvm_pfn_t pfn;
1934
1935	if (!handle_valid(handle))
1936		return INTEL_GVT_INVALID_ADDR;
1937
1938	info = (struct kvmgt_guest_info *)handle;
1939
1940	pfn = gfn_to_pfn(info->kvm, gfn);
1941	if (is_error_noslot_pfn(pfn))
1942		return INTEL_GVT_INVALID_ADDR;
1943
1944	return pfn;
1945}
1946
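     /*
      * Map a guest page range for DMA. An existing cache entry for the same
      * gfn and size is reused by taking an extra reference; a size mismatch
      * forces an unmap and remap.
      */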
1947static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
1948		unsigned long size, dma_addr_t *dma_addr)
1949{
1950	struct intel_vgpu *vgpu;
1951	struct kvmgt_vdev *vdev;
1952	struct gvt_dma *entry;
1953	int ret;
1954
1955	if (!handle_valid(handle))
1956		return -EINVAL;
1957
1958	vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
1959	vdev = kvmgt_vdev(vgpu);
1960
1961	mutex_lock(&vdev->cache_lock);
1962
1963	entry = __gvt_cache_find_gfn(vgpu, gfn);
1964	if (!entry) {
1965		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1966		if (ret)
1967			goto err_unlock;
1968
1969		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1970		if (ret)
1971			goto err_unmap;
1972	} else if (entry->size != size) {
1973		/* the same gfn with different size: unmap and re-map */
1974		gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1975		__gvt_cache_remove_entry(vgpu, entry);
1976
1977		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1978		if (ret)
1979			goto err_unlock;
1980
1981		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1982		if (ret)
1983			goto err_unmap;
1984	} else {
1985		kref_get(&entry->ref);
1986		*dma_addr = entry->dma_addr;
1987	}
1988
1989	mutex_unlock(&vdev->cache_lock);
1990	return 0;
1991
1992err_unmap:
1993	gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
1994err_unlock:
1995	mutex_unlock(&vdev->cache_lock);
1996	return ret;
1997}
1998
1999static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
2000{
2001	struct kvmgt_guest_info *info;
2002	struct kvmgt_vdev *vdev;
2003	struct gvt_dma *entry;
2004	int ret = 0;
2005
2006	if (!handle_valid(handle))
2007		return -ENODEV;
2008
2009	info = (struct kvmgt_guest_info *)handle;
2010	vdev = kvmgt_vdev(info->vgpu);
2011
2012	mutex_lock(&vdev->cache_lock);
2013	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
2014	if (entry)
2015		kref_get(&entry->ref);
2016	else
2017		ret = -ENOMEM;
2018	mutex_unlock(&vdev->cache_lock);
2019
2020	return ret;
2021}
2022
2023static void __gvt_dma_release(struct kref *ref)
2024{
2025	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
2026
2027	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
2028			   entry->size);
2029	__gvt_cache_remove_entry(entry->vgpu, entry);
2030}
2031
2032static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
2033{
2034	struct intel_vgpu *vgpu;
2035	struct kvmgt_vdev *vdev;
2036	struct gvt_dma *entry;
2037
2038	if (!handle_valid(handle))
2039		return;
2040
2041	vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
2042	vdev = kvmgt_vdev(vgpu);
2043
2044	mutex_lock(&vdev->cache_lock);
2045	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
2046	if (entry)
2047		kref_put(&entry->ref, __gvt_dma_release);
2048	mutex_unlock(&vdev->cache_lock);
2049}
2050
2051static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
2052			void *buf, unsigned long len, bool write)
2053{
2054	struct kvmgt_guest_info *info;
2055
2056	if (!handle_valid(handle))
2057		return -ESRCH;
2058
2059	info = (struct kvmgt_guest_info *)handle;
2060
2061	return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
2062			   gpa, buf, len, write);
2063}
2064
2065static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
2066			void *buf, unsigned long len)
2067{
2068	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
2069}
2070
2071static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
2072			void *buf, unsigned long len)
2073{
2074	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
2075}
2076
2077static unsigned long kvmgt_virt_to_pfn(void *addr)
2078{
2079	return PFN_DOWN(__pa(addr));
2080}
2081
2082static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
2083{
2084	struct kvmgt_guest_info *info;
2085	struct kvm *kvm;
2086	int idx;
2087	bool ret;
2088
2089	if (!handle_valid(handle))
2090		return false;
2091
2092	info = (struct kvmgt_guest_info *)handle;
2093	kvm = info->kvm;
2094
2095	idx = srcu_read_lock(&kvm->srcu);
2096	ret = kvm_is_visible_gfn(kvm, gfn);
2097	srcu_read_unlock(&kvm->srcu, idx);
2098
2099	return ret;
2100}
2101
2102static struct intel_gvt_mpt kvmgt_mpt = {
2103	.type = INTEL_GVT_HYPERVISOR_KVM,
2104	.host_init = kvmgt_host_init,
2105	.host_exit = kvmgt_host_exit,
2106	.attach_vgpu = kvmgt_attach_vgpu,
2107	.detach_vgpu = kvmgt_detach_vgpu,
2108	.inject_msi = kvmgt_inject_msi,
2109	.from_virt_to_mfn = kvmgt_virt_to_pfn,
2110	.enable_page_track = kvmgt_page_track_add,
2111	.disable_page_track = kvmgt_page_track_remove,
2112	.read_gpa = kvmgt_read_gpa,
2113	.write_gpa = kvmgt_write_gpa,
2114	.gfn_to_mfn = kvmgt_gfn_to_pfn,
2115	.dma_map_guest_page = kvmgt_dma_map_guest_page,
2116	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
2117	.dma_pin_guest_page = kvmgt_dma_pin_guest_page,
2118	.set_opregion = kvmgt_set_opregion,
2119	.set_edid = kvmgt_set_edid,
2120	.get_vfio_device = kvmgt_get_vfio_device,
2121	.put_vfio_device = kvmgt_put_vfio_device,
2122	.is_valid_gfn = kvmgt_is_valid_gfn,
2123};
2124
2125static int __init kvmgt_init(void)
2126{
2127	if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
2128		return -ENODEV;
2129	return 0;
2130}
2131
2132static void __exit kvmgt_exit(void)
2133{
2134	intel_gvt_unregister_hypervisor();
2135}
2136
2137module_init(kvmgt_init);
2138module_exit(kvmgt_exit);
2139
2140MODULE_LICENSE("GPL and additional rights");
2141MODULE_AUTHOR("Intel Corporation");