   1/*
   2 * GTT virtualization
   3 *
   4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the next
  14 * paragraph) shall be included in all copies or substantial portions of the
  15 * Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23 * SOFTWARE.
  24 *
  25 * Authors:
  26 *    Zhi Wang <zhi.a.wang@intel.com>
  27 *    Zhenyu Wang <zhenyuw@linux.intel.com>
  28 *    Xiao Zheng <xiao.zheng@intel.com>
  29 *
  30 * Contributors:
  31 *    Min He <min.he@intel.com>
  32 *    Bing Niu <bing.niu@intel.com>
  33 *
  34 */
  35
  36#include "i915_drv.h"
  37#include "gvt.h"
  38#include "i915_pvinfo.h"
  39#include "trace.h"
  40
  41#include "gt/intel_gt_regs.h"
  42
  43#if defined(VERBOSE_DEBUG)
  44#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
  45#else
  46#define gvt_vdbg_mm(fmt, args...)
  47#endif
  48
  49static bool enable_out_of_sync = false;
  50static int preallocated_oos_pages = 8192;
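/*
 * Out-of-sync ("oos") shadowing, disabled by default: when enabled, a PTE
 * page table whose guest copy keeps being written (see can_do_out_of_sync()
 * below) is dropped from write protection and snapshotted into one of the
 * preallocated oos pages; the shadow copy is re-synced by
 * intel_vgpu_sync_oos_pages() before the next workload is submitted.
 */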
  51
  52static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
  53{
  54	struct kvm *kvm = vgpu->vfio_device.kvm;
  55	int idx;
  56	bool ret;
  57
  58	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
  59		return false;
  60
  61	idx = srcu_read_lock(&kvm->srcu);
  62	ret = kvm_is_visible_gfn(kvm, gfn);
  63	srcu_read_unlock(&kvm->srcu, idx);
  64
  65	return ret;
  66}
  67
   68/*
   69 * validate that a gm address and its related range size fall
   70 * entirely within the vGPU's aperture or hidden gm space
   71 */
  72bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
  73{
  74	if (size == 0)
  75		return vgpu_gmadr_is_valid(vgpu, addr);
  76
  77	if (vgpu_gmadr_is_aperture(vgpu, addr) &&
  78	    vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
  79		return true;
  80	else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
  81		 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
  82		return true;
  83
  84	gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
  85		     addr, size);
  86	return false;
  87}
  88
  89/* translate a guest gmadr to host gmadr */
  90int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
  91{
  92	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
  93
  94	if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
  95		     "invalid guest gmadr %llx\n", g_addr))
  96		return -EACCES;
  97
  98	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
  99		*h_addr = vgpu_aperture_gmadr_base(vgpu)
 100			  + (g_addr - vgpu_aperture_offset(vgpu));
 101	else
 102		*h_addr = vgpu_hidden_gmadr_base(vgpu)
 103			  + (g_addr - vgpu_hidden_offset(vgpu));
 104	return 0;
 105}
 106
 107/* translate a host gmadr to guest gmadr */
 108int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
 109{
 110	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 111
 112	if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
 113		     "invalid host gmadr %llx\n", h_addr))
 114		return -EACCES;
 115
 116	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
 117		*g_addr = vgpu_aperture_gmadr_base(vgpu)
 118			+ (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
 119	else
 120		*g_addr = vgpu_hidden_gmadr_base(vgpu)
 121			+ (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
 122	return 0;
 123}
 124
 125int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
 126			     unsigned long *h_index)
 127{
 128	u64 h_addr;
 129	int ret;
 130
 131	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
 132				       &h_addr);
 133	if (ret)
 134		return ret;
 135
 136	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
 137	return 0;
 138}
 139
 140int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
 141			     unsigned long *g_index)
 142{
 143	u64 g_addr;
 144	int ret;
 145
 146	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
 147				       &g_addr);
 148	if (ret)
 149		return ret;
 150
 151	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
 152	return 0;
 153}
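/*
 * Example (4K GTT pages, so I915_GTT_PAGE_SHIFT is 12): the index helpers
 * above shift a GGTT slot index into a gmadr, apply the aperture/hidden base
 * fixup, and shift back. Guest slot 0x100 thus corresponds to guest gmadr
 * 0x100000 before the fixup is applied.
 */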
 154
 155#define gtt_type_is_entry(type) \
 156	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
 157	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
 158	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
 159
 160#define gtt_type_is_pt(type) \
 161	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
 162
 163#define gtt_type_is_pte_pt(type) \
 164	(type == GTT_TYPE_PPGTT_PTE_PT)
 165
 166#define gtt_type_is_root_pointer(type) \
 167	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
 168
 169#define gtt_init_entry(e, t, p, v) do { \
 170	(e)->type = t; \
 171	(e)->pdev = p; \
 172	memcpy(&(e)->val64, &v, sizeof(v)); \
 173} while (0)
 174
 175/*
 176 * Mappings between GTT_TYPE* enumerations.
  177 * The following information can be looked up for a given type:
  178 * - the type of the next level page table
  179 * - the type of entry inside this level of page table
  180 * - the type of the entry with PSE set
  181 *
  182 * If the given type doesn't carry a particular piece of information,
  183 * GTT_TYPE_INVALID is returned: e.g. asking an L4 root entry for its
  184 * PSE type, or asking a PTE page table for its next level page table
  185 * type, yields GTT_TYPE_INVALID, because an L4 root entry has no PSE
  186 * bit and a PTE page table has no next level page table type.
  187 * This is useful when traversing a page table, since GTT_TYPE_INVALID
  188 * marks where a walk must stop in that direction.
 189 */
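/*
 * Worked example (derived from the table below): starting from an L4 root,
 * get_next_pt_type() walks GTT_TYPE_PPGTT_ROOT_L4_ENTRY ->
 * GTT_TYPE_PPGTT_PML4_PT -> GTT_TYPE_PPGTT_PDP_PT -> GTT_TYPE_PPGTT_PDE_PT ->
 * GTT_TYPE_PPGTT_PTE_PT, whose next_pt_type is GTT_TYPE_INVALID, which ends
 * the walk; likewise get_pse_type(GTT_TYPE_PPGTT_ROOT_L4_ENTRY) is
 * GTT_TYPE_INVALID because an L4 root entry has no PSE bit.
 */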
 190
 191struct gtt_type_table_entry {
 192	int entry_type;
 193	int pt_type;
 194	int next_pt_type;
 195	int pse_entry_type;
 196};
 197
 198#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
 199	[type] = { \
 200		.entry_type = e_type, \
 201		.pt_type = cpt_type, \
 202		.next_pt_type = npt_type, \
 203		.pse_entry_type = pse_type, \
 204	}
 205
 206static const struct gtt_type_table_entry gtt_type_table[] = {
 207	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 208			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 209			GTT_TYPE_INVALID,
 210			GTT_TYPE_PPGTT_PML4_PT,
 211			GTT_TYPE_INVALID),
 212	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
 213			GTT_TYPE_PPGTT_PML4_ENTRY,
 214			GTT_TYPE_PPGTT_PML4_PT,
 215			GTT_TYPE_PPGTT_PDP_PT,
 216			GTT_TYPE_INVALID),
 217	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
 218			GTT_TYPE_PPGTT_PML4_ENTRY,
 219			GTT_TYPE_PPGTT_PML4_PT,
 220			GTT_TYPE_PPGTT_PDP_PT,
 221			GTT_TYPE_INVALID),
 222	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
 223			GTT_TYPE_PPGTT_PDP_ENTRY,
 224			GTT_TYPE_PPGTT_PDP_PT,
 225			GTT_TYPE_PPGTT_PDE_PT,
 226			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 227	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
 228			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
 229			GTT_TYPE_INVALID,
 230			GTT_TYPE_PPGTT_PDE_PT,
 231			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 232	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
 233			GTT_TYPE_PPGTT_PDP_ENTRY,
 234			GTT_TYPE_PPGTT_PDP_PT,
 235			GTT_TYPE_PPGTT_PDE_PT,
 236			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 237	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
 238			GTT_TYPE_PPGTT_PDE_ENTRY,
 239			GTT_TYPE_PPGTT_PDE_PT,
 240			GTT_TYPE_PPGTT_PTE_PT,
 241			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 242	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
 243			GTT_TYPE_PPGTT_PDE_ENTRY,
 244			GTT_TYPE_PPGTT_PDE_PT,
 245			GTT_TYPE_PPGTT_PTE_PT,
 246			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 247	/* We take IPS bit as 'PSE' for PTE level. */
 248	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
 249			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 250			GTT_TYPE_PPGTT_PTE_PT,
 251			GTT_TYPE_INVALID,
 252			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
 253	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 254			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 255			GTT_TYPE_PPGTT_PTE_PT,
 256			GTT_TYPE_INVALID,
 257			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
 258	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
 259			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 260			GTT_TYPE_PPGTT_PTE_PT,
 261			GTT_TYPE_INVALID,
 262			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
 263	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
 264			GTT_TYPE_PPGTT_PDE_ENTRY,
 265			GTT_TYPE_PPGTT_PDE_PT,
 266			GTT_TYPE_INVALID,
 267			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 268	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
 269			GTT_TYPE_PPGTT_PDP_ENTRY,
 270			GTT_TYPE_PPGTT_PDP_PT,
 271			GTT_TYPE_INVALID,
 272			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 273	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
 274			GTT_TYPE_GGTT_PTE,
 275			GTT_TYPE_INVALID,
 276			GTT_TYPE_INVALID,
 277			GTT_TYPE_INVALID),
 278};
 279
 280static inline int get_next_pt_type(int type)
 281{
 282	return gtt_type_table[type].next_pt_type;
 283}
 284
 285static inline int get_entry_type(int type)
 286{
 287	return gtt_type_table[type].entry_type;
 288}
 289
 290static inline int get_pse_type(int type)
 291{
 292	return gtt_type_table[type].pse_entry_type;
 293}
 294
 295static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
 296{
 297	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
 298
 299	return readq(addr);
 300}
 301
 302static void ggtt_invalidate(struct intel_gt *gt)
 303{
 304	mmio_hw_access_pre(gt);
 305	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 306	mmio_hw_access_post(gt);
 307}
 308
 309static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
 310{
 311	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
 312
 313	writeq(pte, addr);
 314}
 315
 316static inline int gtt_get_entry64(void *pt,
 317		struct intel_gvt_gtt_entry *e,
 318		unsigned long index, bool hypervisor_access, unsigned long gpa,
 319		struct intel_vgpu *vgpu)
 320{
 321	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
 322	int ret;
 323
 324	if (WARN_ON(info->gtt_entry_size != 8))
 325		return -EINVAL;
 326
 327	if (hypervisor_access) {
 328		ret = intel_gvt_read_gpa(vgpu, gpa +
 329				(index << info->gtt_entry_size_shift),
 330				&e->val64, 8);
 331		if (WARN_ON(ret))
 332			return ret;
 333	} else if (!pt) {
 334		e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
 335	} else {
 336		e->val64 = *((u64 *)pt + index);
 337	}
 338	return 0;
 339}
 340
 341static inline int gtt_set_entry64(void *pt,
 342		struct intel_gvt_gtt_entry *e,
 343		unsigned long index, bool hypervisor_access, unsigned long gpa,
 344		struct intel_vgpu *vgpu)
 345{
 346	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
 347	int ret;
 348
 349	if (WARN_ON(info->gtt_entry_size != 8))
 350		return -EINVAL;
 351
 352	if (hypervisor_access) {
 353		ret = intel_gvt_write_gpa(vgpu, gpa +
 354				(index << info->gtt_entry_size_shift),
 355				&e->val64, 8);
 356		if (WARN_ON(ret))
 357			return ret;
 358	} else if (!pt) {
 359		write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
 360	} else {
 361		*((u64 *)pt + index) = e->val64;
 362	}
 363	return 0;
 364}
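/*
 * Summary of the three access paths above: with hypervisor_access the entry
 * is read/written through the guest physical address via
 * intel_gvt_read_gpa()/intel_gvt_write_gpa(); with pt == NULL it goes to the
 * host GGTT through read_pte64()/write_pte64(); otherwise pt is the kernel
 * virtual address of a shadow page and the entry is accessed directly.
 */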
 365
 366#define GTT_HAW 46
 367
 368#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
 369#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
 370#define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
 371#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)
 372
 373#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
  374#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
 375
 376#define GTT_64K_PTE_STRIDE 16
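/*
 * For example, with GTT_HAW == 46, ADDR_4K_MASK is GENMASK_ULL(45, 12) ==
 * 0x00003ffffffff000; and since a 64K entry spans 16 consecutive 4K PTE
 * slots, GTT_64K_PTE_STRIDE is 16.
 */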
 377
 378static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 379{
 380	unsigned long pfn;
 381
 382	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
 383		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
 384	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
 385		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
 386	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
 387		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
 388	else
 389		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
 390	return pfn;
 391}
 392
 393static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
 394{
 395	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
 396		e->val64 &= ~ADDR_1G_MASK;
 397		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
 398	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
 399		e->val64 &= ~ADDR_2M_MASK;
 400		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
 401	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
 402		e->val64 &= ~ADDR_64K_MASK;
 403		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
 404	} else {
 405		e->val64 &= ~ADDR_4K_MASK;
 406		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
 407	}
 408
 409	e->val64 |= (pfn << PAGE_SHIFT);
 410}
 411
 412static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
 413{
 414	return !!(e->val64 & _PAGE_PSE);
 415}
 416
 417static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
 418{
 419	if (gen8_gtt_test_pse(e)) {
 420		switch (e->type) {
 421		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 422			e->val64 &= ~_PAGE_PSE;
 423			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
 424			break;
 425		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
 426			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
 427			e->val64 &= ~_PAGE_PSE;
 428			break;
 429		default:
 430			WARN_ON(1);
 431		}
 432	}
 433}
 434
 435static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
 436{
 437	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
 438		return false;
 439
 440	return !!(e->val64 & GEN8_PDE_IPS_64K);
 441}
 442
 443static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
 444{
 445	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
 446		return;
 447
 448	e->val64 &= ~GEN8_PDE_IPS_64K;
 449}
 450
 451static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
 452{
 453	/*
  454	 * i915 writes the PDP root pointer registers without the present
  455	 * bit set, and that still works, so root pointer entries need to
  456	 * be treated specially.
 457	 */
 458	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
 459			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
 460		return (e->val64 != 0);
 461	else
 462		return (e->val64 & GEN8_PAGE_PRESENT);
 463}
 464
 465static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
 466{
 467	e->val64 &= ~GEN8_PAGE_PRESENT;
 468}
 469
 470static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
 471{
 472	e->val64 |= GEN8_PAGE_PRESENT;
 473}
 474
 475static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
 476{
 477	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
 478}
 479
 480static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
 481{
 482	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
 483}
 484
 485static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
 486{
 487	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
 488}
 489
 490/*
 491 * Per-platform GMA routines.
 492 */
 493static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
 494{
 495	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
 496
 497	trace_gma_index(__func__, gma, x);
 498	return x;
 499}
 500
 501#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
 502static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
 503{ \
 504	unsigned long x = (exp); \
 505	trace_gma_index(__func__, gma, x); \
 506	return x; \
 507}
 508
 509DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
 510DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
 511DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
 512DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
 513DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
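/*
 * Worked example: for gma == 0x8040201000, i.e.
 * (1ULL << 39) | (1 << 30) | (1 << 21) | (1 << 12), the helpers above yield
 * pml4 index 1, l4_pdp index 1, pde index 1 and pte index 1, with a zero
 * byte offset inside the 4K page.
 */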
 514
 515static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
 516	.get_entry = gtt_get_entry64,
 517	.set_entry = gtt_set_entry64,
 518	.clear_present = gtt_entry_clear_present,
 519	.set_present = gtt_entry_set_present,
 520	.test_present = gen8_gtt_test_present,
 521	.test_pse = gen8_gtt_test_pse,
 522	.clear_pse = gen8_gtt_clear_pse,
 523	.clear_ips = gen8_gtt_clear_ips,
 524	.test_ips = gen8_gtt_test_ips,
 525	.clear_64k_splited = gen8_gtt_clear_64k_splited,
 526	.set_64k_splited = gen8_gtt_set_64k_splited,
 527	.test_64k_splited = gen8_gtt_test_64k_splited,
 528	.get_pfn = gen8_gtt_get_pfn,
 529	.set_pfn = gen8_gtt_set_pfn,
 530};
 531
 532static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
 533	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
 534	.gma_to_pte_index = gen8_gma_to_pte_index,
 535	.gma_to_pde_index = gen8_gma_to_pde_index,
 536	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
 537	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
 538	.gma_to_pml4_index = gen8_gma_to_pml4_index,
 539};
 540
  541/* Update the entry type per the PSE and IPS bits, e.g. PDE + PSE -> 2M PTE. */
 542static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops,
 543	struct intel_gvt_gtt_entry *entry, bool ips)
 544{
 545	switch (entry->type) {
 546	case GTT_TYPE_PPGTT_PDE_ENTRY:
 547	case GTT_TYPE_PPGTT_PDP_ENTRY:
 548		if (pte_ops->test_pse(entry))
 549			entry->type = get_pse_type(entry->type);
 550		break;
 551	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
 552		if (ips)
 553			entry->type = get_pse_type(entry->type);
 554		break;
 555	default:
 556		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
 557	}
 558
 559	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
 560}
 561
 562/*
 563 * MM helpers.
 564 */
 565static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
 566		struct intel_gvt_gtt_entry *entry, unsigned long index,
 567		bool guest)
 568{
 569	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 570
 571	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
 572
 573	entry->type = mm->ppgtt_mm.root_entry_type;
 574	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
 575			   mm->ppgtt_mm.shadow_pdps,
 576			   entry, index, false, 0, mm->vgpu);
 577	update_entry_type_for_real(pte_ops, entry, false);
 578}
 579
 580static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
 581		struct intel_gvt_gtt_entry *entry, unsigned long index)
 582{
 583	_ppgtt_get_root_entry(mm, entry, index, true);
 584}
 585
 586static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
 587		struct intel_gvt_gtt_entry *entry, unsigned long index)
 588{
 589	_ppgtt_get_root_entry(mm, entry, index, false);
 590}
 591
 592static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
 593		struct intel_gvt_gtt_entry *entry, unsigned long index,
 594		bool guest)
 595{
 596	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 597
 598	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
 599			   mm->ppgtt_mm.shadow_pdps,
 600			   entry, index, false, 0, mm->vgpu);
 601}
 602
 603static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
 604		struct intel_gvt_gtt_entry *entry, unsigned long index)
 605{
 606	_ppgtt_set_root_entry(mm, entry, index, false);
 607}
 608
 609static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
 610		struct intel_gvt_gtt_entry *entry, unsigned long index)
 611{
 612	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 613
 614	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
 615
 616	entry->type = GTT_TYPE_GGTT_PTE;
 617	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
 618			   false, 0, mm->vgpu);
 619}
 620
 621static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
 622		struct intel_gvt_gtt_entry *entry, unsigned long index)
 623{
 624	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 625
 626	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
 627
 628	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
 629			   false, 0, mm->vgpu);
 630}
 631
 632static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
 633		struct intel_gvt_gtt_entry *entry, unsigned long index)
 634{
 635	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 636
 637	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
 638
 639	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
 640}
 641
 642static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
 643		struct intel_gvt_gtt_entry *entry, unsigned long index)
 644{
 645	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
 646	unsigned long offset = index;
 647
 648	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
 649
 650	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
 651		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
 652		mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
 653	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
 654		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
 655		mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
 656	}
 657
 658	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
 659}
 660
 661/*
 662 * PPGTT shadow page table helpers.
 663 */
 664static inline int ppgtt_spt_get_entry(
 665		struct intel_vgpu_ppgtt_spt *spt,
 666		void *page_table, int type,
 667		struct intel_gvt_gtt_entry *e, unsigned long index,
 668		bool guest)
 669{
 670	struct intel_gvt *gvt = spt->vgpu->gvt;
 671	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 672	int ret;
 673
 674	e->type = get_entry_type(type);
 675
 676	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
 677		return -EINVAL;
 678
 679	ret = ops->get_entry(page_table, e, index, guest,
 680			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
 681			spt->vgpu);
 682	if (ret)
 683		return ret;
 684
 685	update_entry_type_for_real(ops, e, guest ?
 686				   spt->guest_page.pde_ips : false);
 687
 688	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
 689		    type, e->type, index, e->val64);
 690	return 0;
 691}
 692
 693static inline int ppgtt_spt_set_entry(
 694		struct intel_vgpu_ppgtt_spt *spt,
 695		void *page_table, int type,
 696		struct intel_gvt_gtt_entry *e, unsigned long index,
 697		bool guest)
 698{
 699	struct intel_gvt *gvt = spt->vgpu->gvt;
 700	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 701
 702	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
 703		return -EINVAL;
 704
 705	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
 706		    type, e->type, index, e->val64);
 707
 708	return ops->set_entry(page_table, e, index, guest,
 709			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
 710			spt->vgpu);
 711}
 712
 713#define ppgtt_get_guest_entry(spt, e, index) \
 714	ppgtt_spt_get_entry(spt, NULL, \
 715		spt->guest_page.type, e, index, true)
 716
 717#define ppgtt_set_guest_entry(spt, e, index) \
 718	ppgtt_spt_set_entry(spt, NULL, \
 719		spt->guest_page.type, e, index, true)
 720
 721#define ppgtt_get_shadow_entry(spt, e, index) \
 722	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
 723		spt->shadow_page.type, e, index, false)
 724
 725#define ppgtt_set_shadow_entry(spt, e, index) \
 726	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
 727		spt->shadow_page.type, e, index, false)
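/*
 * In other words: the *_guest_entry macros pass a NULL page table, so
 * ppgtt_spt_get_entry()/ppgtt_spt_set_entry() fall back to the guest page's
 * GPA (guest_page.gfn << I915_GTT_PAGE_SHIFT) via the hypervisor access path,
 * while the *_shadow_entry macros access the shadow page directly through
 * shadow_page.vaddr.
 */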
 728
 729static void *alloc_spt(gfp_t gfp_mask)
 730{
 731	struct intel_vgpu_ppgtt_spt *spt;
 732
 733	spt = kzalloc(sizeof(*spt), gfp_mask);
 734	if (!spt)
 735		return NULL;
 736
 737	spt->shadow_page.page = alloc_page(gfp_mask);
 738	if (!spt->shadow_page.page) {
 739		kfree(spt);
 740		return NULL;
 741	}
 742	return spt;
 743}
 744
 745static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
 746{
 747	__free_page(spt->shadow_page.page);
 748	kfree(spt);
 749}
 750
 751static int detach_oos_page(struct intel_vgpu *vgpu,
 752		struct intel_vgpu_oos_page *oos_page);
 753
 754static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 755{
 756	struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;
 757
 758	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
 759
 760	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
 761		       DMA_BIDIRECTIONAL);
 762
 763	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
 764
 765	if (spt->guest_page.gfn) {
 766		if (spt->guest_page.oos_page)
 767			detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
 768
 769		intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
 770	}
 771
 772	list_del_init(&spt->post_shadow_list);
 773	free_spt(spt);
 774}
 775
 776static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
 777{
 778	struct intel_vgpu_ppgtt_spt *spt, *spn;
 779	struct radix_tree_iter iter;
 780	LIST_HEAD(all_spt);
 781	void __rcu **slot;
 782
 783	rcu_read_lock();
 784	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
 785		spt = radix_tree_deref_slot(slot);
 786		list_move(&spt->post_shadow_list, &all_spt);
 787	}
 788	rcu_read_unlock();
 789
 790	list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
 791		ppgtt_free_spt(spt);
 792}
 793
 794static int ppgtt_handle_guest_write_page_table_bytes(
 795		struct intel_vgpu_ppgtt_spt *spt,
 796		u64 pa, void *p_data, int bytes);
 797
 798static int ppgtt_write_protection_handler(
 799		struct intel_vgpu_page_track *page_track,
 800		u64 gpa, void *data, int bytes)
 801{
  802	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
  803
  804	if (bytes != 4 && bytes != 8)
  805		return -EINVAL;
  806
  807	return ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
 813}
 814
 815/* Find a spt by guest gfn. */
 816static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
 817		struct intel_vgpu *vgpu, unsigned long gfn)
 818{
 819	struct intel_vgpu_page_track *track;
 820
 821	track = intel_vgpu_find_page_track(vgpu, gfn);
 822	if (track && track->handler == ppgtt_write_protection_handler)
 823		return track->priv_data;
 824
 825	return NULL;
 826}
 827
 828/* Find the spt by shadow page mfn. */
 829static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
 830		struct intel_vgpu *vgpu, unsigned long mfn)
 831{
 832	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
 833}
 834
 835static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
 836
 837/* Allocate shadow page table without guest page. */
 838static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
 839		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
 840{
 841	struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
 842	struct intel_vgpu_ppgtt_spt *spt = NULL;
 843	dma_addr_t daddr;
 844	int ret;
 845
 846retry:
 847	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
 848	if (!spt) {
 849		if (reclaim_one_ppgtt_mm(vgpu->gvt))
 850			goto retry;
 851
 852		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
 853		return ERR_PTR(-ENOMEM);
 854	}
 855
 856	spt->vgpu = vgpu;
 857	atomic_set(&spt->refcount, 1);
 858	INIT_LIST_HEAD(&spt->post_shadow_list);
 859
 860	/*
 861	 * Init shadow_page.
 862	 */
 863	spt->shadow_page.type = type;
 864	daddr = dma_map_page(kdev, spt->shadow_page.page,
 865			     0, 4096, DMA_BIDIRECTIONAL);
 866	if (dma_mapping_error(kdev, daddr)) {
 867		gvt_vgpu_err("fail to map dma addr\n");
 868		ret = -EINVAL;
 869		goto err_free_spt;
 870	}
 871	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
 872	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
 873
 874	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
 875	if (ret)
 876		goto err_unmap_dma;
 877
 878	return spt;
 879
 880err_unmap_dma:
 881	dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 882err_free_spt:
 883	free_spt(spt);
 884	return ERR_PTR(ret);
 885}
 886
 887/* Allocate shadow page table associated with specific gfn. */
 888static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
 889		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
 890		unsigned long gfn, bool guest_pde_ips)
 891{
 892	struct intel_vgpu_ppgtt_spt *spt;
 893	int ret;
 894
 895	spt = ppgtt_alloc_spt(vgpu, type);
 896	if (IS_ERR(spt))
 897		return spt;
 898
 899	/*
 900	 * Init guest_page.
 901	 */
 902	ret = intel_vgpu_register_page_track(vgpu, gfn,
 903			ppgtt_write_protection_handler, spt);
 904	if (ret) {
 905		ppgtt_free_spt(spt);
 906		return ERR_PTR(ret);
 907	}
 908
 909	spt->guest_page.type = type;
 910	spt->guest_page.gfn = gfn;
 911	spt->guest_page.pde_ips = guest_pde_ips;
 912
 913	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
 914
 915	return spt;
 916}
 917
 918#define pt_entry_size_shift(spt) \
 919	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
 920
 921#define pt_entries(spt) \
 922	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
 923
 924#define for_each_present_guest_entry(spt, e, i) \
 925	for (i = 0; i < pt_entries(spt); \
 926	     i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
 927		if (!ppgtt_get_guest_entry(spt, e, i) && \
 928		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 929
 930#define for_each_present_shadow_entry(spt, e, i) \
 931	for (i = 0; i < pt_entries(spt); \
 932	     i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
 933		if (!ppgtt_get_shadow_entry(spt, e, i) && \
 934		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 935
 936#define for_each_shadow_entry(spt, e, i) \
 937	for (i = 0; i < pt_entries(spt); \
 938	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
 939		if (!ppgtt_get_shadow_entry(spt, e, i))
 940
 941static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
 942{
 943	int v = atomic_read(&spt->refcount);
 944
 945	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
 946	atomic_inc(&spt->refcount);
 947}
 948
 949static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
 950{
 951	int v = atomic_read(&spt->refcount);
 952
 953	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
 954	return atomic_dec_return(&spt->refcount);
 955}
 956
 957static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
 958
 959static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
 960		struct intel_gvt_gtt_entry *e)
 961{
 962	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 963	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 964	struct intel_vgpu_ppgtt_spt *s;
 965	enum intel_gvt_gtt_type cur_pt_type;
 966
 967	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
 968
 969	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
 970		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
 971		cur_pt_type = get_next_pt_type(e->type);
 972
 973		if (!gtt_type_is_pt(cur_pt_type) ||
 974				!gtt_type_is_pt(cur_pt_type + 1)) {
 975			drm_WARN(&i915->drm, 1,
 976				 "Invalid page table type, cur_pt_type is: %d\n",
 977				 cur_pt_type);
 978			return -EINVAL;
 979		}
 980
 981		cur_pt_type += 1;
 982
 983		if (ops->get_pfn(e) ==
 984			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
 985			return 0;
 986	}
 987	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
 988	if (!s) {
 989		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
 990				ops->get_pfn(e));
 991		return -ENXIO;
 992	}
 993	return ppgtt_invalidate_spt(s);
 994}
 995
 996static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
 997		struct intel_gvt_gtt_entry *entry)
 998{
 999	struct intel_vgpu *vgpu = spt->vgpu;
1000	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1001	unsigned long pfn;
1002	int type;
1003
1004	pfn = ops->get_pfn(entry);
1005	type = spt->shadow_page.type;
1006
1007	/* Uninitialized spte or unshadowed spte. */
1008	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
1009		return;
1010
1011	intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
1012}
1013
1014static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
1015{
1016	struct intel_vgpu *vgpu = spt->vgpu;
1017	struct intel_gvt_gtt_entry e;
1018	unsigned long index;
1019	int ret;
1020
1021	trace_spt_change(spt->vgpu->id, "die", spt,
1022			spt->guest_page.gfn, spt->shadow_page.type);
1023
1024	if (ppgtt_put_spt(spt) > 0)
1025		return 0;
1026
1027	for_each_present_shadow_entry(spt, &e, index) {
1028		switch (e.type) {
1029		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1030			gvt_vdbg_mm("invalidate 4K entry\n");
1031			ppgtt_invalidate_pte(spt, &e);
1032			break;
1033		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
 1034			/* We don't set up 64K shadow entries so far. */
1035			WARN(1, "suspicious 64K gtt entry\n");
1036			continue;
1037		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1038			gvt_vdbg_mm("invalidate 2M entry\n");
1039			continue;
1040		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1041			WARN(1, "GVT doesn't support 1GB page\n");
1042			continue;
1043		case GTT_TYPE_PPGTT_PML4_ENTRY:
1044		case GTT_TYPE_PPGTT_PDP_ENTRY:
1045		case GTT_TYPE_PPGTT_PDE_ENTRY:
 1046			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
1047			ret = ppgtt_invalidate_spt_by_shadow_entry(
1048					spt->vgpu, &e);
1049			if (ret)
1050				goto fail;
1051			break;
1052		default:
1053			GEM_BUG_ON(1);
1054		}
1055	}
1056
1057	trace_spt_change(spt->vgpu->id, "release", spt,
1058			 spt->guest_page.gfn, spt->shadow_page.type);
1059	ppgtt_free_spt(spt);
1060	return 0;
1061fail:
1062	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
1063			spt, e.val64, e.type);
1064	return ret;
1065}
1066
1067static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1068{
1069	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1070
1071	if (GRAPHICS_VER(dev_priv) == 9) {
1072		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1073			GAMW_ECO_ENABLE_64K_IPS_FIELD;
1074
1075		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
1076	} else if (GRAPHICS_VER(dev_priv) >= 11) {
 1077		/* 64K paging is only controlled by the IPS bit in the PTE now. */
1078		return true;
1079	} else
1080		return false;
1081}
1082
1083static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
1084
1085static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
1086		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1087{
1088	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1089	struct intel_vgpu_ppgtt_spt *spt = NULL;
1090	bool ips = false;
1091	int ret;
1092
1093	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
1094
1095	if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1096		ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1097
1098	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1099	if (spt) {
1100		ppgtt_get_spt(spt);
1101
1102		if (ips != spt->guest_page.pde_ips) {
1103			spt->guest_page.pde_ips = ips;
1104
1105			gvt_dbg_mm("reshadow PDE since ips changed\n");
1106			clear_page(spt->shadow_page.vaddr);
1107			ret = ppgtt_populate_spt(spt);
1108			if (ret) {
1109				ppgtt_put_spt(spt);
1110				goto err;
1111			}
1112		}
1113	} else {
1114		int type = get_next_pt_type(we->type);
1115
1116		if (!gtt_type_is_pt(type)) {
1117			ret = -EINVAL;
1118			goto err;
1119		}
1120
1121		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1122		if (IS_ERR(spt)) {
1123			ret = PTR_ERR(spt);
1124			goto err;
1125		}
1126
1127		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1128		if (ret)
1129			goto err_free_spt;
1130
1131		ret = ppgtt_populate_spt(spt);
1132		if (ret)
1133			goto err_free_spt;
1134
1135		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1136				 spt->shadow_page.type);
1137	}
1138	return spt;
1139
1140err_free_spt:
1141	ppgtt_free_spt(spt);
1142	spt = NULL;
1143err:
1144	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1145		     spt, we->val64, we->type);
1146	return ERR_PTR(ret);
1147}
1148
1149static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
1150		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
1151{
1152	const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1153
1154	se->type = ge->type;
1155	se->val64 = ge->val64;
1156
 1157	/* We always split 64KB pages, so clear IPS in the shadow PDE. */
1158	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1159		ops->clear_ips(se);
1160
1161	ops->set_pfn(se, s->shadow_page.mfn);
1162}
1163
1164/*
 1165 * Check whether a 2M page can be used for the given entry
 1166 * @vgpu: target vgpu
 1167 * @entry: target pfn's gtt entry
 1168 *
 1169 * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions are
 1170 * not met, or a negative error code on failure.
1171 */
1172static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
1173	struct intel_gvt_gtt_entry *entry)
1174{
1175	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1176	kvm_pfn_t pfn;
1177
1178	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
1179		return 0;
1180
1181	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1182		return -EINVAL;
1183	pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
1184	if (is_error_noslot_pfn(pfn))
1185		return -EINVAL;
1186	return PageTransHuge(pfn_to_page(pfn));
1187}
1188
1189static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1190	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1191	struct intel_gvt_gtt_entry *se)
1192{
1193	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1194	struct intel_vgpu_ppgtt_spt *sub_spt;
1195	struct intel_gvt_gtt_entry sub_se;
1196	unsigned long start_gfn;
1197	dma_addr_t dma_addr;
1198	unsigned long sub_index;
1199	int ret;
1200
1201	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
1202
1203	start_gfn = ops->get_pfn(se);
1204
1205	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1206	if (IS_ERR(sub_spt))
1207		return PTR_ERR(sub_spt);
1208
1209	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
1210		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
1211						   PAGE_SIZE, &dma_addr);
1212		if (ret)
1213			goto err;
1214		sub_se.val64 = se->val64;
1215
1216		/* Copy the PAT field from PDE. */
1217		sub_se.val64 &= ~_PAGE_PAT;
1218		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
1219
1220		ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
1221		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
1222	}
1223
1224	/* Clear dirty field. */
1225	se->val64 &= ~_PAGE_DIRTY;
1226
1227	ops->clear_pse(se);
1228	ops->clear_ips(se);
1229	ops->set_pfn(se, sub_spt->shadow_page.mfn);
1230	ppgtt_set_shadow_entry(spt, se, index);
1231	return 0;
1232err:
 1233	/* Cancel the existing DMA address mappings. */
1234	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
1235		gvt_vdbg_mm("invalidate 4K entry\n");
1236		ppgtt_invalidate_pte(sub_spt, &sub_se);
1237	}
 1238	/* Release the newly allocated spt. */
1239	trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
1240		sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
1241	ppgtt_free_spt(sub_spt);
1242	return ret;
1243}
1244
1245static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1246	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1247	struct intel_gvt_gtt_entry *se)
1248{
1249	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1250	struct intel_gvt_gtt_entry entry = *se;
1251	unsigned long start_gfn;
1252	dma_addr_t dma_addr;
1253	int i, ret;
1254
1255	gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
1256
1257	GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
1258
1259	start_gfn = ops->get_pfn(se);
1260
1261	entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
1262	ops->set_64k_splited(&entry);
1263
1264	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1265		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
1266						   PAGE_SIZE, &dma_addr);
1267		if (ret)
1268			return ret;
1269
1270		ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
1271		ppgtt_set_shadow_entry(spt, &entry, index + i);
1272	}
1273	return 0;
1274}
1275
1276static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1277	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1278	struct intel_gvt_gtt_entry *ge)
1279{
1280	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1281	struct intel_gvt_gtt_entry se = *ge;
1282	unsigned long gfn, page_size = PAGE_SIZE;
1283	dma_addr_t dma_addr;
1284	int ret;
1285
1286	if (!pte_ops->test_present(ge))
1287		return 0;
1288
1289	gfn = pte_ops->get_pfn(ge);
1290
1291	switch (ge->type) {
1292	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1293		gvt_vdbg_mm("shadow 4K gtt entry\n");
1294		break;
1295	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1296		gvt_vdbg_mm("shadow 64K gtt entry\n");
1297		/*
 1298		 * The layout of a 64K page is special: the page size is
 1299		 * controlled by the upper PDE. To keep things simple, we
 1300		 * always split a 64K page into 4K pages in the shadow PT.
1301		 */
1302		return split_64KB_gtt_entry(vgpu, spt, index, &se);
1303	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1304		gvt_vdbg_mm("shadow 2M gtt entry\n");
1305		ret = is_2MB_gtt_possible(vgpu, ge);
1306		if (ret == 0)
1307			return split_2MB_gtt_entry(vgpu, spt, index, &se);
1308		else if (ret < 0)
1309			return ret;
1310		page_size = I915_GTT_PAGE_SIZE_2M;
1311		break;
1312	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1313		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
1314		return -EINVAL;
1315	default:
1316		GEM_BUG_ON(1);
1317	}
1318
1319	/* direct shadow */
1320	ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
1321	if (ret)
1322		return -ENXIO;
1323
1324	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
1325	ppgtt_set_shadow_entry(spt, &se, index);
1326	return 0;
1327}
1328
1329static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
1330{
1331	struct intel_vgpu *vgpu = spt->vgpu;
1332	struct intel_gvt *gvt = vgpu->gvt;
1333	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1334	struct intel_vgpu_ppgtt_spt *s;
1335	struct intel_gvt_gtt_entry se, ge;
1336	unsigned long gfn, i;
1337	int ret;
1338
1339	trace_spt_change(spt->vgpu->id, "born", spt,
1340			 spt->guest_page.gfn, spt->shadow_page.type);
1341
1342	for_each_present_guest_entry(spt, &ge, i) {
1343		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
1344			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1345			if (IS_ERR(s)) {
1346				ret = PTR_ERR(s);
1347				goto fail;
1348			}
1349			ppgtt_get_shadow_entry(spt, &se, i);
1350			ppgtt_generate_shadow_entry(&se, s, &ge);
1351			ppgtt_set_shadow_entry(spt, &se, i);
1352		} else {
1353			gfn = ops->get_pfn(&ge);
1354			if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
1355				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
1356				ppgtt_set_shadow_entry(spt, &se, i);
1357				continue;
1358			}
1359
1360			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1361			if (ret)
1362				goto fail;
1363		}
1364	}
1365	return 0;
1366fail:
1367	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1368			spt, ge.val64, ge.type);
1369	return ret;
1370}
1371
1372static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
1373		struct intel_gvt_gtt_entry *se, unsigned long index)
1374{
1375	struct intel_vgpu *vgpu = spt->vgpu;
1376	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1377	int ret;
1378
1379	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1380			       spt->shadow_page.type, se->val64, index);
1381
1382	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
1383		    se->type, index, se->val64);
1384
1385	if (!ops->test_present(se))
1386		return 0;
1387
1388	if (ops->get_pfn(se) ==
1389	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1390		return 0;
1391
1392	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
1393		struct intel_vgpu_ppgtt_spt *s =
1394			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1395		if (!s) {
1396			gvt_vgpu_err("fail to find guest page\n");
1397			ret = -ENXIO;
1398			goto fail;
1399		}
1400		ret = ppgtt_invalidate_spt(s);
1401		if (ret)
1402			goto fail;
1403	} else {
 1404		/* We don't set up 64K shadow entries so far. */
1405		WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
1406		     "suspicious 64K entry\n");
1407		ppgtt_invalidate_pte(spt, se);
1408	}
1409
1410	return 0;
1411fail:
1412	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1413			spt, se->val64, se->type);
1414	return ret;
1415}
1416
1417static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
1418		struct intel_gvt_gtt_entry *we, unsigned long index)
1419{
1420	struct intel_vgpu *vgpu = spt->vgpu;
1421	struct intel_gvt_gtt_entry m;
1422	struct intel_vgpu_ppgtt_spt *s;
1423	int ret;
1424
1425	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1426			       we->val64, index);
1427
1428	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
1429		    we->type, index, we->val64);
1430
1431	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
1432		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1433		if (IS_ERR(s)) {
1434			ret = PTR_ERR(s);
1435			goto fail;
1436		}
1437		ppgtt_get_shadow_entry(spt, &m, index);
1438		ppgtt_generate_shadow_entry(&m, s, we);
1439		ppgtt_set_shadow_entry(spt, &m, index);
1440	} else {
1441		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1442		if (ret)
1443			goto fail;
1444	}
1445	return 0;
1446fail:
1447	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1448		spt, we->val64, we->type);
1449	return ret;
1450}
1451
1452static int sync_oos_page(struct intel_vgpu *vgpu,
1453		struct intel_vgpu_oos_page *oos_page)
1454{
1455	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1456	struct intel_gvt *gvt = vgpu->gvt;
1457	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1458	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1459	struct intel_gvt_gtt_entry old, new;
1460	int index;
1461	int ret;
1462
1463	trace_oos_change(vgpu->id, "sync", oos_page->id,
1464			 spt, spt->guest_page.type);
1465
1466	old.type = new.type = get_entry_type(spt->guest_page.type);
1467	old.val64 = new.val64 = 0;
1468
1469	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
1470				info->gtt_entry_size_shift); index++) {
1471		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1472		ops->get_entry(NULL, &new, index, true,
1473			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1474
1475		if (old.val64 == new.val64
1476			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
1477			continue;
1478
1479		trace_oos_sync(vgpu->id, oos_page->id,
1480				spt, spt->guest_page.type,
1481				new.val64, index);
1482
1483		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1484		if (ret)
1485			return ret;
1486
1487		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1488	}
1489
1490	spt->guest_page.write_cnt = 0;
1491	list_del_init(&spt->post_shadow_list);
1492	return 0;
1493}
1494
1495static int detach_oos_page(struct intel_vgpu *vgpu,
1496		struct intel_vgpu_oos_page *oos_page)
1497{
1498	struct intel_gvt *gvt = vgpu->gvt;
1499	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1500
1501	trace_oos_change(vgpu->id, "detach", oos_page->id,
1502			 spt, spt->guest_page.type);
1503
1504	spt->guest_page.write_cnt = 0;
1505	spt->guest_page.oos_page = NULL;
1506	oos_page->spt = NULL;
1507
1508	list_del_init(&oos_page->vm_list);
1509	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1510
1511	return 0;
1512}
1513
1514static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
1515		struct intel_vgpu_ppgtt_spt *spt)
1516{
1517	struct intel_gvt *gvt = spt->vgpu->gvt;
1518	int ret;
1519
1520	ret = intel_gvt_read_gpa(spt->vgpu,
1521			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
1522			oos_page->mem, I915_GTT_PAGE_SIZE);
1523	if (ret)
1524		return ret;
1525
1526	oos_page->spt = spt;
1527	spt->guest_page.oos_page = oos_page;
1528
1529	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1530
1531	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1532			 spt, spt->guest_page.type);
1533	return 0;
1534}
1535
1536static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
1537{
1538	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1539	int ret;
1540
1541	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1542	if (ret)
1543		return ret;
1544
1545	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1546			 spt, spt->guest_page.type);
1547
1548	list_del_init(&oos_page->vm_list);
1549	return sync_oos_page(spt->vgpu, oos_page);
1550}
1551
1552static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
1553{
1554	struct intel_gvt *gvt = spt->vgpu->gvt;
1555	struct intel_gvt_gtt *gtt = &gvt->gtt;
1556	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1557	int ret;
1558
 1559	WARN(oos_page, "shadow PPGTT page already has an oos page\n");
1560
1561	if (list_empty(&gtt->oos_page_free_list_head)) {
1562		oos_page = container_of(gtt->oos_page_use_list_head.next,
1563			struct intel_vgpu_oos_page, list);
1564		ret = ppgtt_set_guest_page_sync(oos_page->spt);
1565		if (ret)
1566			return ret;
1567		ret = detach_oos_page(spt->vgpu, oos_page);
1568		if (ret)
1569			return ret;
1570	} else
1571		oos_page = container_of(gtt->oos_page_free_list_head.next,
1572			struct intel_vgpu_oos_page, list);
1573	return attach_oos_page(oos_page, spt);
1574}
1575
1576static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
1577{
1578	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1579
 1580	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
1581		return -EINVAL;
1582
1583	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1584			 spt, spt->guest_page.type);
1585
1586	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1587	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
1588}
1589
1590/**
 1591 * intel_vgpu_sync_oos_pages - sync all out-of-sync shadow pages for a vGPU
 1592 * @vgpu: a vGPU
 1593 *
 1594 * This function is called before submitting a guest workload to the host,
 1595 * to sync all the out-of-sync shadow pages for the vGPU.
1596 *
1597 * Returns:
1598 * Zero on success, negative error code if failed.
1599 */
1600int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1601{
1602	struct list_head *pos, *n;
1603	struct intel_vgpu_oos_page *oos_page;
1604	int ret;
1605
1606	if (!enable_out_of_sync)
1607		return 0;
1608
1609	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1610		oos_page = container_of(pos,
1611				struct intel_vgpu_oos_page, vm_list);
1612		ret = ppgtt_set_guest_page_sync(oos_page->spt);
1613		if (ret)
1614			return ret;
1615	}
1616	return 0;
1617}
1618
1619/*
 1620 * The heart of the PPGTT shadow page table.
1621 */
1622static int ppgtt_handle_guest_write_page_table(
1623		struct intel_vgpu_ppgtt_spt *spt,
1624		struct intel_gvt_gtt_entry *we, unsigned long index)
1625{
1626	struct intel_vgpu *vgpu = spt->vgpu;
1627	int type = spt->shadow_page.type;
1628	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1629	struct intel_gvt_gtt_entry old_se;
1630	int new_present;
1631	int i, ret;
1632
1633	new_present = ops->test_present(we);
1634
1635	/*
 1636	 * Add the new entry first and then remove the old one; this
 1637	 * guarantees the ppgtt table stays valid during the window
 1638	 * between the add and the removal.
1639	 */
1640	ppgtt_get_shadow_entry(spt, &old_se, index);
1641
1642	if (new_present) {
1643		ret = ppgtt_handle_guest_entry_add(spt, we, index);
1644		if (ret)
1645			goto fail;
1646	}
1647
1648	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
1649	if (ret)
1650		goto fail;
1651
1652	if (!new_present) {
 1653		/* For split 64KB entries, we need to clear them all. */
1654		if (ops->test_64k_splited(&old_se) &&
1655		    !(index % GTT_64K_PTE_STRIDE)) {
1656			gvt_vdbg_mm("remove splited 64K shadow entries\n");
1657			for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1658				ops->clear_64k_splited(&old_se);
1659				ops->set_pfn(&old_se,
1660					vgpu->gtt.scratch_pt[type].page_mfn);
1661				ppgtt_set_shadow_entry(spt, &old_se, index + i);
1662			}
1663		} else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
1664			   old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
1665			ops->clear_pse(&old_se);
1666			ops->set_pfn(&old_se,
1667				     vgpu->gtt.scratch_pt[type].page_mfn);
1668			ppgtt_set_shadow_entry(spt, &old_se, index);
1669		} else {
1670			ops->set_pfn(&old_se,
1671				     vgpu->gtt.scratch_pt[type].page_mfn);
1672			ppgtt_set_shadow_entry(spt, &old_se, index);
1673		}
1674	}
1675
1676	return 0;
1677fail:
1678	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1679			spt, we->val64, we->type);
1680	return ret;
1681}
1682
1683
1684
1685static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
1686{
1687	return enable_out_of_sync
1688		&& gtt_type_is_pte_pt(spt->guest_page.type)
1689		&& spt->guest_page.write_cnt >= 2;
1690}
1691
1692static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1693		unsigned long index)
1694{
1695	set_bit(index, spt->post_shadow_bitmap);
1696	if (!list_empty(&spt->post_shadow_list))
1697		return;
1698
1699	list_add_tail(&spt->post_shadow_list,
1700			&spt->vgpu->gtt.post_shadow_list_head);
1701}
1702
1703/**
1704 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1705 * @vgpu: a vGPU
1706 *
 1707 * This function is called before submitting a guest workload to the host,
1708 * to flush all the post shadows for a vGPU.
1709 *
1710 * Returns:
1711 * Zero on success, negative error code if failed.
1712 */
1713int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1714{
1715	struct list_head *pos, *n;
1716	struct intel_vgpu_ppgtt_spt *spt;
1717	struct intel_gvt_gtt_entry ge;
1718	unsigned long index;
1719	int ret;
1720
1721	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1722		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1723				post_shadow_list);
1724
1725		for_each_set_bit(index, spt->post_shadow_bitmap,
1726				GTT_ENTRY_NUM_IN_ONE_PAGE) {
1727			ppgtt_get_guest_entry(spt, &ge, index);
1728
1729			ret = ppgtt_handle_guest_write_page_table(spt,
1730							&ge, index);
1731			if (ret)
1732				return ret;
1733			clear_bit(index, spt->post_shadow_bitmap);
1734		}
1735		list_del_init(&spt->post_shadow_list);
1736	}
1737	return 0;
1738}
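/*
 * A minimal caller sketch (illustrative only; the real call sites live in
 * the GVT workload scheduler, and "workload" here is an assumed variable):
 *
 *	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 *	if (ret)
 *		return ret;
 *	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
 *	if (ret)
 *		return ret;
 */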
1739
1740static int ppgtt_handle_guest_write_page_table_bytes(
1741		struct intel_vgpu_ppgtt_spt *spt,
1742		u64 pa, void *p_data, int bytes)
1743{
1744	struct intel_vgpu *vgpu = spt->vgpu;
1745	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1746	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1747	struct intel_gvt_gtt_entry we, se;
1748	unsigned long index;
1749	int ret;
1750
1751	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1752
1753	ppgtt_get_guest_entry(spt, &we, index);
1754
1755	/*
 1756	 * For a page table holding 64K gtt entries, only PTE#0, PTE#16,
 1757	 * PTE#32, ... PTE#496 are used, so writes to the unused PTEs in
 1758	 * between (e.g. PTE#17) should be ignored.
1759	 */
1760	if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
1761	    (index % GTT_64K_PTE_STRIDE)) {
1762		gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
1763			    index);
1764		return 0;
1765	}
1766
1767	if (bytes == info->gtt_entry_size) {
1768		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
1769		if (ret)
1770			return ret;
1771	} else {
1772		if (!test_bit(index, spt->post_shadow_bitmap)) {
1773			int type = spt->shadow_page.type;
1774
1775			ppgtt_get_shadow_entry(spt, &se, index);
1776			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
1777			if (ret)
1778				return ret;
1779			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1780			ppgtt_set_shadow_entry(spt, &se, index);
1781		}
1782		ppgtt_set_post_shadow(spt, index);
1783	}
1784
1785	if (!enable_out_of_sync)
1786		return 0;
1787
1788	spt->guest_page.write_cnt++;
1789
1790	if (spt->guest_page.oos_page)
1791		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
1792				false, 0, vgpu);
1793
1794	if (can_do_out_of_sync(spt)) {
1795		if (!spt->guest_page.oos_page)
1796			ppgtt_allocate_oos_page(spt);
1797
1798		ret = ppgtt_set_guest_page_oos(spt);
1799		if (ret < 0)
1800			return ret;
1801	}
1802	return 0;
1803}
1804
1805static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
1806{
1807	struct intel_vgpu *vgpu = mm->vgpu;
1808	struct intel_gvt *gvt = vgpu->gvt;
1809	struct intel_gvt_gtt *gtt = &gvt->gtt;
1810	const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1811	struct intel_gvt_gtt_entry se;
1812	int index;
1813
1814	if (!mm->ppgtt_mm.shadowed)
1815		return;
1816
1817	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
1818		ppgtt_get_shadow_root_entry(mm, &se, index);
1819
1820		if (!ops->test_present(&se))
1821			continue;
1822
1823		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1824		se.val64 = 0;
1825		ppgtt_set_shadow_root_entry(mm, &se, index);
1826
1827		trace_spt_guest_change(vgpu->id, "destroy root pointer",
1828				       NULL, se.type, se.val64, index);
1829	}
1830
1831	mm->ppgtt_mm.shadowed = false;
1832}
1833
1834
1835static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
1836{
1837	struct intel_vgpu *vgpu = mm->vgpu;
1838	struct intel_gvt *gvt = vgpu->gvt;
1839	struct intel_gvt_gtt *gtt = &gvt->gtt;
1840	const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1841	struct intel_vgpu_ppgtt_spt *spt;
1842	struct intel_gvt_gtt_entry ge, se;
1843	int index, ret;
1844
1845	if (mm->ppgtt_mm.shadowed)
1846		return 0;
1847
1848	mm->ppgtt_mm.shadowed = true;
1849
1850	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
1851		ppgtt_get_guest_root_entry(mm, &ge, index);
1852
1853		if (!ops->test_present(&ge))
1854			continue;
1855
1856		trace_spt_guest_change(vgpu->id, __func__, NULL,
1857				       ge.type, ge.val64, index);
1858
1859		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1860		if (IS_ERR(spt)) {
1861			gvt_vgpu_err("fail to populate guest root pointer\n");
1862			ret = PTR_ERR(spt);
1863			goto fail;
1864		}
1865		ppgtt_generate_shadow_entry(&se, spt, &ge);
1866		ppgtt_set_shadow_root_entry(mm, &se, index);
1867
1868		trace_spt_guest_change(vgpu->id, "populate root pointer",
1869				       NULL, se.type, se.val64, index);
1870	}
1871
1872	return 0;
1873fail:
1874	invalidate_ppgtt_mm(mm);
1875	return ret;
1876}
1877
1878static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1879{
1880	struct intel_vgpu_mm *mm;
1881
1882	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1883	if (!mm)
1884		return NULL;
1885
1886	mm->vgpu = vgpu;
1887	kref_init(&mm->ref);
1888	atomic_set(&mm->pincount, 0);
1889
1890	return mm;
1891}
1892
1893static void vgpu_free_mm(struct intel_vgpu_mm *mm)
1894{
1895	kfree(mm);
1896}
1897
1898/**
1899 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
1900 * @vgpu: a vGPU
1901 * @root_entry_type: ppgtt root entry type
1902 * @pdps: guest pdps.
1903 *
1904 * This function is used to create a ppgtt mm object for a vGPU.
1905 *
1906 * Returns:
1907 * Pointer to the created mm object on success, an ERR_PTR-encoded error code if failed.
1908 */
1909struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1910		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
1911{
1912	struct intel_gvt *gvt = vgpu->gvt;
1913	struct intel_vgpu_mm *mm;
1914	int ret;
1915
1916	mm = vgpu_alloc_mm(vgpu);
1917	if (!mm)
1918		return ERR_PTR(-ENOMEM);
1919
1920	mm->type = INTEL_GVT_MM_PPGTT;
1921
1922	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
1923		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
1924	mm->ppgtt_mm.root_entry_type = root_entry_type;
1925
1926	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
1927	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
1928	INIT_LIST_HEAD(&mm->ppgtt_mm.link);
1929
1930	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
1931		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
1932	else
1933		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
1934		       sizeof(mm->ppgtt_mm.guest_pdps));
1935
1936	ret = shadow_ppgtt_mm(mm);
1937	if (ret) {
1938		gvt_vgpu_err("failed to shadow ppgtt mm\n");
1939		vgpu_free_mm(mm);
1940		return ERR_PTR(ret);
1941	}
1942
1943	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1944
1945	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1946	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1947	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1948
1949	return mm;
1950}
1951
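/*
 * Usage sketch (illustrative only, not part of the original file): creating
 * and releasing a 4-level PPGTT mm. The pdp value and the helper name are
 * hypothetical; for an L4 root only pdps[0] is consumed.
 */
static inline int example_create_l4_ppgtt(struct intel_vgpu *vgpu)
{
	u64 pdps[4] = { 0x12345000 };	/* hypothetical guest pml4 address */
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_create_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
					pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	intel_vgpu_mm_put(mm);	/* drop the initial reference; this destroys it */
	return 0;
}
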
1952static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1953{
1954	struct intel_vgpu_mm *mm;
1955	unsigned long nr_entries;
1956
1957	mm = vgpu_alloc_mm(vgpu);
1958	if (!mm)
1959		return ERR_PTR(-ENOMEM);
1960
1961	mm->type = INTEL_GVT_MM_GGTT;
1962
1963	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1964	mm->ggtt_mm.virtual_ggtt =
1965		vzalloc(array_size(nr_entries,
1966				   vgpu->gvt->device_info.gtt_entry_size));
1967	if (!mm->ggtt_mm.virtual_ggtt) {
1968		vgpu_free_mm(mm);
1969		return ERR_PTR(-ENOMEM);
1970	}
1971
1972	mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1973	if (!mm->ggtt_mm.host_ggtt_aperture) {
1974		vfree(mm->ggtt_mm.virtual_ggtt);
1975		vgpu_free_mm(mm);
1976		return ERR_PTR(-ENOMEM);
1977	}
1978
1979	mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1980	if (!mm->ggtt_mm.host_ggtt_hidden) {
1981		vfree(mm->ggtt_mm.host_ggtt_aperture);
1982		vfree(mm->ggtt_mm.virtual_ggtt);
1983		vgpu_free_mm(mm);
1984		return ERR_PTR(-ENOMEM);
1985	}
1986
1987	return mm;
1988}
1989
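/*
 * Sizing sketch (illustrative): the backing stores allocated above hold one
 * gtt_entry_size slot (8 bytes on GEN8+) per 4KiB page of the corresponding
 * range; e.g. a 512MiB range needs 131072 entries, i.e. 1MiB of storage.
 */
static inline size_t example_virtual_ggtt_bytes(u64 range_sz, u32 entry_size)
{
	return (size_t)(range_sz >> I915_GTT_PAGE_SHIFT) * entry_size;
}
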
1990/**
1991 * _intel_vgpu_mm_release - destroy a mm object
1992 * @mm_ref: a kref object
1993 *
1994 * This function is used to destroy a mm object for a vGPU.
1995 *
1996 */
1997void _intel_vgpu_mm_release(struct kref *mm_ref)
1998{
1999	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
2000
2001	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
2002		gvt_err("vgpu mm pin count bug detected\n");
2003
2004	if (mm->type == INTEL_GVT_MM_PPGTT) {
2005		list_del(&mm->ppgtt_mm.list);
2006
2007		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2008		list_del(&mm->ppgtt_mm.lru_list);
2009		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2010
2011		invalidate_ppgtt_mm(mm);
2012	} else {
2013		vfree(mm->ggtt_mm.virtual_ggtt);
2014		vfree(mm->ggtt_mm.host_ggtt_aperture);
2015		vfree(mm->ggtt_mm.host_ggtt_hidden);
2016	}
2017
2018	vgpu_free_mm(mm);
2019}
2020
2021/**
2022 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
2023 * @mm: a vGPU mm object
2024 *
2025 * This function is called when a user no longer needs a vGPU mm object.
2026 */
2027void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
2028{
2029	atomic_dec_if_positive(&mm->pincount);
2030}
2031
2032/**
2033 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
2034 * @mm: target vgpu mm
2035 *
2036 * This function is called when a user wants to use a vGPU mm object. If this
2037 * mm object hasn't been shadowed yet, the shadow will be populated at this
2038 * time.
2039 *
2040 * Returns:
2041 * Zero on success, negative error code if failed.
2042 */
2043int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
2044{
2045	int ret;
2046
2047	atomic_inc(&mm->pincount);
2048
2049	if (mm->type == INTEL_GVT_MM_PPGTT) {
2050		ret = shadow_ppgtt_mm(mm);
2051		if (ret)
2052			return ret;
2053
2054		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2055		list_move_tail(&mm->ppgtt_mm.lru_list,
2056			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2057		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2058	}
2059
2060	return 0;
2061}
2062
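/*
 * Usage sketch (illustrative): pinning a PPGTT mm around workload submission
 * keeps reclaim_one_ppgtt_mm() below from tearing down its shadow page
 * tables; unpin once the workload has completed. The helper is hypothetical.
 */
static inline int example_pin_for_submission(struct intel_vgpu_mm *mm)
{
	int ret = intel_vgpu_pin_mm(mm);	/* shadows the mm on first use */

	if (ret)
		return ret;

	/* ... submit a workload that relies on mm's shadow page tables ... */

	intel_vgpu_unpin_mm(mm);
	return 0;
}
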
2063static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
2064{
2065	struct intel_vgpu_mm *mm;
2066	struct list_head *pos, *n;
2067
2068	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
2069
2070	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
2071		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
2072
2073		if (atomic_read(&mm->pincount))
2074			continue;
2075
2076		list_del_init(&mm->ppgtt_mm.lru_list);
2077		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2078		invalidate_ppgtt_mm(mm);
2079		return 1;
2080	}
2081	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2082	return 0;
2083}
2084
2085/*
2086 * GMA translation APIs.
2087 */
2088static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
2089		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
2090{
2091	struct intel_vgpu *vgpu = mm->vgpu;
2092	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2093	struct intel_vgpu_ppgtt_spt *s;
2094
2095	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2096	if (!s)
2097		return -ENXIO;
2098
2099	if (!guest)
2100		ppgtt_get_shadow_entry(s, e, index);
2101	else
2102		ppgtt_get_guest_entry(s, e, index);
2103	return 0;
2104}
2105
2106/**
2107 * intel_vgpu_gma_to_gpa - translate a GMA to a GPA
2108 * @mm: mm object; it can be a PPGTT or a GGTT mm object
2109 * @gma: graphics memory address in this mm object
2110 *
2111 * This function is used to translate a graphics memory address in a specific
2112 * graphics memory space to a guest physical address.
2113 *
2114 * Returns:
2115 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
2116 */
2117unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
2118{
2119	struct intel_vgpu *vgpu = mm->vgpu;
2120	struct intel_gvt *gvt = vgpu->gvt;
2121	const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
2122	const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
2123	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
2124	unsigned long gma_index[4];
2125	struct intel_gvt_gtt_entry e;
2126	int i, levels = 0;
2127	int ret;
2128
2129	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
2130		   mm->type != INTEL_GVT_MM_PPGTT);
2131
2132	if (mm->type == INTEL_GVT_MM_GGTT) {
2133		if (!vgpu_gmadr_is_valid(vgpu, gma))
2134			goto err;
2135
2136		ggtt_get_guest_entry(mm, &e,
2137			gma_ops->gma_to_ggtt_pte_index(gma));
2138
2139		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
2140			+ (gma & ~I915_GTT_PAGE_MASK);
2141
2142		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2143	} else {
2144		switch (mm->ppgtt_mm.root_entry_type) {
2145		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2146			ppgtt_get_shadow_root_entry(mm, &e, 0);
2147
2148			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
2149			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
2150			gma_index[2] = gma_ops->gma_to_pde_index(gma);
2151			gma_index[3] = gma_ops->gma_to_pte_index(gma);
2152			levels = 4;
2153			break;
2154		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2155			ppgtt_get_shadow_root_entry(mm, &e,
2156					gma_ops->gma_to_l3_pdp_index(gma));
2157
2158			gma_index[0] = gma_ops->gma_to_pde_index(gma);
2159			gma_index[1] = gma_ops->gma_to_pte_index(gma);
2160			levels = 2;
2161			break;
2162		default:
2163			GEM_BUG_ON(1);
2164		}
2165
2166		/* walk the shadow page table and get gpa from guest entry */
2167		for (i = 0; i < levels; i++) {
2168			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
2169				(i == levels - 1));
2170			if (ret)
2171				goto err;
2172
2173			if (!pte_ops->test_present(&e)) {
2174				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
2175				goto err;
2176			}
2177		}
2178
2179		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
2180					(gma & ~I915_GTT_PAGE_MASK);
2181		trace_gma_translate(vgpu->id, "ppgtt", 0,
2182				    mm->ppgtt_mm.root_entry_type, gma, gpa);
2183	}
2184
2185	return gpa;
2186err:
2187	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
2188	return INTEL_GVT_INVALID_ADDR;
2189}
2190
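/*
 * Index sketch (illustrative, assuming the standard GEN8 4-level layout the
 * gma_ops above implement): a 48-bit PPGTT graphics memory address splits
 * into four 9-bit table indices plus a 12-bit page offset.
 */
static inline void example_decompose_gma_48b(unsigned long gma,
					     unsigned long *pml4,
					     unsigned long *pdp,
					     unsigned long *pde,
					     unsigned long *pte)
{
	*pte  = (gma >> 12) & 0x1ff;	/* bits 20:12 */
	*pde  = (gma >> 21) & 0x1ff;	/* bits 29:21 */
	*pdp  = (gma >> 30) & 0x1ff;	/* bits 38:30 */
	*pml4 = (gma >> 39) & 0x1ff;	/* bits 47:39 */
}
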
2191static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2192	unsigned int off, void *p_data, unsigned int bytes)
2193{
2194	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2195	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2196	unsigned long index = off >> info->gtt_entry_size_shift;
2197	unsigned long gma;
2198	struct intel_gvt_gtt_entry e;
2199
2200	if (bytes != 4 && bytes != 8)
2201		return -EINVAL;
2202
2203	gma = index << I915_GTT_PAGE_SHIFT;
2204	if (!intel_gvt_ggtt_validate_range(vgpu,
2205					   gma, 1 << I915_GTT_PAGE_SHIFT)) {
2206		gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
2207		memset(p_data, 0, bytes);
2208		return 0;
2209	}
2210
2211	ggtt_get_guest_entry(ggtt_mm, &e, index);
2212	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
2213			bytes);
2214	return 0;
2215}
2216
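/*
 * Offset math sketch (illustrative, assuming 8-byte GGTT entries): an MMIO
 * access at GTT offset 0x10 hits entry index 2 (0x10 >> 3), which covers
 * guest graphics addresses 0x2000..0x2fff (index << I915_GTT_PAGE_SHIFT).
 */
static inline unsigned long example_ggtt_off_to_gma(unsigned int off)
{
	return ((unsigned long)off >> 3) << I915_GTT_PAGE_SHIFT;
}
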
2217/**
2218 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
2219 * @vgpu: a vGPU
2220 * @off: register offset
2221 * @p_data: data will be returned to guest
2222 * @bytes: data length
2223 *
2224 * This function is used to emulate the GTT MMIO register read
2225 *
2226 * Returns:
2227 * Zero on success, error code if failed.
2228 */
2229int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2230	void *p_data, unsigned int bytes)
2231{
2232	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2233	int ret;
2234
2235	if (bytes != 4 && bytes != 8)
2236		return -EINVAL;
2237
2238	off -= info->gtt_start_offset;
2239	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2240	return ret;
2241}
2242
2243static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2244		struct intel_gvt_gtt_entry *entry)
2245{
2246	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2247	unsigned long pfn;
2248
2249	pfn = pte_ops->get_pfn(entry);
2250	if (pfn != vgpu->gvt->gtt.scratch_mfn)
2251		intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
2252}
2253
2254static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2255	void *p_data, unsigned int bytes)
2256{
2257	struct intel_gvt *gvt = vgpu->gvt;
2258	const struct intel_gvt_device_info *info = &gvt->device_info;
2259	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2260	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
2261	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
2262	unsigned long gma, gfn;
2263	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2264	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2265	dma_addr_t dma_addr;
2266	int ret;
2267	struct intel_gvt_partial_pte *partial_pte, *pos, *n;
2268	bool partial_update = false;
2269
2270	if (bytes != 4 && bytes != 8)
2271		return -EINVAL;
2272
2273	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
2274
2275	/* the VM may configure the whole GM space when ballooning is used */
2276	if (!vgpu_gmadr_is_valid(vgpu, gma))
2277		return 0;
2278
2279	e.type = GTT_TYPE_GGTT_PTE;
2280	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
2281			bytes);
2282
2283	/* If the GGTT entry size is 8 bytes and the update is split into two
2284	 * 4-byte writes, save the first 4 bytes in a list and update the
2285	 * virtual PTE. Only update the shadow PTE when the second 4 bytes arrive.
2286	 */
2287	if (bytes < info->gtt_entry_size) {
2288		bool found = false;
2289
2290		list_for_each_entry_safe(pos, n,
2291				&ggtt_mm->ggtt_mm.partial_pte_list, list) {
2292			if (g_gtt_index == pos->offset >>
2293					info->gtt_entry_size_shift) {
2294				if (off != pos->offset) {
2295					/* the second partial part */
2296					int last_off = pos->offset &
2297						(info->gtt_entry_size - 1);
2298
2299					memcpy((void *)&e.val64 + last_off,
2300						(void *)&pos->data + last_off,
2301						bytes);
2302
2303					list_del(&pos->list);
2304					kfree(pos);
2305					found = true;
2306					break;
2307				}
2308
2309				/* update of the first partial part */
2310				pos->data = e.val64;
2311				ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2312				return 0;
2313			}
2314		}
2315
2316		if (!found) {
2317			/* the first partial part */
2318			partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
2319			if (!partial_pte)
2320				return -ENOMEM;
2321			partial_pte->offset = off;
2322			partial_pte->data = e.val64;
2323			list_add_tail(&partial_pte->list,
2324				&ggtt_mm->ggtt_mm.partial_pte_list);
2325			partial_update = true;
2326		}
2327	}
2328
2329	if (!partial_update && (ops->test_present(&e))) {
2330		gfn = ops->get_pfn(&e);
2331		m.val64 = e.val64;
2332		m.type = e.type;
2333
2334		/* one PTE update may be issued in multiple writes and the
2335		 * first write may not construct a valid gfn
2336		 */
2337		if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
2338			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2339			goto out;
2340		}
2341
2342		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
2343						   &dma_addr);
2344		if (ret) {
2345			gvt_vgpu_err("fail to populate guest ggtt entry\n");
2346			/* The guest driver may read/write the entry while it is
2347			 * only partially updated; in that case the p2m mapping
2348			 * fails, so point the shadow entry at a scratch page.
2349			 */
2350			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2351		} else
2352			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
2353	} else {
2354		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2355		ops->clear_present(&m);
2356	}
2357
2358out:
2359	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2360
2361	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
2362	ggtt_invalidate_pte(vgpu, &e);
2363
2364	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
2365	ggtt_invalidate(gvt->gt);
2366	return 0;
2367}
2368
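/*
 * Partial-write sketch (illustrative): a guest that updates a 64-bit GGTT PTE
 * with two 32-bit MMIO writes is handled above in two steps, e.g. for the
 * entry at GTT offset 0x100:
 *
 *   write(off 0x100, 4 bytes) -> dword cached on partial_pte_list, the
 *                                virtual PTE is updated and the host PTE
 *                                temporarily points at the scratch page
 *   write(off 0x104, 4 bytes) -> cached dword merged with the new one, the
 *                                cache entry is freed, the guest page is
 *                                mapped and the host PTE gets the real entry
 */
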
2369/**
2370 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
2371 * @vgpu: a vGPU
2372 * @off: register offset
2373 * @p_data: data from guest write
2374 * @bytes: data length
2375 *
2376 * This function is used to emulate the GTT MMIO register write
2377 *
2378 * Returns:
2379 * Zero on success, error code if failed.
2380 */
2381int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2382		unsigned int off, void *p_data, unsigned int bytes)
2383{
2384	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2385	int ret;
2386	struct intel_vgpu_submission *s = &vgpu->submission;
2387	struct intel_engine_cs *engine;
2388	int i;
2389
2390	if (bytes != 4 && bytes != 8)
2391		return -EINVAL;
2392
2393	off -= info->gtt_start_offset;
2394	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2395
2396	/* If the GGTT entry of the last submitted context is written,
2397	 * that context probably got unpinned.
2398	 * Mark the last shadowed context as invalid.
2399	 */
2400	for_each_engine(engine, vgpu->gvt->gt, i) {
2401		if (!s->last_ctx[i].valid)
2402			continue;
2403
2404		if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
2405			s->last_ctx[i].valid = false;
2406	}
2407	return ret;
2408}
2409
2410static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2411		enum intel_gvt_gtt_type type)
2412{
2413	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
2414	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2415	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2416	int page_entry_num = I915_GTT_PAGE_SIZE >>
2417				vgpu->gvt->device_info.gtt_entry_size_shift;
2418	void *scratch_pt;
2419	int i;
2420	struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2421	dma_addr_t daddr;
2422
2423	if (drm_WARN_ON(&i915->drm,
2424			type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
2425		return -EINVAL;
2426
2427	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
2428	if (!scratch_pt) {
2429		gvt_vgpu_err("fail to allocate scratch page\n");
2430		return -ENOMEM;
2431	}
2432
2433	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL);
2434	if (dma_mapping_error(dev, daddr)) {
2435		gvt_vgpu_err("fail to dmamap scratch_pt\n");
2436		__free_page(virt_to_page(scratch_pt));
2437		return -ENOMEM;
2438	}
2439	gtt->scratch_pt[type].page_mfn =
2440		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2441	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
2442	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2443			vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2444
2445	/* Build the tree by completely filling the scratch pt with entries
2446	 * that point to the next level scratch pt or scratch page. The
2447	 * scratch_pt[type] indicates the scratch pt/scratch page used by a
2448	 * page table of the given 'type'.
2449	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
2450	 * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
2451	 * is of type GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
2452	 */
2453	if (type > GTT_TYPE_PPGTT_PTE_PT) {
2454		struct intel_gvt_gtt_entry se;
2455
2456		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
2457		se.type = get_entry_type(type - 1);
2458		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2459
2460		/* The entry parameters like present/writeable/cache type are
2461		 * set to the same values as in i915's scratch page tree.
2462		 */
2463		se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
2464		if (type == GTT_TYPE_PPGTT_PDE_PT)
2465			se.val64 |= PPAT_CACHED;
2466
2467		for (i = 0; i < page_entry_num; i++)
2468			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
2469	}
2470
2471	return 0;
2472}
2473
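/*
 * Scratch hierarchy sketch (illustrative): after create_scratch_page_tree()
 * below runs, every scratch table of a given level is fully populated with
 * entries pointing at the next lower level's scratch table, bottoming out at
 * scratch_pt[GTT_TYPE_PPGTT_PTE_PT], which is simply a zeroed page. A shadow
 * entry removed from a table of type T is redirected to scratch_pt[T], so a
 * stray guest walk always ends on harmless scratch memory.
 */
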
2474static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2475{
2476	int i;
2477	struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2478	dma_addr_t daddr;
2479
2480	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2481		if (vgpu->gtt.scratch_pt[i].page != NULL) {
2482			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2483					I915_GTT_PAGE_SHIFT);
2484			dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2485			__free_page(vgpu->gtt.scratch_pt[i].page);
2486			vgpu->gtt.scratch_pt[i].page = NULL;
2487			vgpu->gtt.scratch_pt[i].page_mfn = 0;
2488		}
2489	}
2490
2491	return 0;
2492}
2493
2494static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2495{
2496	int i, ret;
2497
2498	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2499		ret = alloc_scratch_pages(vgpu, i);
2500		if (ret)
2501			goto err;
2502	}
2503
2504	return 0;
2505
2506err:
2507	release_scratch_page_tree(vgpu);
2508	return ret;
2509}
2510
2511/**
2512 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2513 * @vgpu: a vGPU
2514 *
2515 * This function is used to initialize per-vGPU graphics memory virtualization
2516 * components.
2517 *
2518 * Returns:
2519 * Zero on success, error code if failed.
2520 */
2521int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2522{
2523	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2524
2525	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
2526
2527	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
2528	INIT_LIST_HEAD(&gtt->oos_page_list_head);
2529	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2530
2531	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2532	if (IS_ERR(gtt->ggtt_mm)) {
2533		gvt_vgpu_err("fail to create mm for ggtt.\n");
2534		return PTR_ERR(gtt->ggtt_mm);
2535	}
2536
2537	intel_vgpu_reset_ggtt(vgpu, false);
2538
2539	INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
2540
2541	return create_scratch_page_tree(vgpu);
2542}
2543
2544void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2545{
2546	struct list_head *pos, *n;
2547	struct intel_vgpu_mm *mm;
2548
2549	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2550		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2551		intel_vgpu_destroy_mm(mm);
2552	}
2553
2554	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2555		gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2556
2557	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2558		gvt_err("Why do we still have SPTs not freed?\n");
2559		ppgtt_free_all_spt(vgpu);
2560	}
2561}
2562
2563static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2564{
2565	struct intel_gvt_partial_pte *pos, *next;
2566
2567	list_for_each_entry_safe(pos, next,
2568				 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2569				 list) {
2570		gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2571			pos->offset, pos->data);
2572		kfree(pos);
2573	}
2574	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2575	vgpu->gtt.ggtt_mm = NULL;
2576}
2577
2578/**
2579 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2580 * @vgpu: a vGPU
2581 *
2582 * This function is used to clean up per-vGPU graphics memory virtualization
2583 * components.
2587 */
2588void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2589{
2590	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2591	intel_vgpu_destroy_ggtt_mm(vgpu);
2592	release_scratch_page_tree(vgpu);
2593}
2594
2595static void clean_spt_oos(struct intel_gvt *gvt)
2596{
2597	struct intel_gvt_gtt *gtt = &gvt->gtt;
2598	struct list_head *pos, *n;
2599	struct intel_vgpu_oos_page *oos_page;
2600
2601	WARN(!list_empty(&gtt->oos_page_use_list_head),
2602		"someone is still using oos page\n");
2603
2604	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2605		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2606		list_del(&oos_page->list);
2607		free_page((unsigned long)oos_page->mem);
2608		kfree(oos_page);
2609	}
2610}
2611
2612static int setup_spt_oos(struct intel_gvt *gvt)
2613{
2614	struct intel_gvt_gtt *gtt = &gvt->gtt;
2615	struct intel_vgpu_oos_page *oos_page;
2616	int i;
2617	int ret;
2618
2619	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2620	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2621
2622	for (i = 0; i < preallocated_oos_pages; i++) {
2623		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2624		if (!oos_page) {
2625			ret = -ENOMEM;
2626			goto fail;
2627		}
2628		oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
2629		if (!oos_page->mem) {
2630			ret = -ENOMEM;
2631			kfree(oos_page);
2632			goto fail;
2633		}
2634
2635		INIT_LIST_HEAD(&oos_page->list);
2636		INIT_LIST_HEAD(&oos_page->vm_list);
2637		oos_page->id = i;
2638		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2639	}
2640
2641	gvt_dbg_mm("%d oos pages preallocated\n", i);
2642
2643	return 0;
2644fail:
2645	clean_spt_oos(gvt);
2646	return ret;
2647}
2648
2649/**
2650 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2651 * @vgpu: a vGPU
2652 * @pdps: pdp root array
2653 *
2654 * This function is used to find a PPGTT mm object from mm object pool
2655 *
2656 * Returns:
2657 * pointer to mm object on success, NULL if failed.
2658 */
2659struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2660		u64 pdps[])
2661{
2662	struct intel_vgpu_mm *mm;
2663	struct list_head *pos;
2664
2665	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2666		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2667
2668		switch (mm->ppgtt_mm.root_entry_type) {
2669		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2670			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
2671				return mm;
2672			break;
2673		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2674			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
2675				    sizeof(mm->ppgtt_mm.guest_pdps)))
2676				return mm;
2677			break;
2678		default:
2679			GEM_BUG_ON(1);
2680		}
2681	}
2682	return NULL;
2683}
2684
2685/**
2686 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
2687 * @vgpu: a vGPU
2688 * @root_entry_type: ppgtt root entry type
2689 * @pdps: guest pdps
2690 *
2691 * This function is used to find or create a PPGTT mm object from a guest.
2692 *
2693 * Returns:
2694 * Pointer to the mm object on success, an ERR_PTR-encoded error code if failed.
2695 */
2696struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2697		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
2698{
2699	struct intel_vgpu_mm *mm;
2700
2701	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2702	if (mm) {
2703		intel_vgpu_mm_get(mm);
2704	} else {
2705		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2706		if (IS_ERR(mm))
2707			gvt_vgpu_err("fail to create mm\n");
2708	}
2709	return mm;
2710}
2711
2712/**
2713 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
2714 * @vgpu: a vGPU
2715 * @pdps: guest pdps
2716 *
2717 * This function is used to find the PPGTT mm object for a guest and drop a reference to it.
2718 *
2719 * Returns:
2720 * Zero on success, negative error code if failed.
2721 */
2722int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2723{
2724	struct intel_vgpu_mm *mm;
2725
2726	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2727	if (!mm) {
2728		gvt_vgpu_err("fail to find ppgtt instance.\n");
2729		return -EINVAL;
2730	}
2731	intel_vgpu_mm_put(mm);
2732	return 0;
2733}
2734
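/*
 * Lifecycle sketch (illustrative): a guest PPGTT notification typically pairs
 * the two helpers above. The root entry type is assumed to be L4 and the pdp
 * array comes from the guest; the wrapper itself is hypothetical.
 */
static inline int example_guest_ppgtt_lifecycle(struct intel_vgpu *vgpu,
						u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	/* guest advertises a new address space: find it or shadow it */
	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	/* ... translate GMAs, pin around submissions, etc. ... */

	/* guest tears the address space down: drop the reference */
	return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
}
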
2735/**
2736 * intel_gvt_init_gtt - initialize mm components of a GVT device
2737 * @gvt: GVT device
2738 *
2739 * This function is called at the initialization stage, to initialize
2740 * the mm components of a GVT device.
2741 *
2742 * Returns:
2743 * zero on success, negative error code if failed.
2744 */
2745int intel_gvt_init_gtt(struct intel_gvt *gvt)
2746{
2747	int ret;
2748	void *page;
2749	struct device *dev = gvt->gt->i915->drm.dev;
2750	dma_addr_t daddr;
2751
2752	gvt_dbg_core("init gtt\n");
2753
2754	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2755	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2756
2757	page = (void *)get_zeroed_page(GFP_KERNEL);
2758	if (!page) {
2759		gvt_err("fail to allocate scratch ggtt page\n");
2760		return -ENOMEM;
2761	}
2762
2763	daddr = dma_map_page(dev, virt_to_page(page), 0,
2764			4096, DMA_BIDIRECTIONAL);
2765	if (dma_mapping_error(dev, daddr)) {
2766		gvt_err("fail to dmamap scratch ggtt page\n");
2767		__free_page(virt_to_page(page));
2768		return -ENOMEM;
2769	}
2770
2771	gvt->gtt.scratch_page = virt_to_page(page);
2772	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2773
2774	if (enable_out_of_sync) {
2775		ret = setup_spt_oos(gvt);
2776		if (ret) {
2777			gvt_err("fail to initialize SPT oos\n");
2778			dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2779			__free_page(gvt->gtt.scratch_page);
2780			return ret;
2781		}
2782	}
2783	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2784	mutex_init(&gvt->gtt.ppgtt_mm_lock);
2785	return 0;
2786}
2787
2788/**
2789 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2790 * @gvt: GVT device
2791 *
2792 * This function is called at the driver unloading stage, to clean up
2793 * the mm components of a GVT device.
2794 *
2795 */
2796void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2797{
2798	struct device *dev = gvt->gt->i915->drm.dev;
2799	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
2800					I915_GTT_PAGE_SHIFT);
2801
2802	dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2803
2804	__free_page(gvt->gtt.scratch_page);
2805
2806	if (enable_out_of_sync)
2807		clean_spt_oos(gvt);
2808}
2809
2810/**
2811 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
2812 * @vgpu: a vGPU
2813 *
2814 * This function is called to invalidate all PPGTT instances of a vGPU.
2815 *
2816 */
2817void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2818{
2819	struct list_head *pos, *n;
2820	struct intel_vgpu_mm *mm;
2821
2822	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2823		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2824		if (mm->type == INTEL_GVT_MM_PPGTT) {
2825			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2826			list_del_init(&mm->ppgtt_mm.lru_list);
2827			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2828			if (mm->ppgtt_mm.shadowed)
2829				invalidate_ppgtt_mm(mm);
2830		}
2831	}
2832}
2833
2834/**
2835 * intel_vgpu_reset_ggtt - reset the GGTT entries
2836 * @vgpu: a vGPU
2837 * @invalidate_old: invalidate old entries
2838 *
2839 * This function is called at the vGPU creation and reset stages
2840 * to reset all the GGTT entries.
2841 *
2842 */
2843void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2844{
2845	struct intel_gvt *gvt = vgpu->gvt;
2846	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2847	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
2848	struct intel_gvt_gtt_entry old_entry;
2849	u32 index;
2850	u32 num_entries;
2851
2852	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2853	pte_ops->set_present(&entry);
2854
2855	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2856	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2857	while (num_entries--) {
2858		if (invalidate_old) {
2859			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2860			ggtt_invalidate_pte(vgpu, &old_entry);
2861		}
2862		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2863	}
2864
2865	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2866	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2867	while (num_entries--) {
2868		if (invalidate_old) {
2869			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2870			ggtt_invalidate_pte(vgpu, &old_entry);
2871		}
2872		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2873	}
2874
2875	ggtt_invalidate(gvt->gt);
2876}
2877
2878/**
2879 * intel_vgpu_reset_gtt - reset all GTT-related state
2880 * @vgpu: a vGPU
2881 *
2882 * This function is called from the vfio core to reset all
2883 * GTT-related state, including GGTT, PPGTT and scratch pages.
2884 *
2885 */
2886void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
2887{
2888	/* Shadow pages are only created when there is no page
2889	 * table tracking data, so remove page tracking data after
2890	 * removing the shadow pages.
2891	 */
2892	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2893	intel_vgpu_reset_ggtt(vgpu, true);
2894}
2895
2896/**
2897 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
2898 * @gvt: intel gvt device
2899 *
2900 * This function is called at the driver resume stage to restore
2901 * GGTT entries of every vGPU.
2902 *
2903 */
2904void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
2905{
2906	struct intel_vgpu *vgpu;
2907	struct intel_vgpu_mm *mm;
2908	int id;
2909	gen8_pte_t pte;
2910	u32 idx, num_low, num_hi, offset;
2911
2912	/* Restore dirty host ggtt for all vGPUs */
2913	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
2914		mm = vgpu->gtt.ggtt_mm;
2915
2916		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2917		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2918		for (idx = 0; idx < num_low; idx++) {
2919			pte = mm->ggtt_mm.host_ggtt_aperture[idx];
2920			if (pte & GEN8_PAGE_PRESENT)
2921				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2922		}
2923
2924		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2925		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2926		for (idx = 0; idx < num_hi; idx++) {
2927			pte = mm->ggtt_mm.host_ggtt_hidden[idx];
2928			if (pte & GEN8_PAGE_PRESENT)
2929				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2930		}
2931	}
2932}