// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gem/i915_gem_lmem.h"

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

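/*
 * Encode a page-directory entry: point it at @addr and mark it present
 * and writable, with the PPAT bits chosen from the cache level.
 */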
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

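/*
 * Encode a page-table entry for @addr: PTE_READ_ONLY clears the RW bit,
 * PTE_LM targets device local memory on gen12+, and the cache level
 * selects the PPAT (uncached, write-through for display, or cached).
 */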
static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~GEN8_PAGE_RW;

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

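/*
 * Tell a paravirtualised (vGPU) host about the creation or destruction
 * of this ppgtt: write the page-directory address(es) into the shared
 * vgtif page, then kick the g2v_notify doorbell.
 */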
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
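/*
 * Worked example (4-level walk, 48b address): each level indexes 512
 * (GEN8_PDES) entries with 9 bits, so __gen8_pte_shift(0..3) is
 * 12/21/30/39 and __gen8_pte_index() extracts bits [20:12], [29:21],
 * [38:30] and [47:39] of a GPU virtual address respectively.
 */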

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

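/*
 * Return the number of entries in @pd at @lvl spanned by [start, end)
 * (in page units), with the first index in *@idx. The count is clamped
 * at the directory boundary; callers iterate to continue in the next
 * directory.
 */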
static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

static unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);

	return (vm->total + (1ull << shift) - 1) >> shift;
}

static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

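/*
 * Recursively free a page-directory tree: walk the @count populated
 * entries at @lvl, freeing each child before the directory itself.
 */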
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (ppgtt->pd)
		__gen8_ppgtt_cleanup(vm, ppgtt->pd,
				     gen8_pd_top_count(vm), vm->top);

	free_scratch(vm);
}

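/*
 * Unbind [start, end) (in page units) from the tree rooted at @pd:
 * fully-used subtrees wholly contained in the range are unplugged
 * and freed outright, otherwise the walk recurses and the leaf PTEs
 * are pointed back at the scratch page. Returns the page index at
 * which this level's walk stopped.
 */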
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			unsigned int pte = gen8_pd_index(start, 0);
			unsigned int num_ptes;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			num_ptes = count;
			if (pt->is_compact) {
				GEM_BUG_ON(num_ptes % 16);
				GEM_BUG_ON(pte % 16);
				num_ptes /= 16;
				pte /= 16;
			}

			vaddr = px_vaddr(pt);
			memset64(vaddr + pte,
				 vm->scratch[0]->encode,
				 num_ptes);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

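/*
 * Populate the tree for [start, end) (in page units), drawing
 * preallocated page tables from @stash rather than allocating here,
 * so this path itself cannot fail. @start is advanced as leaves are
 * accounted, letting the recursion resume across directory boundaries.
 */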
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);

			fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}

static void __gen8_ppgtt_foreach(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 u64 *start, u64 end, int lvl,
				 void (*fn)(struct i915_address_space *vm,
					    struct i915_page_table *pt,
					    void *data),
				 void *data)
{
	unsigned int idx, len;

	len = gen8_pd_range(*start, end, lvl--, &idx);

	spin_lock(&pd->lock);
	do {
		struct i915_page_table *pt = pd->entry[idx];

		atomic_inc(&pt->used);
		spin_unlock(&pd->lock);

		if (lvl) {
			__gen8_ppgtt_foreach(vm, as_pd(pt), start, end, lvl,
					     fn, data);
		} else {
			fn(vm, pt, data);
			*start += gen8_pt_count(*start, end);
		}

		spin_lock(&pd->lock);
		atomic_dec(&pt->used);
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_foreach(struct i915_address_space *vm,
			       u64 start, u64 length,
			       void (*fn)(struct i915_address_space *vm,
					  struct i915_page_table *pt,
					  void *data),
			       void *data)
{
	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;

	__gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd,
			     &start, start + length, vm->top,
			     fn, data);
}

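/*
 * Write 4K PTEs for the pages of @iter starting at page index @idx,
 * flushing each completed page table. Returns 0 once the scatterlist
 * is exhausted, or the next index when the walk crosses into the next
 * PDP and the caller must look up the next page directory.
 */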
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + sg_dma_len(iter->sg);
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	drm_clflush_virt_range(vaddr, PAGE_SIZE);

	return idx;
}

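/*
 * Insert mappings on platforms with 64K GTT pages (e.g. XeHP SDV),
 * picking the largest encoding each segment allows: 2M PDEs, the
 * compact 64K page-table layout for local memory, the PS64 TLB hint
 * for 64K-aligned chunks, or plain 4K PTEs as the fallback.
 */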
static void
xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
			  struct i915_vma_resource *vma_res,
			  struct sgt_dma *iter,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	const gen8_pte_t pte_encode = vm->pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma_res->start;
	u64 end = start + vma_res->vma_size;

	GEM_BUG_ON(!i915_vm_is_4lvl(vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		struct i915_page_table *pt =
			i915_pt_entry(pd, __gen8_pte_index(start, 1));
		gen8_pte_t encode = pte_encode;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index, max, nent, i;

		max = I915_PDES;
		nent = 1;

		if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				/*
				 * Device local-memory on these platforms should
				 * always use 64K pages or larger (including GTT
				 * alignment), therefore if we know the whole
				 * page-table needs to be filled we can always
				 * safely use the compact-layout. Otherwise fall
				 * back to the TLB hint with PS64. If this is
				 * system memory we only bother with PS64.
				 */
				if ((encode & GEN12_PPGTT_PTE_LM) &&
				    end - start >= SZ_2M && !index) {
					index = __gen8_pte_index(start, 0) / 16;
					page_size = I915_GTT_PAGE_SIZE_64K;

					max /= 16;

					vaddr = px_vaddr(pd);
					vaddr[__gen8_pte_index(start, 1)] |= GEN12_PDE_64K;

					pt->is_compact = true;
				} else if (IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
					   rem >= I915_GTT_PAGE_SIZE_64K &&
					   !(index % 16)) {
					encode |= GEN12_PTE_PS64;
					page_size = I915_GTT_PAGE_SIZE_64K;
					nent = 16;
				}
			}

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(rem < page_size);

			for (i = 0; i < nent; i++) {
				vaddr[index++] =
					encode | (iter->dma + i *
						  I915_GTT_PAGE_SIZE);
			}

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < max);

		vma_res->page_sizes_gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

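/*
 * As above, but for pre-XeHP hardware: use 2M PDEs where the segment
 * allows, and otherwise write 4K PTEs while tracking whether the whole
 * 2M block may retroactively be marked as 64K (GEN8_PDE_IPS_64K).
 */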
static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma_res->start;

	GEM_BUG_ON(!i915_vm_is_4lvl(vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		drm_clflush_virt_range(vaddr, PAGE_SIZE);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or filled part
		 * of it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vm) &&
		      !iter->sg && IS_ALIGNED(vma_res->start +
					      vma_res->node_size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = px_vaddr(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vm->scrub_64K)) {
				u16 i;

				encode = vm->scratch[0]->encode;
				vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				drm_clflush_virt_range(vaddr, PAGE_SIZE);
			}
		}

		vma_res->page_sizes_gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma_resource *vma_res,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma_res);

	if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
		if (HAS_64K_PAGES(vm->i915))
			xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
		else
			gen8_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
	} else {
		u64 idx = vma_res->start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
	}
}

static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
				    dma_addr_t addr,
				    u64 offset,
				    enum i915_cache_level level,
				    u32 flags)
{
	u64 idx = offset >> GEN8_PTE_SHIFT;
	struct i915_page_directory * const pdp =
		gen8_pdp_for_page_index(vm, idx);
	struct i915_page_directory *pd =
		i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
	gen8_pte_t *vaddr;

	GEM_BUG_ON(pt->is_compact);

	vaddr = px_vaddr(pt);
	vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
	drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}

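/*
 * Single-page insert into a compact (64K) page table: local-memory
 * pages must be 64K aligned, and one compact PTE covers what sixteen
 * 4K entries would, hence the index is divided by 16.
 */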
static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
					    dma_addr_t addr,
					    u64 offset,
					    enum i915_cache_level level,
					    u32 flags)
{
	u64 idx = offset >> GEN8_PTE_SHIFT;
	struct i915_page_directory * const pdp =
		gen8_pdp_for_page_index(vm, idx);
	struct i915_page_directory *pd =
		i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
	gen8_pte_t *vaddr;

	GEM_BUG_ON(!IS_ALIGNED(addr, SZ_64K));
	GEM_BUG_ON(!IS_ALIGNED(offset, SZ_64K));

	/* XXX: we don't strictly need to use this layout */

	if (!pt->is_compact) {
		vaddr = px_vaddr(pd);
		vaddr[gen8_pd_index(idx, 1)] |= GEN12_PDE_64K;
		pt->is_compact = true;
	}

	vaddr = px_vaddr(pt);
	vaddr[gen8_pd_index(idx, 0) / 16] = gen8_pte_encode(addr, level, flags);
}

static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
				       dma_addr_t addr,
				       u64 offset,
				       enum i915_cache_level level,
				       u32 flags)
{
	if (flags & PTE_LM)
		return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
						       level, flags);

	return gen8_ppgtt_insert_entry(vm, addr, offset, level, flags);
}

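/*
 * Build the scratch-page hierarchy: one filler page at level 0
 * (read-only where supported) plus one page directory per level above
 * it, each entry pointing at the level below, so that unmapped
 * addresses resolve harmlessly.
 */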
static int gen8_init_scratch(struct i915_address_space *vm)
{
	u32 pte_flags;
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	pte_flags = vm->has_read_only;
	if (i915_gem_object_is_lmem(vm->scratch[0]))
		pte_flags |= PTE_LM;

	vm->scratch[0]->encode =
		gen8_pte_encode(px_dma(vm->scratch[0]),
				I915_CACHE_NONE, pte_flags);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto free_scratch;
		}

		ret = map_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	vm->scratch[0] = NULL;
	return ret;
}

static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = map_pt_dma(vm, pde->pt.base);
		if (err) {
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

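/*
 * Allocate the top-level page directory, sized by gen8_pd_top_count()
 * (512 entries for 4-level, 4 PDPEs for 3-level), fill it with the
 * top scratch encoding and pin it for the lifetime of the vm.
 */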
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = map_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal
 * x86 terms. Each PDP represents 1GB of memory:
 * 4 PDPs * 512 PDEs * 512 PTEs * 4096 bytes = 4GB legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags)
{
	struct i915_page_directory *pd;
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt, lmem_pt_obj_flags);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);

	if (HAS_LMEM(gt->i915))
		ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
	else
		ppgtt->vm.alloc_pt_dma = alloc_pt_dma;

	/*
	 * Using SMEM here instead of LMEM has the advantage of not reserving
	 * high performance memory for a "never" used filler page. It also
	 * removes the device access that would be required to initialise the
	 * scratch page, reducing pressure on an even scarcer resource.
	 */
	ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	if (HAS_64K_PAGES(gt->i915))
		ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry;
	else
		ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;
	ppgtt->vm.foreach = gen8_ppgtt_foreach;
	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_put;

	pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(pd)) {
		err = PTR_ERR(pd);
		goto err_put;
	}
	ppgtt->pd = pd;

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_put;
	}

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	return ppgtt;

err_put:
	i915_vm_put(&ppgtt->vm);
	return ERR_PTR(err);
}