// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gem/i915_gem_lmem.h"

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}
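
/*
 * Worked example of the encoding above: a writable, LLC-cached mapping of
 * a page at DMA address 0x1234000 becomes 0x1234000 | _PAGE_PRESENT |
 * _PAGE_RW | PPAT_CACHED; passing PTE_READ_ONLY in flags clears _PAGE_RW
 * again, and PTE_LM tags the entry as pointing at local memory.
 */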

static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
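
/*
 * Example of the arithmetic above: GEN8_PDES = 4096 / 8 = 512 entries per
 * level, so gen8_pd_shift(1) = 9 and __gen8_pte_shift(1) = 12 + 9 = 21;
 * __gen8_pte_index(addr, 1) thus selects bits [29:21] of an address, i.e.
 * which PDE covers the 2M region containing addr.
 */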

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

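/*
 * Number of entries spanned by [start, end) at level @lvl, clamped to the
 * remainder of the directory holding @start; callers iterate, moving on to
 * the next directory, until the range is exhausted.
 */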
static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

static unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

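/*
 * Entries in the top-level directory, e.g. a 4-level vm with
 * vm->total = 1ull << 48 gives shift = 12 + 3 * 9 = 39 and hence 512
 * entries, while a 3-level vm covering a 32b address space yields 4.
 */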
static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);

	return (vm->total + (1ull << shift) - 1) >> shift;
}

static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

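/*
 * Recursively free a page-table subtree: at each interior level, walk the
 * populated entries and descend before releasing the directory itself via
 * free_px(); @count bounds the walk at the (possibly partial) top level.
 */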
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

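/*
 * Unwind the mappings in [start, end), expressed in page indices (callers
 * shift byte addresses down by GEN8_PTE_SHIFT first). Subtrees that fall
 * entirely inside the range are unplugged and freed wholesale; partially
 * covered page tables have the affected PTEs rewritten to point at
 * scratch. Returns the updated start index.
 */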
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = px_vaddr(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0]->encode,
				 count);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

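/*
 * Populate the tree for [*start, end): missing directories and tables are
 * taken from the pre-allocated @stash (so this path cannot fail),
 * initialised to scratch and plugged in under pd->lock, which is dropped
 * around the recursion into the next level.
 */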
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);
			i915_gem_object_make_unshrinkable(pt->base);

			fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}

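/*
 * Write 4K PTEs for @iter starting at page index @idx, following the sg
 * chain until it is exhausted (returning 0) or the walk crosses out of
 * @pdp, in which case the next index is returned so the caller can
 * continue with the following pdp.
 */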
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + sg_dma_len(iter->sg);
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			clflush_cache_range(vaddr, PAGE_SIZE);
			vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	clflush_cache_range(vaddr, PAGE_SIZE);

	return idx;
}

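/*
 * As gen8_ppgtt_insert_pte(), but opportunistically using larger pages:
 * a 2M PDE when the DMA chunk is suitably sized and aligned, otherwise 4K
 * PTEs with the page table retroactively marked as 64K (GEN8_PDE_IPS_64K)
 * once the whole table is known to be filled, or padded out by scratch.
 */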
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma->node.start;

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		clflush_cache_range(vaddr, PAGE_SIZE);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or filled part
		 * of it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = px_vaddr(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0]->encode;
				vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

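/*
 * Build the scratch tree: a scratch page at level 0 and, at each level
 * above it, a directory whose every entry points at the scratch object one
 * level below, so that unbound addresses decode harmlessly. If the scratch
 * page is never written to, the tree already hanging off vm->gt->vm can
 * simply be shared.
 */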
static int gen8_init_scratch(struct i915_address_space *vm)
{
	u32 pte_flags;
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	pte_flags = vm->has_read_only;
	if (i915_gem_object_is_lmem(vm->scratch[0]))
		pte_flags |= PTE_LM;

	vm->scratch[0]->encode =
		gen8_pte_encode(px_dma(vm->scratch[0]),
				I915_CACHE_LLC, pte_flags);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto free_scratch;
		}

		ret = map_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	return ret;
}

static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = map_pt_dma(vm, pde->pt.base);
		if (err) {
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

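/*
 * The top level may be smaller than a full directory (see
 * gen8_pd_top_count()), so only @count entries are allocated and filled
 * with scratch; the extra pin keeps the directory alive for the lifetime
 * of the vm.
 */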
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = map_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal
 * x86 terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB
 * of legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);

	if (HAS_LMEM(gt->i915))
		ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
	else
		ppgtt->vm.alloc_pt_dma = alloc_pt_dma;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}