v5.14.15
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2020 Intel Corporation
  4 */
  5
  6#include <linux/slab.h> /* fault-inject.h is not standalone! */
  7
  8#include <linux/fault-inject.h>
  9
 10#include "gem/i915_gem_lmem.h"
 11#include "i915_trace.h"
 12#include "intel_gt.h"
 13#include "intel_gtt.h"
 14
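/* Allocate a GEM object in device local memory (lmem) of @sz bytes to back a paging structure for @vm. */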
 15struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
 16{
 17	struct drm_i915_gem_object *obj;
 18
 19	obj = i915_gem_object_create_lmem(vm->i915, sz, 0);
 20	/*
 21	 * Ensure all paging structures for this vm share the same dma-resv
 22	 * object underneath, with the idea that one object_lock() will lock
 23	 * them all at once.
 24	 */
 25	if (!IS_ERR(obj)) {
 26		obj->base.resv = i915_vm_resv_get(vm);
 27		obj->shares_resv_from = vm;
 28	}
 29
 30	return obj;
 31}
 32
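/*
 * Allocate a paging-structure backing object from internal (system) memory.
 * Under selftests, fault injection may force a full shrink before the
 * allocation is attempted.
 */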
 33struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
 34{
 35	struct drm_i915_gem_object *obj;
 36
 37	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 38		i915_gem_shrink_all(vm->i915);
 39
 40	obj = i915_gem_object_create_internal(vm->i915, sz);
 41	/*
 42	 * Ensure all paging structures for this vm share the same dma-resv
 43	 * object underneath, with the idea that one object_lock() will lock
 44	 * them all at once.
 45	 */
 46	if (!IS_ERR(obj)) {
 47		obj->base.resv = i915_vm_resv_get(vm);
 48		obj->shares_resv_from = vm;
 49	}
 50
 51	return obj;
 52}
 53
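/*
 * Pin and CPU-map a paging-structure object with a coherent mapping type,
 * without the caller holding the object lock, and mark it unshrinkable.
 */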
 54int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
 55{
 56	enum i915_map_type type;
 57	void *vaddr;
 58
 59	type = i915_coherent_map_type(vm->i915, obj, true);
 60	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
 61	if (IS_ERR(vaddr))
 62		return PTR_ERR(vaddr);
 63
 64	i915_gem_object_make_unshrinkable(obj);
 65	return 0;
 66}
 67
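/* As map_pt_dma(), but for callers that already hold the object lock. */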
 68int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
 69{
 70	enum i915_map_type type;
 71	void *vaddr;
 72
 73	type = i915_coherent_map_type(vm->i915, obj, true);
 74	vaddr = i915_gem_object_pin_map(obj, type);
 75	if (IS_ERR(vaddr))
 76		return PTR_ERR(vaddr);
 77
 78	i915_gem_object_make_unshrinkable(obj);
 79	return 0;
 80}
 81
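/*
 * Called when the last "open" reference to the address space is dropped:
 * take vm->mutex and forcibly unbind every vma still on the bound list.
 */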
 82void __i915_vm_close(struct i915_address_space *vm)
 83{
 84	struct i915_vma *vma, *vn;
 85
 86	if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
 87		return;
 88
 89	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
 90		struct drm_i915_gem_object *obj = vma->obj;
 91
 92		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
 93		if (!kref_get_unless_zero(&obj->base.refcount))
 94			continue;
 95
 96		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
 97		WARN_ON(__i915_vma_unbind(vma));
 98		__i915_vma_put(vma);
 99
100		i915_gem_object_put(obj);
101	}
102	GEM_BUG_ON(!list_empty(&vm->bound_list));
103
104	mutex_unlock(&vm->mutex);
105}
106
107/* lock the vm into the current ww, if we lock one, we lock all */
108int i915_vm_lock_objects(struct i915_address_space *vm,
109			 struct i915_gem_ww_ctx *ww)
110{
111	if (vm->scratch[0]->base.resv == &vm->_resv) {
112		return i915_gem_object_lock(vm->scratch[0], ww);
113	} else {
114		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
115
116		/* We borrowed the scratch page from ggtt, take the top level object */
117		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
118	}
119}
120
121void i915_address_space_fini(struct i915_address_space *vm)
122{
123	drm_mm_takedown(&vm->mm);
124	mutex_destroy(&vm->mutex);
125}
126
127/**
128 * i915_vm_resv_release - Final struct i915_address_space destructor
129 * @kref: Pointer to the &i915_address_space.resv_ref member.
130 *
131 * This function is called when the last lock sharer no longer shares the
132 * &i915_address_space._resv lock.
133 */
134void i915_vm_resv_release(struct kref *kref)
135{
136	struct i915_address_space *vm =
137		container_of(kref, typeof(*vm), resv_ref);
138
139	dma_resv_fini(&vm->_resv);
140	kfree(vm);
141}
142
143static void __i915_vm_release(struct work_struct *work)
144{
145	struct i915_address_space *vm =
146		container_of(work, struct i915_address_space, rcu.work);
147
148	vm->cleanup(vm);
149	i915_address_space_fini(vm);
150
151	i915_vm_resv_put(vm);
152}
153
154void i915_vm_release(struct kref *kref)
155{
156	struct i915_address_space *vm =
157		container_of(kref, struct i915_address_space, ref);
158
159	GEM_BUG_ON(i915_is_ggtt(vm));
160	trace_i915_ppgtt_release(vm);
161
162	queue_rcu_work(vm->i915->wq, &vm->rcu);
163}
164
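/*
 * One-time initialisation common to GGTT and ppGTT address spaces:
 * reference counts, the reclaim-safe vm->mutex, the dma-resv shared by the
 * vm's paging structures, and the drm_mm range manager.
 */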
165void i915_address_space_init(struct i915_address_space *vm, int subclass)
166{
167	kref_init(&vm->ref);
168
169	/*
170	 * Special case for GGTT that has already done an early
171	 * kref_init here.
172	 */
173	if (!kref_read(&vm->resv_ref))
174		kref_init(&vm->resv_ref);
175
176	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
177	atomic_set(&vm->open, 1);
178
179	/*
180	 * The vm->mutex must be reclaim safe (for use in the shrinker).
181	 * Do a dummy acquire now under fs_reclaim so that any allocation
182	 * attempt holding the lock is immediately reported by lockdep.
183	 */
184	mutex_init(&vm->mutex);
185	lockdep_set_subclass(&vm->mutex, subclass);
186
187	if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
188		i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
189	} else {
190		/*
 191		 * The CHV + BXT VTD workarounds use stop_machine(),
192		 * which is allowed to allocate memory. This means &vm->mutex
193		 * is the outer lock, and in theory we can allocate memory inside
194		 * it through stop_machine().
195		 *
 196		 * Add the annotation for this; we use trylock in the shrinker.
197		 */
198		mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
199		might_alloc(GFP_KERNEL);
200		mutex_release(&vm->mutex.dep_map, _THIS_IP_);
201	}
202	dma_resv_init(&vm->_resv);
203
204	GEM_BUG_ON(!vm->total);
205	drm_mm_init(&vm->mm, 0, vm->total);
206	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
207
208	INIT_LIST_HEAD(&vm->bound_list);
209}
210
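/*
 * Drop the vma's page scatterlist; it is only freed here if it was built
 * separately from the backing object's own page list. The recorded page
 * sizes are reset as well.
 */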
211void clear_pages(struct i915_vma *vma)
212{
213	GEM_BUG_ON(!vma->pages);
214
215	if (vma->pages != vma->obj->mm.pages) {
216		sg_free_table(vma->pages);
217		kfree(vma->pages);
218	}
219	vma->pages = NULL;
220
221	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
222}
223
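/*
 * The __px_*() helpers below return the CPU vaddr, DMA address and struct
 * page of a paging-structure object; the object's pages must be present.
 */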
224void *__px_vaddr(struct drm_i915_gem_object *p)
225{
226	enum i915_map_type type;
227
228	GEM_BUG_ON(!i915_gem_object_has_pages(p));
229	return page_unpack_bits(p->mm.mapping, &type);
230}
231
232dma_addr_t __px_dma(struct drm_i915_gem_object *p)
233{
234	GEM_BUG_ON(!i915_gem_object_has_pages(p));
235	return sg_dma_address(p->mm.pages->sgl);
236}
237
238struct page *__px_page(struct drm_i915_gem_object *p)
239{
240	GEM_BUG_ON(!i915_gem_object_has_pages(p));
241	return sg_page(p->mm.pages->sgl);
242}
243
244void
245fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
246{
247	void *vaddr = __px_vaddr(p);
248
249	memset64(vaddr, val, count);
250	clflush_cache_range(vaddr, PAGE_SIZE);
251}
252
253static void poison_scratch_page(struct drm_i915_gem_object *scratch)
254{
255	void *vaddr = __px_vaddr(scratch);
256	u8 val;
257
258	val = 0;
259	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
260		val = POISON_FREE;
261
262	memset(vaddr, val, scratch->base.size);
263}
264
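/*
 * Allocate, CPU-map and initialise (poisoning on debug builds) the scratch
 * page for this vm, preferring a 64K page when the platform supports 64K
 * GTT pages; see the comment below.
 */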
265int setup_scratch_page(struct i915_address_space *vm)
266{
267	unsigned long size;
268
269	/*
270	 * In order to utilize 64K pages for an object with a size < 2M, we will
271	 * need to support a 64K scratch page, given that every 16th entry for a
272	 * page-table operating in 64K mode must point to a properly aligned 64K
273	 * region, including any PTEs which happen to point to scratch.
274	 *
275	 * This is only relevant for the 48b PPGTT where we support
276	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
277	 * scratch (read-only) between all vm, we create one 64k scratch page
278	 * for all.
279	 */
280	size = I915_GTT_PAGE_SIZE_4K;
281	if (i915_vm_is_4lvl(vm) &&
282	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
283		size = I915_GTT_PAGE_SIZE_64K;
284
285	do {
286		struct drm_i915_gem_object *obj;
287
288		obj = vm->alloc_pt_dma(vm, size);
289		if (IS_ERR(obj))
290			goto skip;
291
292		if (map_pt_dma(vm, obj))
293			goto skip_obj;
294
295		/* We need a single contiguous page for our scratch */
296		if (obj->mm.page_sizes.sg < size)
297			goto skip_obj;
298
299		/* And it needs to be correspondingly aligned */
300		if (__px_dma(obj) & (size - 1))
301			goto skip_obj;
302
303		/*
304		 * Use a non-zero scratch page for debugging.
305		 *
306		 * We want a value that should be reasonably obvious
307		 * to spot in the error state, while also causing a GPU hang
308		 * if executed. We prefer using a clear page in production, so
309		 * should it ever be accidentally used, the effect should be
310		 * fairly benign.
311		 */
312		poison_scratch_page(obj);
313
314		vm->scratch[0] = obj;
315		vm->scratch_order = get_order(size);
316		return 0;
317
318skip_obj:
319		i915_gem_object_put(obj);
320skip:
321		if (size == I915_GTT_PAGE_SIZE_4K)
322			return -ENOMEM;
323
324		size = I915_GTT_PAGE_SIZE_4K;
325	} while (1);
326}
327
328void free_scratch(struct i915_address_space *vm)
329{
330	int i;
331
332	for (i = 0; i <= vm->top; i++)
333		i915_gem_object_put(vm->scratch[i]);
334}
335
336void gtt_write_workarounds(struct intel_gt *gt)
337{
338	struct drm_i915_private *i915 = gt->i915;
339	struct intel_uncore *uncore = gt->uncore;
340
341	/*
342	 * This function is for GTT-related workarounds. It is called on driver
343	 * load and after a GPU reset, so you can place workarounds here even if
344	 * they get overwritten by a GPU reset.
345	 */
346	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
347	if (IS_BROADWELL(i915))
348		intel_uncore_write(uncore,
349				   GEN8_L3_LRA_1_GPGPU,
350				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
351	else if (IS_CHERRYVIEW(i915))
352		intel_uncore_write(uncore,
353				   GEN8_L3_LRA_1_GPGPU,
354				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
355	else if (IS_GEN9_LP(i915))
356		intel_uncore_write(uncore,
357				   GEN8_L3_LRA_1_GPGPU,
358				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
359	else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
360		intel_uncore_write(uncore,
361				   GEN8_L3_LRA_1_GPGPU,
362				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
363
364	/*
365	 * To support 64K PTEs we need to first enable the use of the
366	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
367	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
368	 * shouldn't be needed after GEN10.
369	 *
370	 * 64K pages were first introduced from BDW+, although technically they
371	 * only *work* from gen9+. For pre-BDW we instead have the option for
372	 * 32K pages, but we don't currently have any support for it in our
373	 * driver.
374	 */
375	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
376	    GRAPHICS_VER(i915) <= 10)
377		intel_uncore_rmw(uncore,
378				 GEN8_GAMW_ECO_DEV_RW_IA,
379				 0,
380				 GAMW_ECO_ENABLE_64K_IPS_FIELD);
381
382	if (IS_GRAPHICS_VER(i915, 8, 11)) {
383		bool can_use_gtt_cache = true;
384
385		/*
386		 * According to the BSpec if we use 2M/1G pages then we also
387		 * need to disable the GTT cache. At least on BDW we can see
388		 * visual corruption when using 2M pages, and not disabling the
389		 * GTT cache.
390		 */
391		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
392			can_use_gtt_cache = false;
393
394		/* WaGttCachingOffByDefault */
395		intel_uncore_write(uncore,
396				   HSW_GTT_CACHE_EN,
397				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
398		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
399				 intel_uncore_read(uncore,
400						   HSW_GTT_CACHE_EN) == 0);
401	}
402}
403
404static void tgl_setup_private_ppat(struct intel_uncore *uncore)
405{
406	/* TGL doesn't support LLC or AGE settings */
407	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
408	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
409	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
410	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
411	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
412	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
413	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
414	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
415}
416
417static void cnl_setup_private_ppat(struct intel_uncore *uncore)
418{
419	intel_uncore_write(uncore,
420			   GEN10_PAT_INDEX(0),
421			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
422	intel_uncore_write(uncore,
423			   GEN10_PAT_INDEX(1),
424			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
425	intel_uncore_write(uncore,
426			   GEN10_PAT_INDEX(2),
427			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
428	intel_uncore_write(uncore,
429			   GEN10_PAT_INDEX(3),
430			   GEN8_PPAT_UC);
431	intel_uncore_write(uncore,
432			   GEN10_PAT_INDEX(4),
433			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
434	intel_uncore_write(uncore,
435			   GEN10_PAT_INDEX(5),
436			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
437	intel_uncore_write(uncore,
438			   GEN10_PAT_INDEX(6),
439			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
440	intel_uncore_write(uncore,
441			   GEN10_PAT_INDEX(7),
442			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
443}
444
445/*
446 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
447 * bits. When using advanced contexts each context stores its own PAT, but
448 * writing this data shouldn't be harmful even in those cases.
449 */
450static void bdw_setup_private_ppat(struct intel_uncore *uncore)
451{
452	struct drm_i915_private *i915 = uncore->i915;
453	u64 pat;
454
455	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
456	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
457	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
458	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
459	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
460	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
461	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
462
463	/* for scanout with eLLC */
464	if (GRAPHICS_VER(i915) >= 9)
465		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
466	else
467		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
468
469	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
470	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
471}
472
473static void chv_setup_private_ppat(struct intel_uncore *uncore)
474{
475	u64 pat;
476
477	/*
478	 * Map WB on BDW to snooped on CHV.
479	 *
480	 * Only the snoop bit has meaning for CHV, the rest is
481	 * ignored.
482	 *
483	 * The hardware will never snoop for certain types of accesses:
484	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
485	 * - PPGTT page tables
486	 * - some other special cycles
487	 *
488	 * As with BDW, we also need to consider the following for GT accesses:
489	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
490	 * so RTL will always use the value corresponding to
491	 * pat_sel = 000".
492	 * Which means we must set the snoop bit in PAT entry 0
493	 * in order to keep the global status page working.
494	 */
495
496	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
497	      GEN8_PPAT(1, 0) |
498	      GEN8_PPAT(2, 0) |
499	      GEN8_PPAT(3, 0) |
500	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
501	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
502	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
503	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
504
505	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
506	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
507}
508
509void setup_private_pat(struct intel_uncore *uncore)
510{
511	struct drm_i915_private *i915 = uncore->i915;
512
513	GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
514
515	if (GRAPHICS_VER(i915) >= 12)
516		tgl_setup_private_ppat(uncore);
517	else if (GRAPHICS_VER(i915) >= 10)
518		cnl_setup_private_ppat(uncore);
519	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
520		chv_setup_private_ppat(uncore);
521	else
522		bdw_setup_private_ppat(uncore);
523}
524
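/*
 * Create a cacheable internal object of at least @size bytes and return the
 * corresponding (not yet pinned) vma in @vm.
 */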
525struct i915_vma *
526__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
527{
528	struct drm_i915_gem_object *obj;
529	struct i915_vma *vma;
530
531	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
532	if (IS_ERR(obj))
533		return ERR_CAST(obj);
534
535	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
536
537	vma = i915_vma_instance(obj, vm, NULL);
538	if (IS_ERR(vma)) {
539		i915_gem_object_put(obj);
540		return vma;
541	}
542
543	return vma;
544}
545
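/* As above, but also pin the vma (PIN_GLOBAL for a GGTT vma, PIN_USER otherwise). */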
546struct i915_vma *
547__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
548{
549	struct i915_vma *vma;
550	int err;
551
552	vma = __vm_create_scratch_for_read(vm, size);
553	if (IS_ERR(vma))
554		return vma;
555
556	err = i915_vma_pin(vma, 0, 0,
557			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
558	if (err) {
559		i915_vma_put(vma);
560		return ERR_PTR(err);
561	}
562
563	return vma;
564}
565
566#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
567#include "selftests/mock_gtt.c"
568#endif
v5.9
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2020 Intel Corporation
  4 */
  5
  6#include <linux/slab.h> /* fault-inject.h is not standalone! */
  7
  8#include <linux/fault-inject.h>
  9
 10#include "i915_trace.h"
 11#include "intel_gt.h"
 12#include "intel_gtt.h"
 13
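/* A pagestash is a small spinlock-protected pagevec, used here to recycle pages for paging structures. */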
 14void stash_init(struct pagestash *stash)
 15{
 16	pagevec_init(&stash->pvec);
 17	spin_lock_init(&stash->lock);
 18}
 19
 20static struct page *stash_pop_page(struct pagestash *stash)
 21{
 22	struct page *page = NULL;
 23
 24	spin_lock(&stash->lock);
 25	if (likely(stash->pvec.nr))
 26		page = stash->pvec.pages[--stash->pvec.nr];
 27	spin_unlock(&stash->lock);
 28
 29	return page;
 30}
 31
 32static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
 33{
 34	unsigned int nr;
 35
 36	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
 37
 38	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
 39	memcpy(stash->pvec.pages + stash->pvec.nr,
 40	       pvec->pages + pvec->nr - nr,
 41	       sizeof(pvec->pages[0]) * nr);
 42	stash->pvec.nr += nr;
 43
 44	spin_unlock(&stash->lock);
 45
 46	pvec->nr -= nr;
 47}
 48
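/*
 * Allocate a page for a paging structure: try the vm's local stash first;
 * for write-combined page tables also try the global WC stash, and as a
 * last resort batch-allocate pages so that the cost of set_pages_array_wc()
 * is amortised. Non-WC mappings simply fall back to alloc_page().
 */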
 49static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 50{
 51	struct pagevec stack;
 52	struct page *page;
 53
 54	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 55		i915_gem_shrink_all(vm->i915);
 56
 57	page = stash_pop_page(&vm->free_pages);
 58	if (page)
 59		return page;
 60
 61	if (!vm->pt_kmap_wc)
 62		return alloc_page(gfp);
 63
 64	/* Look in our global stash of WC pages... */
 65	page = stash_pop_page(&vm->i915->mm.wc_stash);
 66	if (page)
 67		return page;
 68
 69	/*
 70	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
 71	 *
 72	 * We have to be careful as page allocation may trigger the shrinker
 73	 * (via direct reclaim) which will fill up the WC stash underneath us.
 74	 * So we add our WB pages into a temporary pvec on the stack and merge
 75	 * them into the WC stash after all the allocations are complete.
 76	 */
 77	pagevec_init(&stack);
 78	do {
 79		struct page *page;
 80
 81		page = alloc_page(gfp);
 82		if (unlikely(!page))
 83			break;
 84
 85		stack.pages[stack.nr++] = page;
 86	} while (pagevec_space(&stack));
 87
 88	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
 89		page = stack.pages[--stack.nr];
 90
 91		/* Merge spare WC pages to the global stash */
 92		if (stack.nr)
 93			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
 94
 95		/* Push any surplus WC pages onto the local VM stash */
 96		if (stack.nr)
 97			stash_push_pagevec(&vm->free_pages, &stack);
 98	}
 99
100	/* Return unwanted leftovers */
101	if (unlikely(stack.nr)) {
102		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
103		__pagevec_release(&stack);
104	}
105
106	return page;
107}
108
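/*
 * Flush the vm's local free-page stash, called with vm->free_pages.lock held:
 * WC pages are first pushed to the global WC stash, and any overflow is
 * converted back to WB and released.
 */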
109static void vm_free_pages_release(struct i915_address_space *vm,
110				  bool immediate)
111{
112	struct pagevec *pvec = &vm->free_pages.pvec;
113	struct pagevec stack;
114
115	lockdep_assert_held(&vm->free_pages.lock);
116	GEM_BUG_ON(!pagevec_count(pvec));
117
118	if (vm->pt_kmap_wc) {
119		/*
120		 * When we use WC, first fill up the global stash and then
121		 * only if full immediately free the overflow.
122		 */
123		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
124
125		/*
126		 * As we have made some room in the VM's free_pages,
127		 * we can wait for it to fill again. Unless we are
128		 * inside i915_address_space_fini() and must
129		 * immediately release the pages!
130		 */
131		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
132			return;
133
134		/*
135		 * We have to drop the lock to allow ourselves to sleep,
136		 * so take a copy of the pvec and clear the stash for
137		 * others to use it as we sleep.
138		 */
139		stack = *pvec;
140		pagevec_reinit(pvec);
141		spin_unlock(&vm->free_pages.lock);
142
143		pvec = &stack;
144		set_pages_array_wb(pvec->pages, pvec->nr);
145
146		spin_lock(&vm->free_pages.lock);
147	}
148
149	__pagevec_release(pvec);
150}
151
152static void vm_free_page(struct i915_address_space *vm, struct page *page)
153{
154	/*
155	 * On !llc, we need to change the pages back to WB. We only do so
156	 * in bulk, so we rarely need to change the page attributes here,
157	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
158	 * To make detection of the possible sleep more likely, use an
159	 * unconditional might_sleep() for everybody.
160	 */
161	might_sleep();
162	spin_lock(&vm->free_pages.lock);
163	while (!pagevec_space(&vm->free_pages.pvec))
164		vm_free_pages_release(vm, false);
165	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
166	pagevec_add(&vm->free_pages.pvec, page);
167	spin_unlock(&vm->free_pages.lock);
168}
169
170void __i915_vm_close(struct i915_address_space *vm)
171{
172	struct i915_vma *vma, *vn;
173
174	if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
175		return;
176
177	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
178		struct drm_i915_gem_object *obj = vma->obj;
179
180		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
181		if (!kref_get_unless_zero(&obj->base.refcount))
182			continue;
183
184		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
185		WARN_ON(__i915_vma_unbind(vma));
186		__i915_vma_put(vma);
187
188		i915_gem_object_put(obj);
189	}
190	GEM_BUG_ON(!list_empty(&vm->bound_list));
191
192	mutex_unlock(&vm->mutex);
193}
194
195void i915_address_space_fini(struct i915_address_space *vm)
196{
197	spin_lock(&vm->free_pages.lock);
198	if (pagevec_count(&vm->free_pages.pvec))
199		vm_free_pages_release(vm, true);
200	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
201	spin_unlock(&vm->free_pages.lock);
202
203	drm_mm_takedown(&vm->mm);
204
205	mutex_destroy(&vm->mutex);
206}
207
208static void __i915_vm_release(struct work_struct *work)
209{
210	struct i915_address_space *vm =
211		container_of(work, struct i915_address_space, rcu.work);
212
213	vm->cleanup(vm);
214	i915_address_space_fini(vm);
215
216	kfree(vm);
217}
218
219void i915_vm_release(struct kref *kref)
220{
221	struct i915_address_space *vm =
222		container_of(kref, struct i915_address_space, ref);
223
224	GEM_BUG_ON(i915_is_ggtt(vm));
225	trace_i915_ppgtt_release(vm);
226
227	queue_rcu_work(vm->i915->wq, &vm->rcu);
228}
229
230void i915_address_space_init(struct i915_address_space *vm, int subclass)
231{
232	kref_init(&vm->ref);
233	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
234	atomic_set(&vm->open, 1);
235
236	/*
237	 * The vm->mutex must be reclaim safe (for use in the shrinker).
238	 * Do a dummy acquire now under fs_reclaim so that any allocation
239	 * attempt holding the lock is immediately reported by lockdep.
240	 */
241	mutex_init(&vm->mutex);
242	lockdep_set_subclass(&vm->mutex, subclass);
243	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
244
245	GEM_BUG_ON(!vm->total);
246	drm_mm_init(&vm->mm, 0, vm->total);
247	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
248
249	stash_init(&vm->free_pages);
250
251	INIT_LIST_HEAD(&vm->bound_list);
252}
253
254void clear_pages(struct i915_vma *vma)
255{
256	GEM_BUG_ON(!vma->pages);
257
258	if (vma->pages != vma->obj->mm.pages) {
259		sg_free_table(vma->pages);
260		kfree(vma->pages);
261	}
262	vma->pages = NULL;
263
264	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
265}
266
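/*
 * Allocate a page for a paging structure via the vm page stash and DMA-map
 * it; returns -ENOMEM on allocation or mapping failure.
 */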
267static int __setup_page_dma(struct i915_address_space *vm,
268			    struct i915_page_dma *p,
269			    gfp_t gfp)
270{
271	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
272	if (unlikely(!p->page))
273		return -ENOMEM;
274
275	p->daddr = dma_map_page_attrs(vm->dma,
276				      p->page, 0, PAGE_SIZE,
277				      PCI_DMA_BIDIRECTIONAL,
278				      DMA_ATTR_SKIP_CPU_SYNC |
279				      DMA_ATTR_NO_WARN);
280	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
281		vm_free_page(vm, p->page);
282		return -ENOMEM;
283	}
284
285	return 0;
286}
287
288int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
289{
290	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
291}
292
293void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
294{
295	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
296	vm_free_page(vm, p->page);
297}
298
299void
300fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
301{
302	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
303}
304
305static void poison_scratch_page(struct page *page, unsigned long size)
306{
307	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
308		return;
309
310	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
311
312	do {
313		void *vaddr;
314
315		vaddr = kmap(page);
316		memset(vaddr, POISON_FREE, PAGE_SIZE);
317		kunmap(page);
318
319		page = pfn_to_page(page_to_pfn(page) + 1);
320		size -= PAGE_SIZE;
321	} while (size);
322}
323
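/*
 * Allocate, poison (on debug builds) and DMA-map the scratch page, preferring
 * a 64K allocation when the platform supports 64K GTT pages and falling back
 * to 4K on failure.
 */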
324int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
325{
326	unsigned long size;
327
328	/*
329	 * In order to utilize 64K pages for an object with a size < 2M, we will
330	 * need to support a 64K scratch page, given that every 16th entry for a
331	 * page-table operating in 64K mode must point to a properly aligned 64K
332	 * region, including any PTEs which happen to point to scratch.
333	 *
334	 * This is only relevant for the 48b PPGTT where we support
335	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
336	 * scratch (read-only) between all vm, we create one 64k scratch page
337	 * for all.
338	 */
339	size = I915_GTT_PAGE_SIZE_4K;
340	if (i915_vm_is_4lvl(vm) &&
341	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
342		size = I915_GTT_PAGE_SIZE_64K;
343		gfp |= __GFP_NOWARN;
344	}
345	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
346
347	do {
348		unsigned int order = get_order(size);
349		struct page *page;
350		dma_addr_t addr;
351
352		page = alloc_pages(gfp, order);
353		if (unlikely(!page))
354			goto skip;
355
356		/*
357		 * Use a non-zero scratch page for debugging.
358		 *
359		 * We want a value that should be reasonably obvious
360		 * to spot in the error state, while also causing a GPU hang
361		 * if executed. We prefer using a clear page in production, so
362		 * should it ever be accidentally used, the effect should be
363		 * fairly benign.
364		 */
365		poison_scratch_page(page, size);
366
367		addr = dma_map_page_attrs(vm->dma,
368					  page, 0, size,
369					  PCI_DMA_BIDIRECTIONAL,
370					  DMA_ATTR_SKIP_CPU_SYNC |
371					  DMA_ATTR_NO_WARN);
372		if (unlikely(dma_mapping_error(vm->dma, addr)))
373			goto free_page;
374
375		if (unlikely(!IS_ALIGNED(addr, size)))
376			goto unmap_page;
377
378		vm->scratch[0].base.page = page;
379		vm->scratch[0].base.daddr = addr;
380		vm->scratch_order = order;
381		return 0;
382
383unmap_page:
384		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
385free_page:
386		__free_pages(page, order);
387skip:
388		if (size == I915_GTT_PAGE_SIZE_4K)
389			return -ENOMEM;
390
391		size = I915_GTT_PAGE_SIZE_4K;
392		gfp &= ~__GFP_NOWARN;
393	} while (1);
394}
395
396void cleanup_scratch_page(struct i915_address_space *vm)
397{
398	struct i915_page_dma *p = px_base(&vm->scratch[0]);
399	unsigned int order = vm->scratch_order;
400
401	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
402		       PCI_DMA_BIDIRECTIONAL);
403	__free_pages(p->page, order);
404}
405
406void free_scratch(struct i915_address_space *vm)
407{
408	int i;
409
410	if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
411		return;
412
413	for (i = 1; i <= vm->top; i++) {
414		if (!px_dma(&vm->scratch[i]))
415			break;
416		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
417	}
418
419	cleanup_scratch_page(vm);
420}
421
422void gtt_write_workarounds(struct intel_gt *gt)
423{
424	struct drm_i915_private *i915 = gt->i915;
425	struct intel_uncore *uncore = gt->uncore;
426
427	/*
428	 * This function is for GTT-related workarounds. It is called on driver
429	 * load and after a GPU reset, so you can place workarounds here even if
430	 * they get overwritten by a GPU reset.
431	 */
432	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
433	if (IS_BROADWELL(i915))
434		intel_uncore_write(uncore,
435				   GEN8_L3_LRA_1_GPGPU,
436				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
437	else if (IS_CHERRYVIEW(i915))
438		intel_uncore_write(uncore,
439				   GEN8_L3_LRA_1_GPGPU,
440				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
441	else if (IS_GEN9_LP(i915))
442		intel_uncore_write(uncore,
443				   GEN8_L3_LRA_1_GPGPU,
444				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
445	else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
446		intel_uncore_write(uncore,
447				   GEN8_L3_LRA_1_GPGPU,
448				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
449
450	/*
451	 * To support 64K PTEs we need to first enable the use of the
452	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
453	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
454	 * shouldn't be needed after GEN10.
455	 *
456	 * 64K pages were first introduced from BDW+, although technically they
457	 * only *work* from gen9+. For pre-BDW we instead have the option for
458	 * 32K pages, but we don't currently have any support for it in our
459	 * driver.
460	 */
461	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
462	    INTEL_GEN(i915) <= 10)
463		intel_uncore_rmw(uncore,
464				 GEN8_GAMW_ECO_DEV_RW_IA,
465				 0,
466				 GAMW_ECO_ENABLE_64K_IPS_FIELD);
467
468	if (IS_GEN_RANGE(i915, 8, 11)) {
469		bool can_use_gtt_cache = true;
470
471		/*
472		 * According to the BSpec if we use 2M/1G pages then we also
473		 * need to disable the GTT cache. At least on BDW we can see
474		 * visual corruption when using 2M pages, and not disabling the
475		 * GTT cache.
476		 */
477		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
478			can_use_gtt_cache = false;
479
480		/* WaGttCachingOffByDefault */
481		intel_uncore_write(uncore,
482				   HSW_GTT_CACHE_EN,
483				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
484		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
485				 intel_uncore_read(uncore,
486						   HSW_GTT_CACHE_EN) == 0);
487	}
488}
489
490static void tgl_setup_private_ppat(struct intel_uncore *uncore)
491{
492	/* TGL doesn't support LLC or AGE settings */
493	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
494	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
495	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
496	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
497	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
498	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
499	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
500	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
501}
502
503static void cnl_setup_private_ppat(struct intel_uncore *uncore)
504{
505	intel_uncore_write(uncore,
506			   GEN10_PAT_INDEX(0),
507			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
508	intel_uncore_write(uncore,
509			   GEN10_PAT_INDEX(1),
510			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
511	intel_uncore_write(uncore,
512			   GEN10_PAT_INDEX(2),
513			   GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
514	intel_uncore_write(uncore,
515			   GEN10_PAT_INDEX(3),
516			   GEN8_PPAT_UC);
517	intel_uncore_write(uncore,
518			   GEN10_PAT_INDEX(4),
519			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
520	intel_uncore_write(uncore,
521			   GEN10_PAT_INDEX(5),
522			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
523	intel_uncore_write(uncore,
524			   GEN10_PAT_INDEX(6),
525			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
526	intel_uncore_write(uncore,
527			   GEN10_PAT_INDEX(7),
528			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
529}
530
531/*
532 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
533 * bits. When using advanced contexts each context stores its own PAT, but
534 * writing this data shouldn't be harmful even in those cases.
535 */
536static void bdw_setup_private_ppat(struct intel_uncore *uncore)
537{
538	u64 pat;
539
540	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
541	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
542	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
543	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
544	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
545	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
546	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
547	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
548
549	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
550	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
551}
552
553static void chv_setup_private_ppat(struct intel_uncore *uncore)
554{
555	u64 pat;
556
557	/*
558	 * Map WB on BDW to snooped on CHV.
559	 *
560	 * Only the snoop bit has meaning for CHV, the rest is
561	 * ignored.
562	 *
563	 * The hardware will never snoop for certain types of accesses:
564	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
565	 * - PPGTT page tables
566	 * - some other special cycles
567	 *
568	 * As with BDW, we also need to consider the following for GT accesses:
569	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
570	 * so RTL will always use the value corresponding to
571	 * pat_sel = 000".
572	 * Which means we must set the snoop bit in PAT entry 0
573	 * in order to keep the global status page working.
574	 */
575
576	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
577	      GEN8_PPAT(1, 0) |
578	      GEN8_PPAT(2, 0) |
579	      GEN8_PPAT(3, 0) |
580	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
581	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
582	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
583	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
584
585	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
586	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
587}
588
589void setup_private_pat(struct intel_uncore *uncore)
590{
591	struct drm_i915_private *i915 = uncore->i915;
592
593	GEM_BUG_ON(INTEL_GEN(i915) < 8);
594
595	if (INTEL_GEN(i915) >= 12)
596		tgl_setup_private_ppat(uncore);
597	else if (INTEL_GEN(i915) >= 10)
598		cnl_setup_private_ppat(uncore);
599	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
600		chv_setup_private_ppat(uncore);
601	else
602		bdw_setup_private_ppat(uncore);
603}
604
605#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
606#include "selftests/mock_gtt.c"
607#endif