/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#include "i915_params.h"
#include "intel_memory_region.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)
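
/*
 * Worked example (illustrative, not in the original header): with 4K
 * pages, I915_PTES(sizeof(gen6_pte_t)) = 4096 / 4 = 1024 PTEs per page
 * table, while I915_PTES(sizeof(gen8_pte_t)) = 4096 / 8 = 512, matching
 * the I915_PDES entries per gen8 paging structure.
 */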

/* gen6-hsw has bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
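
/*
 * Worked example (illustrative, not in the original header): for a
 * page-aligned address with bit 32 set, e.g. addr = 0x1_0000_0000,
 * (addr >> 28) & 0xff0 = 0x10, so physical address bit 32 lands in PTE
 * bit 4, while address bits 31:12 are OR'ed in unchanged.
 */
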
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID REG_BIT(0)

#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID REG_BIT(0)
#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
#define BYT_PTE_WRITEABLE REG_BIT(1)

#define GEN12_PPGTT_PTE_LM BIT_ULL(11)

#define GEN12_GGTT_PTE_LM BIT_ULL(1)

#define GEN12_PDE_64K BIT(6)
#define GEN12_PTE_PS64 BIT(8)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
					(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
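
/*
 * Worked example (illustrative, not in the original header):
 * HSW_CACHEABILITY_CONTROL(0x8) keeps the low three bits (here 0) in
 * PTE bits 3:1 and shifts the fourth bit up by 8, yielding 0x800,
 * i.e. only PTE bit 11 set.
 */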

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3-level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
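
/*
 * Illustrative sketch (not part of the original header): extracting the
 * page-table indices of a gen8 48b address per the layout above. Each
 * level indexes 512 (I915_PDES) entries, hence the 9-bit masks:
 *
 *	pml4e = (addr >> 39) & I915_PDE_MASK;	// bits 47:39
 *	pdpe  = (addr >> 30) & I915_PDE_MASK;	// bits 38:30
 *	pde   = (addr >> 21) & I915_PDE_MASK;	// bits 29:21
 *	pte   = (addr >> 12) & I915_PDE_MASK;	// bits 20:12
 */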
#define GEN8_3LVL_PDPES 4

#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE 0 /* WB LLC */
#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP REG_BIT(6)
#define GEN8_PPAT_AGE(x) ((x)<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
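
/*
 * Worked example (illustrative, not in the original header): each PPAT
 * entry occupies one byte of the 64-bit PPAT value, so
 * GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_LLC) places the byte 0x07 at
 * bits 23:16 of the register value.
 */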

#define GEN8_PAGE_PRESENT BIT_ULL(0)
#define GEN8_PAGE_RW BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
	bool is_compact;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
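
/*
 * Illustrative note (not in the original header): px_base() is a
 * type-generic accessor. Passing a struct drm_i915_gem_object * returns
 * it unchanged, a struct i915_page_table * yields pt->base, and a
 * struct i915_page_directory * yields pd->pt.base; any other type is a
 * compile-time error via the (void)0 fallback.
 */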

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
	/*
	 * Optionally override the alignment/size of the physical page that
	 * contains each PT. If not set, this defaults back to the usual
	 * I915_GTT_PAGE_SIZE_4K. This does not influence the other paging
	 * structures. MUST be a power-of-two. ONLY applicable on discrete
	 * platforms.
	 */
	int pt_sz;
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total; /* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved; /* size addr space reserved */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/**
	 * List of vmas not yet bound or evicted.
	 */
	struct list_head unbound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	/* Skip pte rewrite on unbind for suspend. Protected by @mutex */
	bool skip_pte_rewrite:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*raw_insert_page)(struct i915_address_space *vm,
				dma_addr_t addr,
				u64 offset,
				enum i915_cache_level cache_level,
				u32 flags);
	void (*raw_insert_entries)(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   enum i915_cache_level cache_level,
				   u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap; /* Mapping to our CPU mappable region */
	struct resource gmadr; /* GMADR resource */
	resource_size_t mappable_end; /* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	/**
	 * @pte_lost: Are ptes lost on resume?
	 *
	 * Whether the system was recently restored from hibernate and
	 * thus may have lost pte content.
	 */
	bool pte_lost;

	/**
	 * @probed_pte: Probed pte value on suspend. Re-checked on resume.
	 */
	u64 probed_pte;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
					enum intel_memory_type type)
{
	/* avoid INTEL_MEMORY_MOCK overflow */
	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
		type = INTEL_MEMORY_SYSTEM;

	return vm->min_alignment[type];
}

static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

static inline struct i915_address_space *
i915_vm_tryget(struct i915_address_space *vm)
{
	return kref_get_unless_zero(&vm->ref) ? vm : NULL;
}

static inline void assert_vm_alive(struct i915_address_space *vm)
{
	GEM_BUG_ON(!kref_read(&vm->ref));
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference, obtained through
 * i915_vm_resv_get(), should be released.
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}
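
/*
 * Illustrative usage (a sketch, not from the original header): take a
 * temporary reference on a vm and drop it when done.
 *
 *	struct i915_address_space *vm = i915_vm_tryget(candidate);
 *	if (vm) {
 *		... operate on vm ...
 *		i915_vm_put(vm);
 *	}
 */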

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
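
/*
 * Worked example (illustrative, not in the original header): with
 * pde_shift = GEN6_PDE_SHIFT (22), addr = 0x3ff000 and length = 0x2000
 * cross a 4M page-table boundary, so the count is clamped to what is
 * left in the first table: NUM_PTE(22) - i915_pte_index(addr, 22) =
 * 1024 - 1023 = 1 PTE.
 */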

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);
void intel_ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

/**
 * i915_ggtt_mark_pte_lost - Mark ggtt ptes as lost or clear such a marking
 * @i915: The device private.
 * @val: Whether the ptes should be marked as lost.
 *
 * In some cases pte content is retained across suspend, but typically lost
 * across hibernate. Typically they should be marked as lost on
 * hibernation restore and such marking cleared on suspend.
 */
void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do { \
	u64 v__ = lower_32_bits(v); \
	fill_px((px), v__ << 32 | v__); \
} while (0)
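
/*
 * Worked example (illustrative, not in the original header):
 * fill32_px(pt, 0xdeadbeef) replicates the 32-bit value into both
 * halves of each 64-bit slot, writing 0xdeadbeefdeadbeef to all
 * PAGE_SIZE / sizeof(u64) entries of the page.
 */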

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_gt *gt);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res) {
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
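
/*
 * Illustrative usage (a sketch, not from the original header): PTE
 * writers initialise an iterator with sgt_dma() and advance it one
 * I915_GTT_PAGE_SIZE at a time, stepping to the next scatterlist
 * element once dma reaches max. write_pte() is a hypothetical helper
 * standing in for the per-generation PTE write:
 *
 *	struct sgt_dma it = sgt_dma(vma_res);
 *
 *	do {
 *		write_pte(it.dma);
 *		it.dma += I915_GTT_PAGE_SIZE;
 *		if (it.dma >= it.max) {
 *			it.sg = __sg_next(it.sg);
 *			if (!it.sg)
 *				break;
 *			it.dma = sg_dma_address(it.sg);
 *			it.max = it.dma + sg_dma_len(it.sg);
 *		}
 *	} while (1);
 */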

#endif