// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "gem/i915_gem_lmem.h"

#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

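/*
 * Allocate a single page table. @sz selects the size of the backing
 * object: ordinarily 4K, though platforms with compact page-table
 * layouts may ask for more. The table is returned unmapped and with
 * a zero use-count.
 */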
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	pt->base = vm->alloc_pt_dma(vm, sz);
	if (IS_ERR(pt->base)) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->is_compact = false;
	atomic_set(&pt->used, 0);
	return pt;
}

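/*
 * Allocate only the CPU-side bookkeeping for a page directory with
 * @count entry slots; no DMA backing object is attached here.
 */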
struct i915_page_directory *__alloc_pd(int count)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return NULL;

	pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd->entry)) {
		kfree(pd);
		return NULL;
	}

	spin_lock_init(&pd->lock);
	return pd;
}

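/*
 * Allocate a complete page directory: bookkeeping for I915_PDES
 * entries plus the 4K page the hardware will actually walk.
 */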
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = __alloc_pd(I915_PDES);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		kfree(pd->entry);
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

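/*
 * Free a paging structure at level @lvl. Directories (lvl > 0) also
 * own an entry array. The BUILD_BUG_ON below records the assumption
 * this relies on: the embedded page table must sit at offset 0 of
 * struct i915_page_directory so both types can share this path.
 */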
void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
	BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));

	if (lvl) {
		struct i915_page_directory *pd =
			container_of(pt, typeof(*pd), pt);
		kfree(pd->entry);
	}

	if (pt->base)
		i915_gem_object_put(pt->base);

	kfree(pt);
}

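/*
 * Write one encoded entry into the CPU mapping of a paging structure
 * and flush the cacheline so the update is visible to the device.
 */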
static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
		const unsigned short idx,
		const u64 encoded_entry)
{
	u64 * const vaddr = __px_vaddr(pdma);

	vaddr[idx] = encoded_entry;
	drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
}

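/*
 * Install @to at slot @idx of @pd: take a use-count reference on the
 * directory, publish the CPU-side pointer, then write the hardware
 * entry using the caller-provided @encode helper.
 */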
void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
	/* Each thread pre-pins the pd, and we may have a thread per pde. */
	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);

	atomic_inc(px_used(pd));
	pd->entry[idx] = to;
	write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}

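/*
 * Undo __set_pd_entry(): point slot @idx back at the scratch page,
 * drop the CPU-side pointer and release the use-count reference.
 */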
void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch)
{
	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

	write_dma_entry(px_base(pd), idx, scratch->encode);
	pd->entry[idx] = NULL;
	atomic_dec(px_used(pd));
}

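/*
 * Drop a reference on the child @pt. While other references remain
 * the decrement is lock-free; only the last reference takes @pd->lock
 * to scrub the entry back to @scratch, and returns true so the caller
 * knows to free @pt.
 */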
bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch)
{
	bool free = false;

	if (atomic_add_unless(&pt->used, -1, 1))
		return false;

	spin_lock(&pd->lock);
	if (atomic_dec_and_test(&pt->used)) {
		clear_pd_entry(pd, idx, scratch);
		free = true;
	}
	spin_unlock(&pd->lock);

	return free;
}

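/*
 * One-time hardware enabling for per-process GTT. Only gen6/gen7 need
 * an explicit enable here; on gen8+ the ppgtt is programmed as part
 * of the context state instead.
 */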
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	gtt_write_workarounds(gt);

	if (GRAPHICS_VER(i915) == 6)
		gen6_ppgtt_enable(gt);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ppgtt_enable(gt);

	return 0;
}

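/* Select the ppgtt implementation matching the platform generation. */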
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
{
	if (GRAPHICS_VER(gt->i915) < 8)
		return gen6_ppgtt_create(gt);
	else
		return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
}

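/*
 * Create a ppgtt for @gt. @lmem_pt_obj_flags is threaded through to
 * the gen8 backend to control placement of page-table objects in
 * local memory on discrete parts.
 */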
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
	if (IS_ERR(ppgtt))
		return ppgtt;

	trace_i915_ppgtt_create(&ppgtt->vm);

	return ppgtt;
}

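/*
 * Insert a vma's pages into the ppgtt: allocate the covering
 * page-table range on first bind, then write the PTEs using the
 * requested PAT index plus read-only/local-memory bits derived from
 * the backing object. The final wmb() orders the PTE writes ahead of
 * any subsequent GPU access.
 */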
void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    unsigned int pat_index,
		    u32 flags)
{
	u32 pte_flags;

	if (!vma_res->allocated) {
		vm->allocate_va_range(vm, stash, vma_res->start,
				      vma_res->vma_size);
		vma_res->allocated = true;
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, pat_index, pte_flags);
	wmb();
}

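/*
 * Scrub an allocated range back to scratch pages and invalidate any
 * TLB entries that may still cache the old translations.
 */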
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res)
{
	if (!vma_res->allocated)
		return;

	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
	vma_invalidate_tlb(vm, vma_res->tlb);
}

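/*
 * Paging structures needed to span @size at a given level, with slack
 * for a start and end that are not aligned to the level's stride.
 */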
static unsigned long pd_count(u64 size, int shift)
{
	/* Beware later misalignment */
	return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
}

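/*
 * Preallocate (without mapping) every page table and directory that
 * mapping @size bytes could require, so the later va-range allocation
 * does not have to allocate memory itself. Tables are chained on
 * stash->pt[0] and directories on stash->pt[1].
 */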
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size)
{
	unsigned long count;
	int shift, n, pt_sz;

	shift = vm->pd_shift;
	if (!shift)
		return 0;

	pt_sz = stash->pt_sz;
	if (!pt_sz)
		pt_sz = I915_GTT_PAGE_SIZE_4K;
	else
		GEM_BUG_ON(!IS_DGFX(vm->i915));

	GEM_BUG_ON(!is_power_of_2(pt_sz));

	count = pd_count(size, shift);
	while (count--) {
		struct i915_page_table *pt;

		pt = alloc_pt(vm, pt_sz);
		if (IS_ERR(pt)) {
			i915_vm_free_pt_stash(vm, stash);
			return PTR_ERR(pt);
		}

		pt->stash = stash->pt[0];
		stash->pt[0] = pt;
	}

	for (n = 1; n < vm->top; n++) {
		shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
		count = pd_count(size, shift);
		while (count--) {
			struct i915_page_directory *pd;

			pd = alloc_pd(vm);
			if (IS_ERR(pd)) {
				i915_vm_free_pt_stash(vm, stash);
				return PTR_ERR(pd);
			}

			pd->pt.stash = stash->pt[1];
			stash->pt[1] = &pd->pt;
		}
	}

	return 0;
}

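/*
 * Pin and CPU-map the backing store of everything in the stash. The
 * _locked map variant is used, so the caller is expected to already
 * hold the vm's object lock.
 */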
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n, err;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		for (pt = stash->pt[n]; pt; pt = pt->stash) {
			err = map_pt_dma_locked(vm, pt->base);
			if (err)
				return err;
		}
	}

	return 0;
}

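/*
 * Free whatever remains in the stash, both after an allocation
 * failure and for preallocations that ended up unused. The stash
 * index doubles as the level argument for free_px().
 */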
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		while ((pt = stash->pt[n])) {
			stash->pt[n] = pt->stash;
			free_px(vm, pt, n);
		}
	}
}

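/*
 * Construction common to the gen6 and gen8 backends: attach the
 * address space to its gt, size it from the platform's ppgtt_size and
 * install the shared vma bind/unbind ops.
 */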
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags)
{
	struct drm_i915_private *i915 = gt->i915;

	ppgtt->vm.gt = gt;
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = i915->drm.dev;
	ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
	ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;

	dma_resv_init(&ppgtt->vm._resv);
	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

	ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
	ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
}