// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

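/*
 * intel_ppgtt.c: common plumbing for per-process GTTs (ppgtt), the
 * per-context GPU page tables. The helpers below manage the CPU-side
 * page-table/page-directory structures and their DMA-backed pages; the
 * generation-specific walkers live in gen6_ppgtt.c and gen8_ppgtt.c
 * (see the includes above).
 */
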
struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, &pt->base))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&pt->used, 0);
	return pt;
}

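/*
 * alloc_pt() reports failure via ERR_PTR() rather than NULL, so callers
 * must check it with IS_ERR(). A minimal sketch (hypothetical caller,
 * for illustration; the real callers are in the gen6/gen8 backends):
 *
 *	struct i915_page_table *pt;
 *
 *	pt = alloc_pt(vm);
 *	if (IS_ERR(pt))
 *		return PTR_ERR(pt);
 */
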
struct i915_page_directory *__alloc_pd(size_t sz)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return NULL;

	spin_lock_init(&pd->lock);
	return pd;
}

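/*
 * Note the split: __alloc_pd() above only allocates and initialises the
 * CPU-side structure (zeroed, with its lock ready) and lets the caller
 * choose the allocation size. alloc_pd() below is the common case: it
 * also sets up the DMA-backed page that the hardware will walk.
 */
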
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = __alloc_pd(sizeof(*pd));
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
{
	cleanup_page_dma(vm, pd);
	kfree(pd);
}

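/*
 * free_pd() takes the embedded i915_page_dma directly; the kfree() above
 * assumes the DMA state sits at the start of the containing page-table
 * structures (as defined in intel_gtt.h), so freeing it releases the
 * whole object.
 *
 * write_dma_entry() below CPU-maps the backing page just long enough to
 * poke a single 64-bit descriptor in place.
 */
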
static inline void
write_dma_entry(struct i915_page_dma * const pdma,
		const unsigned short idx,
		const u64 encoded_entry)
{
	u64 * const vaddr = kmap_atomic(pdma->page);

	vaddr[idx] = encoded_entry;
	kunmap_atomic(vaddr);
}

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_dma * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
	/* Each thread pre-pins the pd, and we may have a thread per pde. */
	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));

	atomic_inc(px_used(pd));
	pd->entry[idx] = to;
	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
}

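/*
 * __set_pd_entry() and clear_pd_entry() form a pair: setting an entry
 * bumps the directory's use count and writes the child's DMA address in
 * the hardware format produced by the caller-supplied encode() hook;
 * clearing points the slot back at the scratch page and drops the count.
 */
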
void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct i915_page_scratch * const scratch)
{
	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

	write_dma_entry(px_base(pd), idx, scratch->encode);
	pd->entry[idx] = NULL;
	atomic_dec(px_used(pd));
}

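/*
 * release_pd_entry() drops a reference on a page table in two stages:
 * atomic_add_unless() is a lockless fast path that succeeds whenever this
 * is not the final reference; only when the count may hit zero do we take
 * pd->lock, and atomic_dec_and_test() then decides, serialised against
 * concurrent users, whether the entry really gets cleared.
 */
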
bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct i915_page_scratch * const scratch)
{
	bool free = false;

	if (atomic_add_unless(&pt->used, -1, 1))
		return false;

	spin_lock(&pd->lock);
	if (atomic_dec_and_test(&pt->used)) {
		clear_pd_entry(pd, idx, scratch);
		free = true;
	}
	spin_unlock(&pd->lock);

	return free;
}

int i915_ppgtt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	gtt_write_workarounds(gt);

	if (IS_GEN(i915, 6))
		gen6_ppgtt_enable(gt);
	else if (IS_GEN(i915, 7))
		gen7_ppgtt_enable(gt);

	return 0;
}

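/*
 * Only gen6 and gen7 need the GT-wide enable above; on later generations
 * this hook applies the GTT workarounds and nothing more, presumably
 * because gen8+ enables the ppgtt as part of context setup rather than
 * globally.
 */
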
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt)
{
	if (INTEL_GEN(gt->i915) < 8)
		return gen6_ppgtt_create(gt);
	else
		return gen8_ppgtt_create(gt);
}

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = __ppgtt_create(gt);
	if (IS_ERR(ppgtt))
		return ppgtt;

	trace_i915_ppgtt_create(&ppgtt->vm);

	return ppgtt;
}

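/*
 * As with alloc_pt(), errors come back as ERR_PTR(). A minimal sketch of
 * a caller (hypothetical, for illustration only):
 *
 *	struct i915_ppgtt *ppgtt;
 *
 *	ppgtt = i915_ppgtt_create(gt);
 *	if (IS_ERR(ppgtt))
 *		return PTR_ERR(ppgtt);
 */
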
int ppgtt_bind_vma(struct i915_address_space *vm,
		   struct i915_vma *vma,
		   enum i915_cache_level cache_level,
		   u32 flags)
{
	u32 pte_flags;
	int err;

	if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
		err = vm->allocate_va_range(vm, vma->node.start, vma->size);
		if (err)
			return err;

		set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	wmb();

	return 0;
}

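/*
 * The I915_VMA_ALLOC_BIT above records whether page tables were actually
 * allocated for the vma's range: ppgtt_bind_vma() populates the range
 * lazily on first bind (the wmb() makes the PTE writes visible before the
 * GPU may use them), and ppgtt_unbind_vma() below only clears ranges for
 * which that bit was set.
 */
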
void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
		vm->clear_range(vm, vma->node.start, vma->size);
}

int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

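/*
 * ppgtt_init() fills in the parts of the address space common to all
 * generations; the gen-specific create functions are expected to call it
 * before installing their own allocate/insert/clear callbacks.
 */
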
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	ppgtt->vm.gt = gt;
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = &i915->drm.pdev->dev;
	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
	ppgtt->vm.vma_ops.clear_pages = clear_pages;
}
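
/*
 * Illustrative backend skeleton (hypothetical, not taken from this file):
 * a generation-specific create function builds on ppgtt_init() roughly
 * like so, with the gen-specific callbacks elided:
 *
 *	struct i915_ppgtt *ppgtt;
 *
 *	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
 *	if (!ppgtt)
 *		return ERR_PTR(-ENOMEM);
 *
 *	ppgtt_init(ppgtt, gt);
 *	ppgtt->vm.allocate_va_range = ...;
 *	ppgtt->vm.insert_entries = ...;
 *	ppgtt->vm.clear_range = ...;
 */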