/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"

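/*
 * All intel_context objects are allocated from a dedicated slab cache,
 * registered with the i915 globals machinery below so that it can be
 * shrunk or destroyed alongside the driver's other caches.
 */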
static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

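/*
 * Allocate and initialise a fresh intel_context for @engine within @ctx,
 * returning an ERR_PTR on allocation failure.
 */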
struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, ctx, engine);
	return ce;
}

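/*
 * Pin the context for use by the GPU. Only the first pin (pin_count
 * 0 -> 1) does the heavyweight setup under ce->pin_mutex: the backend
 * state is allocated lazily on first use (CONTEXT_ALLOC_BIT), then the
 * backend pin hook is called under a temporary runtime-pm wakeref.
 * Subsequent pins are a bare atomic_inc of ce->pin_count.
 */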
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		intel_wakeref_t wakeref;

		if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
			err = ce->ops->alloc(ce);
			if (unlikely(err))
				goto err;

			__set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
		}

		err = 0;
		with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
			err = ce->ops->pin(ce);
		if (err)
			goto err;

		GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
			  ce->engine->name, ce->timeline->fence_context,
			  ce->ring->head, ce->ring->tail);

		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */

		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

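/*
 * Release a pin. The fast path decrements pin_count with
 * atomic_add_unless(), avoiding the mutex while other pins remain.
 * Only the final unpin takes ce->pin_mutex (with SINGLE_DEPTH_NESTING,
 * since we may already hold another context's pin_mutex during
 * eviction) in order to release the backend state.
 */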
void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/* We may be called from inside intel_context_pin() to evict another */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		GEM_TRACE("%s context:%llx retire\n",
			  ce->engine->name, ce->timeline->fence_context);

		ce->ops->unpin(ce);

		i915_gem_context_put(ce->gem_context);
		intel_context_active_release(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}

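/*
 * Pin the context state (e.g. the logical ring context image) into the
 * GGTT above the backend's minimum bias, and mark it unshrinkable as
 * the HW may reference it until the pin is released.
 */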
static int __context_pin_state(struct i915_vma *vma)
{
	u64 flags;
	int err;

	flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	flags |= PIN_HIGH | PIN_GLOBAL;

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	__i915_vma_unpin(vma);
	i915_vma_make_shrinkable(vma);
}

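/*
 * Called once the last active request on the context has been retired:
 * unwind the pins taken by __intel_context_active() below and drop the
 * reference it held on the context.
 */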
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	GEM_TRACE("%s context:%llx retire\n",
		  ce->engine->name, ce->timeline->fence_context);

	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	intel_ring_unpin(ce->ring);
	intel_context_put(ce);
}

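/*
 * First activation of the context: take a reference and pin the ring,
 * the timeline and (if present) the context state, unwinding in
 * reverse order on failure. Paired with __intel_context_retire() above.
 */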
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	intel_context_get(ce);

	err = intel_ring_pin(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	intel_ring_unpin(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

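/*
 * Acquire the context's active tracker. For user contexts, also
 * preallocate the barrier nodes used to order engine power management
 * against this context, so that inserting those barriers later cannot
 * fail with -ENOMEM.
 */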
int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	/* Preallocate tracking nodes */
	if (!i915_gem_context_is_kernel(ce->gem_context)) {
		err = i915_active_acquire_preallocate_barrier(&ce->active,
							      ce->engine);
		if (err) {
			i915_active_release(&ce->active);
			return err;
		}
	}

	return 0;
}

void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

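/*
 * One-time construction of an intel_context: take references on the
 * address space and (if the GEM context supplies one) its explicit
 * timeline, wire up the engine's context operations, and initialise
 * the active tracker with the pin/retire callbacks above.
 */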
void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);

	kref_init(&ce->ref);

	ce->gem_context = ctx;
	ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
	if (ctx->timeline)
		ce->timeline = intel_timeline_get(ctx->timeline);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_16K);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(ctx->i915, &ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

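/* Hooks for the i915 globals infrastructure to trim or tear down the slab. */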
static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

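/*
 * Track the context's activity on its engine: entering takes an
 * engine-pm wakeref and marks the timeline active; exiting undoes
 * both in reverse order.
 */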
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

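/*
 * Serialise a "remote" request (one built on a different context) that
 * will modify this context: order it after the last request on our
 * timeline if the timelines differ, and keep this context's image and
 * timeline pinned until the remote request retires by tracking it on
 * ce->active.
 */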
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->hw_context == ce);

	if (rq->timeline != tl) { /* beware timeline sharing */
		err = mutex_lock_interruptible_nested(&tl->mutex,
						      SINGLE_DEPTH_NESTING);
		if (err)
			return err;

		/* Queue this switch after current activity by this context. */
		err = i915_active_request_set(&tl->last_request, rq);
		mutex_unlock(&tl->mutex);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by tracking the request
	 * on the ce activity tracker.
	 *
	 * We only need to take one pin on its account; in other words, we
	 * transfer the pin on the ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_ref(&ce->active, rq->timeline, rq);
}

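/*
 * Convenience wrapper: pin the context just long enough to create a
 * request on it; the request takes the references it needs, so the
 * temporary pin can be dropped immediately.
 */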
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif