// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"

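/*
 * Fill the kernel context image with CONTEXT_REDZONE (debug builds only)
 * so that anything the subsequent reset does not rewrite stands out.
 */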
static void dbg_poison_ce(struct intel_context *ce)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	if (ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = i915_coherent_map_type(ce->engine->i915, obj, true);
		void *map;

		if (!i915_gem_object_trylock(obj))
			return;

		map = i915_gem_object_pin_map(obj, type);
		if (!IS_ERR(map)) {
			memset(map, CONTEXT_REDZONE, obj->base.size);
			i915_gem_object_flush_map(obj);
			i915_gem_object_unpin_map(obj);
		}
		i915_gem_object_unlock(obj);
	}
}

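/*
 * Called on the first engine-pm reference: take a GT wakeref and scrub
 * the kernel context image back to a known state before the engine is
 * used again.
 */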
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* Flush all pending HW writes before we touch the context */
		while (unlikely(intel_context_inflight(ce)))
			intel_engine_flush_submission(engine);

		/* First poison the image to verify we never fully trust it */
		dbg_poison_ce(ce);

		/* Scrub the context image after our loss of control */
		ce->ops->reset(ce);

		CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
			 ce->timeline->seqno,
			 READ_ONCE(*ce->timeline->hwsp_seqno),
			 ce->ring->emit);
		GEM_BUG_ON(ce->timeline->seqno !=
			   READ_ONCE(*ce->timeline->hwsp_seqno));
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_breadcrumbs_unpark(engine->breadcrumbs);
	intel_engine_unpark_heartbeat(engine);
	return 0;
}

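/*
 * switch_to_kernel_context() constructs its request without holding
 * ce->timeline->mutex (the mutex may already be held further up the
 * stack), so these helpers mark the mutex as acquired for lockdep's
 * benefit; without CONFIG_LOCKDEP they are empty stubs.
 */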
#if IS_ENABLED(CONFIG_LOCKDEP)

static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	unsigned long flags;

	local_irq_save(flags);
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

	return flags;
}

static void __timeline_mark_unlock(struct intel_context *ce,
				   unsigned long flags)
{
	mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
	local_irq_restore(flags);
}

#else

static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	return 0;
}

static void __timeline_mark_unlock(struct intel_context *ce,
				   unsigned long flags)
{
}

#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */

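/*
 * dma_fence callback: sample the time from emitting the final (parking)
 * request to its completion and feed it into the engine latency estimate.
 */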
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}

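/*
 * Hand the parking request to the backend and let go of the engine-pm
 * inside a single timelines->lock critical section; see the comment
 * below for why the ordering matters.
 */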
static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so engine_retire() */
	__i915_request_queue_bh(rq);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

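/*
 * Issue one last request to switch the engine back to the kernel context
 * before parking. Returns true if the engine can be parked immediately
 * (already idle in the kernel context, or wedged); returns false if the
 * parking request was queued and the park is deferred until it retires.
 */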
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	unsigned long flags;
	bool result = true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 *
	 * For unlocking, there are 2 other parties and the GPU who have a
	 * stake here.
	 *
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking the
	 * list of active timelines looking for completions. Meanwhile as soon
	 * as we call __i915_request_queue(), the GPU may complete our request.
	 * Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */
	flags = __timeline_mark_lock(ce);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	__timeline_mark_unlock(ce, flags);
	return result;
}

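/*
 * Flush the idle barriers accumulated on engine->barrier_tasks, invoking
 * each callback with -EAGAIN now that the engine has parked.
 */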
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}

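/*
 * Called when the last engine-pm reference is dropped: switch back to
 * the kernel context, quiesce the engine and release the GT wakeref.
 */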
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	engine->execlists.no_priolist = false;

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);
	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

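/* Set up the engine wakeref (park/unpark callbacks) and its heartbeat. */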
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
	intel_engine_init_heartbeat(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif