Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
  1/*
  2 * Copyright © 2016 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 */
 24
 25#include "i915_drv.h"
 26
 27static bool
 28ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
 29{
 30	if (INTEL_GEN(engine->i915) >= 8) {
 31		return (ipehr >> 23) == 0x1c;
 32	} else {
 33		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
 34		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
 35				 MI_SEMAPHORE_REGISTER);
 36	}
 37}
 38
 39static struct intel_engine_cs *
 40semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 41				 u64 offset)
 42{
 43	struct drm_i915_private *dev_priv = engine->i915;
 44	struct intel_engine_cs *signaller;
 45	enum intel_engine_id id;
 46
 47	if (INTEL_GEN(dev_priv) >= 8) {
 48		for_each_engine(signaller, dev_priv, id) {
 49			if (engine == signaller)
 50				continue;
 51
 52			if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
 53				return signaller;
 54		}
 55	} else {
 56		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 57
 58		for_each_engine(signaller, dev_priv, id) {
 59			if(engine == signaller)
 60				continue;
 61
 62			if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
 63				return signaller;
 64		}
 65	}
 66
 67	DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
 68			 engine->name, ipehr, offset);
 69
 70	return ERR_PTR(-ENODEV);
 71}
 72
/*
 * If @engine is stuck on a semaphore wait, find the engine it is waiting on
 * and report (via @seqno) the seqno value that would release it.
 *
 * Returns the signalling engine, NULL if no semaphore wait was found (or
 * execlist mode is active), or an ERR_PTR from
 * semaphore_wait_to_signaller_ring().
 */
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;
	void __iomem *vaddr;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (engine->buffer == NULL)
		return NULL;

	/* IPEHR holds the last command dword the parser stalled on. */
	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
	if (!ipehr_is_semaphore_wait(engine, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(engine) & HEAD_ADDR;
	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
	vaddr = (void __iomem *)engine->buffer->vaddr;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= engine->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(vaddr + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	/* Loop ran to exhaustion without finding the MBOX command. */
	if (!i)
		return NULL;

	/* The dword after the command holds the seqno being waited for. */
	*seqno = ioread32(vaddr + head + 4) + 1;
	if (INTEL_GEN(dev_priv) >= 8) {
		/* Gen8+ waits carry a 64-bit GGTT address (hi dword at +12,
		 * lo dword at +8) identifying the semaphore location.
		 */
		offset = ioread32(vaddr + head + 12);
		offset <<= 32;
		offset |= ioread32(vaddr + head + 8);
	}
	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
145
146static int semaphore_passed(struct intel_engine_cs *engine)
147{
148	struct drm_i915_private *dev_priv = engine->i915;
149	struct intel_engine_cs *signaller;
150	u32 seqno;
151
152	engine->hangcheck.deadlock++;
153
154	signaller = semaphore_waits_for(engine, &seqno);
155	if (signaller == NULL)
156		return -1;
157
158	if (IS_ERR(signaller))
159		return 0;
160
161	/* Prevent pathological recursion due to driver bugs */
162	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
163		return -1;
164
165	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
166		return 1;
167
168	/* cursory check for an unkickable deadlock */
169	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
170	    semaphore_passed(signaller) < 0)
171		return -1;
172
173	return 0;
174}
175
176static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
177{
178	struct intel_engine_cs *engine;
179	enum intel_engine_id id;
180
181	for_each_engine(engine, dev_priv, id)
182		engine->hangcheck.deadlock = 0;
183}
184
185static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
186{
187	u32 tmp = current_instdone | *old_instdone;
188	bool unchanged;
189
190	unchanged = tmp == *old_instdone;
191	*old_instdone |= tmp;
192
193	return unchanged;
194}
195
196static bool subunits_stuck(struct intel_engine_cs *engine)
197{
198	struct drm_i915_private *dev_priv = engine->i915;
199	struct intel_instdone instdone;
200	struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
201	bool stuck;
202	int slice;
203	int subslice;
204
205	if (engine->id != RCS)
206		return true;
207
208	intel_engine_get_instdone(engine, &instdone);
209
210	/* There might be unstable subunit states even when
211	 * actual head is not moving. Filter out the unstable ones by
212	 * accumulating the undone -> done transitions and only
213	 * consider those as progress.
214	 */
215	stuck = instdone_unchanged(instdone.instdone,
216				   &accu_instdone->instdone);
217	stuck &= instdone_unchanged(instdone.slice_common,
218				    &accu_instdone->slice_common);
219
220	for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
221		stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
222					    &accu_instdone->sampler[slice][subslice]);
223		stuck &= instdone_unchanged(instdone.row[slice][subslice],
224					    &accu_instdone->row[slice][subslice]);
225	}
226
227	return stuck;
228}
229
230static enum intel_engine_hangcheck_action
231head_stuck(struct intel_engine_cs *engine, u64 acthd)
232{
233	if (acthd != engine->hangcheck.acthd) {
234
235		/* Clear subunit states on head movement */
236		memset(&engine->hangcheck.instdone, 0,
237		       sizeof(engine->hangcheck.instdone));
238
239		return HANGCHECK_ACTIVE;
240	}
241
242	if (!subunits_stuck(engine))
243		return HANGCHECK_ACTIVE;
244
245	return HANGCHECK_HUNG;
246}
247
248static enum intel_engine_hangcheck_action
249engine_stuck(struct intel_engine_cs *engine, u64 acthd)
250{
251	struct drm_i915_private *dev_priv = engine->i915;
252	enum intel_engine_hangcheck_action ha;
253	u32 tmp;
254
255	ha = head_stuck(engine, acthd);
256	if (ha != HANGCHECK_HUNG)
257		return ha;
258
259	if (IS_GEN2(dev_priv))
260		return HANGCHECK_HUNG;
261
262	/* Is the chip hanging on a WAIT_FOR_EVENT?
263	 * If so we can simply poke the RB_WAIT bit
264	 * and break the hang. This should work on
265	 * all but the second generation chipsets.
266	 */
267	tmp = I915_READ_CTL(engine);
268	if (tmp & RING_WAIT) {
269		i915_handle_error(dev_priv, 0,
270				  "Kicking stuck wait on %s",
271				  engine->name);
272		I915_WRITE_CTL(engine, tmp);
273		return HANGCHECK_KICK;
274	}
275
276	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
277		switch (semaphore_passed(engine)) {
278		default:
279			return HANGCHECK_HUNG;
280		case 1:
281			i915_handle_error(dev_priv, 0,
282					  "Kicking stuck semaphore on %s",
283					  engine->name);
284			I915_WRITE_CTL(engine, tmp);
285			return HANGCHECK_KICK;
286		case 0:
287			return HANGCHECK_WAIT;
288		}
289	}
290
291	return HANGCHECK_HUNG;
292}
293
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-engine seqno progress
 * and if there is no progress, the hangcheck score for that engine is
 * increased. Further, acthd is inspected to see if the engine is stuck. If
 * it is stuck, we kick the engine. If we see no progress on three
 * subsequent calls we assume the chip is wedged and try to fix it by
 * resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int hung = 0, stuck = 0;	/* bitmasks of engine flags */
	int busy_count = 0;
/* Score deltas per hangcheck verdict; an engine is declared hung once its
 * score reaches HANGCHECK_SCORE_RING_HUNG.
 */
#define BUSY 1
#define KICK 5
#define HUNG 20
#define ACTIVE_DECAY 15

	if (!i915.enable_hangcheck)
		return;

	/* Nothing to check while the GPU is asleep. */
	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	/* As enabling the GPU requires fairly extensive mmio access,
	 * periodically arm the mmio checker to see if we are triggering
	 * any invalid access.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		bool busy = intel_engine_has_waiter(engine);
		u64 acthd;
		u32 seqno;
		u32 submit;

		/* Reset recursion guards before probing semaphore chains. */
		semaphore_clear_deadlocks(dev_priv);

		/* We don't strictly need an irq-barrier here, as we are not
		 * serving an interrupt request, be paranoid in case the
		 * barrier has side-effects (such as preventing a broken
		 * cacheline snoop) and so be sure that we can see the seqno
		 * advance. If the seqno should stick, due to a stale
		 * cacheline, we would erroneously declare the GPU hung.
		 */
		if (engine->irq_seqno_barrier)
			engine->irq_seqno_barrier(engine);

		acthd = intel_engine_get_active_head(engine);
		seqno = intel_engine_get_seqno(engine);
		submit = intel_engine_last_submit(engine);

		/* Same seqno as last check: no request retired since then. */
		if (engine->hangcheck.seqno == seqno) {
			if (i915_seqno_passed(seqno, submit)) {
				/* Everything submitted has completed. */
				engine->hangcheck.action = HANGCHECK_IDLE;
			} else {
				/* We always increment the hangcheck score
				 * if the engine is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring, if this
				 * engine is in a legitimate wait for another
				 * engine. In that case the waiting engine is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				engine->hangcheck.action =
					engine_stuck(engine, acthd);

				switch (engine->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					engine->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					engine->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					engine->hangcheck.score += HUNG;
					break;
				}
			}

			if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
				hung |= intel_engine_flag(engine);
				/* Kicked-but-not-hung engines are "stuck":
				 * possibly victims rather than culprits.
				 */
				if (engine->hangcheck.action != HANGCHECK_HUNG)
					stuck |= intel_engine_flag(engine);
			}
		} else {
			engine->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (engine->hangcheck.score > 0)
				engine->hangcheck.score -= ACTIVE_DECAY;
			if (engine->hangcheck.score < 0)
				engine->hangcheck.score = 0;

			/* Clear head and subunit states on seqno movement */
			acthd = 0;

			memset(&engine->hangcheck.instdone, 0,
			       sizeof(engine->hangcheck.instdone));
		}

		engine->hangcheck.seqno = seqno;
		engine->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	if (hung) {
		char msg[80];
		unsigned int tmp;
		int len;

		/* If some rings hung but others were still busy, only
		 * blame the hanging rings in the synopsis.
		 */
		if (stuck != hung)
			hung &= ~stuck;
		len = scnprintf(msg, sizeof(msg),
				"%s on ", stuck == hung ? "No progress" : "Hang");
		for_each_engine_masked(engine, dev_priv, hung, tmp)
			len += scnprintf(msg + len, sizeof(msg) - len,
					 "%s, ", engine->name);
		/* Chop the trailing ", " left by the loop above. */
		msg[len-2] = '\0';

		/* Both functions return void; the "return expr" just makes
		 * the tail call explicit.
		 */
		return i915_handle_error(dev_priv, hung, msg);
	}

	/* Reset timer in case GPU hangs without another request being added */
	if (busy_count)
		i915_queue_hangcheck(dev_priv);
}
440
/* Reset all hangcheck state (score, seqno, acthd, accumulated INSTDONE,
 * deadlock counter) for @engine, e.g. after a reset or at engine init.
 */
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}
445
/* One-time device setup: bind the periodic hangcheck worker to the
 * delayed-work item embedded in gpu_error.
 */
void intel_hangcheck_init(struct drm_i915_private *i915)
{
	INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);
}