/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>

#include "display/intel_display_types.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"

#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

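/*
 * Read-modify-write helpers for registers; the _fw suffix means the caller
 * is responsible for holding any forcewake required for the access.
 */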
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}

static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *hung_ctx = rq->gem_context;

	if (!i915_request_is_active(rq))
		return;

	lockdep_assert_held(&engine->active.lock);
	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
		if (rq->gem_context == hung_ctx)
			i915_request_skip(rq, -EIO);
}

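/*
 * Propagate a context's guilt to the owning client: each hang adds to the
 * file's ban score, with an extra penalty for hangs in rapid succession.
 */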
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
			       const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}

static bool context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned long prev_hang;
	bool banned;
	int i;

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx))
		return false;

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned) {
		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count));
		i915_gem_context_set_banned(ctx);
	}

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		client_mark_guilty(ctx->file_priv, ctx);

	return banned;
}

static void context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}

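/*
 * Mark the hung request as guilty (skip it and any later requests from the
 * same context) or innocent (flagged with -EAGAIN to be replayed after the
 * reset).
 */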
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
		  rq->engine->name,
		  rq->fence.context,
		  rq->fence.seqno,
		  yesno(guilty));

	GEM_BUG_ON(i915_request_completed(rq));

	if (guilty) {
		i915_request_skip(rq, -EIO);
		if (context_mark_guilty(rq->gem_context))
			engine_skip_context(rq);
	} else {
		dma_fence_set_error(&rq->fence, -EAGAIN);
		context_mark_innocent(rq->gem_context);
	}
}

static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct intel_gt *gt,
			     intel_engine_mask_t engine_mask,
			     unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}

static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[] = {
		[RCS0] = GEN6_GRDOM_RENDER,
		[BCS0] = GEN6_GRDOM_BLT,
		[VCS0] = GEN6_GRDOM_MEDIA,
		[VCS1] = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
		}
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}

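/*
 * On gen11 the SFC (Scaler and Format Converter) units are shared between
 * the video decode and video enhancement engines. Before resetting an engine
 * that may be using an SFC, we must force-lock the SFC so that it can be
 * safely included in the engine's reset domain.
 */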
static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * Tell the engine that a software reset is going to happen. The engine
	 * will then try to force lock the SFC (if currently locked, it will
	 * remain so until we tell the engine it is safe to unlock; if currently
	 * unlocked, it will ignore this and all new lock requests). If SFC
	 * ends up being locked to the engine we want to reset, we have to reset
	 * it as well (we will unlock it once the reset sequence is completed).
	 */
	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	if (__intel_wait_for_register_fw(uncore,
					 sfc_forced_lock_ack,
					 sfc_forced_lock_ack_bit,
					 sfc_forced_lock_ack_bit,
					 1000, 0, NULL)) {
		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
		return 0;
	}

	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
		return sfc_reset_bit;

	return 0;
}

static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}

static int gen11_reset_engines(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	const u32 hw_engine_mask[] = {
		[RCS0] = GEN11_GRDOM_RENDER,
		[BCS0] = GEN11_GRDOM_BLT,
		[VCS0] = GEN11_GRDOM_MEDIA,
		[VCS1] = GEN11_GRDOM_MEDIA2,
		[VCS2] = GEN11_GRDOM_MEDIA3,
		[VCS3] = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			hw_mask |= gen11_lock_sfc(engine);
		}
	}

	ret = gen6_hw_domain_reset(gt, hw_mask);

	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
			gen11_unlock_sfc(engine);

	return ret;
}

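/*
 * Gen8+ provides a ready-for-reset handshake: ask the engine to quiesce and
 * wait for its ack before pulling the reset, except for catastrophic errors
 * where the handshake must be bypassed.
 */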
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			  engine->name, request,
			  intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We would rather take context corruption than a
		 * failed reset with a wedged driver/GPU. The active
		 * bb execution case should be covered by the
		 * stop_engines() we have before the reset.
		 */
	}

	if (INTEL_GEN(gt->i915) >= 11)
		ret = gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

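/* Select the reset mechanism appropriate for this generation of hardware. */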
static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
		return ironlake_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt->i915);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GEM_TRACE("engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *i915)
{
	if (!i915_modparams.reset)
		return false;

	return intel_get_gpu_reset(i915);
}

bool intel_has_reset_engine(struct drm_i915_private *i915)
{
	return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
}

int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure the irq handler finishes, and is not run again.
 * Also return the active request so that we only search for it once.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	engine->reset.prepare(engine);
}

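/*
 * Revoke any userspace CPU mmaps of objects through the GGTT fence regions,
 * forcing clients to refault once the fences have been restored after the
 * reset.
 */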
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
		node = &vma->obj->base.vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	for_each_engine(engine, gt->i915, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	intel_uc_reset_prepare(&gt->uc);

	return awake;
}

static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}

static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	for_each_engine(engine, gt->i915, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);

	i915_gem_restore_fences(gt->i915);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->i915, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}
}

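/*
 * Once the device is wedged, new requests are not submitted to the hardware
 * but are immediately completed with -EIO instead.
 */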
static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&engine->active.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->active.lock, flags);

	intel_engine_queue_breadcrumbs(engine);
}

static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, gt->i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt->i915, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	for_each_engine(engine, gt->i915, id)
		engine->cancel_requests(engine);

	reset_finish(gt, awake);

	GEM_TRACE("end\n");
}

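/* Declare the device wedged, cancelling all in-flight rendering. */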
void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	mutex_lock(&gt->reset.mutex);
	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		__intel_gt_set_wedged(gt);
	mutex_unlock(&gt->reset.mutex);
}

static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	unsigned long flags;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	if (!gt->scratch) /* Never fully initialised, recovery impossible */
		return false;

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since has returned EIO; for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		spin_unlock_irqrestore(&timelines->lock, flags);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);

		/* Restart iteration after dropping lock */
		spin_lock_irqsave(&timelines->lock, flags);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	intel_gt_sanitize(gt, false);

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}

bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}

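/*
 * Perform the full-chip reset, retrying with increasing back-off delays if
 * the hardware fails to acknowledge it.
 */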
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(gt);

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}

static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt->i915, id) {
		ret = engine->resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *  - reset the chip using the reset reg
 *  - re-init context state
 *  - re-init hardware status page
 *  - re-init ring buffer
 *  - re-init interrupt state
 *  - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GEM_TRACE("flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		dev_notice(gt->i915->drm.dev,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt->i915)) {
		if (i915_modparams.reset)
			dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(gt->i915);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

	intel_gt_queue_hangcheck(gt);

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}

static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}

/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identify the request that caused the hang and drop it
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	if (!engine->gt->uc.guc.execbuf_client)
		ret = intel_gt_reset_engine(engine);
	else
		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
	if (ret) {
		/* If we fail here, we expect to fall back to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->gt->uc.guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put(engine);
	return ret;
}

static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_prepare_reset(gt->i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);

		intel_gt_reset(gt, engine_mask, reason);

		intel_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);

	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt->i915, engine_mask, msg);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) {
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (intel_engine_reset(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure intel_gt_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, gt->i915, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&gt->reset.flags))
			wait_on_bit(&gt->reset.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	intel_gt_reset_global(gt, engine_mask, msg);

	for_each_engine(engine, gt->i915, tmp)
		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
				 &gt->reset.flags);
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}

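/*
 * Obtain the SRCU read lock that excludes a concurrent full reset, waiting
 * for any reset already in progress (I915_RESET_BACKOFF) to complete first.
 */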
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return 0;
}

void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}

int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	/* Reset still in progress? Maybe we will recover? */
	if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
		return -EIO;

	/* XXX intel_reset_finish() still takes struct_mutex!!! */
	if (mutex_is_locked(&gt->i915->drm.struct_mutex))
		return -EAGAIN;

	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}

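/* Watchdog timeout handler: give up on the reset and wedge the device. */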
static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->gt->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	intel_gt_set_wedged(w->gt);
}

void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#endif
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2008-2018 Intel Corporation
4 */
5
6#include <linux/sched/mm.h>
7#include <linux/stop_machine.h>
8#include <linux/string_helpers.h>
9
10#include "display/intel_display.h"
11#include "display/intel_overlay.h"
12
13#include "gem/i915_gem_context.h"
14
15#include "gt/intel_gt_regs.h"
16
17#include "i915_drv.h"
18#include "i915_file_private.h"
19#include "i915_gpu_error.h"
20#include "i915_irq.h"
21#include "intel_breadcrumbs.h"
22#include "intel_engine_pm.h"
23#include "intel_engine_regs.h"
24#include "intel_gt.h"
25#include "intel_gt_pm.h"
26#include "intel_gt_requests.h"
27#include "intel_mchbar_regs.h"
28#include "intel_pci_config.h"
29#include "intel_reset.h"
30
31#include "uc/intel_guc.h"
32
33#define RESET_MAX_RETRIES 3
34
35/* XXX How to handle concurrent GGTT updates using tiling registers? */
36#define RESET_UNDER_STOP_MACHINE 0
37
38static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
39{
40 intel_uncore_rmw_fw(uncore, reg, 0, set);
41}
42
43static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
44{
45 intel_uncore_rmw_fw(uncore, reg, clr, 0);
46}
47
48static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
49{
50 struct drm_i915_file_private *file_priv = ctx->file_priv;
51 unsigned long prev_hang;
52 unsigned int score;
53
54 if (IS_ERR_OR_NULL(file_priv))
55 return;
56
57 score = 0;
58 if (banned)
59 score = I915_CLIENT_SCORE_CONTEXT_BAN;
60
61 prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
62 if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
63 score += I915_CLIENT_SCORE_HANG_FAST;
64
65 if (score) {
66 atomic_add(score, &file_priv->ban_score);
67
68 drm_dbg(&ctx->i915->drm,
69 "client %s: gained %u ban score, now %u\n",
70 ctx->name, score,
71 atomic_read(&file_priv->ban_score));
72 }
73}
74
75static bool mark_guilty(struct i915_request *rq)
76{
77 struct i915_gem_context *ctx;
78 unsigned long prev_hang;
79 bool banned;
80 int i;
81
82 if (intel_context_is_closed(rq->context))
83 return true;
84
85 rcu_read_lock();
86 ctx = rcu_dereference(rq->context->gem_context);
87 if (ctx && !kref_get_unless_zero(&ctx->ref))
88 ctx = NULL;
89 rcu_read_unlock();
90 if (!ctx)
91 return intel_context_is_banned(rq->context);
92
93 atomic_inc(&ctx->guilty_count);
94
95 /* Cool contexts are too cool to be banned! (Used for reset testing.) */
96 if (!i915_gem_context_is_bannable(ctx)) {
97 banned = false;
98 goto out;
99 }
100
101 drm_notice(&ctx->i915->drm,
102 "%s context reset due to GPU hang\n",
103 ctx->name);
104
105 /* Record the timestamp for the last N hangs */
106 prev_hang = ctx->hang_timestamp[0];
107 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
108 ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
109 ctx->hang_timestamp[i] = jiffies;
110
111 /* If we have hung N+1 times in rapid succession, we ban the context! */
112 banned = !i915_gem_context_is_recoverable(ctx);
113 if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
114 banned = true;
115 if (banned)
116 drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
117 ctx->name, atomic_read(&ctx->guilty_count));
118
119 client_mark_guilty(ctx, banned);
120
121out:
122 i915_gem_context_put(ctx);
123 return banned;
124}
125
126static void mark_innocent(struct i915_request *rq)
127{
128 struct i915_gem_context *ctx;
129
130 rcu_read_lock();
131 ctx = rcu_dereference(rq->context->gem_context);
132 if (ctx)
133 atomic_inc(&ctx->active_count);
134 rcu_read_unlock();
135}
136
137void __i915_request_reset(struct i915_request *rq, bool guilty)
138{
139 bool banned = false;
140
141 RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
142 GEM_BUG_ON(__i915_request_is_complete(rq));
143
144 rcu_read_lock(); /* protect the GEM context */
145 if (guilty) {
146 i915_request_set_error_once(rq, -EIO);
147 __i915_request_skip(rq);
148 banned = mark_guilty(rq);
149 } else {
150 i915_request_set_error_once(rq, -EAGAIN);
151 mark_innocent(rq);
152 }
153 rcu_read_unlock();
154
155 if (banned)
156 intel_context_ban(rq->context, rq);
157}
158
159static bool i915_in_reset(struct pci_dev *pdev)
160{
161 u8 gdrst;
162
163 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
164 return gdrst & GRDOM_RESET_STATUS;
165}
166
167static int i915_do_reset(struct intel_gt *gt,
168 intel_engine_mask_t engine_mask,
169 unsigned int retry)
170{
171 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
172 int err;
173
174 /* Assert reset for at least 20 usec, and wait for acknowledgement. */
175 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
176 udelay(50);
177 err = wait_for_atomic(i915_in_reset(pdev), 50);
178
179 /* Clear the reset request. */
180 pci_write_config_byte(pdev, I915_GDRST, 0);
181 udelay(50);
182 if (!err)
183 err = wait_for_atomic(!i915_in_reset(pdev), 50);
184
185 return err;
186}
187
188static bool g4x_reset_complete(struct pci_dev *pdev)
189{
190 u8 gdrst;
191
192 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
193 return (gdrst & GRDOM_RESET_ENABLE) == 0;
194}
195
196static int g33_do_reset(struct intel_gt *gt,
197 intel_engine_mask_t engine_mask,
198 unsigned int retry)
199{
200 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
201
202 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
203 return wait_for_atomic(g4x_reset_complete(pdev), 50);
204}
205
206static int g4x_do_reset(struct intel_gt *gt,
207 intel_engine_mask_t engine_mask,
208 unsigned int retry)
209{
210 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
211 struct intel_uncore *uncore = gt->uncore;
212 int ret;
213
214 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
215 rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
216 intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
217
218 pci_write_config_byte(pdev, I915_GDRST,
219 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
220 ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
221 if (ret) {
222 GT_TRACE(gt, "Wait for media reset failed\n");
223 goto out;
224 }
225
226 pci_write_config_byte(pdev, I915_GDRST,
227 GRDOM_RENDER | GRDOM_RESET_ENABLE);
228 ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
229 if (ret) {
230 GT_TRACE(gt, "Wait for render reset failed\n");
231 goto out;
232 }
233
234out:
235 pci_write_config_byte(pdev, I915_GDRST, 0);
236
237 rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
238 intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
239
240 return ret;
241}
242
243static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
244 unsigned int retry)
245{
246 struct intel_uncore *uncore = gt->uncore;
247 int ret;
248
249 intel_uncore_write_fw(uncore, ILK_GDSR,
250 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
251 ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
252 ILK_GRDOM_RESET_ENABLE, 0,
253 5000, 0,
254 NULL);
255 if (ret) {
256 GT_TRACE(gt, "Wait for render reset failed\n");
257 goto out;
258 }
259
260 intel_uncore_write_fw(uncore, ILK_GDSR,
261 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
262 ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
263 ILK_GRDOM_RESET_ENABLE, 0,
264 5000, 0,
265 NULL);
266 if (ret) {
267 GT_TRACE(gt, "Wait for media reset failed\n");
268 goto out;
269 }
270
271out:
272 intel_uncore_write_fw(uncore, ILK_GDSR, 0);
273 intel_uncore_posting_read_fw(uncore, ILK_GDSR);
274 return ret;
275}
276
277/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
278static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
279{
280 struct intel_uncore *uncore = gt->uncore;
281 int loops = 2;
282 int err;
283
284 /*
285 * GEN6_GDRST is not in the gt power well, no need to check
286 * for fifo space for the write or forcewake the chip for
287 * the read
288 */
289 do {
290 intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
291
292 /*
293 * Wait for the device to ack the reset requests.
294 *
295 * On some platforms, e.g. Jasperlake, we see that the
296 * engine register state is not cleared until shortly after
297 * GDRST reports completion, causing a failure as we try
298 * to immediately resume while the internal state is still
299 * in flux. If we immediately repeat the reset, the second
300 * reset appears to serialise with the first, and since
301 * it is a no-op, the registers should retain their reset
302 * value. However, there is still a concern that upon
303 * leaving the second reset, the internal engine state
304 * is still in flux and not ready for resuming.
305 */
306 err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
307 hw_domain_mask, 0,
308 2000, 0,
309 NULL);
310 } while (err == 0 && --loops);
311 if (err)
312 GT_TRACE(gt,
313 "Wait for 0x%08x engines reset failed\n",
314 hw_domain_mask);
315
316 /*
317 * As we have observed that the engine state is still volatile
318 * after GDRST is acked, impose a small delay to let everything settle.
319 */
320 udelay(50);
321
322 return err;
323}
324
325static int __gen6_reset_engines(struct intel_gt *gt,
326 intel_engine_mask_t engine_mask,
327 unsigned int retry)
328{
329 struct intel_engine_cs *engine;
330 u32 hw_mask;
331
332 if (engine_mask == ALL_ENGINES) {
333 hw_mask = GEN6_GRDOM_FULL;
334 } else {
335 intel_engine_mask_t tmp;
336
337 hw_mask = 0;
338 for_each_engine_masked(engine, gt, engine_mask, tmp) {
339 hw_mask |= engine->reset_domain;
340 }
341 }
342
343 return gen6_hw_domain_reset(gt, hw_mask);
344}
345
346static int gen6_reset_engines(struct intel_gt *gt,
347 intel_engine_mask_t engine_mask,
348 unsigned int retry)
349{
350 unsigned long flags;
351 int ret;
352
353 spin_lock_irqsave(>->uncore->lock, flags);
354 ret = __gen6_reset_engines(gt, engine_mask, retry);
355 spin_unlock_irqrestore(>->uncore->lock, flags);
356
357 return ret;
358}
359
360static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
361{
362 int vecs_id;
363
364 GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
365
366 vecs_id = _VECS((engine->instance) / 2);
367
368 return engine->gt->engine[vecs_id];
369}
370
371struct sfc_lock_data {
372 i915_reg_t lock_reg;
373 i915_reg_t ack_reg;
374 i915_reg_t usage_reg;
375 u32 lock_bit;
376 u32 ack_bit;
377 u32 usage_bit;
378 u32 reset_bit;
379};
380
381static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
382 struct sfc_lock_data *sfc_lock)
383{
384 switch (engine->class) {
385 default:
386 MISSING_CASE(engine->class);
387 fallthrough;
388 case VIDEO_DECODE_CLASS:
389 sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
390 sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
391
392 sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
393 sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
394
395 sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
396 sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
397 sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
398
399 break;
400 case VIDEO_ENHANCEMENT_CLASS:
401 sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
402 sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
403
404 sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
405 sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
406
407 sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
408 sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
409 sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
410
411 break;
412 }
413}
414
415static int gen11_lock_sfc(struct intel_engine_cs *engine,
416 u32 *reset_mask,
417 u32 *unlock_mask)
418{
419 struct intel_uncore *uncore = engine->uncore;
420 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
421 struct sfc_lock_data sfc_lock;
422 bool lock_obtained, lock_to_other = false;
423 int ret;
424
425 switch (engine->class) {
426 case VIDEO_DECODE_CLASS:
427 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
428 return 0;
429
430 fallthrough;
431 case VIDEO_ENHANCEMENT_CLASS:
432 get_sfc_forced_lock_data(engine, &sfc_lock);
433
434 break;
435 default:
436 return 0;
437 }
438
439 if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
440 struct intel_engine_cs *paired_vecs;
441
442 if (engine->class != VIDEO_DECODE_CLASS ||
443 GRAPHICS_VER(engine->i915) != 12)
444 return 0;
445
446 /*
447 * Wa_14010733141
448 *
449 * If the VCS-MFX isn't using the SFC, we also need to check
450 * whether VCS-HCP is using it. If so, we need to issue a *VE*
451 * forced lock on the VE engine that shares the same SFC.
452 */
453 if (!(intel_uncore_read_fw(uncore,
454 GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
455 GEN12_HCP_SFC_USAGE_BIT))
456 return 0;
457
458 paired_vecs = find_sfc_paired_vecs_engine(engine);
459 get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
460 lock_to_other = true;
461 *unlock_mask |= paired_vecs->mask;
462 } else {
463 *unlock_mask |= engine->mask;
464 }
465
466 /*
467 * If the engine is using an SFC, tell the engine that a software reset
468 * is going to happen. The engine will then try to force lock the SFC.
469 * If SFC ends up being locked to the engine we want to reset, we have
470 * to reset it as well (we will unlock it once the reset sequence is
471 * completed).
472 */
473 rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
474
475 ret = __intel_wait_for_register_fw(uncore,
476 sfc_lock.ack_reg,
477 sfc_lock.ack_bit,
478 sfc_lock.ack_bit,
479 1000, 0, NULL);
480
481 /*
482 * Was the SFC released while we were trying to lock it?
483 *
484 * We should reset both the engine and the SFC if:
485 * - We were locking the SFC to this engine and the lock succeeded
486 * OR
487 * - We were locking the SFC to a different engine (Wa_14010733141)
488 * but the SFC was released before the lock was obtained.
489 *
490 * Otherwise we need only reset the engine by itself and we can
491 * leave the SFC alone.
492 */
493 lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
494 sfc_lock.usage_bit) != 0;
495 if (lock_obtained == lock_to_other)
496 return 0;
497
498 if (ret) {
499 ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
500 return ret;
501 }
502
503 *reset_mask |= sfc_lock.reset_bit;
504 return 0;
505}
506
507static void gen11_unlock_sfc(struct intel_engine_cs *engine)
508{
509 struct intel_uncore *uncore = engine->uncore;
510 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
511 struct sfc_lock_data sfc_lock = {};
512
513 if (engine->class != VIDEO_DECODE_CLASS &&
514 engine->class != VIDEO_ENHANCEMENT_CLASS)
515 return;
516
517 if (engine->class == VIDEO_DECODE_CLASS &&
518 (BIT(engine->instance) & vdbox_sfc_access) == 0)
519 return;
520
521 get_sfc_forced_lock_data(engine, &sfc_lock);
522
523 rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
524}
525
526static int __gen11_reset_engines(struct intel_gt *gt,
527 intel_engine_mask_t engine_mask,
528 unsigned int retry)
529{
530 struct intel_engine_cs *engine;
531 intel_engine_mask_t tmp;
532 u32 reset_mask, unlock_mask = 0;
533 int ret;
534
535 if (engine_mask == ALL_ENGINES) {
536 reset_mask = GEN11_GRDOM_FULL;
537 } else {
538 reset_mask = 0;
539 for_each_engine_masked(engine, gt, engine_mask, tmp) {
540 reset_mask |= engine->reset_domain;
541 ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
542 if (ret)
543 goto sfc_unlock;
544 }
545 }
546
547 ret = gen6_hw_domain_reset(gt, reset_mask);
548
549sfc_unlock:
550 /*
551 * We unlock the SFC based on the lock status and not the result of
552 * gen11_lock_sfc to make sure that we clean properly if something
553 * wrong happened during the lock (e.g. lock acquired after timeout
554 * expiration).
555 *
556 * Due to Wa_14010733141, we may have locked an SFC to an engine that
557 * wasn't being reset. So instead of calling gen11_unlock_sfc()
558 * on engine_mask, we instead call it on the mask of engines that our
559 * gen11_lock_sfc() calls told us actually had locks attempted.
560 */
561 for_each_engine_masked(engine, gt, unlock_mask, tmp)
562 gen11_unlock_sfc(engine);
563
564 return ret;
565}
566
567static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
568{
569 struct intel_uncore *uncore = engine->uncore;
570 const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
571 u32 request, mask, ack;
572 int ret;
573
574 if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
575 return -ETIMEDOUT;
576
577 ack = intel_uncore_read_fw(uncore, reg);
578 if (ack & RESET_CTL_CAT_ERROR) {
579 /*
580 * For catastrophic errors, ready-for-reset sequence
581 * needs to be bypassed: HAS#396813
582 */
583 request = RESET_CTL_CAT_ERROR;
584 mask = RESET_CTL_CAT_ERROR;
585
586 /* Catastrophic errors need to be cleared by HW */
587 ack = 0;
588 } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
589 request = RESET_CTL_REQUEST_RESET;
590 mask = RESET_CTL_READY_TO_RESET;
591 ack = RESET_CTL_READY_TO_RESET;
592 } else {
593 return 0;
594 }
595
596 intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
597 ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
598 700, 0, NULL);
599 if (ret)
600 drm_err(&engine->i915->drm,
601 "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
602 engine->name, request,
603 intel_uncore_read_fw(uncore, reg));
604
605 return ret;
606}
607
608static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
609{
610 intel_uncore_write_fw(engine->uncore,
611 RING_RESET_CTL(engine->mmio_base),
612 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
613}

static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gt->uncore->lock, flags);

	for_each_engine_masked(engine, gt, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we choose to proceed anyway.
		 *
		 * By doing so we risk context corruption and, on some
		 * gens (kbl), a possible system hang if the reset
		 * happens during active batch-buffer execution.
		 *
		 * We would rather accept context corruption than a
		 * failed reset with a wedged driver/GPU. The active
		 * batch-buffer case should be covered by the
		 * stop_engines() performed before the reset.
		 */
	}

	/*
	 * Wa_22011100796:dg2, whenever a full soft reset is required,
	 * first reset all the individual engines, then perform the full
	 * soft reset.
	 *
	 * This is best effort, so ignore any error from the initial reset.
	 */
	if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
		__gen11_reset_engines(gt, gt->info.engine_mask, 0);

	if (GRAPHICS_VER(gt->i915) >= 11)
		ret = __gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = __gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	spin_unlock_irqrestore(&gt->uncore->lock, flags);

	return ret;
}

static int mock_reset(struct intel_gt *gt,
		      intel_engine_mask_t mask,
		      unsigned int retry)
{
	return 0;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (is_mock_gt(gt))
		return mock_reset;
	else if (GRAPHICS_VER(i915) >= 8)
		return gen8_reset_engines;
	else if (GRAPHICS_VER(i915) >= 6)
		return gen6_reset_engines;
	else if (GRAPHICS_VER(i915) >= 5)
		return ilk_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (GRAPHICS_VER(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}
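
/*
 * Example (hypothetical callers, for illustration only): a full-chip reset
 * retries up to RESET_MAX_RETRIES on timeout, while a single-engine reset
 * gets exactly one attempt:
 *
 *	err = __intel_gt_reset(gt, ALL_ENGINES);
 *	err = __intel_gt_reset(engine->gt, engine->mask);
 */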

bool intel_has_gpu_reset(const struct intel_gt *gt)
{
	if (!gt->i915->params.reset)
		return false;

	return intel_get_gpu_reset(gt);
}

bool intel_has_reset_engine(const struct intel_gt *gt)
{
	if (gt->i915->params.reset < 2)
		return false;

	return INTEL_INFO(gt->i915)->has_reset_engine;
}
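
/*
 * Note: the i915.reset module parameter gates both helpers above: 0
 * disables reset entirely, 1 permits only a full-GPU reset, and >= 2
 * additionally permits per-engine resets (hence the "< 2" check).
 */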

int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

/* Ensure the irq handler finishes, and is not run again. */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	if (engine->reset.prepare)
		engine->reset.prepare(engine);
}

static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);

		if (!vma->mmo)
			continue;

		node = &vma->mmo->vma_node;
		vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;

		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	/* For GuC mode, ensure submission is disabled before stopping ring */
	intel_uc_reset_prepare(&gt->uc);

	for_each_engine(engine, gt, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	return awake;
}

static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}

static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	local_bh_disable();
	for_each_engine(engine, gt, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);
	local_bh_enable();

	intel_uc_reset(&gt->uc, ALL_ENGINES);

	intel_ggtt_restore_fences(gt->ggtt);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	if (engine->reset.finish)
		engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}

	intel_uc_reset_finish(&gt->uc);
}

static void nop_submit_request(struct i915_request *request)
{
	RQ_TRACE(request, "-EIO\n");

	request = i915_request_mark_eio(request);
	if (request) {
		i915_request_submit(request);
		intel_engine_signal_breadcrumbs(request->engine);

		i915_request_put(request);
	}
}

static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	GT_TRACE(gt, "start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	local_bh_disable();
	for_each_engine(engine, gt, id)
		if (engine->reset.cancel)
			engine->reset.cancel(engine);
	intel_uc_cancel_requests(&gt->uc);
	local_bh_enable();

	reset_finish(gt, awake);

	GT_TRACE(gt, "end\n");
}

void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	mutex_lock(&gt->reset.mutex);

	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
		for_each_engine(engine, gt, id) {
			if (intel_engine_is_idle(engine))
				continue;

			intel_engine_dump(engine, &p, "%s\n", engine->name);
		}
	}

	__intel_gt_set_wedged(gt);

	mutex_unlock(&gt->reset.mutex);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	bool ok;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	/* Never fully initialised, recovery impossible */
	if (intel_gt_has_unrecoverable_error(gt))
		return false;

	GT_TRACE(gt, "start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since has returned EIO; for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock(&timelines->lock);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
			continue;

		spin_unlock(&timelines->lock);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
		dma_fence_put(fence);

		/* Restart iteration after dropping the lock */
		spin_lock(&timelines->lock);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock(&timelines->lock);

	/* We must reset pending GPU events before restoring our submission */
	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
	if (!ok) {
		/*
		 * Warn CI about the unrecoverable wedged condition.
		 * Time for a reboot.
		 */
		add_taint_for_CI(gt->i915, TAINT_WARN);
		return false;
	}

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GT_TRACE(gt, "end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}

bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}

static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}

static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt, id) {
		ret = intel_engine_resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 * - reset the chip using the reset reg
 * - re-init context state
 * - re-init hardware status page
 * - re-init ring buffer
 * - re-init interrupt state
 * - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));

	/*
	 * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
	 * critical section like gpu reset.
	 */
	gt_revoke(gt);

	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		drm_notice(&gt->i915->drm,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt)) {
		if (gt->i915->params.reset)
			drm_err(&gt->i915->drm, "GPU reset not supported\n");
		else
			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		drm_err(&gt->i915->drm, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = intel_gt_init_hw(gt);
	if (ret) {
		drm_err(&gt->i915->drm,
			"Failed to initialise HW following reset (%d)\n",
			ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(gt->i915, TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}

static int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}

int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (intel_engine_uses_guc(engine))
		return -ENODEV;

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		drm_notice(&engine->i915->drm,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	ret = intel_gt_reset_engine(engine);
	if (ret) {
		/* If we fail here, we expect to fall back to a global reset */
		ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp; we know the
	 * active request and can drop it, then adjust the head to skip the
	 * offending request and resume executing the remaining requests in
	 * the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = intel_engine_resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put_async(engine);
	return ret;
}

/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no drm_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 * - identify the request that caused the hang and drop it
 * - reset engine (which will force the engine to idle)
 * - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	int err;

	local_bh_disable();
	err = __intel_engine_reset_bh(engine, msg);
	local_bh_enable();

	return err;
}
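
/*
 * Example (hypothetical caller, e.g. a selftest): attempt to recover a
 * single hung engine without disturbing the rest of the GT:
 *
 *	err = intel_engine_reset(engine, "live selftest");
 *
 * On failure, callers are expected to escalate to a full GPU reset, as
 * intel_gt_handle_error() below does.
 */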

static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 60 * HZ) {
		intel_display_prepare_reset(gt->i915);

		intel_gt_reset(gt, engine_mask, reason);

		intel_display_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	engine_mask &= gt->info.engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc) &&
	    intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
		local_bh_disable();
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (__intel_engine_reset_bh(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
		local_bh_enable();
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/*
	 * Prevent any other reset-engine attempt. We don't do this for GuC
	 * submission, as the GuC owns the per-engine reset, not the i915.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp) {
			while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
						&gt->reset.flags))
				wait_on_bit(&gt->reset.flags,
					    I915_RESET_ENGINE + engine->id,
					    TASK_UNINTERRUPTIBLE);
		}
	}

	/* Flush everyone using a resource about to be clobbered */
	synchronize_srcu_expedited(&gt->reset.backoff_srcu);

	intel_gt_reset_global(gt, engine_mask, msg);

	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp)
			clear_bit_unlock(I915_RESET_ENGINE + engine->id,
					 &gt->reset.flags);
	}
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
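
/*
 * Example (hypothetical caller, e.g. a hangcheck/heartbeat path): report a
 * hung engine, capture an error state, and let the handler choose between
 * a per-engine and a full-chip reset:
 *
 *	intel_gt_handle_error(engine->gt, engine->mask, I915_ERROR_CAPTURE,
 *			      "%s heartbeat not ticking", engine->name);
 */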

static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry)
{
	might_lock(&gt->reset.backoff_srcu);
	if (retry)
		might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (!retry)
			return -EBUSY;

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return 0;
}

int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
	return _intel_gt_reset_lock(gt, srcu, false);
}

int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu)
{
	return _intel_gt_reset_lock(gt, srcu, true);
}

void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}
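
/*
 * Typical usage (sketch of a hypothetical caller): pin out concurrent
 * resets while touching state that a reset would clobber:
 *
 *	int srcu, err;
 *
 *	err = intel_gt_reset_trylock(gt, &srcu);
 *	if (err)
 *		return err;
 *	... access reset-sensitive hardware state ...
 *	intel_gt_reset_unlock(gt, srcu);
 *
 * intel_gt_reset_lock_interruptible() is the sleeping variant, waiting for
 * any reset backoff to clear instead of returning -EBUSY.
 */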

int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	if (intel_gt_has_unrecoverable_error(gt))
		return -EIO;

	/* Reset still in progress? Maybe we will recover? */
	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

void intel_gt_set_wedged_on_init(struct intel_gt *gt)
{
	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
		     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);

	/* Wedged on init is non-recoverable */
	add_taint_for_CI(gt->i915, TAINT_WARN);
}

void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
{
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
}

void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);

	/*
	 * While undesirable to wait inside the shrinker, complain anyway.
	 *
	 * If we have to wait during shrinking, we guarantee forward progress
	 * by forcing the reset. Therefore during the reset we must not
	 * re-enter the shrinker. By declaring that we take the reset mutex
	 * within the shrinker, we forbid ourselves from performing any
	 * fs-reclaim or taking related locks during reset.
	 */
	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);

	/* no GPU until we are ready! */
	__set_bit(I915_WEDGED, &gt->reset.flags);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}

static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	drm_err(&w->gt->i915->drm,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	intel_gt_set_wedged(w->gt);
}

void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}
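
/*
 * These two helpers are normally paired via the intel_wedge_on_timeout()
 * macro (declared in intel_reset.h), as used above in
 * intel_gt_reset_global(); roughly (a sketch, not the authoritative
 * definition):
 *
 *	struct intel_wedge_me w;
 *
 *	intel_wedge_on_timeout(&w, gt, 60 * HZ) {
 *		... work that must complete before the timeout, or the
 *		... GT is declared wedged by intel_wedge_me()
 *	}
 */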

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"
#endif