// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_irq.h"

#include <linux/sched/clock.h>

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
#include "xe_display.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_mmio.h"

/*
 * Interrupt registers for a unit are always consecutive and ordered
 * ISR, IMR, IIR, IER.
 */
#define IMR(offset)	XE_REG(offset + 0x4)
#define IIR(offset)	XE_REG(offset + 0x8)
#define IER(offset)	XE_REG(offset + 0xc)

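/*
 * Check that an IIR is clear before we start using it; if stale bits are
 * set, warn and clear the register twice, since an IIR can theoretically
 * queue up two events per bit.
 */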
static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg)
{
	u32 val = xe_mmio_read32(mmio, reg);

	if (val == 0)
		return;

	drm_WARN(&gt_to_xe(mmio)->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 reg.addr, val);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
}

/*
 * Unmask and enable the specified interrupts. Does not check current state,
 * so any bits not specified here will become masked and disabled.
 */
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
	struct xe_gt *mmio = tile->primary_gt;

	/*
	 * If we're just enabling an interrupt now, it shouldn't already
	 * be raised in the IIR.
	 */
	assert_iir_is_zero(mmio, IIR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), bits);
	xe_mmio_write32(mmio, IMR(irqregs), ~bits);

	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));
}

/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
	struct xe_gt *mmio = tile->primary_gt;

	xe_mmio_write32(mmio, IMR(irqregs), ~0);
	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
}

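/*
 * Disable the top-level "graphics master" interrupt and return a snapshot
 * of the level indications that were pending at that point.
 */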
static u32 xelp_intr_disable(struct xe_device *xe)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

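/*
 * Acknowledge the GU MISC IIR (which carries GSE/display backlight events)
 * and return the bits that were pending, or 0 if GU_MISC_IRQ wasn't set in
 * the master control value.
 */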
static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);
	u32 iir;

	if (!(master_ctl & GU_MISC_IRQ))
		return 0;

	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
	if (likely(iir))
		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);

	return iir;
}

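/*
 * Re-enable the top-level "graphics master" interrupt, optionally stalling
 * on a posting read so the write has reached the hardware before returning.
 */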
static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
	if (stall)
		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

/* Enable/unmask the HWE interrupts for a specific GT's engines. */
void xe_irq_enable_hwe(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 ccs_mask, bcs_mask;
	u32 irqs, dmask, smask;
	u32 gsc_mask = 0;

	if (xe_device_uc_enabled(xe)) {
		irqs = GT_RENDER_USER_INTERRUPT |
			GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		irqs = GT_RENDER_USER_INTERRUPT |
		       GT_CS_MASTER_ERROR_INTERRUPT |
		       GT_CONTEXT_SWITCH_INTERRUPT |
		       GT_WAIT_SEMAPHORE_INTERRUPT;
	}

	ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
	bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	if (!xe_gt_is_media_type(gt)) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask);
		if (ccs_mask)
			xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask);
		xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask);
		if (bcs_mask & (BIT(1)|BIT(2)))
			xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(3)|BIT(4)))
			xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(5)|BIT(6)))
			xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(7)|BIT(8)))
			xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(0)|BIT(1)))
			xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(2)|BIT(3)))
			xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask);
	}

	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask);
		xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
		xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);

		if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER))
			gsc_mask = irqs;
		else if (HAS_HECI_GSCFI(xe))
			gsc_mask = GSC_IRQ_INTF(1);
		if (gsc_mask) {
			xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask);
			xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask);
		}
	}
}

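/*
 * Fetch the interrupt identity for one bank/bit: select it, spin until the
 * identity register reports valid data (or the ~100us timeout expires) and
 * acknowledge it. Returns 0 if no valid identity could be read.
 */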
static u32
gt_engine_identity(struct xe_device *xe,
		   struct xe_gt *mmio,
		   const unsigned int bank,
		   const unsigned int bit)
{
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&xe->irq.lock);

	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
	} while (!(ident & INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & INTR_DATA_VALID))) {
		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			bank, bit, ident);
		return 0;
	}

	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);

	return ident;
}

#define OTHER_MEDIA_GUC_INSTANCE	16

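/*
 * Handle an "other" class interrupt: route the primary and media GuC
 * instances to the GuC handler and warn once about any unhandled instance.
 */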
static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);

	if (instance != OTHER_GUC_INSTANCE &&
	    instance != OTHER_MEDIA_GUC_INSTANCE) {
		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
			  instance, iir);
	}
}

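/*
 * Engine interrupt identities are decoded from the primary GT's registers;
 * pick the GT that actually owns the engine: on MEDIA_VER >= 13 platforms
 * the video engines and the media GuC/GSC instances live on the media GT.
 */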
static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
				    enum xe_engine_class class,
				    unsigned int instance)
{
	struct xe_device *xe = tile_to_xe(tile);

	if (MEDIA_VER(xe) < 13)
		return tile->primary_gt;

	if (class == XE_ENGINE_CLASS_VIDEO_DECODE ||
	    class == XE_ENGINE_CLASS_VIDEO_ENHANCE)
		return tile->media_gt;

	if (class == XE_ENGINE_CLASS_OTHER &&
	    (instance == OTHER_MEDIA_GUC_INSTANCE || instance == OTHER_GSC_INSTANCE))
		return tile->media_gt;

	return tile->primary_gt;
}

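/*
 * Walk both GT interrupt banks of a tile, decode every pending identity and
 * dispatch it to the owning hardware engine, the GuC, or the HECI GSCFI
 * handler as appropriate.
 */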
static void gt_irq_handler(struct xe_tile *tile,
			   u32 master_ctl, unsigned long *intr_dw,
			   u32 *identity)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_gt *mmio = tile->primary_gt;
	unsigned int bank, bit;
	u16 instance, intr_vec;
	enum xe_engine_class class;
	struct xe_hw_engine *hwe;

	spin_lock(&xe->irq.lock);

	for (bank = 0; bank < 2; bank++) {
		if (!(master_ctl & GT_DW_IRQ(bank)))
			continue;

		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
		for_each_set_bit(bit, intr_dw + bank, 32)
			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);

		for_each_set_bit(bit, intr_dw + bank, 32) {
			struct xe_gt *engine_gt;

			class = INTR_ENGINE_CLASS(identity[bit]);
			instance = INTR_ENGINE_INSTANCE(identity[bit]);
			intr_vec = INTR_ENGINE_INTR(identity[bit]);

			engine_gt = pick_engine_gt(tile, class, instance);

			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
			if (hwe) {
				xe_hw_engine_handle_irq(hwe, intr_vec);
				continue;
			}

			if (class == XE_ENGINE_CLASS_OTHER) {
				/* HECI GSCFI interrupts come from outside of GT */
				if (HAS_HECI_GSCFI(xe) && instance == OTHER_GSC_INSTANCE)
					xe_heci_gsc_irq_handler(xe, intr_vec);
				else
					gt_other_irq_handler(engine_gt, instance, intr_vec);
				continue;
			}
		}
	}

	spin_unlock(&xe->irq.lock);
}

/*
 * Top-level interrupt handler for Xe_LP platforms (which did not have
 * a "master tile" interrupt register).
 */
static irqreturn_t xelp_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	u32 master_ctl, gu_misc_iir;
	unsigned long intr_dw[2];
	u32 identity[32];

	spin_lock(&xe->irq.lock);
	if (!xe->irq.enabled) {
		spin_unlock(&xe->irq.lock);
		return IRQ_NONE;
	}
	spin_unlock(&xe->irq.lock);

	master_ctl = xelp_intr_disable(xe);
	if (!master_ctl) {
		xelp_intr_enable(xe, false);
		return IRQ_NONE;
	}

	gt_irq_handler(tile, master_ctl, intr_dw, identity);

	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);

	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

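/*
 * Disable the "master tile" interrupt, then read and acknowledge the
 * per-tile indications that were pending.
 */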
static u32 dg1_intr_disable(struct xe_device *xe)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);
	u32 val;

	/* First disable interrupts */
	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);

	return val;
}

static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
	if (stall)
		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
}

/*
 * Top-level interrupt handler for Xe_LP+ and beyond. These platforms have
 * a "master tile" interrupt register which must be consulted before the
 * "graphics master" interrupt register.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
	unsigned long intr_dw[2];
	u32 identity[32];
	u8 id;

	/* TODO: This really shouldn't be copied+pasted */

	spin_lock(&xe->irq.lock);
	if (!xe->irq.enabled) {
		spin_unlock(&xe->irq.lock);
		return IRQ_NONE;
	}
	spin_unlock(&xe->irq.lock);

	master_tile_ctl = dg1_intr_disable(xe);
	if (!master_tile_ctl) {
		dg1_intr_enable(xe, false);
		return IRQ_NONE;
	}

	for_each_tile(tile, xe, id) {
		struct xe_gt *mmio = tile->primary_gt;

		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
			continue;

		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);

		/*
		 * We might be in irq handler just when PCIe DPC is initiated
		 * and all MMIO reads will be returned with all 1's. Ignore this
		 * irq as device is inaccessible.
		 */
		if (master_ctl == REG_GENMASK(31, 0)) {
			dev_dbg(tile_to_xe(tile)->drm.dev,
				"Ignore this IRQ as device might be in DPC containment.\n");
			return IRQ_HANDLED;
		}

		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);

		gt_irq_handler(tile, master_ctl, intr_dw, identity);

		/*
		 * Display interrupts (including display backlight operations
		 * that get reported as Gunit GSE) would only be hooked up to
		 * the primary tile.
		 */
		if (id == 0) {
			xe_display_irq_handler(xe, master_ctl);
			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
		}
	}

	dg1_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

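/*
 * Disable and mask all GT-level interrupt sources of a tile, including the
 * engine classes, GSC, GPM/WGBOXPERF and GuC units.
 */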
static void gt_irq_reset(struct xe_tile *tile)
{
	struct xe_gt *mmio = tile->primary_gt;

	u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COMPUTE);
	u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COPY);

	/* Disable RCS, BCS, VCS and VECS class engines. */
	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
	if (ccs_mask)
		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);

	/* Restore masked irqs on RCS, BCS, VCS and VECS engines. */
	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~0);
	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~0);
	if (bcs_mask & (BIT(1)|BIT(2)))
		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (bcs_mask & (BIT(3)|BIT(4)))
		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (bcs_mask & (BIT(5)|BIT(6)))
		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (bcs_mask & (BIT(7)|BIT(8)))
		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(0)|BIT(1)))
		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(2)|BIT(3)))
		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);

	if ((tile->media_gt &&
	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
	    HAS_HECI_GSCFI(tile_to_xe(tile))) {
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
	}

	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK, ~0);
	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GUC_SG_INTR_MASK, ~0);
}

static void xelp_irq_reset(struct xe_tile *tile)
{
	xelp_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset(struct xe_tile *tile)
{
	if (tile->id == 0)
		dg1_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
	struct xe_gt *mmio = tile->primary_gt;

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}

static void xe_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	u8 id;

	for_each_tile(tile, xe, id) {
		if (GRAPHICS_VERx100(xe) >= 1210)
			dg1_irq_reset(tile);
		else
			xelp_irq_reset(tile);
	}

	tile = xe_device_get_root_tile(xe);
	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
	xe_display_irq_reset(xe);

	/*
	 * The tile's top-level status register should be the last one
	 * to be reset to avoid possible bit re-latching from lower
	 * level interrupts.
	 */
	if (GRAPHICS_VERx100(xe) >= 1210) {
		for_each_tile(tile, xe, id)
			dg1_irq_reset_mstr(tile);
	}
}

static void xe_irq_postinstall(struct xe_device *xe)
{
	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));

	/*
	 * ASLE backlight operations are reported via GUnit GSE interrupts
	 * on the root tile.
	 */
	unmask_and_enable(xe_device_get_root_tile(xe),
			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);

	/* Enable top-level interrupts */
	if (GRAPHICS_VERx100(xe) >= 1210)
		dg1_intr_enable(xe, true);
	else
		xelp_intr_enable(xe, true);
}

static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
	if (GRAPHICS_VERx100(xe) >= 1210)
		return dg1_irq_handler;
	else
		return xelp_irq_handler;
}

static void irq_uninstall(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;

	if (!xe->irq.enabled)
		return;

	xe->irq.enabled = false;
	xe_irq_reset(xe);

	irq = pci_irq_vector(pdev, 0);
	free_irq(irq, xe);
}

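/*
 * Install interrupt handling for the device: reset the hardware to a quiet
 * state, allocate a single MSI/MSI-X vector, request the IRQ and unmask the
 * top-level interrupts. Teardown is registered as a DRM managed action
 * (irq_uninstall) so it runs automatically on driver removal.
 */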
int xe_irq_install(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	irq_handler_t irq_handler;
	int err, irq;

	irq_handler = xe_irq_handler(xe);
	if (!irq_handler) {
		drm_err(&xe->drm, "No supported interrupt handler");
		return -EINVAL;
	}

	xe_irq_reset(xe);

	err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (err < 0) {
		drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err);
		return err;
	}

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to request MSI/MSIX IRQ %d\n", err);
		return err;
	}

	xe->irq.enabled = true;

	xe_irq_postinstall(xe);

	err = drmm_add_action_or_reset(&xe->drm, irq_uninstall, xe);
	if (err)
		goto free_irq_handler;

	return 0;

free_irq_handler:
	free_irq(irq, xe);

	return err;
}

void xe_irq_shutdown(struct xe_device *xe)
{
	irq_uninstall(&xe->drm, xe);
}

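/*
 * Quiesce interrupts for suspend: stop accepting new IRQs, flush any handler
 * still in flight, then mask and disable everything at the hardware level.
 */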
void xe_irq_suspend(struct xe_device *xe)
{
	int irq = to_pci_dev(xe->drm.dev)->irq;

	spin_lock_irq(&xe->irq.lock);
	xe->irq.enabled = false; /* no new irqs */
	spin_unlock_irq(&xe->irq.lock);

	synchronize_irq(irq); /* flush irqs */
	xe_irq_reset(xe); /* turn irqs off */
}

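/*
 * Re-arm interrupts on resume: reset, re-enable the top-level interrupts via
 * the postinstall path, then restore the per-GT hardware engine masks.
 */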
void xe_irq_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * lock not needed:
	 * 1. no irq will arrive before the postinstall
	 * 2. display is not yet resumed
	 */
	xe->irq.enabled = true;
	xe_irq_reset(xe);
	xe_irq_postinstall(xe); /* turn irqs on */

	for_each_gt(gt, xe, id)
		xe_irq_enable_hwe(gt);
}