/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_de.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

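/*
 * Note: each top-level IRQ handler in this file passes its eventual
 * return value to pmu_irq_stats() just before re-enabling the rpm
 * wakeref asserts; see valleyview_irq_handler() below for a
 * representative call site.
 */
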
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

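/*
 * Per-platform tables mapping each HPD pin to the hotplug bit(s) used
 * in the corresponding enable/status registers. intel_hpd_init_pins()
 * below picks the right table(s) for the running platform.
 */
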
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (HAS_PCH_DG1(dev_priv))
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid having read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}
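
/*
 * A worked example of the IMR update pattern above (illustrative
 * values, not from bspec): IMR bits are "mask off" bits, so an
 * interrupt is enabled by clearing its bit. With interrupt_mask = 0x6
 * and enabled_irq_mask = 0x2, bits 1 and 2 are under update; bit 1
 * ends up cleared (enabled) and bit 2 ends up set (masked), i.e.
 * new_val = (old & ~0x6) | (~0x2 & 0x6) = (old & ~0x6) | 0x4.
 * The same pattern repeats in the bdw/ibx helpers below.
 */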

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
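
/*
 * A note on the PIPESTAT layout assumed above: the low 16 bits of the
 * register are status bits and the high 16 bits are the matching
 * enable bits, which is why "status_mask << 16" is the starting point.
 * The VLV/CHV sprite flip-done enables are then patched in by hand
 * because their enable bits do not line up with their status bits.
 */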

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}
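
/*
 * In both helpers above the final register write programs the enable
 * bits in the high half and writes back the status bits in the low
 * half; since PIPESTAT status bits are write-one-to-clear, this also
 * acks any stale events for the bits being (un)armed.
 */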

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
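
/*
 * To illustrate the "cook up" above: the hardware frame counter
 * increments at the start of active, while the vblank counter should
 * increment at the start of vblank. If the pixel counter has already
 * reached vbl_start, the vblank belonging to frame N+1 has begun while
 * the frame counter still reads N, so one is added to compensate.
 */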

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}

static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}
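
/*
 * On the arithmetic above: crtc_clock is the pixel clock in kHz, so
 * assuming the timestamp delta is in microseconds, delta * clock /
 * 1000 gives the number of pixels scanned out since the last start of
 * vblank, and dividing by htotal converts that to whole scanlines.
 */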

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, since the timings are driven from
 * the PORT, or there are issues with scanline register updates.
 * This function will use the Framestamp and current timestamp registers
 * to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return 0;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (DISPLAY_VER(dev_priv) == 2)
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already approaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
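
/*
 * A quick example of the sign convention established above, with
 * made-up numbers: for vtotal = 10, vbl_start = 8 and vbl_end = 10, a
 * raw scanline of 9 yields 9 - 10 = -1 (one line before vblank ends),
 * while a raw scanline of 2 yields 2 + 10 - 10 = 2 (two lines into the
 * active portion).
 */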

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
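
/*
 * A minimal usage sketch of the accumulation pattern described above
 * (the trigger/register names below stand in for the per-platform
 * reads done by the real callers in this file):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   ddi_trigger, ddi_hotplug_reg,
 *			   dev_priv->hotplug.pch_hpd,
 *			   icp_ddi_port_hotplug_long_detect);
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   tc_trigger, tc_hotplug_reg,
 *			   dev_priv->hotplug.pch_hpd,
 *			   icp_tc_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */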

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}
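
/*
 * Of the three helpers above, intel_hpd_enabled_irqs() honours the
 * per-pin software state (only pins marked HPD_ENABLED contribute),
 * intel_hpd_hotplug_irqs() collects the bits for every pin that has an
 * encoder regardless of state, and intel_hpd_hotplug_enables() asks a
 * per-platform callback for the register bits instead of using a table.
 */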

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
1424 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1425
1426 switch (pipe) {
1427 default:
1428 case PIPE_A:
1429 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1430 break;
1431 case PIPE_B:
1432 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1433 break;
1434 case PIPE_C:
1435 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1436 break;
1437 }
1438 if (iir & iir_bit)
1439 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1440
1441 if (!status_mask)
1442 continue;
1443
1444 reg = PIPESTAT(pipe);
1445 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1446 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1447
1448 /*
1449 * Clear the PIPE*STAT regs before the IIR
1450 *
1451 * Toggle the enable bits to make sure we get an
1452 * edge in the ISR pipe event bit if we don't clear
1453 * all the enabled status bits. Otherwise the edge
1454 * triggered IIR on i965/g4x wouldn't notice that
1455 * an interrupt is still pending.
1456 */
1457 if (pipe_stats[pipe]) {
1458 intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1459 intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1460 }
1461 }
1462 spin_unlock(&dev_priv->irq_lock);
1463}
1464
1465static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1466 u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1467{
1468 enum pipe pipe;
1469
1470 for_each_pipe(dev_priv, pipe) {
1471 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1472 intel_handle_vblank(dev_priv, pipe);
1473
1474 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1475 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1476
1477 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1478 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1479 }
1480}
1481
1482static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1483 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1484{
1485 bool blc_event = false;
1486 enum pipe pipe;
1487
1488 for_each_pipe(dev_priv, pipe) {
1489 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1490 intel_handle_vblank(dev_priv, pipe);
1491
1492 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1493 blc_event = true;
1494
1495 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1496 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1497
1498 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1499 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1500 }
1501
1502 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1503 intel_opregion_asle_intr(dev_priv);
1504}
1505
1506static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1507 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1508{
1509 bool blc_event = false;
1510 enum pipe pipe;
1511
1512 for_each_pipe(dev_priv, pipe) {
1513 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1514 intel_handle_vblank(dev_priv, pipe);
1515
1516 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1517 blc_event = true;
1518
1519 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1520 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1521
1522 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1523 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1524 }
1525
1526 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1527 intel_opregion_asle_intr(dev_priv);
1528
1529 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1530 gmbus_irq_handler(dev_priv);
1531}
1532
1533static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1534 u32 pipe_stats[I915_MAX_PIPES])
1535{
1536 enum pipe pipe;
1537
1538 for_each_pipe(dev_priv, pipe) {
1539 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1540 intel_handle_vblank(dev_priv, pipe);
1541
1542 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1543 flip_done_handler(dev_priv, pipe);
1544
1545 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1546 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1547
1548 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1549 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1550 }
1551
1552 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1553 gmbus_irq_handler(dev_priv);
1554}
1555
1556static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1557{
1558 u32 hotplug_status = 0, hotplug_status_mask;
1559 int i;
1560
1561 if (IS_G4X(dev_priv) ||
1562 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1563 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1564 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1565 else
1566 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1567
1568 /*
1569 * We absolutely have to clear all the pending interrupt
1570 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1571 * interrupt bit won't have an edge, and the i965/g4x
1572 * edge triggered IIR will not notice that an interrupt
1573 * is still pending. We can't use PORT_HOTPLUG_EN to
1574 * guarantee the edge as the act of toggling the enable
1575 * bits can itself generate a new hotplug interrupt :(
1576 */
1577 for (i = 0; i < 10; i++) {
1578 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1579
1580 if (tmp == 0)
1581 return hotplug_status;
1582
1583 hotplug_status |= tmp;
1584 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1585 }
1586
1587 drm_WARN_ONCE(&dev_priv->drm, 1,
1588 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1589 intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1590
1591 return hotplug_status;
1592}
1593
1594static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1595 u32 hotplug_status)
1596{
1597 u32 pin_mask = 0, long_mask = 0;
1598 u32 hotplug_trigger;
1599
1600 if (IS_G4X(dev_priv) ||
1601 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1602 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1603 else
1604 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1605
1606 if (hotplug_trigger) {
1607 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1608 hotplug_trigger, hotplug_trigger,
1609 dev_priv->hotplug.hpd,
1610 i9xx_port_hotplug_long_detect);
1611
1612 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1613 }
1614
1615 if ((IS_G4X(dev_priv) ||
1616 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1617 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1618 dp_aux_irq_handler(dev_priv);
1619}
1620
1621static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1622{
1623 struct drm_i915_private *dev_priv = arg;
1624 irqreturn_t ret = IRQ_NONE;
1625
1626 if (!intel_irqs_enabled(dev_priv))
1627 return IRQ_NONE;
1628
1629 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1630 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1631
1632 do {
1633 u32 iir, gt_iir, pm_iir;
1634 u32 pipe_stats[I915_MAX_PIPES] = {};
1635 u32 hotplug_status = 0;
1636 u32 ier = 0;
1637
1638 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1639 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1640 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1641
1642 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1643 break;
1644
1645 ret = IRQ_HANDLED;
1646
1647 /*
1648 * Theory on interrupt generation, based on empirical evidence:
1649 *
1650 * x = ((VLV_IIR & VLV_IER) ||
1651 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1652 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1653 *
1654 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1655 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1656 * guarantee the CPU interrupt will be raised again even if we
1657 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1658 * bits this time around.
1659 */
1660 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1661 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1662 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1663
1664 if (gt_iir)
1665 intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1666 if (pm_iir)
1667 intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1668
1669 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1670 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1671
1672 /* Call regardless, as some status bits might not be
1673 * signalled in iir */
1674 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1675
1676 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1677 I915_LPE_PIPE_B_INTERRUPT))
1678 intel_lpe_audio_irq_handler(dev_priv);
1679
1680 /*
1681 * VLV_IIR is single buffered, and reflects the level
1682 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1683 */
1684 if (iir)
1685 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1686
1687 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1688 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1689
1690 if (gt_iir)
1691 gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
1692 if (pm_iir)
1693 gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
1694
1695 if (hotplug_status)
1696 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1697
1698 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1699 } while (0);
1700
1701 pmu_irq_stats(dev_priv, ret);
1702
1703 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1704
1705 return ret;
1706}
1707
1708static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1709{
1710 struct drm_i915_private *dev_priv = arg;
1711 irqreturn_t ret = IRQ_NONE;
1712
1713 if (!intel_irqs_enabled(dev_priv))
1714 return IRQ_NONE;
1715
1716 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1717 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1718
1719 do {
1720 u32 master_ctl, iir;
1721 u32 pipe_stats[I915_MAX_PIPES] = {};
1722 u32 hotplug_status = 0;
1723 u32 ier = 0;
1724
1725 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1726 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1727
1728 if (master_ctl == 0 && iir == 0)
1729 break;
1730
1731 ret = IRQ_HANDLED;
1732
1733 /*
1734 * Theory on interrupt generation, based on empirical evidence:
1735 *
1736 * x = ((VLV_IIR & VLV_IER) ||
1737 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1738 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1739 *
1740 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1741 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1742 * guarantee the CPU interrupt will be raised again even if we
1743 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1744 * bits this time around.
1745 */
1746 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1747 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1748 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1749
1750 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
1751
1752 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1753 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1754
1755 /* Call regardless, as some status bits might not be
1756 * signalled in iir */
1757 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1758
1759 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1760 I915_LPE_PIPE_B_INTERRUPT |
1761 I915_LPE_PIPE_C_INTERRUPT))
1762 intel_lpe_audio_irq_handler(dev_priv);
1763
1764 /*
1765 * VLV_IIR is single buffered, and reflects the level
1766 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1767 */
1768 if (iir)
1769 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1770
1771 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1772 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1773
1774 if (hotplug_status)
1775 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1776
1777 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1778 } while (0);
1779
1780 pmu_irq_stats(dev_priv, ret);
1781
1782 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1783
1784 return ret;
1785}
1786
1787static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1788 u32 hotplug_trigger)
1789{
1790 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1791
1792 /*
1793 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1794 * unless we touch the hotplug register, even if hotplug_trigger is
1795 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1796 * errors.
1797 */
1798 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1799 if (!hotplug_trigger) {
1800 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1801 PORTD_HOTPLUG_STATUS_MASK |
1802 PORTC_HOTPLUG_STATUS_MASK |
1803 PORTB_HOTPLUG_STATUS_MASK;
1804 dig_hotplug_reg &= ~mask;
1805 }
1806
1807 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1808 if (!hotplug_trigger)
1809 return;
1810
1811 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1812 hotplug_trigger, dig_hotplug_reg,
1813 dev_priv->hotplug.pch_hpd,
1814 pch_port_hotplug_long_detect);
1815
1816 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1817}
1818
1819static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1820{
1821 enum pipe pipe;
1822 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1823
1824 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1825
1826 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1827 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1828 SDE_AUDIO_POWER_SHIFT);
1829 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1830 port_name(port));
1831 }
1832
1833 if (pch_iir & SDE_AUX_MASK)
1834 dp_aux_irq_handler(dev_priv);
1835
1836 if (pch_iir & SDE_GMBUS)
1837 gmbus_irq_handler(dev_priv);
1838
1839 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1840 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1841
1842 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1843 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1844
1845 if (pch_iir & SDE_POISON)
1846 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1847
1848 if (pch_iir & SDE_FDI_MASK) {
1849 for_each_pipe(dev_priv, pipe)
1850 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1851 pipe_name(pipe),
1852 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1853 }
1854
1855 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1856 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1857
1858 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1859 drm_dbg(&dev_priv->drm,
1860 "PCH transcoder CRC error interrupt\n");
1861
1862 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1863 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1864
1865 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1866 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1867}
1868
1869static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1870{
1871 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1872 enum pipe pipe;
1873
1874 if (err_int & ERR_INT_POISON)
1875 drm_err(&dev_priv->drm, "Poison interrupt\n");
1876
1877 for_each_pipe(dev_priv, pipe) {
1878 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1879 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1880
1881 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1882 if (IS_IVYBRIDGE(dev_priv))
1883 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1884 else
1885 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1886 }
1887 }
1888
1889 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1890}
1891
1892static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1893{
1894 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1895 enum pipe pipe;
1896
1897 if (serr_int & SERR_INT_POISON)
1898 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1899
1900 for_each_pipe(dev_priv, pipe)
1901 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1902 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1903
1904 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1905}
1906
1907static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1908{
1909 enum pipe pipe;
1910 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1911
1912 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1913
1914 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1915 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1916 SDE_AUDIO_POWER_SHIFT_CPT);
1917 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1918 port_name(port));
1919 }
1920
1921 if (pch_iir & SDE_AUX_MASK_CPT)
1922 dp_aux_irq_handler(dev_priv);
1923
1924 if (pch_iir & SDE_GMBUS_CPT)
1925 gmbus_irq_handler(dev_priv);
1926
1927 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1928 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1929
1930 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1931 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1932
1933 if (pch_iir & SDE_FDI_MASK_CPT) {
1934 for_each_pipe(dev_priv, pipe)
1935 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1936 pipe_name(pipe),
1937 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1938 }
1939
1940 if (pch_iir & SDE_ERROR_CPT)
1941 cpt_serr_int_handler(dev_priv);
1942}
1943
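/*
 * ICP+ south display interrupts: ack the DDI and Type-C hotplug
 * trigger registers, translate the triggers into HPD pin masks, and
 * handle GMBUS separately.
 */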
1944static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1945{
1946 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1947 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1948 u32 pin_mask = 0, long_mask = 0;
1949
1950 if (ddi_hotplug_trigger) {
1951 u32 dig_hotplug_reg;
1952
1953 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
1954 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1955
1956 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1957 ddi_hotplug_trigger, dig_hotplug_reg,
1958 dev_priv->hotplug.pch_hpd,
1959 icp_ddi_port_hotplug_long_detect);
1960 }
1961
1962 if (tc_hotplug_trigger) {
1963 u32 dig_hotplug_reg;
1964
1965 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
1966 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
1967
1968 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1969 tc_hotplug_trigger, dig_hotplug_reg,
1970 dev_priv->hotplug.pch_hpd,
1971 icp_tc_port_hotplug_long_detect);
1972 }
1973
1974 if (pin_mask)
1975 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1976
1977 if (pch_iir & SDE_GMBUS_ICP)
1978 gmbus_irq_handler(dev_priv);
1979}
1980
1981static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1982{
1983 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1984 ~SDE_PORTE_HOTPLUG_SPT;
1985 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1986 u32 pin_mask = 0, long_mask = 0;
1987
1988 if (hotplug_trigger) {
1989 u32 dig_hotplug_reg;
1990
1991 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1992 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1993
1994 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1995 hotplug_trigger, dig_hotplug_reg,
1996 dev_priv->hotplug.pch_hpd,
1997 spt_port_hotplug_long_detect);
1998 }
1999
2000 if (hotplug2_trigger) {
2001 u32 dig_hotplug_reg;
2002
2003 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
2004 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2005
2006 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2007 hotplug2_trigger, dig_hotplug_reg,
2008 dev_priv->hotplug.pch_hpd,
2009 spt_port_hotplug2_long_detect);
2010 }
2011
2012 if (pin_mask)
2013 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2014
2015 if (pch_iir & SDE_GMBUS_CPT)
2016 gmbus_irq_handler(dev_priv);
2017}
2018
2019static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2020 u32 hotplug_trigger)
2021{
2022 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2023
2024 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
2025 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2026
2027 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2028 hotplug_trigger, dig_hotplug_reg,
2029 dev_priv->hotplug.hpd,
2030 ilk_port_hotplug_long_detect);
2031
2032 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2033}
2034
2035static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2036 u32 de_iir)
2037{
2038 enum pipe pipe;
2039 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2040
2041 if (hotplug_trigger)
2042 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2043
2044 if (de_iir & DE_AUX_CHANNEL_A)
2045 dp_aux_irq_handler(dev_priv);
2046
2047 if (de_iir & DE_GSE)
2048 intel_opregion_asle_intr(dev_priv);
2049
2050 if (de_iir & DE_POISON)
2051 drm_err(&dev_priv->drm, "Poison interrupt\n");
2052
2053 for_each_pipe(dev_priv, pipe) {
2054 if (de_iir & DE_PIPE_VBLANK(pipe))
2055 intel_handle_vblank(dev_priv, pipe);
2056
2057 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2058 flip_done_handler(dev_priv, pipe);
2059
2060 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2061 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2062
2063 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2064 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2065 }
2066
2067 /* check event from PCH */
2068 if (de_iir & DE_PCH_EVENT) {
2069 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2070
2071 if (HAS_PCH_CPT(dev_priv))
2072 cpt_irq_handler(dev_priv, pch_iir);
2073 else
2074 ibx_irq_handler(dev_priv, pch_iir);
2075
2076 /* should clear PCH hotplug event before clearing the CPU irq */
2077 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2078 }
2079
2080 if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
2081 gen5_rps_irq_handler(&dev_priv->gt.rps);
2082}
2083
2084static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2085 u32 de_iir)
2086{
2087 enum pipe pipe;
2088 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2089
2090 if (hotplug_trigger)
2091 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2092
2093 if (de_iir & DE_ERR_INT_IVB)
2094 ivb_err_int_handler(dev_priv);
2095
2096 if (de_iir & DE_EDP_PSR_INT_HSW) {
2097 struct intel_encoder *encoder;
2098
2099 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2100 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2101
2102 u32 psr_iir = intel_uncore_read(&dev_priv->uncore,
2103 EDP_PSR_IIR);
2104
2105 intel_psr_irq_handler(intel_dp, psr_iir);
2106 intel_uncore_write(&dev_priv->uncore,
2107 EDP_PSR_IIR, psr_iir);
2108 break;
2109 }
2110 }
2111
2112 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2113 dp_aux_irq_handler(dev_priv);
2114
2115 if (de_iir & DE_GSE_IVB)
2116 intel_opregion_asle_intr(dev_priv);
2117
2118 for_each_pipe(dev_priv, pipe) {
2119 if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2120 intel_handle_vblank(dev_priv, pipe);
2121
2122 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2123 flip_done_handler(dev_priv, pipe);
2124 }
2125
2126 /* check event from PCH */
2127 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2128 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2129
2130 cpt_irq_handler(dev_priv, pch_iir);
2131
2132 /* clear PCH hotplug event before clearing the CPU irq */
2133 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2134 }
2135}
2136
2137/*
2138 * To handle irqs with the minimum potential races with fresh interrupts, we:
2139 * 1 - Disable Master Interrupt Control.
2140 * 2 - Find the source(s) of the interrupt.
2141 * 3 - Clear the Interrupt Identity bits (IIR).
2142 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2143 * 5 - Re-enable Master Interrupt Control.
2144 */
2145static irqreturn_t ilk_irq_handler(int irq, void *arg)
2146{
2147 struct drm_i915_private *i915 = arg;
2148 void __iomem * const regs = i915->uncore.regs;
2149 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2150 irqreturn_t ret = IRQ_NONE;
2151
2152 if (unlikely(!intel_irqs_enabled(i915)))
2153 return IRQ_NONE;
2154
2155 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2156 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2157
2158 /* disable master interrupt before clearing iir */
2159 de_ier = raw_reg_read(regs, DEIER);
2160 raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2161
2162 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2163 * interrupts will be stored on its back queue, and then we'll be
2164 * able to process them after we restore SDEIER (as soon as we restore
2165 * it, we'll get an interrupt if SDEIIR still has something to process
2166 * due to its back queue). */
2167 if (!HAS_PCH_NOP(i915)) {
2168 sde_ier = raw_reg_read(regs, SDEIER);
2169 raw_reg_write(regs, SDEIER, 0);
2170 }
2171
2172 /* Find, clear, then process each source of interrupt */
2173
2174 gt_iir = raw_reg_read(regs, GTIIR);
2175 if (gt_iir) {
2176 raw_reg_write(regs, GTIIR, gt_iir);
2177 if (GRAPHICS_VER(i915) >= 6)
2178 gen6_gt_irq_handler(&i915->gt, gt_iir);
2179 else
2180 gen5_gt_irq_handler(&i915->gt, gt_iir);
2181 ret = IRQ_HANDLED;
2182 }
2183
2184 de_iir = raw_reg_read(regs, DEIIR);
2185 if (de_iir) {
2186 raw_reg_write(regs, DEIIR, de_iir);
2187 if (DISPLAY_VER(i915) >= 7)
2188 ivb_display_irq_handler(i915, de_iir);
2189 else
2190 ilk_display_irq_handler(i915, de_iir);
2191 ret = IRQ_HANDLED;
2192 }
2193
2194 if (GRAPHICS_VER(i915) >= 6) {
2195 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2196 if (pm_iir) {
2197 raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2198 gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
2199 ret = IRQ_HANDLED;
2200 }
2201 }
2202
2203 raw_reg_write(regs, DEIER, de_ier);
2204 if (sde_ier)
2205 raw_reg_write(regs, SDEIER, sde_ier);
2206
2207 pmu_irq_stats(i915, ret);
2208
2209 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2210 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2211
2212 return ret;
2213}
2214
2215static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2216 u32 hotplug_trigger)
2217{
2218 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2219
2220 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2221 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2222
2223 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2224 hotplug_trigger, dig_hotplug_reg,
2225 dev_priv->hotplug.hpd,
2226 bxt_port_hotplug_long_detect);
2227
2228 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2229}
2230
2231static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2232{
2233 u32 pin_mask = 0, long_mask = 0;
2234 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2235 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2236
2237 if (trigger_tc) {
2238 u32 dig_hotplug_reg;
2239
2240 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
2241 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2242
2243 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2244 trigger_tc, dig_hotplug_reg,
2245 dev_priv->hotplug.hpd,
2246 gen11_port_hotplug_long_detect);
2247 }
2248
2249 if (trigger_tbt) {
2250 u32 dig_hotplug_reg;
2251
2252 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
2253 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2254
2255 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2256 trigger_tbt, dig_hotplug_reg,
2257 dev_priv->hotplug.hpd,
2258 gen11_port_hotplug_long_detect);
2259 }
2260
2261 if (pin_mask)
2262 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2263 else
2264 drm_err(&dev_priv->drm,
2265 "Unexpected DE HPD interrupt 0x%08x\n", iir);
2266}
2267
2268static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2269{
2270 u32 mask;
2271
2272 if (DISPLAY_VER(dev_priv) >= 13)
2273 return TGL_DE_PORT_AUX_DDIA |
2274 TGL_DE_PORT_AUX_DDIB |
2275 TGL_DE_PORT_AUX_DDIC |
2276 XELPD_DE_PORT_AUX_DDID |
2277 XELPD_DE_PORT_AUX_DDIE |
2278 TGL_DE_PORT_AUX_USBC1 |
2279 TGL_DE_PORT_AUX_USBC2 |
2280 TGL_DE_PORT_AUX_USBC3 |
2281 TGL_DE_PORT_AUX_USBC4;
2282 else if (DISPLAY_VER(dev_priv) >= 12)
2283 return TGL_DE_PORT_AUX_DDIA |
2284 TGL_DE_PORT_AUX_DDIB |
2285 TGL_DE_PORT_AUX_DDIC |
2286 TGL_DE_PORT_AUX_USBC1 |
2287 TGL_DE_PORT_AUX_USBC2 |
2288 TGL_DE_PORT_AUX_USBC3 |
2289 TGL_DE_PORT_AUX_USBC4 |
2290 TGL_DE_PORT_AUX_USBC5 |
2291 TGL_DE_PORT_AUX_USBC6;
2292
2294 mask = GEN8_AUX_CHANNEL_A;
2295 if (DISPLAY_VER(dev_priv) >= 9)
2296 mask |= GEN9_AUX_CHANNEL_B |
2297 GEN9_AUX_CHANNEL_C |
2298 GEN9_AUX_CHANNEL_D;
2299
2300 if (IS_CNL_WITH_PORT_F(dev_priv) || DISPLAY_VER(dev_priv) == 11)
2301 mask |= CNL_AUX_CHANNEL_F;
2302
2303 if (DISPLAY_VER(dev_priv) == 11)
2304 mask |= ICL_AUX_CHANNEL_E;
2305
2306 return mask;
2307}
2308
2309static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2310{
2311 if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2312 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2313 else if (DISPLAY_VER(dev_priv) >= 11)
2314 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2315 else if (DISPLAY_VER(dev_priv) >= 9)
2316 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2317 else
2318 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2319}
2320
2321static void
2322gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2323{
2324 bool found = false;
2325
2326 if (iir & GEN8_DE_MISC_GSE) {
2327 intel_opregion_asle_intr(dev_priv);
2328 found = true;
2329 }
2330
2331 if (iir & GEN8_DE_EDP_PSR) {
2332 struct intel_encoder *encoder;
2333 u32 psr_iir;
2334 i915_reg_t iir_reg;
2335
2336 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2337 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2338
2339 if (DISPLAY_VER(dev_priv) >= 12)
2340 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2341 else
2342 iir_reg = EDP_PSR_IIR;
2343
2344 psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
2345 intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
2346
2347 if (psr_iir)
2348 found = true;
2349
2350 intel_psr_irq_handler(intel_dp, psr_iir);
2351
2352 /* prior to GEN12 there is only one EDP PSR */
2353 if (DISPLAY_VER(dev_priv) < 12)
2354 break;
2355 }
2356 }
2357
2358 if (!found)
2359 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2360}
2361
2362static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2363 u32 te_trigger)
2364{
2365 enum pipe pipe = INVALID_PIPE;
2366 enum transcoder dsi_trans;
2367 enum port port;
2368 u32 val, tmp;
2369
2370 /*
2371 * In case of dual link, TE comes from DSI_1;
2372 * this is to check whether dual link is enabled.
2373 */
2374 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2375 val &= PORT_SYNC_MODE_ENABLE;
2376
2377 /*
2378 * if dual link is enabled, then read DSI_0
2379 * transcoder registers
2380 */
2381 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2382 PORT_A : PORT_B;
2383 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2384
2385 /* Check if DSI configured in command mode */
2386 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2387 val = val & OP_MODE_MASK;
2388
2389 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2390 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2391 return;
2392 }
2393
2394 /* Get PIPE for handling VBLANK event */
2395 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2396 switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2397 case TRANS_DDI_EDP_INPUT_A_ON:
2398 pipe = PIPE_A;
2399 break;
2400 case TRANS_DDI_EDP_INPUT_B_ONOFF:
2401 pipe = PIPE_B;
2402 break;
2403 case TRANS_DDI_EDP_INPUT_C_ONOFF:
2404 pipe = PIPE_C;
2405 break;
2406 default:
2407 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2408 return;
2409 }
2410
2411 intel_handle_vblank(dev_priv, pipe);
2412
2413 /* clear TE in dsi IIR */
2414 port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2415 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2416 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2417}
2418
2419static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2420{
2421 if (DISPLAY_VER(i915) >= 9)
2422 return GEN9_PIPE_PLANE1_FLIP_DONE;
2423 else
2424 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2425}
2426
2427u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2428{
2429 u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2430
2431 if (DISPLAY_VER(dev_priv) >= 13)
2432 mask |= XELPD_PIPE_SOFT_UNDERRUN |
2433 XELPD_PIPE_HARD_UNDERRUN;
2434
2435 return mask;
2436}
2437
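/*
 * Service every display engine interrupt category flagged in
 * master_ctl (misc, HPD, port, per-pipe, PCH), acking each IIR
 * before processing it.
 */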
2438static irqreturn_t
2439gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2440{
2441 irqreturn_t ret = IRQ_NONE;
2442 u32 iir;
2443 enum pipe pipe;
2444
2445 drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2446
2447 if (master_ctl & GEN8_DE_MISC_IRQ) {
2448 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2449 if (iir) {
2450 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2451 ret = IRQ_HANDLED;
2452 gen8_de_misc_irq_handler(dev_priv, iir);
2453 } else {
2454 drm_err(&dev_priv->drm,
2455 "The master control interrupt lied (DE MISC)!\n");
2456 }
2457 }
2458
2459 if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2460 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2461 if (iir) {
2462 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2463 ret = IRQ_HANDLED;
2464 gen11_hpd_irq_handler(dev_priv, iir);
2465 } else {
2466 drm_err(&dev_priv->drm,
2467 "The master control interrupt lied, (DE HPD)!\n");
2468 }
2469 }
2470
2471 if (master_ctl & GEN8_DE_PORT_IRQ) {
2472 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2473 if (iir) {
2474 bool found = false;
2475
2476 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2477 ret = IRQ_HANDLED;
2478
2479 if (iir & gen8_de_port_aux_mask(dev_priv)) {
2480 dp_aux_irq_handler(dev_priv);
2481 found = true;
2482 }
2483
2484 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2485 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2486
2487 if (hotplug_trigger) {
2488 bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2489 found = true;
2490 }
2491 } else if (IS_BROADWELL(dev_priv)) {
2492 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2493
2494 if (hotplug_trigger) {
2495 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2496 found = true;
2497 }
2498 }
2499
2500 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2501 (iir & BXT_DE_PORT_GMBUS)) {
2502 gmbus_irq_handler(dev_priv);
2503 found = true;
2504 }
2505
2506 if (DISPLAY_VER(dev_priv) >= 11) {
2507 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2508
2509 if (te_trigger) {
2510 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2511 found = true;
2512 }
2513 }
2514
2515 if (!found)
2516 drm_err(&dev_priv->drm,
2517 "Unexpected DE Port interrupt\n");
2518 } else
2520 drm_err(&dev_priv->drm,
2521 "The master control interrupt lied (DE PORT)!\n");
2522 }
2523
2524 for_each_pipe(dev_priv, pipe) {
2525 u32 fault_errors;
2526
2527 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2528 continue;
2529
2530 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2531 if (!iir) {
2532 drm_err(&dev_priv->drm,
2533 "The master control interrupt lied (DE PIPE)!\n");
2534 continue;
2535 }
2536
2537 ret = IRQ_HANDLED;
2538 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2539
2540 if (iir & GEN8_PIPE_VBLANK)
2541 intel_handle_vblank(dev_priv, pipe);
2542
2543 if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2544 flip_done_handler(dev_priv, pipe);
2545
2546 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2547 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2548
2549 if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2550 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2551
2552 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2553 if (fault_errors)
2554 drm_err(&dev_priv->drm,
2555 "Fault errors on pipe %c: 0x%08x\n",
2556 pipe_name(pipe),
2557 fault_errors);
2558 }
2559
2560 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2561 master_ctl & GEN8_DE_PCH_IRQ) {
2562 /*
2563 * FIXME(BDW): Assume for now that the new interrupt handling
2564 * scheme also closed the SDE interrupt handling race we've seen
2565 * on older pch-split platforms. But this needs testing.
2566 */
2567 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2568 if (iir) {
2569 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2570 ret = IRQ_HANDLED;
2571
2572 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2573 icp_irq_handler(dev_priv, iir);
2574 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2575 spt_irq_handler(dev_priv, iir);
2576 else
2577 cpt_irq_handler(dev_priv, iir);
2578 } else {
2579 /*
2580 * Like on previous PCH there seems to be something
2581 * fishy going on with forwarding PCH interrupts.
2582 */
2583 drm_dbg(&dev_priv->drm,
2584 "The master control interrupt lied (SDE)!\n");
2585 }
2586 }
2587
2588 return ret;
2589}
2590
2591static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2592{
2593 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2594
2595 /*
2596 * Now with master disabled, get a sample of level indications
2597 * for this interrupt. Indications will be cleared on related acks.
2598 * New indications can and will light up during processing,
2599 * and will generate a new interrupt after enabling master.
2600 */
2601 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2602}
2603
2604static inline void gen8_master_intr_enable(void __iomem * const regs)
2605{
2606 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2607}
2608
2609static irqreturn_t gen8_irq_handler(int irq, void *arg)
2610{
2611 struct drm_i915_private *dev_priv = arg;
2612 void __iomem * const regs = dev_priv->uncore.regs;
2613 u32 master_ctl;
2614
2615 if (!intel_irqs_enabled(dev_priv))
2616 return IRQ_NONE;
2617
2618 master_ctl = gen8_master_intr_disable(regs);
2619 if (!master_ctl) {
2620 gen8_master_intr_enable(regs);
2621 return IRQ_NONE;
2622 }
2623
2624 /* Find, queue (onto bottom-halves), then clear each source */
2625 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
2626
2627 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2628 if (master_ctl & ~GEN8_GT_IRQS) {
2629 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2630 gen8_de_irq_handler(dev_priv, master_ctl);
2631 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2632 }
2633
2634 gen8_master_intr_enable(regs);
2635
2636 pmu_irq_stats(dev_priv, IRQ_HANDLED);
2637
2638 return IRQ_HANDLED;
2639}
2640
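/* Ack and return any GU MISC IIR bits flagged in the master control. */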
2641static u32
2642gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
2643{
2644 void __iomem * const regs = gt->uncore->regs;
2645 u32 iir;
2646
2647 if (!(master_ctl & GEN11_GU_MISC_IRQ))
2648 return 0;
2649
2650 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2651 if (likely(iir))
2652 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2653
2654 return iir;
2655}
2656
2657static void
2658gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2659{
2660 if (iir & GEN11_GU_MISC_GSE)
2661 intel_opregion_asle_intr(gt->i915);
2662}
2663
2664static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2665{
2666 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2667
2668 /*
2669 * Now with master disabled, get a sample of level indications
2670 * for this interrupt. Indications will be cleared on related acks.
2671 * New indications can and will light up during processing,
2672 * and will generate a new interrupt after enabling master.
2673 */
2674 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2675}
2676
2677static inline void gen11_master_intr_enable(void __iomem * const regs)
2678{
2679 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2680}
2681
2682static void
2683gen11_display_irq_handler(struct drm_i915_private *i915)
2684{
2685 void __iomem * const regs = i915->uncore.regs;
2686 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2687
2688 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2689 /*
2690 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2691 * for the display related bits.
2692 */
2693 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2694 gen8_de_irq_handler(i915, disp_ctl);
2695 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2696 GEN11_DISPLAY_IRQ_ENABLE);
2697
2698 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2699}
2700
2701static __always_inline irqreturn_t
2702__gen11_irq_handler(struct drm_i915_private * const i915,
2703 u32 (*intr_disable)(void __iomem * const regs),
2704 void (*intr_enable)(void __iomem * const regs))
2705{
2706 void __iomem * const regs = i915->uncore.regs;
2707 struct intel_gt *gt = &i915->gt;
2708 u32 master_ctl;
2709 u32 gu_misc_iir;
2710
2711 if (!intel_irqs_enabled(i915))
2712 return IRQ_NONE;
2713
2714 master_ctl = intr_disable(regs);
2715 if (!master_ctl) {
2716 intr_enable(regs);
2717 return IRQ_NONE;
2718 }
2719
2720 /* Find, queue (onto bottom-halves), then clear each source */
2721 gen11_gt_irq_handler(gt, master_ctl);
2722
2723 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2724 if (master_ctl & GEN11_DISPLAY_IRQ)
2725 gen11_display_irq_handler(i915);
2726
2727 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2728
2729 intr_enable(regs);
2730
2731 gen11_gu_misc_irq_handler(gt, gu_misc_iir);
2732
2733 pmu_irq_stats(i915, IRQ_HANDLED);
2734
2735 return IRQ_HANDLED;
2736}
2737
2738static irqreturn_t gen11_irq_handler(int irq, void *arg)
2739{
2740 return __gen11_irq_handler(arg,
2741 gen11_master_intr_disable,
2742 gen11_master_intr_enable);
2743}
2744
2745static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
2746{
2747 u32 val;
2748
2749 /* First disable interrupts */
2750 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
2751
2752 /* Get the indication levels and ack the master unit */
2753 val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
2754 if (unlikely(!val))
2755 return 0;
2756
2757 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
2758
2759 /*
2760 * Now with master disabled, get a sample of level indications
2761 * for this interrupt and ack them right away; we keep GEN11_MASTER_IRQ
2762 * out, as this bit no longer exists on DG1.
2763 */
2764 val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
2765 if (unlikely(!val))
2766 return 0;
2767
2768 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
2769
2770 return val;
2771}
2772
2773static inline void dg1_master_intr_enable(void __iomem * const regs)
2774{
2775 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
2776}
2777
2778static irqreturn_t dg1_irq_handler(int irq, void *arg)
2779{
2780 return __gen11_irq_handler(arg,
2781 dg1_master_intr_disable_and_ack,
2782 dg1_master_intr_enable);
2783}
2784
2785/* Called from drm generic code, passed 'crtc' which
2786 * we use as a pipe index
2787 */
2788int i8xx_enable_vblank(struct drm_crtc *crtc)
2789{
2790 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2791 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2792 unsigned long irqflags;
2793
2794 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2795 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2796 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2797
2798 return 0;
2799}
2800
2801int i915gm_enable_vblank(struct drm_crtc *crtc)
2802{
2803 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2804
2805 /*
2806 * Vblank interrupts fail to wake the device up from C2+.
2807 * Disabling render clock gating during C-states avoids
2808 * the problem. There is a small power cost so we do this
2809 * only when vblank interrupts are actually enabled.
2810 */
2811 if (dev_priv->vblank_enabled++ == 0)
2812 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2813
2814 return i8xx_enable_vblank(crtc);
2815}
2816
2817int i965_enable_vblank(struct drm_crtc *crtc)
2818{
2819 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2820 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2821 unsigned long irqflags;
2822
2823 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2824 i915_enable_pipestat(dev_priv, pipe,
2825 PIPE_START_VBLANK_INTERRUPT_STATUS);
2826 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2827
2828 return 0;
2829}
2830
2831int ilk_enable_vblank(struct drm_crtc *crtc)
2832{
2833 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2834 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2835 unsigned long irqflags;
2836 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2837 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2838
2839 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2840 ilk_enable_display_irq(dev_priv, bit);
2841 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2842
2843 /* Even if there is no DMC, frame counter can get stuck when
2844 * PSR is active as no frames are generated.
2845 */
2846 if (HAS_PSR(dev_priv))
2847 drm_crtc_vblank_restore(crtc);
2848
2849 return 0;
2850}
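/*
 * Command mode DSI panels signal vblank via the panel's TE event, so
 * enable/disable TE in the DSI interrupt mask instead of the pipe
 * vblank interrupt. Returns true if TE handling applies to this crtc.
 */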
2851
2852static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2853 bool enable)
2854{
2855 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2856 enum port port;
2857 u32 tmp;
2858
2859 if (!(intel_crtc->mode_flags &
2860 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2861 return false;
2862
2863 /* for dual link cases we consider TE from slave */
2864 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2865 port = PORT_B;
2866 else
2867 port = PORT_A;
2868
2869 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
2870 if (enable)
2871 tmp &= ~DSI_TE_EVENT;
2872 else
2873 tmp |= DSI_TE_EVENT;
2874
2875 intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
2876
2877 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2878 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2879
2880 return true;
2881}
2882
2883int bdw_enable_vblank(struct drm_crtc *crtc)
2884{
2885 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2886 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2887 enum pipe pipe = intel_crtc->pipe;
2888 unsigned long irqflags;
2889
2890 if (gen11_dsi_configure_te(intel_crtc, true))
2891 return 0;
2892
2893 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2894 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2895 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2896
2897 /* Even if there is no DMC, frame counter can get stuck when
2898 * PSR is active as no frames are generated, so check only for PSR.
2899 */
2900 if (HAS_PSR(dev_priv))
2901 drm_crtc_vblank_restore(crtc);
2902
2903 return 0;
2904}
2905
2906/* Called from drm generic code, passed 'crtc' which
2907 * we use as a pipe index
2908 */
2909void i8xx_disable_vblank(struct drm_crtc *crtc)
2910{
2911 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2912 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2913 unsigned long irqflags;
2914
2915 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2916 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2917 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2918}
2919
2920void i915gm_disable_vblank(struct drm_crtc *crtc)
2921{
2922 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2923
2924 i8xx_disable_vblank(crtc);
2925
2926 if (--dev_priv->vblank_enabled == 0)
2927 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2928}
2929
2930void i965_disable_vblank(struct drm_crtc *crtc)
2931{
2932 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2933 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2934 unsigned long irqflags;
2935
2936 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2937 i915_disable_pipestat(dev_priv, pipe,
2938 PIPE_START_VBLANK_INTERRUPT_STATUS);
2939 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2940}
2941
2942void ilk_disable_vblank(struct drm_crtc *crtc)
2943{
2944 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2945 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2946 unsigned long irqflags;
2947 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2948 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2949
2950 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2951 ilk_disable_display_irq(dev_priv, bit);
2952 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2953}
2954
2955void bdw_disable_vblank(struct drm_crtc *crtc)
2956{
2957 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2958 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2959 enum pipe pipe = intel_crtc->pipe;
2960 unsigned long irqflags;
2961
2962 if (gen11_dsi_configure_te(intel_crtc, false))
2963 return;
2964
2965 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2966 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2967 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2968}
2969
2970static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2971{
2972 struct intel_uncore *uncore = &dev_priv->uncore;
2973
2974 if (HAS_PCH_NOP(dev_priv))
2975 return;
2976
2977 GEN3_IRQ_RESET(uncore, SDE);
2978
2979 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2980 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2981}
2982
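/*
 * Clear out all stale display interrupt state: DPINVGTT status,
 * hotplug status, pipestats, and finally the VLV display
 * IMR/IER/IIR themselves.
 */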
2983static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2984{
2985 struct intel_uncore *uncore = &dev_priv->uncore;
2986
2987 if (IS_CHERRYVIEW(dev_priv))
2988 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2989 else
2990 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
2991
2992 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2993 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
2994
2995 i9xx_pipestat_irq_reset(dev_priv);
2996
2997 GEN3_IRQ_RESET(uncore, VLV_);
2998 dev_priv->irq_mask = ~0u;
2999}
3000
3001static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3002{
3003 struct intel_uncore *uncore = &dev_priv->uncore;
3005 u32 pipestat_mask;
3006 u32 enable_mask;
3007 enum pipe pipe;
3008
3009 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3010
3011 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3012 for_each_pipe(dev_priv, pipe)
3013 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3014
3015 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3016 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3017 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3018 I915_LPE_PIPE_A_INTERRUPT |
3019 I915_LPE_PIPE_B_INTERRUPT;
3020
3021 if (IS_CHERRYVIEW(dev_priv))
3022 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3023 I915_LPE_PIPE_C_INTERRUPT;
3024
3025 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
3026
3027 dev_priv->irq_mask = ~enable_mask;
3028
3029 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3030}
3031
3032/* drm_dma.h hooks */
3034static void ilk_irq_reset(struct drm_i915_private *dev_priv)
3035{
3036 struct intel_uncore *uncore = &dev_priv->uncore;
3037
3038 GEN3_IRQ_RESET(uncore, DE);
3039 dev_priv->irq_mask = ~0u;
3040
3041 if (GRAPHICS_VER(dev_priv) == 7)
3042 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3043
3044 if (IS_HASWELL(dev_priv)) {
3045 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3046 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3047 }
3048
3049 gen5_gt_irq_reset(&dev_priv->gt);
3050
3051 ibx_irq_reset(dev_priv);
3052}
3053
3054static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3055{
3056 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3057 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3058
3059 gen5_gt_irq_reset(&dev_priv->gt);
3060
3061 spin_lock_irq(&dev_priv->irq_lock);
3062 if (dev_priv->display_irqs_enabled)
3063 vlv_display_irq_reset(dev_priv);
3064 spin_unlock_irq(&dev_priv->irq_lock);
3065}
3066
3067static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
3068{
3069 struct intel_uncore *uncore = &dev_priv->uncore;
3070 enum pipe pipe;
3071
3072 if (!HAS_DISPLAY(dev_priv))
3073 return;
3074
3075 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3076 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3077
3078 for_each_pipe(dev_priv, pipe)
3079 if (intel_display_power_is_enabled(dev_priv,
3080 POWER_DOMAIN_PIPE(pipe)))
3081 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3082
3083 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3084 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3085}
3086
3087static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3088{
3089 struct intel_uncore *uncore = &dev_priv->uncore;
3090
3091 gen8_master_intr_disable(dev_priv->uncore.regs);
3092
3093 gen8_gt_irq_reset(&dev_priv->gt);
3094 gen8_display_irq_reset(dev_priv);
3095 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3096
3097 if (HAS_PCH_SPLIT(dev_priv))
3098 ibx_irq_reset(dev_priv);
3100}
3101
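/*
 * Reset all display interrupt registers, skipping the PSR and pipe
 * registers whose power wells are currently disabled.
 */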
3102static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3103{
3104 struct intel_uncore *uncore = &dev_priv->uncore;
3105 enum pipe pipe;
3106 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3107 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3108
3109 if (!HAS_DISPLAY(dev_priv))
3110 return;
3111
3112 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3113
3114 if (DISPLAY_VER(dev_priv) >= 12) {
3115 enum transcoder trans;
3116
3117 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3118 enum intel_display_power_domain domain;
3119
3120 domain = POWER_DOMAIN_TRANSCODER(trans);
3121 if (!intel_display_power_is_enabled(dev_priv, domain))
3122 continue;
3123
3124 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3125 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3126 }
3127 } else {
3128 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3129 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3130 }
3131
3132 for_each_pipe(dev_priv, pipe)
3133 if (intel_display_power_is_enabled(dev_priv,
3134 POWER_DOMAIN_PIPE(pipe)))
3135 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3136
3137 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3138 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3139 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3140
3141 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3142 GEN3_IRQ_RESET(uncore, SDE);
3143}
3144
3145static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3146{
3147 struct intel_uncore *uncore = &dev_priv->uncore;
3148
3149 if (HAS_MASTER_UNIT_IRQ(dev_priv))
3150 dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
3151 else
3152 gen11_master_intr_disable(dev_priv->uncore.regs);
3153
3154 gen11_gt_irq_reset(&dev_priv->gt);
3155 gen11_display_irq_reset(dev_priv);
3156
3157 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3158 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3159}
3160
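/*
 * Re-init the DE pipe interrupt registers for the pipes in pipe_mask
 * after their power well has been enabled, restoring the saved
 * per-pipe masks plus the vblank, underrun and flip-done bits.
 */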
3161void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3162 u8 pipe_mask)
3163{
3164 struct intel_uncore *uncore = &dev_priv->uncore;
3165 u32 extra_ier = GEN8_PIPE_VBLANK |
3166 gen8_de_pipe_underrun_mask(dev_priv) |
3167 gen8_de_pipe_flip_done_mask(dev_priv);
3168 enum pipe pipe;
3169
3170 spin_lock_irq(&dev_priv->irq_lock);
3171
3172 if (!intel_irqs_enabled(dev_priv)) {
3173 spin_unlock_irq(&dev_priv->irq_lock);
3174 return;
3175 }
3176
3177 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3178 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3179 dev_priv->de_irq_mask[pipe],
3180 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3181
3182 spin_unlock_irq(&dev_priv->irq_lock);
3183}
3184
3185void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3186 u8 pipe_mask)
3187{
3188 struct intel_uncore *uncore = &dev_priv->uncore;
3189 enum pipe pipe;
3190
3191 spin_lock_irq(&dev_priv->irq_lock);
3192
3193 if (!intel_irqs_enabled(dev_priv)) {
3194 spin_unlock_irq(&dev_priv->irq_lock);
3195 return;
3196 }
3197
3198 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3199 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3200
3201 spin_unlock_irq(&dev_priv->irq_lock);
3202
3203 /* make sure we're done processing display irqs */
3204 intel_synchronize_irq(dev_priv);
3205}
3206
3207static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3208{
3209 struct intel_uncore *uncore = &dev_priv->uncore;
3210
3211 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
3212 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3213
3214 gen8_gt_irq_reset(&dev_priv->gt);
3215
3216 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3217
3218 spin_lock_irq(&dev_priv->irq_lock);
3219 if (dev_priv->display_irqs_enabled)
3220 vlv_display_irq_reset(dev_priv);
3221 spin_unlock_irq(&dev_priv->irq_lock);
3222}
3223
3224static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
3225 enum hpd_pin pin)
3226{
3227 switch (pin) {
3228 case HPD_PORT_A:
3229 /*
3230 * When CPU and PCH are on the same package, port A
3231 * HPD must be enabled in both north and south.
3232 */
3233 return HAS_PCH_LPT_LP(i915) ?
3234 PORTA_HOTPLUG_ENABLE : 0;
3235 case HPD_PORT_B:
3236 return PORTB_HOTPLUG_ENABLE |
3237 PORTB_PULSE_DURATION_2ms;
3238 case HPD_PORT_C:
3239 return PORTC_HOTPLUG_ENABLE |
3240 PORTC_PULSE_DURATION_2ms;
3241 case HPD_PORT_D:
3242 return PORTD_HOTPLUG_ENABLE |
3243 PORTD_PULSE_DURATION_2ms;
3244 default:
3245 return 0;
3246 }
3247}
3248
3249static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3250{
3251 u32 hotplug;
3252
3253 /*
3254 * Enable digital hotplug on the PCH, and configure the DP short pulse
3255 * duration to 2ms (which is the minimum in the Display Port spec).
3256 * The pulse duration bits are reserved on LPT+.
3257 */
3258 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3259 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3260 PORTB_HOTPLUG_ENABLE |
3261 PORTC_HOTPLUG_ENABLE |
3262 PORTD_HOTPLUG_ENABLE |
3263 PORTB_PULSE_DURATION_MASK |
3264 PORTC_PULSE_DURATION_MASK |
3265 PORTD_PULSE_DURATION_MASK);
3266 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
3267 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3268}
3269
3270static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3271{
3272 u32 hotplug_irqs, enabled_irqs;
3273
3274 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3275 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3276
3277 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3278
3279 ibx_hpd_detection_setup(dev_priv);
3280}
3281
3282static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
3283 enum hpd_pin pin)
3284{
3285 switch (pin) {
3286 case HPD_PORT_A:
3287 case HPD_PORT_B:
3288 case HPD_PORT_C:
3289 case HPD_PORT_D:
3290 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
3291 default:
3292 return 0;
3293 }
3294}
3295
3296static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
3297 enum hpd_pin pin)
3298{
3299 switch (pin) {
3300 case HPD_PORT_TC1:
3301 case HPD_PORT_TC2:
3302 case HPD_PORT_TC3:
3303 case HPD_PORT_TC4:
3304 case HPD_PORT_TC5:
3305 case HPD_PORT_TC6:
3306 return ICP_TC_HPD_ENABLE(pin);
3307 default:
3308 return 0;
3309 }
3310}
3311
3312static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3313{
3314 u32 hotplug;
3315
3316 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
3317 hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3318 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
3319 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
3320 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
3321 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
3322 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
3323}
3324
3325static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3326{
3327 u32 hotplug;
3328
3329 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
3330 hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3331 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3332 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3333 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
3334 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
3335 ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
3336 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
3337 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
3338}
3339
3340static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3341{
3342 u32 hotplug_irqs, enabled_irqs;
3343
3344 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3345 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3346
3347 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3348 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3349
3350 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3351
3352 icp_ddi_hpd_detection_setup(dev_priv);
3353 icp_tc_hpd_detection_setup(dev_priv);
3354}
3355
3356static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
3357 enum hpd_pin pin)
3358{
3359 switch (pin) {
3360 case HPD_PORT_TC1:
3361 case HPD_PORT_TC2:
3362 case HPD_PORT_TC3:
3363 case HPD_PORT_TC4:
3364 case HPD_PORT_TC5:
3365 case HPD_PORT_TC6:
3366 return GEN11_HOTPLUG_CTL_ENABLE(pin);
3367 default:
3368 return 0;
3369 }
3370}
3371
3372static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3373{
3374 u32 val;
3375
3376 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3377 val |= (INVERT_DDIA_HPD |
3378 INVERT_DDIB_HPD |
3379 INVERT_DDIC_HPD |
3380 INVERT_DDID_HPD);
3381 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3382
3383 icp_hpd_irq_setup(dev_priv);
3384}
3385
3386static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3387{
3388 u32 hotplug;
3389
3390 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
3391 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3392 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3393 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3394 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3395 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3396 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3397 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3398 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
3399}
3400
3401static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3402{
3403 u32 hotplug;
3404
3405 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
3406 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3407 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3408 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3409 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3410 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3411 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3412 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3413 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
3414}
3415
3416static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3417{
3418 u32 hotplug_irqs, enabled_irqs;
3419 u32 val;
3420
3421 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3422 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3423
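	/* Unmask the enabled HPD irqs, keep the other hotplug irqs masked */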
3424 val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3425 val &= ~hotplug_irqs;
3426 val |= ~enabled_irqs & hotplug_irqs;
3427 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
3428 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3429
3430 gen11_tc_hpd_detection_setup(dev_priv);
3431 gen11_tbt_hpd_detection_setup(dev_priv);
3432
3433 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3434 icp_hpd_irq_setup(dev_priv);
3435}
3436
3437static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3438 enum hpd_pin pin)
3439{
3440 switch (pin) {
3441 case HPD_PORT_A:
3442 return PORTA_HOTPLUG_ENABLE;
3443 case HPD_PORT_B:
3444 return PORTB_HOTPLUG_ENABLE;
3445 case HPD_PORT_C:
3446 return PORTC_HOTPLUG_ENABLE;
3447 case HPD_PORT_D:
3448 return PORTD_HOTPLUG_ENABLE;
3449 default:
3450 return 0;
3451 }
3452}
3453
3454static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3455 enum hpd_pin pin)
3456{
3457 switch (pin) {
3458 case HPD_PORT_E:
3459 return PORTE_HOTPLUG_ENABLE;
3460 default:
3461 return 0;
3462 }
3463}
3464
3465static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3466{
3467 u32 val, hotplug;
3468
3469 /* Display WA #1179 WaHardHangonHotPlug: cnp */
3470 if (HAS_PCH_CNP(dev_priv)) {
3471 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3472 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3473 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3474 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3475 }
3476
3477 /* Enable digital hotplug on the PCH */
3478 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3479 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3480 PORTB_HOTPLUG_ENABLE |
3481 PORTC_HOTPLUG_ENABLE |
3482 PORTD_HOTPLUG_ENABLE);
3483 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
3484 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3485
3486 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
3487 hotplug &= ~PORTE_HOTPLUG_ENABLE;
3488 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
3489 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
3490}
3491
3492static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3493{
3494 u32 hotplug_irqs, enabled_irqs;
3495
3496 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3497 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3498
3499 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3500 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3501
3502 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3503
3504 spt_hpd_detection_setup(dev_priv);
3505}
3506
3507static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3508 enum hpd_pin pin)
3509{
3510 switch (pin) {
3511 case HPD_PORT_A:
3512 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3513 DIGITAL_PORTA_PULSE_DURATION_2ms;
3514 default:
3515 return 0;
3516 }
3517}
3518
3519static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3520{
3521 u32 hotplug;
3522
3523 /*
3524 * Enable digital hotplug on the CPU, and configure the DP short pulse
3525 * duration to 2ms (which is the minimum in the Display Port spec).
3526 * The pulse duration bits are reserved on HSW+.
3527 */
3528 hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
3529 hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
3530 DIGITAL_PORTA_PULSE_DURATION_MASK);
3531 hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
3532 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3533}
3534
3535static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3536{
3537 u32 hotplug_irqs, enabled_irqs;
3538
3539 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3540 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3541
3542 if (DISPLAY_VER(dev_priv) >= 8)
3543 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3544 else
3545 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3546
3547 ilk_hpd_detection_setup(dev_priv);
3548
3549 ibx_hpd_irq_setup(dev_priv);
3550}
3551
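/*
 * On BXT/GLK the per-port HPD sense may be inverted according to the
 * VBT, so fold the relevant invert bit into the hotplug enable mask.
 */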
3552static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3553 enum hpd_pin pin)
3554{
3555 u32 hotplug;
3556
3557 switch (pin) {
3558 case HPD_PORT_A:
3559 hotplug = PORTA_HOTPLUG_ENABLE;
3560 if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3561 hotplug |= BXT_DDIA_HPD_INVERT;
3562 return hotplug;
3563 case HPD_PORT_B:
3564 hotplug = PORTB_HOTPLUG_ENABLE;
3565 if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3566 hotplug |= BXT_DDIB_HPD_INVERT;
3567 return hotplug;
3568 case HPD_PORT_C:
3569 hotplug = PORTC_HOTPLUG_ENABLE;
3570 if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3571 hotplug |= BXT_DDIC_HPD_INVERT;
3572 return hotplug;
3573 default:
3574 return 0;
3575 }
3576}
3577
3578static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3579{
3580 u32 hotplug;
3581
3582 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3583 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3584 PORTB_HOTPLUG_ENABLE |
3585 PORTC_HOTPLUG_ENABLE |
3586 BXT_DDIA_HPD_INVERT |
3587 BXT_DDIB_HPD_INVERT |
3588 BXT_DDIC_HPD_INVERT);
3589 hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
3590 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3591}
3592
3593static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3594{
3595 u32 hotplug_irqs, enabled_irqs;
3596
3597 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3598 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3599
3600 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3601
3602 bxt_hpd_detection_setup(dev_priv);
3603}
3604
3605/*
3606 * SDEIER is also touched by the interrupt handler to work around missed PCH
3607 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3608 * instead we unconditionally enable all PCH interrupt sources here, but then
3609 * only unmask them as needed with SDEIMR.
3610 *
3611 * Note that we currently do this after installing the interrupt handler,
3612 * but before we enable the master interrupt. That should be sufficient
3613 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3614 * interrupts could still race.
3615 */
3616static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3617{
3618 struct intel_uncore *uncore = &dev_priv->uncore;
3619 u32 mask;
3620
3621 if (HAS_PCH_NOP(dev_priv))
3622 return;
3623
3624 if (HAS_PCH_IBX(dev_priv))
3625 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3626 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3627 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3628 else
3629 mask = SDE_GMBUS_CPT;
3630
3631 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3632}
3633
3634static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3635{
3636 struct intel_uncore *uncore = &dev_priv->uncore;
3637 u32 display_mask, extra_mask;
3638
3639 if (GRAPHICS_VER(dev_priv) >= 7) {
3640 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3641 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3642 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3643 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3644 DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3645 DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3646 DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3647 DE_DP_A_HOTPLUG_IVB);
3648 } else {
3649 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3650 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3651 DE_PIPEA_CRC_DONE | DE_POISON);
3652 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3653 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3654 DE_PLANE_FLIP_DONE(PLANE_A) |
3655 DE_PLANE_FLIP_DONE(PLANE_B) |
3656 DE_DP_A_HOTPLUG);
3657 }
3658
3659 if (IS_HASWELL(dev_priv)) {
3660 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3661 display_mask |= DE_EDP_PSR_INT_HSW;
3662 }
3663
3664 if (IS_IRONLAKE_M(dev_priv))
3665 extra_mask |= DE_PCU_EVENT;
3666
3667 dev_priv->irq_mask = ~display_mask;
3668
3669 ibx_irq_postinstall(dev_priv);
3670
3671 gen5_gt_irq_postinstall(&dev_priv->gt);
3672
3673 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3674 display_mask | extra_mask);
3675}
3676
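/*
 * vlv/chv display irqs live in a power well that can be powered down at
 * runtime, so the runtime-pm code uses these two helpers to tear down and
 * rebuild the display irq state under irq_lock (see intel_irq_init()).
 */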
3677void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3678{
3679 lockdep_assert_held(&dev_priv->irq_lock);
3680
3681 if (dev_priv->display_irqs_enabled)
3682 return;
3683
3684 dev_priv->display_irqs_enabled = true;
3685
3686 if (intel_irqs_enabled(dev_priv)) {
3687 vlv_display_irq_reset(dev_priv);
3688 vlv_display_irq_postinstall(dev_priv);
3689 }
3690}
3691
3692void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3693{
3694 lockdep_assert_held(&dev_priv->irq_lock);
3695
3696 if (!dev_priv->display_irqs_enabled)
3697 return;
3698
3699 dev_priv->display_irqs_enabled = false;
3700
3701 if (intel_irqs_enabled(dev_priv))
3702 vlv_display_irq_reset(dev_priv);
3703}
3704
3706static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3707{
3708 gen5_gt_irq_postinstall(&dev_priv->gt);
3709
3710 spin_lock_irq(&dev_priv->irq_lock);
3711 if (dev_priv->display_irqs_enabled)
3712 vlv_display_irq_postinstall(dev_priv);
3713 spin_unlock_irq(&dev_priv->irq_lock);
3714
3715 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3716 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3717}
3718
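/*
 * Build the DE pipe/port/misc interrupt enables, skipping any pipe whose
 * power well is currently down (those get initialized later when their
 * power well comes up), and set up the dedicated HPD irq bank on gen11+.
 */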
3719static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3720{
3721 struct intel_uncore *uncore = &dev_priv->uncore;
3722
3723 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3724 GEN8_PIPE_CDCLK_CRC_DONE;
3725 u32 de_pipe_enables;
3726 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3727 u32 de_port_enables;
3728 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3729 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3730 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3731 enum pipe pipe;
3732
3733 if (!HAS_DISPLAY(dev_priv))
3734 return;
3735
3736 if (DISPLAY_VER(dev_priv) <= 10)
3737 de_misc_masked |= GEN8_DE_MISC_GSE;
3738
3739 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3740 de_port_masked |= BXT_DE_PORT_GMBUS;
3741
3742 if (DISPLAY_VER(dev_priv) >= 11) {
3743 enum port port;
3744
3745 if (intel_bios_is_dsi_present(dev_priv, &port))
3746 de_port_masked |= DSI0_TE | DSI1_TE;
3747 }
3748
3749 de_pipe_enables = de_pipe_masked |
3750 GEN8_PIPE_VBLANK |
3751 gen8_de_pipe_underrun_mask(dev_priv) |
3752 gen8_de_pipe_flip_done_mask(dev_priv);
3753
3754 de_port_enables = de_port_masked;
3755 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3756 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3757 else if (IS_BROADWELL(dev_priv))
3758 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3759
3760 if (DISPLAY_VER(dev_priv) >= 12) {
3761 enum transcoder trans;
3762
3763 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3764 enum intel_display_power_domain domain;
3765
3766 domain = POWER_DOMAIN_TRANSCODER(trans);
3767 if (!intel_display_power_is_enabled(dev_priv, domain))
3768 continue;
3769
3770 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3771 }
3772 } else {
3773 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3774 }
3775
3776 for_each_pipe(dev_priv, pipe) {
3777 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3778
3779 if (intel_display_power_is_enabled(dev_priv,
3780 POWER_DOMAIN_PIPE(pipe)))
3781 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3782 dev_priv->de_irq_mask[pipe],
3783 de_pipe_enables);
3784 }
3785
3786 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3787 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3788
3789 if (DISPLAY_VER(dev_priv) >= 11) {
3790 u32 de_hpd_masked = 0;
3791 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3792 GEN11_DE_TBT_HOTPLUG_MASK;
3793
3794 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3795 de_hpd_enables);
3796 }
3797}
3798
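/* ICP+ south display: enable all sources in SDEIER, unmask only GMBUS. */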
3799static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3800{
3801 struct intel_uncore *uncore = &dev_priv->uncore;
3802 u32 mask = SDE_GMBUS_ICP;
3803
3804 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3805}
3806
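/*
 * Postinstall ordering: program the PCH, GT and DE blocks first, and only
 * then enable the top-level master interrupt.
 */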
3807static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3808{
3809 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3810 icp_irq_postinstall(dev_priv);
3811 else if (HAS_PCH_SPLIT(dev_priv))
3812 ibx_irq_postinstall(dev_priv);
3813
3814 gen8_gt_irq_postinstall(&dev_priv->gt);
3815 gen8_de_irq_postinstall(dev_priv);
3816
3817 gen8_master_intr_enable(dev_priv->uncore.regs);
3818}
3819
3820static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3821{
3822 if (!HAS_DISPLAY(dev_priv))
3823 return;
3824
3825 gen8_de_irq_postinstall(dev_priv);
3826
3827 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3828 GEN11_DISPLAY_IRQ_ENABLE);
3829}
3830
3831static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3832{
3833 struct intel_uncore *uncore = &dev_priv->uncore;
3834 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3835
3836 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3837 icp_irq_postinstall(dev_priv);
3838
3839 gen11_gt_irq_postinstall(&dev_priv->gt);
3840 gen11_de_irq_postinstall(dev_priv);
3841
3842 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3843
3844 if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
3845 dg1_master_intr_enable(uncore->regs);
3846 intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
3847 } else {
3848 gen11_master_intr_enable(uncore->regs);
3849 intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3850 }
3851}
3852
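/*
 * chv combines gen8-style GT interrupts with vlv-style display irq
 * handling, under the gen8 master interrupt control.
 */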
3853static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3854{
3855 gen8_gt_irq_postinstall(&dev_priv->gt);
3856
3857 spin_lock_irq(&dev_priv->irq_lock);
3858 if (dev_priv->display_irqs_enabled)
3859 vlv_display_irq_postinstall(dev_priv);
3860 spin_unlock_irq(&dev_priv->irq_lock);
3861
3862 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3863 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3864}
3865
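/* gen2: a single 16-bit IMR/IER/IIR bank plus the per-pipe PIPESTAT regs. */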
3866static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3867{
3868 struct intel_uncore *uncore = &dev_priv->uncore;
3869
3870 i9xx_pipestat_irq_reset(dev_priv);
3871
3872 GEN2_IRQ_RESET(uncore);
3873 dev_priv->irq_mask = ~0u;
3874}
3875
3876static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3877{
3878 struct intel_uncore *uncore = &dev_priv->uncore;
3879 u16 enable_mask;
3880
3881 intel_uncore_write16(uncore,
3882 EMR,
3883 ~(I915_ERROR_PAGE_TABLE |
3884 I915_ERROR_MEMORY_REFRESH));
3885
3886 /* Unmask the interrupts that we always want on. */
3887 dev_priv->irq_mask =
3888 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3889 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3890 I915_MASTER_ERROR_INTERRUPT);
3891
3892 enable_mask =
3893 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3894 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3895 I915_MASTER_ERROR_INTERRUPT |
3896 I915_USER_INTERRUPT;
3897
3898 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
3899
3900	/* Interrupt setup is already guaranteed to be single-threaded; this is
3901	 * just to make the assert_spin_locked check happy. */
3902 spin_lock_irq(&dev_priv->irq_lock);
3903 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3904 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3905 spin_unlock_irq(&dev_priv->irq_lock);
3906}
3907
3908static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3909 u16 *eir, u16 *eir_stuck)
3910{
3911 struct intel_uncore *uncore = &i915->uncore;
3912 u16 emr;
3913
3914 *eir = intel_uncore_read16(uncore, EIR);
3915
3916 if (*eir)
3917 intel_uncore_write16(uncore, EIR, *eir);
3918
3919 *eir_stuck = intel_uncore_read16(uncore, EIR);
3920 if (*eir_stuck == 0)
3921 return;
3922
3923 /*
3924 * Toggle all EMR bits to make sure we get an edge
3925 * in the ISR master error bit if we don't clear
3926 * all the EIR bits. Otherwise the edge triggered
3927 * IIR on i965/g4x wouldn't notice that an interrupt
3928 * is still pending. Also some EIR bits can't be
3929 * cleared except by handling the underlying error
3930 * (or by a GPU reset) so we mask any bit that
3931 * remains set.
3932 */
3933 emr = intel_uncore_read16(uncore, EMR);
3934 intel_uncore_write16(uncore, EMR, 0xffff);
3935 intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3936}
3937
3938static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3939 u16 eir, u16 eir_stuck)
3940{
3941	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
3942
3943 if (eir_stuck)
3944 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3945 eir_stuck);
3946}
3947
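/* 32-bit variant of the EMR/EIR toggle dance documented above. */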
3948static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3949 u32 *eir, u32 *eir_stuck)
3950{
3951 u32 emr;
3952
3953 *eir = intel_uncore_read(&dev_priv->uncore, EIR);
3954
3955 intel_uncore_write(&dev_priv->uncore, EIR, *eir);
3956
3957 *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
3958 if (*eir_stuck == 0)
3959 return;
3960
3961 /*
3962 * Toggle all EMR bits to make sure we get an edge
3963 * in the ISR master error bit if we don't clear
3964 * all the EIR bits. Otherwise the edge triggered
3965 * IIR on i965/g4x wouldn't notice that an interrupt
3966 * is still pending. Also some EIR bits can't be
3967 * cleared except by handling the underlying error
3968 * (or by a GPU reset) so we mask any bit that
3969 * remains set.
3970 */
3971 emr = intel_uncore_read(&dev_priv->uncore, EMR);
3972 intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
3973 intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
3974}
3975
3976static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3977 u32 eir, u32 eir_stuck)
3978{
3979	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%08x\n", eir);
3980
3981 if (eir_stuck)
3982 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3983 eir_stuck);
3984}
3985
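/*
 * gen2 top-level handler: latch the secondary status (PIPESTAT, EIR)
 * first, then clear IIR, and only then act on the latched bits so that a
 * re-asserted event is not lost.
 */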
3986static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3987{
3988 struct drm_i915_private *dev_priv = arg;
3989 irqreturn_t ret = IRQ_NONE;
3990
3991 if (!intel_irqs_enabled(dev_priv))
3992 return IRQ_NONE;
3993
3994 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3995 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3996
3997 do {
3998 u32 pipe_stats[I915_MAX_PIPES] = {};
3999 u16 eir = 0, eir_stuck = 0;
4000 u16 iir;
4001
4002 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4003 if (iir == 0)
4004 break;
4005
4006 ret = IRQ_HANDLED;
4007
4008 /* Call regardless, as some status bits might not be
4009 * signalled in iir */
4010 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4011
4012 if (iir & I915_MASTER_ERROR_INTERRUPT)
4013 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4014
4015 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4016
4017 if (iir & I915_USER_INTERRUPT)
4018 intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
4019
4020 if (iir & I915_MASTER_ERROR_INTERRUPT)
4021 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4022
4023 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4024 } while (0);
4025
4026 pmu_irq_stats(dev_priv, ret);
4027
4028 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4029
4030 return ret;
4031}
4032
4033static void i915_irq_reset(struct drm_i915_private *dev_priv)
4034{
4035 struct intel_uncore *uncore = &dev_priv->uncore;
4036
4037 if (I915_HAS_HOTPLUG(dev_priv)) {
4038 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4039 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4040 }
4041
4042 i9xx_pipestat_irq_reset(dev_priv);
4043
4044 GEN3_IRQ_RESET(uncore, GEN2_);
4045 dev_priv->irq_mask = ~0u;
4046}
4047
4048static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4049{
4050 struct intel_uncore *uncore = &dev_priv->uncore;
4051 u32 enable_mask;
4052
4053 intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
4054 I915_ERROR_MEMORY_REFRESH));
4055
4056 /* Unmask the interrupts that we always want on. */
4057 dev_priv->irq_mask =
4058 ~(I915_ASLE_INTERRUPT |
4059 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4060 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4061 I915_MASTER_ERROR_INTERRUPT);
4062
4063 enable_mask =
4064 I915_ASLE_INTERRUPT |
4065 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4066 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4067 I915_MASTER_ERROR_INTERRUPT |
4068 I915_USER_INTERRUPT;
4069
4070 if (I915_HAS_HOTPLUG(dev_priv)) {
4071 /* Enable in IER... */
4072 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4073 /* and unmask in IMR */
4074 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4075 }
4076
4077 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4078
4079	/* Interrupt setup is already guaranteed to be single-threaded; this is
4080	 * just to make the assert_spin_locked check happy. */
4081 spin_lock_irq(&dev_priv->irq_lock);
4082 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4083 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4084 spin_unlock_irq(&dev_priv->irq_lock);
4085
4086 i915_enable_asle_pipestat(dev_priv);
4087}
4088
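/* gen3 variant of the above, with display port hotplug handled as well. */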
4089static irqreturn_t i915_irq_handler(int irq, void *arg)
4090{
4091 struct drm_i915_private *dev_priv = arg;
4092 irqreturn_t ret = IRQ_NONE;
4093
4094 if (!intel_irqs_enabled(dev_priv))
4095 return IRQ_NONE;
4096
4097 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4098 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4099
4100 do {
4101 u32 pipe_stats[I915_MAX_PIPES] = {};
4102 u32 eir = 0, eir_stuck = 0;
4103 u32 hotplug_status = 0;
4104 u32 iir;
4105
4106 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4107 if (iir == 0)
4108 break;
4109
4110 ret = IRQ_HANDLED;
4111
4112 if (I915_HAS_HOTPLUG(dev_priv) &&
4113 iir & I915_DISPLAY_PORT_INTERRUPT)
4114 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4115
4116 /* Call regardless, as some status bits might not be
4117 * signalled in iir */
4118 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4119
4120 if (iir & I915_MASTER_ERROR_INTERRUPT)
4121 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4122
4123 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4124
4125 if (iir & I915_USER_INTERRUPT)
4126 intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
4127
4128 if (iir & I915_MASTER_ERROR_INTERRUPT)
4129 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4130
4131 if (hotplug_status)
4132 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4133
4134 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4135 } while (0);
4136
4137 pmu_irq_stats(dev_priv, ret);
4138
4139 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4140
4141 return ret;
4142}
4143
4144static void i965_irq_reset(struct drm_i915_private *dev_priv)
4145{
4146 struct intel_uncore *uncore = &dev_priv->uncore;
4147
4148 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4149 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4150
4151 i9xx_pipestat_irq_reset(dev_priv);
4152
4153 GEN3_IRQ_RESET(uncore, GEN2_);
4154 dev_priv->irq_mask = ~0u;
4155}
4156
4157static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4158{
4159 struct intel_uncore *uncore = &dev_priv->uncore;
4160 u32 enable_mask;
4161 u32 error_mask;
4162
4163 /*
4164 * Enable some error detection, note the instruction error mask
4165 * bit is reserved, so we leave it masked.
4166 */
4167 if (IS_G4X(dev_priv)) {
4168 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4169 GM45_ERROR_MEM_PRIV |
4170 GM45_ERROR_CP_PRIV |
4171 I915_ERROR_MEMORY_REFRESH);
4172 } else {
4173 error_mask = ~(I915_ERROR_PAGE_TABLE |
4174 I915_ERROR_MEMORY_REFRESH);
4175 }
4176 intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
4177
4178 /* Unmask the interrupts that we always want on. */
4179 dev_priv->irq_mask =
4180 ~(I915_ASLE_INTERRUPT |
4181 I915_DISPLAY_PORT_INTERRUPT |
4182 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4183 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4184 I915_MASTER_ERROR_INTERRUPT);
4185
4186 enable_mask =
4187 I915_ASLE_INTERRUPT |
4188 I915_DISPLAY_PORT_INTERRUPT |
4189 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4190 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4191 I915_MASTER_ERROR_INTERRUPT |
4192 I915_USER_INTERRUPT;
4193
4194 if (IS_G4X(dev_priv))
4195 enable_mask |= I915_BSD_USER_INTERRUPT;
4196
4197 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4198
4199	/* Interrupt setup is already guaranteed to be single-threaded; this is
4200	 * just to make the assert_spin_locked check happy. */
4201 spin_lock_irq(&dev_priv->irq_lock);
4202 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4203 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4204 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4205 spin_unlock_irq(&dev_priv->irq_lock);
4206
4207 i915_enable_asle_pipestat(dev_priv);
4208}
4209
4210static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4211{
4212 u32 hotplug_en;
4213
4214 lockdep_assert_held(&dev_priv->irq_lock);
4215
4216 /* Note HDMI and DP share hotplug bits */
4217 /* enable bits are the same for all generations */
4218 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4219	/*
4220	 * Programming the CRT detection parameters tends to generate a
4221	 * spurious hotplug event about three seconds later. So just do it once.
4222	 */
4223 if (IS_G4X(dev_priv))
4224 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4225 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4226
4227 /* Ignore TV since it's buggy */
4228 i915_hotplug_interrupt_update_locked(dev_priv,
4229 HOTPLUG_INT_EN_MASK |
4230 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4231 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4232 hotplug_en);
4233}
4234
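/* gen4 variant: also dispatches the BSD ring user interrupt (enabled on g4x). */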
4235static irqreturn_t i965_irq_handler(int irq, void *arg)
4236{
4237 struct drm_i915_private *dev_priv = arg;
4238 irqreturn_t ret = IRQ_NONE;
4239
4240 if (!intel_irqs_enabled(dev_priv))
4241 return IRQ_NONE;
4242
4243 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4244 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4245
4246 do {
4247 u32 pipe_stats[I915_MAX_PIPES] = {};
4248 u32 eir = 0, eir_stuck = 0;
4249 u32 hotplug_status = 0;
4250 u32 iir;
4251
4252 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4253 if (iir == 0)
4254 break;
4255
4256 ret = IRQ_HANDLED;
4257
4258 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4259 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4260
4261 /* Call regardless, as some status bits might not be
4262 * signalled in iir */
4263 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4264
4265 if (iir & I915_MASTER_ERROR_INTERRUPT)
4266 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4267
4268 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4269
4270 if (iir & I915_USER_INTERRUPT)
4271 intel_engine_cs_irq(dev_priv->gt.engine[RCS0],
4272 iir);
4273
4274 if (iir & I915_BSD_USER_INTERRUPT)
4275 intel_engine_cs_irq(dev_priv->gt.engine[VCS0],
4276 iir >> 25);
4277
4278 if (iir & I915_MASTER_ERROR_INTERRUPT)
4279 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4280
4281 if (hotplug_status)
4282 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4283
4284 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4285 } while (0);
4286
4287	pmu_irq_stats(dev_priv, ret);
4288
4289 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4290
4291 return ret;
4292}
4293
4294/**
4295 * intel_irq_init - initializes irq support
4296 * @dev_priv: i915 device instance
4297 *
4298 * This function initializes all the irq support including work items, timers
4299 * and all the vtables. It does not set up the interrupt itself though.
4300 */
4301void intel_irq_init(struct drm_i915_private *dev_priv)
4302{
4303 struct drm_device *dev = &dev_priv->drm;
4304 int i;
4305
4306 INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4307 for (i = 0; i < MAX_L3_SLICES; ++i)
4308 dev_priv->l3_parity.remap_info[i] = NULL;
4309
4310 /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
4311 if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
4312 dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
4313
4314 if (!HAS_DISPLAY(dev_priv))
4315 return;
4316
4317 intel_hpd_init_pins(dev_priv);
4318
4319 intel_hpd_init_work(dev_priv);
4320
4321 dev->vblank_disable_immediate = true;
4322
4323 /* Most platforms treat the display irq block as an always-on
4324 * power domain. vlv/chv can disable it at runtime and need
4325 * special care to avoid writing any of the display block registers
4326 * outside of the power domain. We defer setting up the display irqs
4327 * in this case to the runtime pm.
4328 */
4329 dev_priv->display_irqs_enabled = true;
4330 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4331 dev_priv->display_irqs_enabled = false;
4332
4333 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4334 /* If we have MST support, we want to avoid doing short HPD IRQ storm
4335 * detection, as short HPD storms will occur as a natural part of
4336 * sideband messaging with MST.
4337 * On older platforms however, IRQ storms can occur with both long and
4338 * short pulses, as seen on some G4x systems.
4339 */
4340 dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4341
4342 if (HAS_GMCH(dev_priv)) {
4343 if (I915_HAS_HOTPLUG(dev_priv))
4344 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4345 } else {
4346 if (HAS_PCH_DG1(dev_priv))
4347 dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
4348 else if (DISPLAY_VER(dev_priv) >= 11)
4349 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4350 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4351 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4352 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4353 dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup;
4354 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4355 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4356 else
4357 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4358 }
4359}
4360
4361/**
4362 * intel_irq_fini - deinitializes IRQ support
4363 * @i915: i915 device instance
4364 *
4365 * This function deinitializes all the IRQ support.
4366 */
4367void intel_irq_fini(struct drm_i915_private *i915)
4368{
4369 int i;
4370
4371 for (i = 0; i < MAX_L3_SLICES; ++i)
4372 kfree(i915->l3_parity.remap_info[i]);
4373}
4374
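/* Select the platform-specific top-level interrupt handler. */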
4375static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4376{
4377 if (HAS_GMCH(dev_priv)) {
4378 if (IS_CHERRYVIEW(dev_priv))
4379 return cherryview_irq_handler;
4380 else if (IS_VALLEYVIEW(dev_priv))
4381 return valleyview_irq_handler;
4382 else if (GRAPHICS_VER(dev_priv) == 4)
4383 return i965_irq_handler;
4384 else if (GRAPHICS_VER(dev_priv) == 3)
4385 return i915_irq_handler;
4386 else
4387 return i8xx_irq_handler;
4388 } else {
4389 if (HAS_MASTER_UNIT_IRQ(dev_priv))
4390 return dg1_irq_handler;
4391 if (GRAPHICS_VER(dev_priv) >= 11)
4392 return gen11_irq_handler;
4393 else if (GRAPHICS_VER(dev_priv) >= 8)
4394 return gen8_irq_handler;
4395 else
4396 return ilk_irq_handler;
4397 }
4398}
4399
4400static void intel_irq_reset(struct drm_i915_private *dev_priv)
4401{
4402 if (HAS_GMCH(dev_priv)) {
4403 if (IS_CHERRYVIEW(dev_priv))
4404 cherryview_irq_reset(dev_priv);
4405 else if (IS_VALLEYVIEW(dev_priv))
4406 valleyview_irq_reset(dev_priv);
4407 else if (GRAPHICS_VER(dev_priv) == 4)
4408 i965_irq_reset(dev_priv);
4409 else if (GRAPHICS_VER(dev_priv) == 3)
4410 i915_irq_reset(dev_priv);
4411 else
4412 i8xx_irq_reset(dev_priv);
4413 } else {
4414 if (GRAPHICS_VER(dev_priv) >= 11)
4415 gen11_irq_reset(dev_priv);
4416 else if (GRAPHICS_VER(dev_priv) >= 8)
4417 gen8_irq_reset(dev_priv);
4418 else
4419 ilk_irq_reset(dev_priv);
4420 }
4421}
4422
4423static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4424{
4425 if (HAS_GMCH(dev_priv)) {
4426 if (IS_CHERRYVIEW(dev_priv))
4427 cherryview_irq_postinstall(dev_priv);
4428 else if (IS_VALLEYVIEW(dev_priv))
4429 valleyview_irq_postinstall(dev_priv);
4430 else if (GRAPHICS_VER(dev_priv) == 4)
4431 i965_irq_postinstall(dev_priv);
4432 else if (GRAPHICS_VER(dev_priv) == 3)
4433 i915_irq_postinstall(dev_priv);
4434 else
4435 i8xx_irq_postinstall(dev_priv);
4436 } else {
4437 if (GRAPHICS_VER(dev_priv) >= 11)
4438 gen11_irq_postinstall(dev_priv);
4439 else if (GRAPHICS_VER(dev_priv) >= 8)
4440 gen8_irq_postinstall(dev_priv);
4441 else
4442 ilk_irq_postinstall(dev_priv);
4443 }
4444}
4445
4446/**
4447 * intel_irq_install - enables the hardware interrupt
4448 * @dev_priv: i915 device instance
4449 *
4450 * This function enables the hardware interrupt handling, but leaves hotplug
4451 * handling disabled. It is called after intel_irq_init().
4452 *
4453 * In the driver load and resume code we need working interrupts in a few places
4454 * but don't want to deal with the hassle of concurrent probe and hotplug
4455 * workers. Hence the split into this two-stage approach.
4456 */
4457int intel_irq_install(struct drm_i915_private *dev_priv)
4458{
4459 int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4460 int ret;
4461
4462 /*
4463 * We enable some interrupt sources in our postinstall hooks, so mark
4464 * interrupts as enabled _before_ actually enabling them to avoid
4465 * special cases in our ordering checks.
4466 */
4467 dev_priv->runtime_pm.irqs_enabled = true;
4468
4469 dev_priv->drm.irq_enabled = true;
4470
4471 intel_irq_reset(dev_priv);
4472
4473 ret = request_irq(irq, intel_irq_handler(dev_priv),
4474 IRQF_SHARED, DRIVER_NAME, dev_priv);
4475 if (ret < 0) {
4476 dev_priv->drm.irq_enabled = false;
4477 return ret;
4478 }
4479
4480 intel_irq_postinstall(dev_priv);
4481
4482 return ret;
4483}
4484
4485/**
4486 * intel_irq_uninstall - finalizes all irq handling
4487 * @dev_priv: i915 device instance
4488 *
4489 * This stops interrupt and hotplug handling and unregisters and frees all
4490 * resources acquired in the init functions.
4491 */
4492void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4493{
4494 int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4495
4496 /*
4497 * FIXME we can get called twice during driver probe
4498 * error handling as well as during driver remove due to
4499 * intel_modeset_driver_remove() calling us out of sequence.
4500 * Would be nice if it didn't do that...
4501 */
4502 if (!dev_priv->drm.irq_enabled)
4503 return;
4504
4505 dev_priv->drm.irq_enabled = false;
4506
4507 intel_irq_reset(dev_priv);
4508
4509 free_irq(irq, dev_priv);
4510
4511 intel_hpd_cancel_work(dev_priv);
4512 dev_priv->runtime_pm.irqs_enabled = false;
4513}
4514
4515/**
4516 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4517 * @dev_priv: i915 device instance
4518 *
4519 * This function is used to disable interrupts at runtime, both in the runtime
4520 * pm and the system suspend/resume code.
4521 */
4522void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4523{
4524 intel_irq_reset(dev_priv);
4525 dev_priv->runtime_pm.irqs_enabled = false;
4526 intel_synchronize_irq(dev_priv);
4527}
4528
4529/**
4530 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4531 * @dev_priv: i915 device instance
4532 *
4533 * This function is used to enable interrupts at runtime, both in the runtime
4534 * pm and the system suspend/resume code.
4535 */
4536void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4537{
4538 dev_priv->runtime_pm.irqs_enabled = true;
4539 intel_irq_reset(dev_priv);
4540 intel_irq_postinstall(dev_priv);
4541}
4542
4543bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4544{
4545 return dev_priv->runtime_pm.irqs_enabled;
4546}
4547
4548void intel_synchronize_irq(struct drm_i915_private *i915)
4549{
4550 synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4551}
4552
4553void intel_synchronize_hardirq(struct drm_i915_private *i915)
4554{
4555 synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4556}
1/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/slab.h>
32#include <linux/sysrq.h>
33
34#include <drm/drm_drv.h>
35
36#include "display/icl_dsi_regs.h"
37#include "display/intel_de.h"
38#include "display/intel_display_trace.h"
39#include "display/intel_display_types.h"
40#include "display/intel_fifo_underrun.h"
41#include "display/intel_hotplug.h"
42#include "display/intel_lpe_audio.h"
43#include "display/intel_psr.h"
44
45#include "gt/intel_breadcrumbs.h"
46#include "gt/intel_gt.h"
47#include "gt/intel_gt_irq.h"
48#include "gt/intel_gt_pm_irq.h"
49#include "gt/intel_gt_regs.h"
50#include "gt/intel_rps.h"
51
52#include "i915_driver.h"
53#include "i915_drv.h"
54#include "i915_irq.h"
55#include "intel_pm.h"
56
57/**
58 * DOC: interrupt handling
59 *
60 * These functions provide the basic support for enabling and disabling the
61 * interrupt handling support. There's a lot more functionality in i915_irq.c
62 * and related files, but that will be described in separate chapters.
63 */
64
65/*
66 * Interrupt statistic for PMU. Increments the counter only if the
67 * interrupt originated from the GPU so interrupts from a device which
68 * shares the interrupt line are not accounted.
69 */
70static inline void pmu_irq_stats(struct drm_i915_private *i915,
71 irqreturn_t res)
72{
73 if (unlikely(res != IRQ_HANDLED))
74 return;
75
76 /*
77 * A clever compiler translates that into INC. A not so clever one
78 * should at least prevent store tearing.
79 */
80 WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
81}
82
83typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
84typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
85 enum hpd_pin pin);
86
87static const u32 hpd_ilk[HPD_NUM_PINS] = {
88 [HPD_PORT_A] = DE_DP_A_HOTPLUG,
89};
90
91static const u32 hpd_ivb[HPD_NUM_PINS] = {
92 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
93};
94
95static const u32 hpd_bdw[HPD_NUM_PINS] = {
96 [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
97};
98
99static const u32 hpd_ibx[HPD_NUM_PINS] = {
100 [HPD_CRT] = SDE_CRT_HOTPLUG,
101 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
102 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
103 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
104 [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
105};
106
107static const u32 hpd_cpt[HPD_NUM_PINS] = {
108 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
109 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
110 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
111 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
112 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
113};
114
115static const u32 hpd_spt[HPD_NUM_PINS] = {
116 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
117 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
118 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
119 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
120 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
121};
122
123static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
124 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
125 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
126 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
127 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
128 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
129 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
130};
131
132static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
133 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
134 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
135 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
136 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
137 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
138 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
139};
140
141static const u32 hpd_status_i915[HPD_NUM_PINS] = {
142 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
143 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
144 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
145 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
146 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
147 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
148};
149
150static const u32 hpd_bxt[HPD_NUM_PINS] = {
151 [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
152 [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
153 [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
154};
155
156static const u32 hpd_gen11[HPD_NUM_PINS] = {
157 [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
158 [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
159 [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
160 [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
161 [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
162 [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
163};
164
165static const u32 hpd_icp[HPD_NUM_PINS] = {
166 [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
167 [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
168 [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
169 [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
170 [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
171 [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
172 [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
173 [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
174 [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
175};
176
177static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
178 [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
179 [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
180 [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
181 [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
182 [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
183};
184
185static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
186{
187 struct intel_hotplug *hpd = &dev_priv->display.hotplug;
188
189 if (HAS_GMCH(dev_priv)) {
190 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
191 IS_CHERRYVIEW(dev_priv))
192 hpd->hpd = hpd_status_g4x;
193 else
194 hpd->hpd = hpd_status_i915;
195 return;
196 }
197
198 if (DISPLAY_VER(dev_priv) >= 11)
199 hpd->hpd = hpd_gen11;
200 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
201 hpd->hpd = hpd_bxt;
202 else if (DISPLAY_VER(dev_priv) >= 8)
203 hpd->hpd = hpd_bdw;
204 else if (DISPLAY_VER(dev_priv) >= 7)
205 hpd->hpd = hpd_ivb;
206 else
207 hpd->hpd = hpd_ilk;
208
209 if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
210 (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
211 return;
212
213 if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
214 hpd->pch_hpd = hpd_sde_dg1;
215 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
216 hpd->pch_hpd = hpd_icp;
217 else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
218 hpd->pch_hpd = hpd_spt;
219 else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
220 hpd->pch_hpd = hpd_cpt;
221 else if (HAS_PCH_IBX(dev_priv))
222 hpd->pch_hpd = hpd_ibx;
223 else
224 MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
225}
226
227static void
228intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
229{
230 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
231
232 drm_crtc_handle_vblank(&crtc->base);
233}
234
235void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
236 i915_reg_t iir, i915_reg_t ier)
237{
238 intel_uncore_write(uncore, imr, 0xffffffff);
239 intel_uncore_posting_read(uncore, imr);
240
241 intel_uncore_write(uncore, ier, 0);
242
243 /* IIR can theoretically queue up two events. Be paranoid. */
244 intel_uncore_write(uncore, iir, 0xffffffff);
245 intel_uncore_posting_read(uncore, iir);
246 intel_uncore_write(uncore, iir, 0xffffffff);
247 intel_uncore_posting_read(uncore, iir);
248}
249
250static void gen2_irq_reset(struct intel_uncore *uncore)
251{
252 intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
253 intel_uncore_posting_read16(uncore, GEN2_IMR);
254
255 intel_uncore_write16(uncore, GEN2_IER, 0);
256
257 /* IIR can theoretically queue up two events. Be paranoid. */
258 intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
259 intel_uncore_posting_read16(uncore, GEN2_IIR);
260 intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
261 intel_uncore_posting_read16(uncore, GEN2_IIR);
262}
263
264/*
265 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
266 */
267static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
268{
269 u32 val = intel_uncore_read(uncore, reg);
270
271 if (val == 0)
272 return;
273
274 drm_WARN(&uncore->i915->drm, 1,
275 "Interrupt register 0x%x is not zero: 0x%08x\n",
276 i915_mmio_reg_offset(reg), val);
277 intel_uncore_write(uncore, reg, 0xffffffff);
278 intel_uncore_posting_read(uncore, reg);
279 intel_uncore_write(uncore, reg, 0xffffffff);
280 intel_uncore_posting_read(uncore, reg);
281}
282
283static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
284{
285 u16 val = intel_uncore_read16(uncore, GEN2_IIR);
286
287 if (val == 0)
288 return;
289
290 drm_WARN(&uncore->i915->drm, 1,
291 "Interrupt register 0x%x is not zero: 0x%08x\n",
292 i915_mmio_reg_offset(GEN2_IIR), val);
293 intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
294 intel_uncore_posting_read16(uncore, GEN2_IIR);
295 intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
296 intel_uncore_posting_read16(uncore, GEN2_IIR);
297}
298
299void gen3_irq_init(struct intel_uncore *uncore,
300 i915_reg_t imr, u32 imr_val,
301 i915_reg_t ier, u32 ier_val,
302 i915_reg_t iir)
303{
304 gen3_assert_iir_is_zero(uncore, iir);
305
306 intel_uncore_write(uncore, ier, ier_val);
307 intel_uncore_write(uncore, imr, imr_val);
308 intel_uncore_posting_read(uncore, imr);
309}
310
311static void gen2_irq_init(struct intel_uncore *uncore,
312 u32 imr_val, u32 ier_val)
313{
314 gen2_assert_iir_is_zero(uncore);
315
316 intel_uncore_write16(uncore, GEN2_IER, ier_val);
317 intel_uncore_write16(uncore, GEN2_IMR, imr_val);
318 intel_uncore_posting_read16(uncore, GEN2_IMR);
319}
320
321/* For display hotplug interrupt */
322static inline void
323i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
324 u32 mask,
325 u32 bits)
326{
327 lockdep_assert_held(&dev_priv->irq_lock);
328 drm_WARN_ON(&dev_priv->drm, bits & ~mask);
329
330 intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
331}
332
333/**
334 * i915_hotplug_interrupt_update - update hotplug interrupt enable
335 * @dev_priv: driver private
336 * @mask: bits to update
337 * @bits: bits to enable
338 * NOTE: the HPD enable bits are modified both inside and outside
339 * of an interrupt context. To avoid that read-modify-write cycles
340 * interfer, these bits are protected by a spinlock. Since this
341 * function is usually not called from a context where the lock is
342 * held already, this function acquires the lock itself. A non-locking
343 * version is also available.
344 */
345void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
346 u32 mask,
347 u32 bits)
348{
349 spin_lock_irq(&dev_priv->irq_lock);
350 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
351 spin_unlock_irq(&dev_priv->irq_lock);
352}
353
354/**
355 * ilk_update_display_irq - update DEIMR
356 * @dev_priv: driver private
357 * @interrupt_mask: mask of interrupt bits to update
358 * @enabled_irq_mask: mask of interrupt bits to enable
359 */
360static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
361 u32 interrupt_mask, u32 enabled_irq_mask)
362{
363 u32 new_val;
364
365 lockdep_assert_held(&dev_priv->irq_lock);
366 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
367
368 new_val = dev_priv->irq_mask;
369 new_val &= ~interrupt_mask;
370 new_val |= (~enabled_irq_mask & interrupt_mask);
371
372 if (new_val != dev_priv->irq_mask &&
373 !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
374 dev_priv->irq_mask = new_val;
375 intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
376 intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
377 }
378}
379
380void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
381{
382 ilk_update_display_irq(i915, bits, bits);
383}
384
385void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
386{
387 ilk_update_display_irq(i915, bits, 0);
388}
389
390/**
391 * bdw_update_port_irq - update DE port interrupt
392 * @dev_priv: driver private
393 * @interrupt_mask: mask of interrupt bits to update
394 * @enabled_irq_mask: mask of interrupt bits to enable
395 */
396static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
397 u32 interrupt_mask,
398 u32 enabled_irq_mask)
399{
400 u32 new_val;
401 u32 old_val;
402
403 lockdep_assert_held(&dev_priv->irq_lock);
404
405 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
406
407 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
408 return;
409
410 old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
411
412 new_val = old_val;
413 new_val &= ~interrupt_mask;
414 new_val |= (~enabled_irq_mask & interrupt_mask);
415
416 if (new_val != old_val) {
417 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
418 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
419 }
420}
421
422/**
423 * bdw_update_pipe_irq - update DE pipe interrupt
424 * @dev_priv: driver private
425 * @pipe: pipe whose interrupt to update
426 * @interrupt_mask: mask of interrupt bits to update
427 * @enabled_irq_mask: mask of interrupt bits to enable
428 */
429static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
430 enum pipe pipe, u32 interrupt_mask,
431 u32 enabled_irq_mask)
432{
433 u32 new_val;
434
435 lockdep_assert_held(&dev_priv->irq_lock);
436
437 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
438
439 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
440 return;
441
442 new_val = dev_priv->de_irq_mask[pipe];
443 new_val &= ~interrupt_mask;
444 new_val |= (~enabled_irq_mask & interrupt_mask);
445
446 if (new_val != dev_priv->de_irq_mask[pipe]) {
447 dev_priv->de_irq_mask[pipe] = new_val;
448 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
449 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
450 }
451}
452
453void bdw_enable_pipe_irq(struct drm_i915_private *i915,
454 enum pipe pipe, u32 bits)
455{
456 bdw_update_pipe_irq(i915, pipe, bits, bits);
457}
458
459void bdw_disable_pipe_irq(struct drm_i915_private *i915,
460 enum pipe pipe, u32 bits)
461{
462 bdw_update_pipe_irq(i915, pipe, bits, 0);
463}
464
465/**
466 * ibx_display_interrupt_update - update SDEIMR
467 * @dev_priv: driver private
468 * @interrupt_mask: mask of interrupt bits to update
469 * @enabled_irq_mask: mask of interrupt bits to enable
470 */
471static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
472 u32 interrupt_mask,
473 u32 enabled_irq_mask)
474{
475 u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
476 sdeimr &= ~interrupt_mask;
477 sdeimr |= (~enabled_irq_mask & interrupt_mask);
478
479 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
480
481 lockdep_assert_held(&dev_priv->irq_lock);
482
483 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
484 return;
485
486 intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
487 intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
488}
489
490void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
491{
492 ibx_display_interrupt_update(i915, bits, bits);
493}
494
495void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
496{
497 ibx_display_interrupt_update(i915, bits, 0);
498}
499
500u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
501 enum pipe pipe)
502{
503 u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
504 u32 enable_mask = status_mask << 16;
505
506 lockdep_assert_held(&dev_priv->irq_lock);
507
508 if (DISPLAY_VER(dev_priv) < 5)
509 goto out;
510
511 /*
512 * On pipe A we don't support the PSR interrupt yet,
513 * on pipe B and C the same bit MBZ.
514 */
515 if (drm_WARN_ON_ONCE(&dev_priv->drm,
516 status_mask & PIPE_A_PSR_STATUS_VLV))
517 return 0;
518 /*
519 * On pipe B and C we don't support the PSR interrupt yet, on pipe
520 * A the same bit is for perf counters which we don't use either.
521 */
522 if (drm_WARN_ON_ONCE(&dev_priv->drm,
523 status_mask & PIPE_B_PSR_STATUS_VLV))
524 return 0;
525
526 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
527 SPRITE0_FLIP_DONE_INT_EN_VLV |
528 SPRITE1_FLIP_DONE_INT_EN_VLV);
529 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
530 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
531 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
532 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
533
534out:
535 drm_WARN_ONCE(&dev_priv->drm,
536 enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
537 status_mask & ~PIPESTAT_INT_STATUS_MASK,
538 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
539 pipe_name(pipe), enable_mask, status_mask);
540
541 return enable_mask;
542}
543
544void i915_enable_pipestat(struct drm_i915_private *dev_priv,
545 enum pipe pipe, u32 status_mask)
546{
547 i915_reg_t reg = PIPESTAT(pipe);
548 u32 enable_mask;
549
550 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
551 "pipe %c: status_mask=0x%x\n",
552 pipe_name(pipe), status_mask);
553
554 lockdep_assert_held(&dev_priv->irq_lock);
555 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
556
557 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
558 return;
559
560 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
561 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
562
563 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
564 intel_uncore_posting_read(&dev_priv->uncore, reg);
565}
566
567void i915_disable_pipestat(struct drm_i915_private *dev_priv,
568 enum pipe pipe, u32 status_mask)
569{
570 i915_reg_t reg = PIPESTAT(pipe);
571 u32 enable_mask;
572
573 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
574 "pipe %c: status_mask=0x%x\n",
575 pipe_name(pipe), status_mask);
576
577 lockdep_assert_held(&dev_priv->irq_lock);
578 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
579
580 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
581 return;
582
583 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
584 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
585
586 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
587 intel_uncore_posting_read(&dev_priv->uncore, reg);
588}
589
590static bool i915_has_asle(struct drm_i915_private *dev_priv)
591{
592 if (!dev_priv->display.opregion.asle)
593 return false;
594
595 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
596}
597
598/**
599 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
600 * @dev_priv: i915 device private
601 */
602static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
603{
604 if (!i915_has_asle(dev_priv))
605 return;
606
607 spin_lock_irq(&dev_priv->irq_lock);
608
609 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
610 if (DISPLAY_VER(dev_priv) >= 4)
611 i915_enable_pipestat(dev_priv, PIPE_A,
612 PIPE_LEGACY_BLC_EVENT_STATUS);
613
614 spin_unlock_irq(&dev_priv->irq_lock);
615}
616
617/*
618 * This timing diagram depicts the video signal in and
619 * around the vertical blanking period.
620 *
621 * Assumptions about the fictitious mode used in this example:
622 * vblank_start >= 3
623 * vsync_start = vblank_start + 1
624 * vsync_end = vblank_start + 2
625 * vtotal = vblank_start + 3
626 *
627 * start of vblank:
628 * latch double buffered registers
629 * increment frame counter (ctg+)
630 * generate start of vblank interrupt (gen4+)
631 * |
632 * | frame start:
633 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
634 * | may be shifted forward 1-3 extra lines via PIPECONF
635 * | |
636 * | | start of vsync:
637 * | | generate vsync interrupt
638 * | | |
639 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
640 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
641 * ----va---> <-----------------vb--------------------> <--------va-------------
642 * | | <----vs-----> |
643 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
644 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
645 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
646 * | | |
647 * last visible pixel first visible pixel
648 * | increment frame counter (gen3/4)
649 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
650 *
651 * x = horizontal active
652 * _ = horizontal blanking
653 * hs = horizontal sync
654 * va = vertical active
655 * vb = vertical blanking
656 * vs = vertical sync
657 * vbs = vblank_start (number)
658 *
659 * Summary:
660 * - most events happen at the start of horizontal sync
661 * - frame start happens at the start of horizontal blank, 1-4 lines
662 * (depending on PIPECONF settings) after the start of vblank
663 * - gen3/4 pixel and frame counter are synchronized with the start
664 * of horizontal active on the first line of vertical active
665 */
666
667/* Called from drm generic code, passed a 'crtc', which
668 * we use as a pipe index
669 */
670u32 i915_get_vblank_counter(struct drm_crtc *crtc)
671{
672 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
673 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
674 const struct drm_display_mode *mode = &vblank->hwmode;
675 enum pipe pipe = to_intel_crtc(crtc)->pipe;
676 i915_reg_t high_frame, low_frame;
677 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
678 unsigned long irqflags;
679
680 /*
681 * On i965gm TV output the frame counter only works up to
682 * the point when we enable the TV encoder. After that the
683 * frame counter ceases to work and reads zero. We need a
684 * vblank wait before enabling the TV encoder and so we
685 * have to enable vblank interrupts while the frame counter
686 * is still in a working state. However the core vblank code
687 * does not like us returning non-zero frame counter values
688 * when we've told it that we don't have a working frame
689 * counter. Thus we must stop non-zero values leaking out.
690 */
691 if (!vblank->max_vblank_count)
692 return 0;
693
694 htotal = mode->crtc_htotal;
695 hsync_start = mode->crtc_hsync_start;
696 vbl_start = mode->crtc_vblank_start;
697 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
698 vbl_start = DIV_ROUND_UP(vbl_start, 2);
699
700 /* Convert to pixel count */
701 vbl_start *= htotal;
702
703 /* Start of vblank event occurs at start of hsync */
704 vbl_start -= htotal - hsync_start;
705
706 high_frame = PIPEFRAME(pipe);
707 low_frame = PIPEFRAMEPIXEL(pipe);
708
709 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
710
711 /*
712 * High & low register fields aren't synchronized, so make sure
713 * we get a low value that's stable across two reads of the high
714 * register.
715 */
716 do {
717 high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
718 low = intel_de_read_fw(dev_priv, low_frame);
719 high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
720 } while (high1 != high2);
721
722 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
723
724 high1 >>= PIPE_FRAME_HIGH_SHIFT;
725 pixel = low & PIPE_PIXEL_MASK;
726 low >>= PIPE_FRAME_LOW_SHIFT;
727
728 /*
729 * The frame counter increments at beginning of active.
730 * Cook up a vblank counter by also checking the pixel
731 * counter against vblank start.
732 */
733 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
734}
735
736u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
737{
738 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
739 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
740 enum pipe pipe = to_intel_crtc(crtc)->pipe;
741
742 if (!vblank->max_vblank_count)
743 return 0;
744
745 return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
746}
747
748static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
749{
750 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
751 struct drm_vblank_crtc *vblank =
752 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
753 const struct drm_display_mode *mode = &vblank->hwmode;
754 u32 htotal = mode->crtc_htotal;
755 u32 clock = mode->crtc_clock;
756 u32 scan_prev_time, scan_curr_time, scan_post_time;
757
758 /*
759 * To avoid the race condition where we might cross into the
760 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
761 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
762 * during the same frame.
763 */
764 do {
765 /*
766 * This field provides read back of the display
767 * pipe frame time stamp. The time stamp value
768 * is sampled at every start of vertical blank.
769 */
770 scan_prev_time = intel_de_read_fw(dev_priv,
771 PIPE_FRMTMSTMP(crtc->pipe));
772
773 /*
774 * The TIMESTAMP_CTR register has the current
775 * time stamp value.
776 */
777 scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
778
779 scan_post_time = intel_de_read_fw(dev_priv,
780 PIPE_FRMTMSTMP(crtc->pipe));
781 } while (scan_post_time != scan_prev_time);
782
783 return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
784 clock), 1000 * htotal);
785}
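
/*
 * Worked example for the conversion above (figures are assumptions, not
 * taken from any particular platform): crtc_clock is in kHz, so the
 * arithmetic implies the two timestamp registers tick in microseconds.
 * With crtc_clock = 148500, htotal = 2200 and a timestamp delta of
 * 1000 us:
 *
 *	scanlines = 1000 * 148500 / (1000 * 2200) = 67 (truncated)
 *
 * i.e. delta * pixels-per-microsecond / pixels-per-line.
 */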
786
787/*
788 * On certain encoders on certain platforms, the pipe
789 * scanline register will not work to get the scanline,
790 * since the timings are driven from the PORT, or there are
791 * issues with scanline register updates.
792 * This function instead uses the framestamp and current
793 * timestamp registers to calculate the scanline.
794 */
795static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
796{
797 struct drm_vblank_crtc *vblank =
798 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
799 const struct drm_display_mode *mode = &vblank->hwmode;
800 u32 vblank_start = mode->crtc_vblank_start;
801 u32 vtotal = mode->crtc_vtotal;
802 u32 scanline;
803
804 scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
805 scanline = min(scanline, vtotal - 1);
806 scanline = (scanline + vblank_start) % vtotal;
807
808 return scanline;
809}
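
/*
 * Worked example (assumed 1080p-like timing): the frame timestamp is
 * sampled at the start of vblank, so with vtotal = 1125, vblank_start =
 * 1080 and 50 scanlines elapsed since that timestamp, the function above
 * returns (50 + 1080) % 1125 = 5, i.e. the pipe is 5 lines into the
 * active area of the next frame.
 */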
810
811/*
812 * Use intel_de_read_fw(), only for fast reads of the display block;
813 * no need for forcewake etc.
814 */
815static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
816{
817 struct drm_device *dev = crtc->base.dev;
818 struct drm_i915_private *dev_priv = to_i915(dev);
819 const struct drm_display_mode *mode;
820 struct drm_vblank_crtc *vblank;
821 enum pipe pipe = crtc->pipe;
822 int position, vtotal;
823
824 if (!crtc->active)
825 return 0;
826
827 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
828 mode = &vblank->hwmode;
829
830 if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
831 return __intel_get_crtc_scanline_from_timestamp(crtc);
832
833 vtotal = mode->crtc_vtotal;
834 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
835 vtotal /= 2;
836
837 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
838
839 /*
840 * On HSW, the DSL reg (0x70000) appears to return 0 if we
841 * read it just before the start of vblank. So try it again
842 * so we don't accidentally end up spanning a vblank frame
843 * increment, causing the pipe_update_end() code to squawk at us.
844 *
845 * The nature of this problem means we can't simply check the ISR
846 * bit and return the vblank start value; nor can we use the scanline
847 * debug register in the transcoder as it appears to have the same
848 * problem. We may need to extend this to include other platforms,
849 * but so far testing only shows the problem on HSW.
850 */
851 if (HAS_DDI(dev_priv) && !position) {
852 int i, temp;
853
854 for (i = 0; i < 100; i++) {
855 udelay(1);
856 temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
857 if (temp != position) {
858 position = temp;
859 break;
860 }
861 }
862 }
863
864 /*
865 * See update_scanline_offset() for the details on the
866 * scanline_offset adjustment.
867 */
868 return (position + crtc->scanline_offset) % vtotal;
869}
870
871static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
872 bool in_vblank_irq,
873 int *vpos, int *hpos,
874 ktime_t *stime, ktime_t *etime,
875 const struct drm_display_mode *mode)
876{
877 struct drm_device *dev = _crtc->dev;
878 struct drm_i915_private *dev_priv = to_i915(dev);
879 struct intel_crtc *crtc = to_intel_crtc(_crtc);
880 enum pipe pipe = crtc->pipe;
881 int position;
882 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
883 unsigned long irqflags;
884 bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
885 IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
886 crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
887
888 if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
889 drm_dbg(&dev_priv->drm,
890 "trying to get scanoutpos for disabled "
891 "pipe %c\n", pipe_name(pipe));
892 return false;
893 }
894
895 htotal = mode->crtc_htotal;
896 hsync_start = mode->crtc_hsync_start;
897 vtotal = mode->crtc_vtotal;
898 vbl_start = mode->crtc_vblank_start;
899 vbl_end = mode->crtc_vblank_end;
900
901 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
902 vbl_start = DIV_ROUND_UP(vbl_start, 2);
903 vbl_end /= 2;
904 vtotal /= 2;
905 }
906
907 /*
908 * Lock uncore.lock, as we will do multiple timing critical raw
909 * register reads, potentially with preemption disabled, so the
910 * following code must not block on uncore.lock.
911 */
912 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
913
914 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
915
916 /* Get optional system timestamp before query. */
917 if (stime)
918 *stime = ktime_get();
919
920 if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
921 int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
922
923 position = __intel_get_crtc_scanline(crtc);
924
925 /*
926 * Already exiting vblank? If so, shift our position
927 * so it looks like we're already approaching the full
928 * vblank end. This should make the generated timestamp
929 * more or less match when the active portion will start.
930 */
931 if (position >= vbl_start && scanlines < position)
932 position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
933 } else if (use_scanline_counter) {
934 /* No obvious pixelcount register. Only query vertical
935 * scanout position from Display scan line register.
936 */
937 position = __intel_get_crtc_scanline(crtc);
938 } else {
939 /* Have access to pixelcount since start of frame.
940 * We can split this into vertical and horizontal
941 * scanout position.
942 */
943 position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
944
945 /* convert to pixel counts */
946 vbl_start *= htotal;
947 vbl_end *= htotal;
948 vtotal *= htotal;
949
950 /*
951 * In interlaced modes, the pixel counter counts all pixels,
952 * so one field will have htotal more pixels. In order to avoid
953 * the reported position from jumping backwards when the pixel
954 * counter is beyond the length of the shorter field, just
955 * clamp the position to the length of the shorter field. This
956 * matches how the scanline counter based position works since
957 * the scanline counter doesn't count the two half lines.
958 */
959 if (position >= vtotal)
960 position = vtotal - 1;
961
962 /*
963 * Start of vblank interrupt is triggered at start of hsync,
964 * just prior to the first active line of vblank. However we
965 * consider lines to start at the leading edge of horizontal
966 * active. So, should we get here before we've crossed into
967 * the horizontal active of the first line in vblank, we would
968 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
969 * always add htotal-hsync_start to the current pixel position.
970 */
971 position = (position + htotal - hsync_start) % vtotal;
972 }
973
974 /* Get optional system timestamp after query. */
975 if (etime)
976 *etime = ktime_get();
977
978 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
979
980 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
981
982 /*
983 * While in vblank, position will be negative
984 * counting up towards 0 at vbl_end. And outside
985 * vblank, position will be positive counting
986 * up since vbl_end.
987 */
988 if (position >= vbl_start)
989 position -= vbl_end;
990 else
991 position += vtotal - vbl_end;
992
993 if (use_scanline_counter) {
994 *vpos = position;
995 *hpos = 0;
996 } else {
997 *vpos = position / htotal;
998 *hpos = position - (*vpos * htotal);
999 }
1000
1001 return true;
1002}
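
/*
 * Illustrative sketch (not driver code) of the normalization done at the
 * end of i915_get_crtc_scanoutpos(): positions inside vblank come out
 * negative, counting up towards 0 at vbl_end, and positions in the
 * active area count up from 0. All parameters are in scanlines here; the
 * pixel-count path divides by htotal afterwards, as above.
 */
static inline int example_scanout_position(int position, int vbl_start,
                                           int vbl_end, int vtotal)
{
        if (position >= vbl_start)
                return position - vbl_end;          /* in vblank: < 0 */
        else
                return position + vtotal - vbl_end; /* active: >= 0 */
}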
1003
1004bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
1005 ktime_t *vblank_time, bool in_vblank_irq)
1006{
1007 return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
1008 crtc, max_error, vblank_time, in_vblank_irq,
1009 i915_get_crtc_scanoutpos);
1010}
1011
1012int intel_get_crtc_scanline(struct intel_crtc *crtc)
1013{
1014 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1015 unsigned long irqflags;
1016 int position;
1017
1018 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1019 position = __intel_get_crtc_scanline(crtc);
1020 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1021
1022 return position;
1023}
1024
1025/**
1026 * ivb_parity_work - Workqueue called when a parity error interrupt
1027 * occurred.
1028 * @work: workqueue struct
1029 *
1030 * Doesn't actually do anything except notify userspace. As a consequence of
1031 * this event, userspace should try to remap the bad rows, since statistically
1032 * the same row is more likely to go bad again.
1033 */
1034static void ivb_parity_work(struct work_struct *work)
1035{
1036 struct drm_i915_private *dev_priv =
1037 container_of(work, typeof(*dev_priv), l3_parity.error_work);
1038 struct intel_gt *gt = to_gt(dev_priv);
1039 u32 error_status, row, bank, subbank;
1040 char *parity_event[6];
1041 u32 misccpctl;
1042 u8 slice = 0;
1043
1044 /* We must turn off DOP level clock gating to access the L3 registers.
1045 * In order to prevent a get/put style interface, acquire struct mutex
1046 * any time we access those registers.
1047 */
1048 mutex_lock(&dev_priv->drm.struct_mutex);
1049
1050 /* If we've screwed up tracking, just let the interrupt fire again */
1051 if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
1052 goto out;
1053
1054 misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
1055 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
1056 intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1057
1058 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1059 i915_reg_t reg;
1060
1061 slice--;
1062 if (drm_WARN_ON_ONCE(&dev_priv->drm,
1063 slice >= NUM_L3_SLICES(dev_priv)))
1064 break;
1065
1066 dev_priv->l3_parity.which_slice &= ~(1 << slice);
1067
1068 reg = GEN7_L3CDERRST1(slice);
1069
1070 error_status = intel_uncore_read(&dev_priv->uncore, reg);
1071 row = GEN7_PARITY_ERROR_ROW(error_status);
1072 bank = GEN7_PARITY_ERROR_BANK(error_status);
1073 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1074
1075 intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1076 intel_uncore_posting_read(&dev_priv->uncore, reg);
1077
1078 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1079 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1080 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1081 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1082 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1083 parity_event[5] = NULL;
1084
1085 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1086 KOBJ_CHANGE, parity_event);
1087
1088 drm_dbg(&dev_priv->drm,
1089 "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1090 slice, row, bank, subbank);
1091
1092 kfree(parity_event[4]);
1093 kfree(parity_event[3]);
1094 kfree(parity_event[2]);
1095 kfree(parity_event[1]);
1096 }
1097
1098 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
1099
1100out:
1101 drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1102 spin_lock_irq(gt->irq_lock);
1103 gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
1104 spin_unlock_irq(gt->irq_lock);
1105
1106 mutex_unlock(&dev_priv->drm.struct_mutex);
1107}
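
/*
 * Illustrative sketch (not driver code): the ffs()-based walk used in
 * ivb_parity_work() above. With a hypothetical mask of 0b101 the loop
 * visits slice 0 and then slice 2, clearing each bit as it goes; note
 * that ffs() is 1-based and returns 0 for an empty mask.
 */
static inline void example_for_each_set_slice(u32 mask,
                                              void (*handle)(int slice))
{
        int slice;

        while ((slice = ffs(mask)) != 0) {
                slice--;                /* ffs() is 1-based */
                mask &= ~(1u << slice);
                handle(slice);
        }
}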
1108
1109static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1110{
1111 switch (pin) {
1112 case HPD_PORT_TC1:
1113 case HPD_PORT_TC2:
1114 case HPD_PORT_TC3:
1115 case HPD_PORT_TC4:
1116 case HPD_PORT_TC5:
1117 case HPD_PORT_TC6:
1118 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
1119 default:
1120 return false;
1121 }
1122}
1123
1124static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1125{
1126 switch (pin) {
1127 case HPD_PORT_A:
1128 return val & PORTA_HOTPLUG_LONG_DETECT;
1129 case HPD_PORT_B:
1130 return val & PORTB_HOTPLUG_LONG_DETECT;
1131 case HPD_PORT_C:
1132 return val & PORTC_HOTPLUG_LONG_DETECT;
1133 default:
1134 return false;
1135 }
1136}
1137
1138static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1139{
1140 switch (pin) {
1141 case HPD_PORT_A:
1142 case HPD_PORT_B:
1143 case HPD_PORT_C:
1144 case HPD_PORT_D:
1145 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
1146 default:
1147 return false;
1148 }
1149}
1150
1151static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1152{
1153 switch (pin) {
1154 case HPD_PORT_TC1:
1155 case HPD_PORT_TC2:
1156 case HPD_PORT_TC3:
1157 case HPD_PORT_TC4:
1158 case HPD_PORT_TC5:
1159 case HPD_PORT_TC6:
1160 return val & ICP_TC_HPD_LONG_DETECT(pin);
1161 default:
1162 return false;
1163 }
1164}
1165
1166static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1167{
1168 switch (pin) {
1169 case HPD_PORT_E:
1170 return val & PORTE_HOTPLUG_LONG_DETECT;
1171 default:
1172 return false;
1173 }
1174}
1175
1176static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1177{
1178 switch (pin) {
1179 case HPD_PORT_A:
1180 return val & PORTA_HOTPLUG_LONG_DETECT;
1181 case HPD_PORT_B:
1182 return val & PORTB_HOTPLUG_LONG_DETECT;
1183 case HPD_PORT_C:
1184 return val & PORTC_HOTPLUG_LONG_DETECT;
1185 case HPD_PORT_D:
1186 return val & PORTD_HOTPLUG_LONG_DETECT;
1187 default:
1188 return false;
1189 }
1190}
1191
1192static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1193{
1194 switch (pin) {
1195 case HPD_PORT_A:
1196 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1197 default:
1198 return false;
1199 }
1200}
1201
1202static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1203{
1204 switch (pin) {
1205 case HPD_PORT_B:
1206 return val & PORTB_HOTPLUG_LONG_DETECT;
1207 case HPD_PORT_C:
1208 return val & PORTC_HOTPLUG_LONG_DETECT;
1209 case HPD_PORT_D:
1210 return val & PORTD_HOTPLUG_LONG_DETECT;
1211 default:
1212 return false;
1213 }
1214}
1215
1216static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1217{
1218 switch (pin) {
1219 case HPD_PORT_B:
1220 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1221 case HPD_PORT_C:
1222 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1223 case HPD_PORT_D:
1224 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1225 default:
1226 return false;
1227 }
1228}
1229
1230/*
1231 * Get a bit mask of pins that have triggered, and which ones may be long.
1232 * This can be called multiple times with the same masks to accumulate
1233 * hotplug detection results from several registers.
1234 *
1235 * Note that the caller is expected to zero out the masks initially.
1236 */
1237static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1238 u32 *pin_mask, u32 *long_mask,
1239 u32 hotplug_trigger, u32 dig_hotplug_reg,
1240 const u32 hpd[HPD_NUM_PINS],
1241 bool long_pulse_detect(enum hpd_pin pin, u32 val))
1242{
1243 enum hpd_pin pin;
1244
1245 BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1246
1247 for_each_hpd_pin(pin) {
1248 if ((hpd[pin] & hotplug_trigger) == 0)
1249 continue;
1250
1251 *pin_mask |= BIT(pin);
1252
1253 if (long_pulse_detect(pin, dig_hotplug_reg))
1254 *long_mask |= BIT(pin);
1255 }
1256
1257 drm_dbg(&dev_priv->drm,
1258 "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1259 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1260
1261}
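
/*
 * Usage sketch (hypothetical trigger values): because intel_get_hpd_pins()
 * only ORs bits in, a handler with two trigger registers zeroes the masks
 * once and calls it twice, exactly as the ICP and SPT handlers below do:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(i915, &pin_mask, &long_mask, trig_a, dig_a,
 *			   i915->display.hotplug.pch_hpd, detect_a);
 *	intel_get_hpd_pins(i915, &pin_mask, &long_mask, trig_b, dig_b,
 *			   i915->display.hotplug.pch_hpd, detect_b);
 *	if (pin_mask)
 *		intel_hpd_irq_handler(i915, pin_mask, long_mask);
 */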
1262
1263static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
1264 const u32 hpd[HPD_NUM_PINS])
1265{
1266 struct intel_encoder *encoder;
1267 u32 enabled_irqs = 0;
1268
1269 for_each_intel_encoder(&dev_priv->drm, encoder)
1270 if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
1271 enabled_irqs |= hpd[encoder->hpd_pin];
1272
1273 return enabled_irqs;
1274}
1275
1276static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
1277 const u32 hpd[HPD_NUM_PINS])
1278{
1279 struct intel_encoder *encoder;
1280 u32 hotplug_irqs = 0;
1281
1282 for_each_intel_encoder(&dev_priv->drm, encoder)
1283 hotplug_irqs |= hpd[encoder->hpd_pin];
1284
1285 return hotplug_irqs;
1286}
1287
1288static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
1289 hotplug_enables_func hotplug_enables)
1290{
1291 struct intel_encoder *encoder;
1292 u32 hotplug = 0;
1293
1294 for_each_intel_encoder(&i915->drm, encoder)
1295 hotplug |= hotplug_enables(i915, encoder->hpd_pin);
1296
1297 return hotplug;
1298}
1299
1300static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1301{
1302 wake_up_all(&dev_priv->display.gmbus.wait_queue);
1303}
1304
1305static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1306{
1307 wake_up_all(&dev_priv->display.gmbus.wait_queue);
1308}
1309
1310#if defined(CONFIG_DEBUG_FS)
1311static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1312 enum pipe pipe,
1313 u32 crc0, u32 crc1,
1314 u32 crc2, u32 crc3,
1315 u32 crc4)
1316{
1317 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
1318 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1319 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1320
1321 trace_intel_pipe_crc(crtc, crcs);
1322
1323 spin_lock(&pipe_crc->lock);
1324 /*
1325 * For some not yet identified reason, the first CRC is
1326 * bonkers. So let's just wait for the next vblank and read
1327 * out the buggy result.
1328 *
1329 * On GEN8+ sometimes the second CRC is bonkers as well, so
1330 * don't trust that one either.
1331 */
1332 if (pipe_crc->skipped <= 0 ||
1333 (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1334 pipe_crc->skipped++;
1335 spin_unlock(&pipe_crc->lock);
1336 return;
1337 }
1338 spin_unlock(&pipe_crc->lock);
1339
1340 drm_crtc_add_crc_entry(&crtc->base, true,
1341 drm_crtc_accurate_vblank_count(&crtc->base),
1342 crcs);
1343}
1344#else
1345static inline void
1346display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1347 enum pipe pipe,
1348 u32 crc0, u32 crc1,
1349 u32 crc2, u32 crc3,
1350 u32 crc4) {}
1351#endif
1352
1353static void flip_done_handler(struct drm_i915_private *i915,
1354 enum pipe pipe)
1355{
1356 struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
1357 struct drm_crtc_state *crtc_state = crtc->base.state;
1358 struct drm_pending_vblank_event *e = crtc_state->event;
1359 struct drm_device *dev = &i915->drm;
1360 unsigned long irqflags;
1361
1362 spin_lock_irqsave(&dev->event_lock, irqflags);
1363
1364 crtc_state->event = NULL;
1365
1366 drm_crtc_send_vblank_event(&crtc->base, e);
1367
1368 spin_unlock_irqrestore(&dev->event_lock, irqflags);
1369}
1370
1371static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1372 enum pipe pipe)
1373{
1374 display_pipe_crc_irq_handler(dev_priv, pipe,
1375 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1376 0, 0, 0, 0);
1377}
1378
1379static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1380 enum pipe pipe)
1381{
1382 display_pipe_crc_irq_handler(dev_priv, pipe,
1383 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1384 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
1385 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
1386 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
1387 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
1388}
1389
1390static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1391 enum pipe pipe)
1392{
1393 u32 res1, res2;
1394
1395 if (DISPLAY_VER(dev_priv) >= 3)
1396 res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
1397 else
1398 res1 = 0;
1399
1400 if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
1401 res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
1402 else
1403 res2 = 0;
1404
1405 display_pipe_crc_irq_handler(dev_priv, pipe,
1406 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
1407 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
1408 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
1409 res1, res2);
1410}
1411
1412static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1413{
1414 enum pipe pipe;
1415
1416 for_each_pipe(dev_priv, pipe) {
1417 intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
1418 PIPESTAT_INT_STATUS_MASK |
1419 PIPE_FIFO_UNDERRUN_STATUS);
1420
1421 dev_priv->pipestat_irq_mask[pipe] = 0;
1422 }
1423}
1424
1425static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1426 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1427{
1428 enum pipe pipe;
1429
1430 spin_lock(&dev_priv->irq_lock);
1431
1432 if (!dev_priv->display_irqs_enabled) {
1433 spin_unlock(&dev_priv->irq_lock);
1434 return;
1435 }
1436
1437 for_each_pipe(dev_priv, pipe) {
1438 i915_reg_t reg;
1439 u32 status_mask, enable_mask, iir_bit = 0;
1440
1441 /*
1442 * PIPESTAT bits get signalled even when the interrupt is
1443 * disabled with the mask bits, and some of the status bits do
1444 * not generate interrupts at all (like the underrun bit). Hence
1445 * we need to be careful that we only handle what we want to
1446 * handle.
1447 */
1448
1449 /* fifo underruns are filtered in the underrun handler. */
1450 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1451
1452 switch (pipe) {
1453 default:
1454 case PIPE_A:
1455 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1456 break;
1457 case PIPE_B:
1458 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1459 break;
1460 case PIPE_C:
1461 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1462 break;
1463 }
1464 if (iir & iir_bit)
1465 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1466
1467 if (!status_mask)
1468 continue;
1469
1470 reg = PIPESTAT(pipe);
1471 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1472 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1473
1474 /*
1475 * Clear the PIPE*STAT regs before the IIR
1476 *
1477 * Toggle the enable bits to make sure we get an
1478 * edge in the ISR pipe event bit if we don't clear
1479 * all the enabled status bits. Otherwise the edge
1480 * triggered IIR on i965/g4x wouldn't notice that
1481 * an interrupt is still pending.
1482 */
1483 if (pipe_stats[pipe]) {
1484 intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1485 intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1486 }
1487 }
1488 spin_unlock(&dev_priv->irq_lock);
1489}
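
/*
 * Illustrative sketch (not driver code) of the PIPESTAT double write
 * above: the first write clears the latched status bits (and momentarily
 * drops the enable bits), the second write restores the enable mask.
 * That toggle forces a fresh 0->1 edge on the ISR pipe event bit if an
 * enabled status bit is still set, which the edge-triggered IIR on
 * i965/g4x needs in order to notice a still-pending interrupt.
 * write_reg() is a hypothetical stand-in for the PIPESTAT write.
 */
static inline void example_pipestat_ack(void (*write_reg)(u32 val),
                                        u32 status, u32 enable_mask)
{
        if (!status)
                return;

        write_reg(status);      /* clear latched status bits */
        write_reg(enable_mask); /* restore enables: regenerate the edge */
}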
1490
1491static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1492 u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1493{
1494 enum pipe pipe;
1495
1496 for_each_pipe(dev_priv, pipe) {
1497 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1498 intel_handle_vblank(dev_priv, pipe);
1499
1500 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1501 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1502
1503 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1504 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1505 }
1506}
1507
1508static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1509 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1510{
1511 bool blc_event = false;
1512 enum pipe pipe;
1513
1514 for_each_pipe(dev_priv, pipe) {
1515 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1516 intel_handle_vblank(dev_priv, pipe);
1517
1518 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1519 blc_event = true;
1520
1521 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1522 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1523
1524 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1525 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1526 }
1527
1528 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1529 intel_opregion_asle_intr(dev_priv);
1530}
1531
1532static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1533 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1534{
1535 bool blc_event = false;
1536 enum pipe pipe;
1537
1538 for_each_pipe(dev_priv, pipe) {
1539 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1540 intel_handle_vblank(dev_priv, pipe);
1541
1542 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1543 blc_event = true;
1544
1545 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1546 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1547
1548 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1549 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1550 }
1551
1552 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1553 intel_opregion_asle_intr(dev_priv);
1554
1555 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1556 gmbus_irq_handler(dev_priv);
1557}
1558
1559static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1560 u32 pipe_stats[I915_MAX_PIPES])
1561{
1562 enum pipe pipe;
1563
1564 for_each_pipe(dev_priv, pipe) {
1565 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1566 intel_handle_vblank(dev_priv, pipe);
1567
1568 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1569 flip_done_handler(dev_priv, pipe);
1570
1571 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1572 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1573
1574 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1575 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1576 }
1577
1578 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1579 gmbus_irq_handler(dev_priv);
1580}
1581
1582static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1583{
1584 u32 hotplug_status = 0, hotplug_status_mask;
1585 int i;
1586
1587 if (IS_G4X(dev_priv) ||
1588 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1589 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1590 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1591 else
1592 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1593
1594 /*
1595 * We absolutely have to clear all the pending interrupt
1596 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1597 * interrupt bit won't have an edge, and the i965/g4x
1598 * edge triggered IIR will not notice that an interrupt
1599 * is still pending. We can't use PORT_HOTPLUG_EN to
1600 * guarantee the edge as the act of toggling the enable
1601 * bits can itself generate a new hotplug interrupt :(
1602 */
1603 for (i = 0; i < 10; i++) {
1604 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1605
1606 if (tmp == 0)
1607 return hotplug_status;
1608
1609 hotplug_status |= tmp;
1610 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1611 }
1612
1613 drm_WARN_ONCE(&dev_priv->drm, 1,
1614 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1615 intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1616
1617 return hotplug_status;
1618}
1619
1620static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1621 u32 hotplug_status)
1622{
1623 u32 pin_mask = 0, long_mask = 0;
1624 u32 hotplug_trigger;
1625
1626 if (IS_G4X(dev_priv) ||
1627 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1628 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1629 else
1630 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1631
1632 if (hotplug_trigger) {
1633 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1634 hotplug_trigger, hotplug_trigger,
1635 dev_priv->display.hotplug.hpd,
1636 i9xx_port_hotplug_long_detect);
1637
1638 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1639 }
1640
1641 if ((IS_G4X(dev_priv) ||
1642 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1643 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1644 dp_aux_irq_handler(dev_priv);
1645}
1646
1647static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1648{
1649 struct drm_i915_private *dev_priv = arg;
1650 irqreturn_t ret = IRQ_NONE;
1651
1652 if (!intel_irqs_enabled(dev_priv))
1653 return IRQ_NONE;
1654
1655 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1656 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1657
1658 do {
1659 u32 iir, gt_iir, pm_iir;
1660 u32 pipe_stats[I915_MAX_PIPES] = {};
1661 u32 hotplug_status = 0;
1662 u32 ier = 0;
1663
1664 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1665 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1666 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1667
1668 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1669 break;
1670
1671 ret = IRQ_HANDLED;
1672
1673 /*
1674 * Theory on interrupt generation, based on empirical evidence:
1675 *
1676 * x = ((VLV_IIR & VLV_IER) ||
1677 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1678 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1679 *
1680 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1681 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1682 * guarantee the CPU interrupt will be raised again even if we
1683 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1684 * bits this time around.
1685 */
1686 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1687 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1688
1689 if (gt_iir)
1690 intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1691 if (pm_iir)
1692 intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1693
1694 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1695 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1696
1697 /* Call regardless, as some status bits might not be
1698 * signalled in iir */
1699 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1700
1701 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1702 I915_LPE_PIPE_B_INTERRUPT))
1703 intel_lpe_audio_irq_handler(dev_priv);
1704
1705 /*
1706 * VLV_IIR is single buffered, and reflects the level
1707 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1708 */
1709 if (iir)
1710 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1711
1712 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1713 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1714
1715 if (gt_iir)
1716 gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1717 if (pm_iir)
1718 gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1719
1720 if (hotplug_status)
1721 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1722
1723 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1724 } while (0);
1725
1726 pmu_irq_stats(dev_priv, ret);
1727
1728 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1729
1730 return ret;
1731}
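
/*
 * Illustrative sketch (not driver code) of the interrupt generation
 * "theory" described above: the CPU interrupt only fires on a 0->1 edge
 * of this expression, which is why the handler clears VLV_IER and the
 * master enable before acking and restores them afterwards, forcing a
 * fresh edge if anything is still pending. All arguments are
 * hypothetical register snapshots.
 */
static inline bool example_vlv_irq_asserted(u32 iir, u32 ier,
                                            u32 gt_iir, u32 gt_ier,
                                            u32 pm_iir, u32 pm_ier,
                                            u32 master_ier)
{
        return (iir & ier) ||
               (((gt_iir & gt_ier) || (pm_iir & pm_ier)) &&
                (master_ier & MASTER_INTERRUPT_ENABLE));
}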
1732
1733static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1734{
1735 struct drm_i915_private *dev_priv = arg;
1736 irqreturn_t ret = IRQ_NONE;
1737
1738 if (!intel_irqs_enabled(dev_priv))
1739 return IRQ_NONE;
1740
1741 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1742 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1743
1744 do {
1745 u32 master_ctl, iir;
1746 u32 pipe_stats[I915_MAX_PIPES] = {};
1747 u32 hotplug_status = 0;
1748 u32 ier = 0;
1749
1750 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1751 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1752
1753 if (master_ctl == 0 && iir == 0)
1754 break;
1755
1756 ret = IRQ_HANDLED;
1757
1758 /*
1759 * Theory on interrupt generation, based on empirical evidence:
1760 *
1761 * x = ((VLV_IIR & VLV_IER) ||
1762 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1763 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1764 *
1765 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1766 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1767 * guarantee the CPU interrupt will be raised again even if we
1768 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1769 * bits this time around.
1770 */
1771 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1772 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1773
1774 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1775
1776 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1777 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1778
1779 /* Call regardless, as some status bits might not be
1780 * signalled in iir */
1781 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1782
1783 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1784 I915_LPE_PIPE_B_INTERRUPT |
1785 I915_LPE_PIPE_C_INTERRUPT))
1786 intel_lpe_audio_irq_handler(dev_priv);
1787
1788 /*
1789 * VLV_IIR is single buffered, and reflects the level
1790 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1791 */
1792 if (iir)
1793 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1794
1795 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1796 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1797
1798 if (hotplug_status)
1799 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1800
1801 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1802 } while (0);
1803
1804 pmu_irq_stats(dev_priv, ret);
1805
1806 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1807
1808 return ret;
1809}
1810
1811static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1812 u32 hotplug_trigger)
1813{
1814 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1815
1816 /*
1817 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1818 * unless we touch the hotplug register, even if hotplug_trigger is
1819 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1820 * errors.
1821 */
1822 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1823 if (!hotplug_trigger) {
1824 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1825 PORTD_HOTPLUG_STATUS_MASK |
1826 PORTC_HOTPLUG_STATUS_MASK |
1827 PORTB_HOTPLUG_STATUS_MASK;
1828 dig_hotplug_reg &= ~mask;
1829 }
1830
1831 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1832 if (!hotplug_trigger)
1833 return;
1834
1835 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1836 hotplug_trigger, dig_hotplug_reg,
1837 dev_priv->display.hotplug.pch_hpd,
1838 pch_port_hotplug_long_detect);
1839
1840 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1841}
1842
1843static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1844{
1845 enum pipe pipe;
1846 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1847
1848 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1849
1850 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1851 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1852 SDE_AUDIO_POWER_SHIFT);
1853 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1854 port_name(port));
1855 }
1856
1857 if (pch_iir & SDE_AUX_MASK)
1858 dp_aux_irq_handler(dev_priv);
1859
1860 if (pch_iir & SDE_GMBUS)
1861 gmbus_irq_handler(dev_priv);
1862
1863 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1864 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1865
1866 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1867 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1868
1869 if (pch_iir & SDE_POISON)
1870 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1871
1872 if (pch_iir & SDE_FDI_MASK) {
1873 for_each_pipe(dev_priv, pipe)
1874 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1875 pipe_name(pipe),
1876 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1877 }
1878
1879 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1880 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1881
1882 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1883 drm_dbg(&dev_priv->drm,
1884 "PCH transcoder CRC error interrupt\n");
1885
1886 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1887 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1888
1889 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1890 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1891}
1892
1893static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1894{
1895 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1896 enum pipe pipe;
1897
1898 if (err_int & ERR_INT_POISON)
1899 drm_err(&dev_priv->drm, "Poison interrupt\n");
1900
1901 for_each_pipe(dev_priv, pipe) {
1902 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1903 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1904
1905 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1906 if (IS_IVYBRIDGE(dev_priv))
1907 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1908 else
1909 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1910 }
1911 }
1912
1913 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1914}
1915
1916static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1917{
1918 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1919 enum pipe pipe;
1920
1921 if (serr_int & SERR_INT_POISON)
1922 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1923
1924 for_each_pipe(dev_priv, pipe)
1925 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1926 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1927
1928 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1929}
1930
1931static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1932{
1933 enum pipe pipe;
1934 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1935
1936 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1937
1938 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1939 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1940 SDE_AUDIO_POWER_SHIFT_CPT);
1941 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1942 port_name(port));
1943 }
1944
1945 if (pch_iir & SDE_AUX_MASK_CPT)
1946 dp_aux_irq_handler(dev_priv);
1947
1948 if (pch_iir & SDE_GMBUS_CPT)
1949 gmbus_irq_handler(dev_priv);
1950
1951 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1952 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1953
1954 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1955 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1956
1957 if (pch_iir & SDE_FDI_MASK_CPT) {
1958 for_each_pipe(dev_priv, pipe)
1959 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1960 pipe_name(pipe),
1961 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1962 }
1963
1964 if (pch_iir & SDE_ERROR_CPT)
1965 cpt_serr_int_handler(dev_priv);
1966}
1967
1968static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1969{
1970 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1971 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1972 u32 pin_mask = 0, long_mask = 0;
1973
1974 if (ddi_hotplug_trigger) {
1975 u32 dig_hotplug_reg;
1976
1977 /* Locking due to DSI native GPIO sequences */
1978 spin_lock(&dev_priv->irq_lock);
1979 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
1980 spin_unlock(&dev_priv->irq_lock);
1981
1982 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1983 ddi_hotplug_trigger, dig_hotplug_reg,
1984 dev_priv->display.hotplug.pch_hpd,
1985 icp_ddi_port_hotplug_long_detect);
1986 }
1987
1988 if (tc_hotplug_trigger) {
1989 u32 dig_hotplug_reg;
1990
1991 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
1992
1993 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1994 tc_hotplug_trigger, dig_hotplug_reg,
1995 dev_priv->display.hotplug.pch_hpd,
1996 icp_tc_port_hotplug_long_detect);
1997 }
1998
1999 if (pin_mask)
2000 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2001
2002 if (pch_iir & SDE_GMBUS_ICP)
2003 gmbus_irq_handler(dev_priv);
2004}
2005
2006static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2007{
2008 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2009 ~SDE_PORTE_HOTPLUG_SPT;
2010 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2011 u32 pin_mask = 0, long_mask = 0;
2012
2013 if (hotplug_trigger) {
2014 u32 dig_hotplug_reg;
2015
2016 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
2017
2018 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2019 hotplug_trigger, dig_hotplug_reg,
2020 dev_priv->display.hotplug.pch_hpd,
2021 spt_port_hotplug_long_detect);
2022 }
2023
2024 if (hotplug2_trigger) {
2025 u32 dig_hotplug_reg;
2026
2027 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
2028
2029 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2030 hotplug2_trigger, dig_hotplug_reg,
2031 dev_priv->display.hotplug.pch_hpd,
2032 spt_port_hotplug2_long_detect);
2033 }
2034
2035 if (pin_mask)
2036 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2037
2038 if (pch_iir & SDE_GMBUS_CPT)
2039 gmbus_irq_handler(dev_priv);
2040}
2041
2042static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2043 u32 hotplug_trigger)
2044{
2045 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2046
2047 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
2048
2049 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2050 hotplug_trigger, dig_hotplug_reg,
2051 dev_priv->display.hotplug.hpd,
2052 ilk_port_hotplug_long_detect);
2053
2054 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2055}
2056
2057static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2058 u32 de_iir)
2059{
2060 enum pipe pipe;
2061 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2062
2063 if (hotplug_trigger)
2064 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2065
2066 if (de_iir & DE_AUX_CHANNEL_A)
2067 dp_aux_irq_handler(dev_priv);
2068
2069 if (de_iir & DE_GSE)
2070 intel_opregion_asle_intr(dev_priv);
2071
2072 if (de_iir & DE_POISON)
2073 drm_err(&dev_priv->drm, "Poison interrupt\n");
2074
2075 for_each_pipe(dev_priv, pipe) {
2076 if (de_iir & DE_PIPE_VBLANK(pipe))
2077 intel_handle_vblank(dev_priv, pipe);
2078
2079 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2080 flip_done_handler(dev_priv, pipe);
2081
2082 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2083 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2084
2085 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2086 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2087 }
2088
2089 /* check event from PCH */
2090 if (de_iir & DE_PCH_EVENT) {
2091 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2092
2093 if (HAS_PCH_CPT(dev_priv))
2094 cpt_irq_handler(dev_priv, pch_iir);
2095 else
2096 ibx_irq_handler(dev_priv, pch_iir);
2097
2098 /* should clear PCH hotplug event before clear CPU irq */
2099 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2100 }
2101
2102 if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
2103 gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
2104}
2105
2106static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2107 u32 de_iir)
2108{
2109 enum pipe pipe;
2110 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2111
2112 if (hotplug_trigger)
2113 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2114
2115 if (de_iir & DE_ERR_INT_IVB)
2116 ivb_err_int_handler(dev_priv);
2117
2118 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2119 dp_aux_irq_handler(dev_priv);
2120
2121 if (de_iir & DE_GSE_IVB)
2122 intel_opregion_asle_intr(dev_priv);
2123
2124 for_each_pipe(dev_priv, pipe) {
2125 if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2126 intel_handle_vblank(dev_priv, pipe);
2127
2128 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2129 flip_done_handler(dev_priv, pipe);
2130 }
2131
2132 /* check event from PCH */
2133 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2134 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2135
2136 cpt_irq_handler(dev_priv, pch_iir);
2137
2138 /* clear PCH hotplug event before clear CPU irq */
2139 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2140 }
2141}
2142
2143/*
2144 * To handle irqs with the minimum potential races with fresh interrupts, we:
2145 * 1 - Disable Master Interrupt Control.
2146 * 2 - Find the source(s) of the interrupt.
2147 * 3 - Clear the Interrupt Identity bits (IIR).
2148 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2149 * 5 - Re-enable Master Interrupt Control.
2150 */
2151static irqreturn_t ilk_irq_handler(int irq, void *arg)
2152{
2153 struct drm_i915_private *i915 = arg;
2154 void __iomem * const regs = i915->uncore.regs;
2155 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2156 irqreturn_t ret = IRQ_NONE;
2157
2158 if (unlikely(!intel_irqs_enabled(i915)))
2159 return IRQ_NONE;
2160
2161 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2162 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2163
2164 /* disable master interrupt before clearing iir */
2165 de_ier = raw_reg_read(regs, DEIER);
2166 raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2167
2168 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2169 * interrupts will be stored on its back queue, and then we'll be
2170 * able to process them after we restore SDEIER (as soon as we restore
2171 * it, we'll get an interrupt if SDEIIR still has something to process
2172 * due to its back queue). */
2173 if (!HAS_PCH_NOP(i915)) {
2174 sde_ier = raw_reg_read(regs, SDEIER);
2175 raw_reg_write(regs, SDEIER, 0);
2176 }
2177
2178 /* Find, clear, then process each source of interrupt */
2179
2180 gt_iir = raw_reg_read(regs, GTIIR);
2181 if (gt_iir) {
2182 raw_reg_write(regs, GTIIR, gt_iir);
2183 if (GRAPHICS_VER(i915) >= 6)
2184 gen6_gt_irq_handler(to_gt(i915), gt_iir);
2185 else
2186 gen5_gt_irq_handler(to_gt(i915), gt_iir);
2187 ret = IRQ_HANDLED;
2188 }
2189
2190 de_iir = raw_reg_read(regs, DEIIR);
2191 if (de_iir) {
2192 raw_reg_write(regs, DEIIR, de_iir);
2193 if (DISPLAY_VER(i915) >= 7)
2194 ivb_display_irq_handler(i915, de_iir);
2195 else
2196 ilk_display_irq_handler(i915, de_iir);
2197 ret = IRQ_HANDLED;
2198 }
2199
2200 if (GRAPHICS_VER(i915) >= 6) {
2201 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2202 if (pm_iir) {
2203 raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2204 gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
2205 ret = IRQ_HANDLED;
2206 }
2207 }
2208
2209 raw_reg_write(regs, DEIER, de_ier);
2210 if (sde_ier)
2211 raw_reg_write(regs, SDEIER, sde_ier);
2212
2213 pmu_irq_stats(i915, ret);
2214
2215 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2216 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2217
2218 return ret;
2219}
2220
2221static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2222 u32 hotplug_trigger)
2223{
2224 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2225
2226 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
2227
2228 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2229 hotplug_trigger, dig_hotplug_reg,
2230 dev_priv->display.hotplug.hpd,
2231 bxt_port_hotplug_long_detect);
2232
2233 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2234}
2235
2236static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2237{
2238 u32 pin_mask = 0, long_mask = 0;
2239 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2240 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2241
2242 if (trigger_tc) {
2243 u32 dig_hotplug_reg;
2244
2245 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
2246
2247 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2248 trigger_tc, dig_hotplug_reg,
2249 dev_priv->display.hotplug.hpd,
2250 gen11_port_hotplug_long_detect);
2251 }
2252
2253 if (trigger_tbt) {
2254 u32 dig_hotplug_reg;
2255
2256 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
2257
2258 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2259 trigger_tbt, dig_hotplug_reg,
2260 dev_priv->display.hotplug.hpd,
2261 gen11_port_hotplug_long_detect);
2262 }
2263
2264 if (pin_mask)
2265 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2266 else
2267 drm_err(&dev_priv->drm,
2268 "Unexpected DE HPD interrupt 0x%08x\n", iir);
2269}
2270
2271static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2272{
2273 u32 mask;
2274
2275 if (DISPLAY_VER(dev_priv) >= 13)
2276 return TGL_DE_PORT_AUX_DDIA |
2277 TGL_DE_PORT_AUX_DDIB |
2278 TGL_DE_PORT_AUX_DDIC |
2279 XELPD_DE_PORT_AUX_DDID |
2280 XELPD_DE_PORT_AUX_DDIE |
2281 TGL_DE_PORT_AUX_USBC1 |
2282 TGL_DE_PORT_AUX_USBC2 |
2283 TGL_DE_PORT_AUX_USBC3 |
2284 TGL_DE_PORT_AUX_USBC4;
2285 else if (DISPLAY_VER(dev_priv) >= 12)
2286 return TGL_DE_PORT_AUX_DDIA |
2287 TGL_DE_PORT_AUX_DDIB |
2288 TGL_DE_PORT_AUX_DDIC |
2289 TGL_DE_PORT_AUX_USBC1 |
2290 TGL_DE_PORT_AUX_USBC2 |
2291 TGL_DE_PORT_AUX_USBC3 |
2292 TGL_DE_PORT_AUX_USBC4 |
2293 TGL_DE_PORT_AUX_USBC5 |
2294 TGL_DE_PORT_AUX_USBC6;
2295
2297 mask = GEN8_AUX_CHANNEL_A;
2298 if (DISPLAY_VER(dev_priv) >= 9)
2299 mask |= GEN9_AUX_CHANNEL_B |
2300 GEN9_AUX_CHANNEL_C |
2301 GEN9_AUX_CHANNEL_D;
2302
2303 if (DISPLAY_VER(dev_priv) == 11) {
2304 mask |= ICL_AUX_CHANNEL_F;
2305 mask |= ICL_AUX_CHANNEL_E;
2306 }
2307
2308 return mask;
2309}
2310
2311static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2312{
2313 if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2314 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2315 else if (DISPLAY_VER(dev_priv) >= 11)
2316 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2317 else if (DISPLAY_VER(dev_priv) >= 9)
2318 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2319 else
2320 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2321}
2322
2323static void
2324gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2325{
2326 bool found = false;
2327
2328 if (iir & GEN8_DE_MISC_GSE) {
2329 intel_opregion_asle_intr(dev_priv);
2330 found = true;
2331 }
2332
2333 if (iir & GEN8_DE_EDP_PSR) {
2334 struct intel_encoder *encoder;
2335 u32 psr_iir;
2336 i915_reg_t iir_reg;
2337
2338 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2339 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2340
2341 if (DISPLAY_VER(dev_priv) >= 12)
2342 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2343 else
2344 iir_reg = EDP_PSR_IIR;
2345
2346 psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
2347
2348 if (psr_iir)
2349 found = true;
2350
2351 intel_psr_irq_handler(intel_dp, psr_iir);
2352
2353 /* prior to GEN12 there is only one EDP PSR */
2354 if (DISPLAY_VER(dev_priv) < 12)
2355 break;
2356 }
2357 }
2358
2359 if (!found)
2360 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2361}
2362
2363static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2364 u32 te_trigger)
2365{
2366 enum pipe pipe = INVALID_PIPE;
2367 enum transcoder dsi_trans;
2368 enum port port;
2369 u32 val, tmp;
2370
2371 /*
2372 * In case of dual link, TE comes from DSI_1;
2373 * this is to check if dual link is enabled
2374 */
2375 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2376 val &= PORT_SYNC_MODE_ENABLE;
2377
2378 /*
2379 * If dual link is enabled, then read the DSI_0
2380 * transcoder registers.
2381 */
2382 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2383 PORT_A : PORT_B;
2384 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2385
2386 /* Check if DSI configured in command mode */
2387 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2388 val = val & OP_MODE_MASK;
2389
2390 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2391 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2392 return;
2393 }
2394
2395 /* Get PIPE for handling VBLANK event */
2396 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2397 switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2398 case TRANS_DDI_EDP_INPUT_A_ON:
2399 pipe = PIPE_A;
2400 break;
2401 case TRANS_DDI_EDP_INPUT_B_ONOFF:
2402 pipe = PIPE_B;
2403 break;
2404 case TRANS_DDI_EDP_INPUT_C_ONOFF:
2405 pipe = PIPE_C;
2406 break;
2407 default:
2408 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2409 return;
2410 }
2411
2412 intel_handle_vblank(dev_priv, pipe);
2413
2414 /* clear TE in dsi IIR */
2415 port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2416 tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2417}
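
/*
 * Truth table for the port selection above (illustrative): with dual
 * link enabled (val != 0), a TE on DSI_1 is still handled via the DSI_0
 * transcoder:
 *
 *	te_trigger	dual link	port / transcoder used
 *	DSI0_TE		don't care	PORT_A / TRANSCODER_DSI_0
 *	DSI1_TE		enabled		PORT_A / TRANSCODER_DSI_0
 *	DSI1_TE		disabled	PORT_B / TRANSCODER_DSI_1
 */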
2418
2419static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2420{
2421 if (DISPLAY_VER(i915) >= 9)
2422 return GEN9_PIPE_PLANE1_FLIP_DONE;
2423 else
2424 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2425}
2426
2427u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2428{
2429 u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2430
2431 if (DISPLAY_VER(dev_priv) >= 13)
2432 mask |= XELPD_PIPE_SOFT_UNDERRUN |
2433 XELPD_PIPE_HARD_UNDERRUN;
2434
2435 return mask;
2436}
2437
2438static irqreturn_t
2439gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2440{
2441 irqreturn_t ret = IRQ_NONE;
2442 u32 iir;
2443 enum pipe pipe;
2444
2445 drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2446
2447 if (master_ctl & GEN8_DE_MISC_IRQ) {
2448 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2449 if (iir) {
2450 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2451 ret = IRQ_HANDLED;
2452 gen8_de_misc_irq_handler(dev_priv, iir);
2453 } else {
2454 drm_err(&dev_priv->drm,
2455 "The master control interrupt lied (DE MISC)!\n");
2456 }
2457 }
2458
2459 if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2460 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2461 if (iir) {
2462 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2463 ret = IRQ_HANDLED;
2464 gen11_hpd_irq_handler(dev_priv, iir);
2465 } else {
2466 drm_err(&dev_priv->drm,
2467 "The master control interrupt lied, (DE HPD)!\n");
2468 }
2469 }
2470
2471 if (master_ctl & GEN8_DE_PORT_IRQ) {
2472 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2473 if (iir) {
2474 bool found = false;
2475
2476 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2477 ret = IRQ_HANDLED;
2478
2479 if (iir & gen8_de_port_aux_mask(dev_priv)) {
2480 dp_aux_irq_handler(dev_priv);
2481 found = true;
2482 }
2483
2484 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2485 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2486
2487 if (hotplug_trigger) {
2488 bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2489 found = true;
2490 }
2491 } else if (IS_BROADWELL(dev_priv)) {
2492 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2493
2494 if (hotplug_trigger) {
2495 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2496 found = true;
2497 }
2498 }
2499
2500 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2501 (iir & BXT_DE_PORT_GMBUS)) {
2502 gmbus_irq_handler(dev_priv);
2503 found = true;
2504 }
2505
2506 if (DISPLAY_VER(dev_priv) >= 11) {
2507 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2508
2509 if (te_trigger) {
2510 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2511 found = true;
2512 }
2513 }
2514
2515 if (!found)
2516 drm_err(&dev_priv->drm,
2517 "Unexpected DE Port interrupt\n");
2518 } else {
2519 drm_err(&dev_priv->drm,
2520 "The master control interrupt lied (DE PORT)!\n");
2521 }
2522 }
2523
2524 for_each_pipe(dev_priv, pipe) {
2525 u32 fault_errors;
2526
2527 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2528 continue;
2529
2530 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2531 if (!iir) {
2532 drm_err(&dev_priv->drm,
2533 "The master control interrupt lied (DE PIPE)!\n");
2534 continue;
2535 }
2536
2537 ret = IRQ_HANDLED;
2538 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2539
2540 if (iir & GEN8_PIPE_VBLANK)
2541 intel_handle_vblank(dev_priv, pipe);
2542
2543 if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2544 flip_done_handler(dev_priv, pipe);
2545
2546 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2547 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2548
2549 if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2550 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2551
2552 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2553 if (fault_errors)
2554 drm_err(&dev_priv->drm,
2555 "Fault errors on pipe %c: 0x%08x\n",
2556 pipe_name(pipe),
2557 fault_errors);
2558 }
2559
2560 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2561 master_ctl & GEN8_DE_PCH_IRQ) {
2562 /*
2563 * FIXME(BDW): Assume for now that the new interrupt handling
2564 * scheme also closed the SDE interrupt handling race we've seen
2565 * on older pch-split platforms. But this needs testing.
2566 */
2567 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2568 if (iir) {
2569 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2570 ret = IRQ_HANDLED;
2571
2572 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2573 icp_irq_handler(dev_priv, iir);
2574 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2575 spt_irq_handler(dev_priv, iir);
2576 else
2577 cpt_irq_handler(dev_priv, iir);
2578 } else {
2579 /*
2580 * Like on previous PCH there seems to be something
2581 * fishy going on with forwarding PCH interrupts.
2582 */
2583 drm_dbg(&dev_priv->drm,
2584 "The master control interrupt lied (SDE)!\n");
2585 }
2586 }
2587
2588 return ret;
2589}
2590
2591static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2592{
2593 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2594
2595 /*
2596 * Now with master disabled, get a sample of level indications
2597 * for this interrupt. Indications will be cleared on related acks.
2598 * New indications can and will light up during processing,
2599 	 * and will generate a new interrupt once the master is re-enabled.
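	 * Reading it back here also doubles as a posting read, flushing
	 * the disable write to the hardware before we act on the sample.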
2600 */
2601 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2602}
2603
2604static inline void gen8_master_intr_enable(void __iomem * const regs)
2605{
2606 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2607}
2608
2609static irqreturn_t gen8_irq_handler(int irq, void *arg)
2610{
2611 struct drm_i915_private *dev_priv = arg;
2612 void __iomem * const regs = dev_priv->uncore.regs;
2613 u32 master_ctl;
2614
2615 if (!intel_irqs_enabled(dev_priv))
2616 return IRQ_NONE;
2617
2618 master_ctl = gen8_master_intr_disable(regs);
2619 if (!master_ctl) {
2620 gen8_master_intr_enable(regs);
2621 return IRQ_NONE;
2622 }
2623
2624 /* Find, queue (onto bottom-halves), then clear each source */
2625 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2626
2627 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
2628 if (master_ctl & ~GEN8_GT_IRQS) {
2629 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2630 gen8_de_irq_handler(dev_priv, master_ctl);
2631 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2632 }
2633
2634 gen8_master_intr_enable(regs);
2635
2636 pmu_irq_stats(dev_priv, IRQ_HANDLED);
2637
2638 return IRQ_HANDLED;
2639}
2640
2641static u32
2642gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2643{
2644 void __iomem * const regs = i915->uncore.regs;
2645 u32 iir;
2646
2647 if (!(master_ctl & GEN11_GU_MISC_IRQ))
2648 return 0;
2649
2650 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2651 if (likely(iir))
2652 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2653
2654 return iir;
2655}
2656
2657static void
2658gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2659{
2660 if (iir & GEN11_GU_MISC_GSE)
2661 intel_opregion_asle_intr(i915);
2662}
2663
2664static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2665{
2666 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2667
2668 /*
2669 * Now with master disabled, get a sample of level indications
2670 * for this interrupt. Indications will be cleared on related acks.
2671 * New indications can and will light up during processing,
2672 	 * and will generate a new interrupt once the master is re-enabled.
2673 */
2674 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2675}
2676
2677static inline void gen11_master_intr_enable(void __iomem * const regs)
2678{
2679 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2680}
2681
2682static void
2683gen11_display_irq_handler(struct drm_i915_private *i915)
2684{
2685 void __iomem * const regs = i915->uncore.regs;
2686 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2687
2688 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2689 /*
2690 	 * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
2691 * for the display related bits.
2692 */
2693 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2694 gen8_de_irq_handler(i915, disp_ctl);
2695 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2696 GEN11_DISPLAY_IRQ_ENABLE);
2697
2698 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2699}
2700
2701static irqreturn_t gen11_irq_handler(int irq, void *arg)
2702{
2703 struct drm_i915_private *i915 = arg;
2704 void __iomem * const regs = i915->uncore.regs;
2705 struct intel_gt *gt = to_gt(i915);
2706 u32 master_ctl;
2707 u32 gu_misc_iir;
2708
2709 if (!intel_irqs_enabled(i915))
2710 return IRQ_NONE;
2711
2712 master_ctl = gen11_master_intr_disable(regs);
2713 if (!master_ctl) {
2714 gen11_master_intr_enable(regs);
2715 return IRQ_NONE;
2716 }
2717
2718 /* Find, queue (onto bottom-halves), then clear each source */
2719 gen11_gt_irq_handler(gt, master_ctl);
2720
2721 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
2722 if (master_ctl & GEN11_DISPLAY_IRQ)
2723 gen11_display_irq_handler(i915);
2724
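	/*
	 * GU_MISC is acked here, while the master is still disabled, but
	 * handled only after re-enabling it below, so that the window
	 * with the master interrupt gated stays as short as possible.
	 */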
2725 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2726
2727 gen11_master_intr_enable(regs);
2728
2729 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2730
2731 pmu_irq_stats(i915, IRQ_HANDLED);
2732
2733 return IRQ_HANDLED;
2734}
2735
2736static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2737{
2738 u32 val;
2739
2740 /* First disable interrupts */
2741 raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2742
2743 /* Get the indication levels and ack the master unit */
2744 val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2745 if (unlikely(!val))
2746 return 0;
2747
2748 raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2749
2750 return val;
2751}
2752
2753static inline void dg1_master_intr_enable(void __iomem * const regs)
2754{
2755 raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2756}
2757
2758static irqreturn_t dg1_irq_handler(int irq, void *arg)
2759{
2760 struct drm_i915_private * const i915 = arg;
2761 struct intel_gt *gt = to_gt(i915);
2762 void __iomem * const regs = gt->uncore->regs;
2763 u32 master_tile_ctl, master_ctl;
2764 u32 gu_misc_iir;
2765
2766 if (!intel_irqs_enabled(i915))
2767 return IRQ_NONE;
2768
2769 master_tile_ctl = dg1_master_intr_disable(regs);
2770 if (!master_tile_ctl) {
2771 dg1_master_intr_enable(regs);
2772 return IRQ_NONE;
2773 }
2774
2775 /* FIXME: we only support tile 0 for now. */
2776 if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2777 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2778 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2779 } else {
2780 drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
2781 master_tile_ctl);
2782 dg1_master_intr_enable(regs);
2783 return IRQ_NONE;
2784 }
2785
2786 gen11_gt_irq_handler(gt, master_ctl);
2787
2788 if (master_ctl & GEN11_DISPLAY_IRQ)
2789 gen11_display_irq_handler(i915);
2790
2791 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2792
2793 dg1_master_intr_enable(regs);
2794
2795 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2796
2797 pmu_irq_stats(i915, IRQ_HANDLED);
2798
2799 return IRQ_HANDLED;
2800}
2801
2802/* Called from drm generic code, passed 'crtc' which
2803 * we use as a pipe index
2804 */
2805int i8xx_enable_vblank(struct drm_crtc *crtc)
2806{
2807 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2808 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2809 unsigned long irqflags;
2810
2811 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2812 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2813 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2814
2815 return 0;
2816}
2817
2818int i915gm_enable_vblank(struct drm_crtc *crtc)
2819{
2820 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2821
2822 /*
2823 * Vblank interrupts fail to wake the device up from C2+.
2824 * Disabling render clock gating during C-states avoids
2825 * the problem. There is a small power cost so we do this
2826 * only when vblank interrupts are actually enabled.
2827 */
2828 if (dev_priv->vblank_enabled++ == 0)
2829 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2830
2831 return i8xx_enable_vblank(crtc);
2832}
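/*
 * Minimal usage sketch (hypothetical caller, not part of the driver):
 * DRM core keeps enable/disable calls balanced, so the vblank_enabled
 * count above pairs each SCPD0 clock-gating disable with the re-enable
 * in i915gm_disable_vblank() further below.
 */
static inline void example_i915gm_vblank_user(struct drm_crtc *crtc)
{
	/* first enable turns off render clock gating via SCPD0 */
	i915gm_enable_vblank(crtc);

	/* ... consume vblank events ... */

	/* last disable restores clock gating */
	i915gm_disable_vblank(crtc);
}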
2833
2834int i965_enable_vblank(struct drm_crtc *crtc)
2835{
2836 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2837 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2838 unsigned long irqflags;
2839
2840 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2841 i915_enable_pipestat(dev_priv, pipe,
2842 PIPE_START_VBLANK_INTERRUPT_STATUS);
2843 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2844
2845 return 0;
2846}
2847
2848int ilk_enable_vblank(struct drm_crtc *crtc)
2849{
2850 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2851 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2852 unsigned long irqflags;
2853 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2854 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2855
2856 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2857 ilk_enable_display_irq(dev_priv, bit);
2858 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2859
2860 	/* Even though there is no DMC, the frame counter can get stuck when
2861 	 * PSR is active, as no frames are generated.
2862 */
2863 if (HAS_PSR(dev_priv))
2864 drm_crtc_vblank_restore(crtc);
2865
2866 return 0;
2867}
2868
2869static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2870 bool enable)
2871{
2872 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2873 enum port port;
2874
2875 if (!(intel_crtc->mode_flags &
2876 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2877 return false;
2878
2879 	/* for dual-link cases we consider the TE from the slave */
2880 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2881 port = PORT_B;
2882 else
2883 port = PORT_A;
2884
2885 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
2886 enable ? 0 : DSI_TE_EVENT);
2887
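	/*
	 * The identity register is assumed write-1-to-clear: an rmw with
	 * clear=0/set=0 reads it and writes the same value back, acking
	 * any TE events that were already pending.
	 */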
2888 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2889
2890 return true;
2891}
2892
2893int bdw_enable_vblank(struct drm_crtc *_crtc)
2894{
2895 struct intel_crtc *crtc = to_intel_crtc(_crtc);
2896 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2897 enum pipe pipe = crtc->pipe;
2898 unsigned long irqflags;
2899
2900 if (gen11_dsi_configure_te(crtc, true))
2901 return 0;
2902
2903 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2904 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2905 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2906
2907 	/* Even if there is no DMC, the frame counter can get stuck when
2908 	 * PSR is active, as no frames are generated, so check only for PSR.
2909 */
2910 if (HAS_PSR(dev_priv))
2911 drm_crtc_vblank_restore(&crtc->base);
2912
2913 return 0;
2914}
2915
2916/* Called from drm generic code, passed 'crtc' which
2917 * we use as a pipe index
2918 */
2919void i8xx_disable_vblank(struct drm_crtc *crtc)
2920{
2921 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2922 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2923 unsigned long irqflags;
2924
2925 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2926 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2927 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2928}
2929
2930void i915gm_disable_vblank(struct drm_crtc *crtc)
2931{
2932 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2933
2934 i8xx_disable_vblank(crtc);
2935
2936 if (--dev_priv->vblank_enabled == 0)
2937 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2938}
2939
2940void i965_disable_vblank(struct drm_crtc *crtc)
2941{
2942 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2943 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2944 unsigned long irqflags;
2945
2946 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2947 i915_disable_pipestat(dev_priv, pipe,
2948 PIPE_START_VBLANK_INTERRUPT_STATUS);
2949 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2950}
2951
2952void ilk_disable_vblank(struct drm_crtc *crtc)
2953{
2954 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2955 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2956 unsigned long irqflags;
2957 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2958 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2959
2960 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2961 ilk_disable_display_irq(dev_priv, bit);
2962 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2963}
2964
2965void bdw_disable_vblank(struct drm_crtc *_crtc)
2966{
2967 struct intel_crtc *crtc = to_intel_crtc(_crtc);
2968 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2969 enum pipe pipe = crtc->pipe;
2970 unsigned long irqflags;
2971
2972 if (gen11_dsi_configure_te(crtc, false))
2973 return;
2974
2975 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2976 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2977 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2978}
2979
2980static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2981{
2982 struct intel_uncore *uncore = &dev_priv->uncore;
2983
2984 if (HAS_PCH_NOP(dev_priv))
2985 return;
2986
2987 GEN3_IRQ_RESET(uncore, SDE);
2988
2989 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2990 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2991}
2992
2993static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2994{
2995 struct intel_uncore *uncore = &dev_priv->uncore;
2996
2997 if (IS_CHERRYVIEW(dev_priv))
2998 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2999 else
3000 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
3001
3002 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3003 intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
3004
3005 i9xx_pipestat_irq_reset(dev_priv);
3006
3007 GEN3_IRQ_RESET(uncore, VLV_);
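	/* ~0u doubles as the "display irqs fully masked" sentinel (see postinstall) */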
3008 dev_priv->irq_mask = ~0u;
3009}
3010
3011static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3012{
3013 struct intel_uncore *uncore = &dev_priv->uncore;
3015 u32 pipestat_mask;
3016 u32 enable_mask;
3017 enum pipe pipe;
3018
3019 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3020
3021 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3022 for_each_pipe(dev_priv, pipe)
3023 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3024
3025 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3026 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3027 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3028 I915_LPE_PIPE_A_INTERRUPT |
3029 I915_LPE_PIPE_B_INTERRUPT;
3030
3031 if (IS_CHERRYVIEW(dev_priv))
3032 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3033 I915_LPE_PIPE_C_INTERRUPT;
3034
3035 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
3036
3037 dev_priv->irq_mask = ~enable_mask;
3038
3039 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3040}
3041
3042 /* drm_dma.h hooks */
3044static void ilk_irq_reset(struct drm_i915_private *dev_priv)
3045{
3046 struct intel_uncore *uncore = &dev_priv->uncore;
3047
3048 GEN3_IRQ_RESET(uncore, DE);
3049 dev_priv->irq_mask = ~0u;
3050
3051 if (GRAPHICS_VER(dev_priv) == 7)
3052 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3053
3054 if (IS_HASWELL(dev_priv)) {
3055 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3056 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3057 }
3058
3059 gen5_gt_irq_reset(to_gt(dev_priv));
3060
3061 ibx_irq_reset(dev_priv);
3062}
3063
3064static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3065{
3066 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3067 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3068
3069 gen5_gt_irq_reset(to_gt(dev_priv));
3070
3071 spin_lock_irq(&dev_priv->irq_lock);
3072 if (dev_priv->display_irqs_enabled)
3073 vlv_display_irq_reset(dev_priv);
3074 spin_unlock_irq(&dev_priv->irq_lock);
3075}
3076
3077static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
3078{
3079 struct intel_uncore *uncore = &dev_priv->uncore;
3080 enum pipe pipe;
3081
3082 if (!HAS_DISPLAY(dev_priv))
3083 return;
3084
3085 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3086 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3087
3088 for_each_pipe(dev_priv, pipe)
3089 if (intel_display_power_is_enabled(dev_priv,
3090 POWER_DOMAIN_PIPE(pipe)))
3091 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3092
3093 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3094 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3095}
3096
3097static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3098{
3099 struct intel_uncore *uncore = &dev_priv->uncore;
3100
3101 gen8_master_intr_disable(uncore->regs);
3102
3103 gen8_gt_irq_reset(to_gt(dev_priv));
3104 gen8_display_irq_reset(dev_priv);
3105 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3106
3107 if (HAS_PCH_SPLIT(dev_priv))
3108 ibx_irq_reset(dev_priv);
3110}
3111
3112static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3113{
3114 struct intel_uncore *uncore = &dev_priv->uncore;
3115 enum pipe pipe;
3116 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3117 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3118
3119 if (!HAS_DISPLAY(dev_priv))
3120 return;
3121
3122 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3123
3124 if (DISPLAY_VER(dev_priv) >= 12) {
3125 enum transcoder trans;
3126
3127 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3128 enum intel_display_power_domain domain;
3129
3130 domain = POWER_DOMAIN_TRANSCODER(trans);
3131 if (!intel_display_power_is_enabled(dev_priv, domain))
3132 continue;
3133
3134 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3135 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3136 }
3137 } else {
3138 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3139 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3140 }
3141
3142 for_each_pipe(dev_priv, pipe)
3143 if (intel_display_power_is_enabled(dev_priv,
3144 POWER_DOMAIN_PIPE(pipe)))
3145 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3146
3147 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3148 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3149 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3150
3151 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3152 GEN3_IRQ_RESET(uncore, SDE);
3153}
3154
3155static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3156{
3157 struct intel_gt *gt = to_gt(dev_priv);
3158 struct intel_uncore *uncore = gt->uncore;
3159
3160 gen11_master_intr_disable(dev_priv->uncore.regs);
3161
3162 gen11_gt_irq_reset(gt);
3163 gen11_display_irq_reset(dev_priv);
3164
3165 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3166 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3167}
3168
3169static void dg1_irq_reset(struct drm_i915_private *dev_priv)
3170{
3171 struct intel_gt *gt = to_gt(dev_priv);
3172 struct intel_uncore *uncore = gt->uncore;
3173
3174 dg1_master_intr_disable(dev_priv->uncore.regs);
3175
3176 gen11_gt_irq_reset(gt);
3177 gen11_display_irq_reset(dev_priv);
3178
3179 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3180 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3181}
3182
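/*
 * The display pipe IRQ registers sit in a power well and lose their
 * contents while it is off, so the per-pipe mask/enable state is
 * re-programmed when the well comes back up (below) and torn down
 * before it goes away.
 */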
3183void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3184 u8 pipe_mask)
3185{
3186 struct intel_uncore *uncore = &dev_priv->uncore;
3187 u32 extra_ier = GEN8_PIPE_VBLANK |
3188 gen8_de_pipe_underrun_mask(dev_priv) |
3189 gen8_de_pipe_flip_done_mask(dev_priv);
3190 enum pipe pipe;
3191
3192 spin_lock_irq(&dev_priv->irq_lock);
3193
3194 if (!intel_irqs_enabled(dev_priv)) {
3195 spin_unlock_irq(&dev_priv->irq_lock);
3196 return;
3197 }
3198
3199 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3200 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3201 dev_priv->de_irq_mask[pipe],
3202 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3203
3204 spin_unlock_irq(&dev_priv->irq_lock);
3205}
3206
3207void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3208 u8 pipe_mask)
3209{
3210 struct intel_uncore *uncore = &dev_priv->uncore;
3211 enum pipe pipe;
3212
3213 spin_lock_irq(&dev_priv->irq_lock);
3214
3215 if (!intel_irqs_enabled(dev_priv)) {
3216 spin_unlock_irq(&dev_priv->irq_lock);
3217 return;
3218 }
3219
3220 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3221 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3222
3223 spin_unlock_irq(&dev_priv->irq_lock);
3224
3225 /* make sure we're done processing display irqs */
3226 intel_synchronize_irq(dev_priv);
3227}
3228
3229static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3230{
3231 struct intel_uncore *uncore = &dev_priv->uncore;
3232
3233 intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
3234 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3235
3236 gen8_gt_irq_reset(to_gt(dev_priv));
3237
3238 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3239
3240 spin_lock_irq(&dev_priv->irq_lock);
3241 if (dev_priv->display_irqs_enabled)
3242 vlv_display_irq_reset(dev_priv);
3243 spin_unlock_irq(&dev_priv->irq_lock);
3244}
3245
3246static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
3247 enum hpd_pin pin)
3248{
3249 switch (pin) {
3250 case HPD_PORT_A:
3251 /*
3252 * When CPU and PCH are on the same package, port A
3253 * HPD must be enabled in both north and south.
3254 */
3255 return HAS_PCH_LPT_LP(i915) ?
3256 PORTA_HOTPLUG_ENABLE : 0;
3257 case HPD_PORT_B:
3258 return PORTB_HOTPLUG_ENABLE |
3259 PORTB_PULSE_DURATION_2ms;
3260 case HPD_PORT_C:
3261 return PORTC_HOTPLUG_ENABLE |
3262 PORTC_PULSE_DURATION_2ms;
3263 case HPD_PORT_D:
3264 return PORTD_HOTPLUG_ENABLE |
3265 PORTD_PULSE_DURATION_2ms;
3266 default:
3267 return 0;
3268 }
3269}
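/*
 * Sketch of how intel_hpd_hotplug_enables() is assumed to consume the
 * per-pin callback above: OR the result for every pin into one value
 * (pseudo-code; the iteration helper name is illustrative):
 *
 *	u32 enables = 0;
 *	for_each_hpd_pin(pin)
 *		enables |= ibx_hotplug_enables(i915, pin);
 *
 * The result then feeds the set-bits argument of intel_uncore_rmw()
 * in ibx_hpd_detection_setup() below.
 */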
3270
3271static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3272{
3273 /*
3274 * Enable digital hotplug on the PCH, and configure the DP short pulse
3275 	 * duration to 2ms (which is the minimum in the DisplayPort spec).
3276 * The pulse duration bits are reserved on LPT+.
3277 */
3278 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3279 PORTA_HOTPLUG_ENABLE |
3280 PORTB_HOTPLUG_ENABLE |
3281 PORTC_HOTPLUG_ENABLE |
3282 PORTD_HOTPLUG_ENABLE |
3283 PORTB_PULSE_DURATION_MASK |
3284 PORTC_PULSE_DURATION_MASK |
3285 PORTD_PULSE_DURATION_MASK,
3286 intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
3287}
3288
3289static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3290{
3291 u32 hotplug_irqs, enabled_irqs;
3292
3293 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3294 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3295
3296 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3297
3298 ibx_hpd_detection_setup(dev_priv);
3299}
3300
3301static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
3302 enum hpd_pin pin)
3303{
3304 switch (pin) {
3305 case HPD_PORT_A:
3306 case HPD_PORT_B:
3307 case HPD_PORT_C:
3308 case HPD_PORT_D:
3309 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
3310 default:
3311 return 0;
3312 }
3313}
3314
3315static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
3316 enum hpd_pin pin)
3317{
3318 switch (pin) {
3319 case HPD_PORT_TC1:
3320 case HPD_PORT_TC2:
3321 case HPD_PORT_TC3:
3322 case HPD_PORT_TC4:
3323 case HPD_PORT_TC5:
3324 case HPD_PORT_TC6:
3325 return ICP_TC_HPD_ENABLE(pin);
3326 default:
3327 return 0;
3328 }
3329}
3330
3331static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3332{
3333 intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
3334 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3335 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
3336 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
3337 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
3338 intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
3339}
3340
3341static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3342{
3343 intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
3344 ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3345 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3346 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3347 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
3348 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
3349 ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
3350 intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
3351}
3352
3353static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3354{
3355 u32 hotplug_irqs, enabled_irqs;
3356
3357 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3358 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3359
3360 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3361 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3362
3363 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3364
3365 icp_ddi_hpd_detection_setup(dev_priv);
3366 icp_tc_hpd_detection_setup(dev_priv);
3367}
3368
3369static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
3370 enum hpd_pin pin)
3371{
3372 switch (pin) {
3373 case HPD_PORT_TC1:
3374 case HPD_PORT_TC2:
3375 case HPD_PORT_TC3:
3376 case HPD_PORT_TC4:
3377 case HPD_PORT_TC5:
3378 case HPD_PORT_TC6:
3379 return GEN11_HOTPLUG_CTL_ENABLE(pin);
3380 default:
3381 return 0;
3382 }
3383}
3384
3385static void dg1_hpd_invert(struct drm_i915_private *i915)
3386{
3387 u32 val = (INVERT_DDIA_HPD |
3388 INVERT_DDIB_HPD |
3389 INVERT_DDIC_HPD |
3390 INVERT_DDID_HPD);
3391 intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
3392}
3393
3394static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3395{
3396 dg1_hpd_invert(dev_priv);
3397 icp_hpd_irq_setup(dev_priv);
3398}
3399
3400static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3401{
3402 intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
3403 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3404 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3405 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3406 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3407 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3408 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3409 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3410}
3411
3412static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3413{
3414 intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
3415 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3416 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3417 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3418 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3419 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3420 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3421 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3422}
3423
3424static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3425{
3426 u32 hotplug_irqs, enabled_irqs;
3427
3428 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3429 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3430
3431 intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
3432 ~enabled_irqs & hotplug_irqs);
3433 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3434
3435 gen11_tc_hpd_detection_setup(dev_priv);
3436 gen11_tbt_hpd_detection_setup(dev_priv);
3437
3438 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3439 icp_hpd_irq_setup(dev_priv);
3440}
3441
3442static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3443 enum hpd_pin pin)
3444{
3445 switch (pin) {
3446 case HPD_PORT_A:
3447 return PORTA_HOTPLUG_ENABLE;
3448 case HPD_PORT_B:
3449 return PORTB_HOTPLUG_ENABLE;
3450 case HPD_PORT_C:
3451 return PORTC_HOTPLUG_ENABLE;
3452 case HPD_PORT_D:
3453 return PORTD_HOTPLUG_ENABLE;
3454 default:
3455 return 0;
3456 }
3457}
3458
3459static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3460 enum hpd_pin pin)
3461{
3462 switch (pin) {
3463 case HPD_PORT_E:
3464 return PORTE_HOTPLUG_ENABLE;
3465 default:
3466 return 0;
3467 }
3468}
3469
3470static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3471{
3472 /* Display WA #1179 WaHardHangonHotPlug: cnp */
3473 if (HAS_PCH_CNP(dev_priv)) {
3474 intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
3475 CHASSIS_CLK_REQ_DURATION(0xf));
3476 }
3477
3478 /* Enable digital hotplug on the PCH */
3479 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3480 PORTA_HOTPLUG_ENABLE |
3481 PORTB_HOTPLUG_ENABLE |
3482 PORTC_HOTPLUG_ENABLE |
3483 PORTD_HOTPLUG_ENABLE,
3484 intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
3485
3486 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
3487 intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
3488}
3489
3490static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3491{
3492 u32 hotplug_irqs, enabled_irqs;
3493
3494 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3495 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3496
3497 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3498 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3499
3500 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3501
3502 spt_hpd_detection_setup(dev_priv);
3503}
3504
3505static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3506 enum hpd_pin pin)
3507{
3508 switch (pin) {
3509 case HPD_PORT_A:
3510 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3511 DIGITAL_PORTA_PULSE_DURATION_2ms;
3512 default:
3513 return 0;
3514 }
3515}
3516
3517static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3518{
3519 /*
3520 * Enable digital hotplug on the CPU, and configure the DP short pulse
3521 	 * duration to 2ms (which is the minimum in the DisplayPort spec).
3522 * The pulse duration bits are reserved on HSW+.
3523 */
3524 intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
3525 DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
3526 intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
3527}
3528
3529static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3530{
3531 u32 hotplug_irqs, enabled_irqs;
3532
3533 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3534 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3535
3536 if (DISPLAY_VER(dev_priv) >= 8)
3537 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3538 else
3539 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3540
3541 ilk_hpd_detection_setup(dev_priv);
3542
3543 ibx_hpd_irq_setup(dev_priv);
3544}
3545
3546static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3547 enum hpd_pin pin)
3548{
3549 u32 hotplug;
3550
3551 switch (pin) {
3552 case HPD_PORT_A:
3553 hotplug = PORTA_HOTPLUG_ENABLE;
3554 if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3555 hotplug |= BXT_DDIA_HPD_INVERT;
3556 return hotplug;
3557 case HPD_PORT_B:
3558 hotplug = PORTB_HOTPLUG_ENABLE;
3559 if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3560 hotplug |= BXT_DDIB_HPD_INVERT;
3561 return hotplug;
3562 case HPD_PORT_C:
3563 hotplug = PORTC_HOTPLUG_ENABLE;
3564 if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3565 hotplug |= BXT_DDIC_HPD_INVERT;
3566 return hotplug;
3567 default:
3568 return 0;
3569 }
3570}
3571
3572static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3573{
3574 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3575 PORTA_HOTPLUG_ENABLE |
3576 PORTB_HOTPLUG_ENABLE |
3577 PORTC_HOTPLUG_ENABLE |
3578 BXT_DDI_HPD_INVERT_MASK,
3579 intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
3580}
3581
3582static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3583{
3584 u32 hotplug_irqs, enabled_irqs;
3585
3586 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3587 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3588
3589 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3590
3591 bxt_hpd_detection_setup(dev_priv);
3592}
3593
3594/*
3595 * SDEIER is also touched by the interrupt handler to work around missed PCH
3596  * interrupts. Hence we can't update it after the interrupt handler is enabled;
3597 * instead we unconditionally enable all PCH interrupt sources here, but then
3598 * only unmask them as needed with SDEIMR.
3599 *
3600 * Note that we currently do this after installing the interrupt handler,
3601 * but before we enable the master interrupt. That should be sufficient
3602 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3603 * interrupts could still race.
3604 */
3605static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3606{
3607 struct intel_uncore *uncore = &dev_priv->uncore;
3608 u32 mask;
3609
3610 if (HAS_PCH_NOP(dev_priv))
3611 return;
3612
3613 if (HAS_PCH_IBX(dev_priv))
3614 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3615 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3616 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3617 else
3618 mask = SDE_GMBUS_CPT;
3619
3620 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3621}
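/*
 * Illustrative sketch of what the GEN3_IRQ_INIT() call above is
 * assumed to expand to (macro internals not verified here; the SDE
 * register names are the ones the comment before the function uses):
 *
 *	intel_uncore_write(uncore, SDEIMR, ~mask);	<- masking policy
 *	intel_uncore_write(uncore, SDEIIR, 0xffffffff);	<- ack stale bits
 *	intel_uncore_write(uncore, SDEIER, 0xffffffff);	<- enable all
 *
 * IER carries "all sources on" while SDEIMR holds the real policy,
 * matching the unconditional-enable strategy described above.
 */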
3622
3623static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3624{
3625 struct intel_uncore *uncore = &dev_priv->uncore;
3626 u32 display_mask, extra_mask;
3627
3628 if (GRAPHICS_VER(dev_priv) >= 7) {
3629 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3630 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3631 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3632 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3633 DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3634 DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3635 DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3636 DE_DP_A_HOTPLUG_IVB);
3637 } else {
3638 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3639 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3640 DE_PIPEA_CRC_DONE | DE_POISON);
3641 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3642 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3643 DE_PLANE_FLIP_DONE(PLANE_A) |
3644 DE_PLANE_FLIP_DONE(PLANE_B) |
3645 DE_DP_A_HOTPLUG);
3646 }
3647
3648 if (IS_HASWELL(dev_priv)) {
3649 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3650 display_mask |= DE_EDP_PSR_INT_HSW;
3651 }
3652
3653 if (IS_IRONLAKE_M(dev_priv))
3654 extra_mask |= DE_PCU_EVENT;
3655
3656 dev_priv->irq_mask = ~display_mask;
3657
3658 ibx_irq_postinstall(dev_priv);
3659
3660 gen5_gt_irq_postinstall(to_gt(dev_priv));
3661
3662 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3663 display_mask | extra_mask);
3664}
3665
3666void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3667{
3668 lockdep_assert_held(&dev_priv->irq_lock);
3669
3670 if (dev_priv->display_irqs_enabled)
3671 return;
3672
3673 dev_priv->display_irqs_enabled = true;
3674
3675 if (intel_irqs_enabled(dev_priv)) {
3676 vlv_display_irq_reset(dev_priv);
3677 vlv_display_irq_postinstall(dev_priv);
3678 }
3679}
3680
3681void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3682{
3683 lockdep_assert_held(&dev_priv->irq_lock);
3684
3685 if (!dev_priv->display_irqs_enabled)
3686 return;
3687
3688 dev_priv->display_irqs_enabled = false;
3689
3690 if (intel_irqs_enabled(dev_priv))
3691 vlv_display_irq_reset(dev_priv);
3692}
3693
3695static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3696{
3697 gen5_gt_irq_postinstall(to_gt(dev_priv));
3698
3699 spin_lock_irq(&dev_priv->irq_lock);
3700 if (dev_priv->display_irqs_enabled)
3701 vlv_display_irq_postinstall(dev_priv);
3702 spin_unlock_irq(&dev_priv->irq_lock);
3703
3704 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3705 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3706}
3707
3708static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3709{
3710 struct intel_uncore *uncore = &dev_priv->uncore;
3712 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3713 GEN8_PIPE_CDCLK_CRC_DONE;
3714 u32 de_pipe_enables;
3715 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3716 u32 de_port_enables;
3717 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3718 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3719 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3720 enum pipe pipe;
3721
3722 if (!HAS_DISPLAY(dev_priv))
3723 return;
3724
3725 if (DISPLAY_VER(dev_priv) <= 10)
3726 de_misc_masked |= GEN8_DE_MISC_GSE;
3727
3728 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3729 de_port_masked |= BXT_DE_PORT_GMBUS;
3730
3731 if (DISPLAY_VER(dev_priv) >= 11) {
3732 enum port port;
3733
3734 if (intel_bios_is_dsi_present(dev_priv, &port))
3735 de_port_masked |= DSI0_TE | DSI1_TE;
3736 }
3737
3738 de_pipe_enables = de_pipe_masked |
3739 GEN8_PIPE_VBLANK |
3740 gen8_de_pipe_underrun_mask(dev_priv) |
3741 gen8_de_pipe_flip_done_mask(dev_priv);
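	/*
	 * Note the asymmetry: the IER value (de_pipe_enables) is a
	 * superset of what IMR initially unmasks (de_pipe_masked), so
	 * vblank/underrun/flip-done stay enabled at the IER level and
	 * runtime on/off only needs to toggle mask bits.
	 */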
3742
3743 de_port_enables = de_port_masked;
3744 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3745 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3746 else if (IS_BROADWELL(dev_priv))
3747 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3748
3749 if (DISPLAY_VER(dev_priv) >= 12) {
3750 enum transcoder trans;
3751
3752 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3753 enum intel_display_power_domain domain;
3754
3755 domain = POWER_DOMAIN_TRANSCODER(trans);
3756 if (!intel_display_power_is_enabled(dev_priv, domain))
3757 continue;
3758
3759 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3760 }
3761 } else {
3762 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3763 }
3764
3765 for_each_pipe(dev_priv, pipe) {
3766 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3767
3768 if (intel_display_power_is_enabled(dev_priv,
3769 POWER_DOMAIN_PIPE(pipe)))
3770 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3771 dev_priv->de_irq_mask[pipe],
3772 de_pipe_enables);
3773 }
3774
3775 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3776 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3777
3778 if (DISPLAY_VER(dev_priv) >= 11) {
3779 u32 de_hpd_masked = 0;
3780 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3781 GEN11_DE_TBT_HOTPLUG_MASK;
3782
3783 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3784 de_hpd_enables);
3785 }
3786}
3787
3788static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3789{
3790 struct intel_uncore *uncore = &dev_priv->uncore;
3791 u32 mask = SDE_GMBUS_ICP;
3792
3793 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3794}
3795
3796static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3797{
3798 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3799 icp_irq_postinstall(dev_priv);
3800 else if (HAS_PCH_SPLIT(dev_priv))
3801 ibx_irq_postinstall(dev_priv);
3802
3803 gen8_gt_irq_postinstall(to_gt(dev_priv));
3804 gen8_de_irq_postinstall(dev_priv);
3805
3806 gen8_master_intr_enable(dev_priv->uncore.regs);
3807}
3808
3809static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3810{
3811 if (!HAS_DISPLAY(dev_priv))
3812 return;
3813
3814 gen8_de_irq_postinstall(dev_priv);
3815
3816 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3817 GEN11_DISPLAY_IRQ_ENABLE);
3818}
3819
3820static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3821{
3822 struct intel_gt *gt = to_gt(dev_priv);
3823 struct intel_uncore *uncore = gt->uncore;
3824 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3825
3826 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3827 icp_irq_postinstall(dev_priv);
3828
3829 gen11_gt_irq_postinstall(gt);
3830 gen11_de_irq_postinstall(dev_priv);
3831
3832 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3833
3834 gen11_master_intr_enable(uncore->regs);
3835 intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3836}
3837
3838static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3839{
3840 struct intel_gt *gt = to_gt(dev_priv);
3841 struct intel_uncore *uncore = gt->uncore;
3842 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3843
3844 gen11_gt_irq_postinstall(gt);
3845
3846 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3847
3848 if (HAS_DISPLAY(dev_priv)) {
3849 icp_irq_postinstall(dev_priv);
3850 gen8_de_irq_postinstall(dev_priv);
3851 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3852 GEN11_DISPLAY_IRQ_ENABLE);
3853 }
3854
3855 dg1_master_intr_enable(uncore->regs);
3856 intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3857}
3858
3859static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3860{
3861 gen8_gt_irq_postinstall(to_gt(dev_priv));
3862
3863 spin_lock_irq(&dev_priv->irq_lock);
3864 if (dev_priv->display_irqs_enabled)
3865 vlv_display_irq_postinstall(dev_priv);
3866 spin_unlock_irq(&dev_priv->irq_lock);
3867
3868 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3869 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3870}
3871
3872static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3873{
3874 struct intel_uncore *uncore = &dev_priv->uncore;
3875
3876 i9xx_pipestat_irq_reset(dev_priv);
3877
3878 gen2_irq_reset(uncore);
3879 dev_priv->irq_mask = ~0u;
3880}
3881
3882static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3883{
3884 struct intel_uncore *uncore = &dev_priv->uncore;
3885 u16 enable_mask;
3886
3887 intel_uncore_write16(uncore,
3888 EMR,
3889 ~(I915_ERROR_PAGE_TABLE |
3890 I915_ERROR_MEMORY_REFRESH));
3891
3892 /* Unmask the interrupts that we always want on. */
3893 dev_priv->irq_mask =
3894 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3895 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3896 I915_MASTER_ERROR_INTERRUPT);
3897
3898 enable_mask =
3899 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3900 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3901 I915_MASTER_ERROR_INTERRUPT |
3902 I915_USER_INTERRUPT;
3903
3904 gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
3905
3906 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3907 * just to make the assert_spin_locked check happy. */
3908 spin_lock_irq(&dev_priv->irq_lock);
3909 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3910 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3911 spin_unlock_irq(&dev_priv->irq_lock);
3912}
3913
3914static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3915 u16 *eir, u16 *eir_stuck)
3916{
3917 struct intel_uncore *uncore = &i915->uncore;
3918 u16 emr;
3919
3920 *eir = intel_uncore_read16(uncore, EIR);
3921
3922 if (*eir)
3923 intel_uncore_write16(uncore, EIR, *eir);
3924
3925 *eir_stuck = intel_uncore_read16(uncore, EIR);
3926 if (*eir_stuck == 0)
3927 return;
3928
3929 /*
3930 * Toggle all EMR bits to make sure we get an edge
3931 * in the ISR master error bit if we don't clear
3932 * all the EIR bits. Otherwise the edge triggered
3933 * IIR on i965/g4x wouldn't notice that an interrupt
3934 * is still pending. Also some EIR bits can't be
3935 * cleared except by handling the underlying error
3936 * (or by a GPU reset) so we mask any bit that
3937 * remains set.
3938 */
3939 emr = intel_uncore_read16(uncore, EMR);
3940 intel_uncore_write16(uncore, EMR, 0xffff);
3941 intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3942}
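/*
 * Worked example of the EMR toggle above (bit values hypothetical):
 * suppose EIR bit 2 is stuck. EMR=0xffff masks every error source, so
 * the ISR master error bit drops; restoring EMR with bit 2 still
 * masked (emr | *eir_stuck) re-exposes the other sources. Any future
 * error then produces a fresh low-to-high transition for the
 * edge-triggered IIR logic to latch.
 */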
3943
3944static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3945 u16 eir, u16 eir_stuck)
3946{
3947 drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
3948
3949 if (eir_stuck)
3950 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3951 eir_stuck);
3952}
3953
3954static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3955 u32 *eir, u32 *eir_stuck)
3956{
3957 u32 emr;
3958
3959 *eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);
3960
3961 *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
3962 if (*eir_stuck == 0)
3963 return;
3964
3965 /*
3966 * Toggle all EMR bits to make sure we get an edge
3967 * in the ISR master error bit if we don't clear
3968 * all the EIR bits. Otherwise the edge triggered
3969 * IIR on i965/g4x wouldn't notice that an interrupt
3970 * is still pending. Also some EIR bits can't be
3971 * cleared except by handling the underlying error
3972 * (or by a GPU reset) so we mask any bit that
3973 * remains set.
3974 */
3975 emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
3976 intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
3977}
3978
3979static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3980 u32 eir, u32 eir_stuck)
3981{
3982 drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);
3983
3984 if (eir_stuck)
3985 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3986 eir_stuck);
3987}
3988
3989static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3990{
3991 struct drm_i915_private *dev_priv = arg;
3992 irqreturn_t ret = IRQ_NONE;
3993
3994 if (!intel_irqs_enabled(dev_priv))
3995 return IRQ_NONE;
3996
3997 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
3998 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3999
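	/* do/while(0) lets the body use "break" as a structured early-out */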
4000 do {
4001 u32 pipe_stats[I915_MAX_PIPES] = {};
4002 u16 eir = 0, eir_stuck = 0;
4003 u16 iir;
4004
4005 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4006 if (iir == 0)
4007 break;
4008
4009 ret = IRQ_HANDLED;
4010
4011 /* Call regardless, as some status bits might not be
4012 * signalled in iir */
4013 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4014
4015 if (iir & I915_MASTER_ERROR_INTERRUPT)
4016 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4017
4018 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4019
4020 if (iir & I915_USER_INTERRUPT)
4021 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4022
4023 if (iir & I915_MASTER_ERROR_INTERRUPT)
4024 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4025
4026 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4027 } while (0);
4028
4029 pmu_irq_stats(dev_priv, ret);
4030
4031 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4032
4033 return ret;
4034}
4035
4036static void i915_irq_reset(struct drm_i915_private *dev_priv)
4037{
4038 struct intel_uncore *uncore = &dev_priv->uncore;
4039
4040 if (I915_HAS_HOTPLUG(dev_priv)) {
4041 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4042 intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
4043 }
4044
4045 i9xx_pipestat_irq_reset(dev_priv);
4046
4047 GEN3_IRQ_RESET(uncore, GEN2_);
4048 dev_priv->irq_mask = ~0u;
4049}
4050
4051static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4052{
4053 struct intel_uncore *uncore = &dev_priv->uncore;
4054 u32 enable_mask;
4055
4056 intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
4057 I915_ERROR_MEMORY_REFRESH));
4058
4059 /* Unmask the interrupts that we always want on. */
4060 dev_priv->irq_mask =
4061 ~(I915_ASLE_INTERRUPT |
4062 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4063 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4064 I915_MASTER_ERROR_INTERRUPT);
4065
4066 enable_mask =
4067 I915_ASLE_INTERRUPT |
4068 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4069 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4070 I915_MASTER_ERROR_INTERRUPT |
4071 I915_USER_INTERRUPT;
4072
4073 if (I915_HAS_HOTPLUG(dev_priv)) {
4074 /* Enable in IER... */
4075 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4076 /* and unmask in IMR */
4077 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4078 }
4079
4080 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4081
4082 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4083 * just to make the assert_spin_locked check happy. */
4084 spin_lock_irq(&dev_priv->irq_lock);
4085 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4086 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4087 spin_unlock_irq(&dev_priv->irq_lock);
4088
4089 i915_enable_asle_pipestat(dev_priv);
4090}
4091
4092static irqreturn_t i915_irq_handler(int irq, void *arg)
4093{
4094 struct drm_i915_private *dev_priv = arg;
4095 irqreturn_t ret = IRQ_NONE;
4096
4097 if (!intel_irqs_enabled(dev_priv))
4098 return IRQ_NONE;
4099
4100 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
4101 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4102
4103 do {
4104 u32 pipe_stats[I915_MAX_PIPES] = {};
4105 u32 eir = 0, eir_stuck = 0;
4106 u32 hotplug_status = 0;
4107 u32 iir;
4108
4109 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4110 if (iir == 0)
4111 break;
4112
4113 ret = IRQ_HANDLED;
4114
4115 if (I915_HAS_HOTPLUG(dev_priv) &&
4116 iir & I915_DISPLAY_PORT_INTERRUPT)
4117 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4118
4119 /* Call regardless, as some status bits might not be
4120 * signalled in iir */
4121 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4122
4123 if (iir & I915_MASTER_ERROR_INTERRUPT)
4124 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4125
4126 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4127
4128 if (iir & I915_USER_INTERRUPT)
4129 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4130
4131 if (iir & I915_MASTER_ERROR_INTERRUPT)
4132 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4133
4134 if (hotplug_status)
4135 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4136
4137 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4138 } while (0);
4139
4140 pmu_irq_stats(dev_priv, ret);
4141
4142 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4143
4144 return ret;
4145}
4146
4147static void i965_irq_reset(struct drm_i915_private *dev_priv)
4148{
4149 struct intel_uncore *uncore = &dev_priv->uncore;
4150
4151 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4152 intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
4153
4154 i9xx_pipestat_irq_reset(dev_priv);
4155
4156 GEN3_IRQ_RESET(uncore, GEN2_);
4157 dev_priv->irq_mask = ~0u;
4158}
4159
4160static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4161{
4162 struct intel_uncore *uncore = &dev_priv->uncore;
4163 u32 enable_mask;
4164 u32 error_mask;
4165
4166 /*
4167 	 * Enable some error detection; note that the instruction error mask
4168 * bit is reserved, so we leave it masked.
4169 */
4170 if (IS_G4X(dev_priv)) {
4171 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4172 GM45_ERROR_MEM_PRIV |
4173 GM45_ERROR_CP_PRIV |
4174 I915_ERROR_MEMORY_REFRESH);
4175 } else {
4176 error_mask = ~(I915_ERROR_PAGE_TABLE |
4177 I915_ERROR_MEMORY_REFRESH);
4178 }
4179 intel_uncore_write(uncore, EMR, error_mask);
4180
4181 /* Unmask the interrupts that we always want on. */
4182 dev_priv->irq_mask =
4183 ~(I915_ASLE_INTERRUPT |
4184 I915_DISPLAY_PORT_INTERRUPT |
4185 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4186 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4187 I915_MASTER_ERROR_INTERRUPT);
4188
4189 enable_mask =
4190 I915_ASLE_INTERRUPT |
4191 I915_DISPLAY_PORT_INTERRUPT |
4192 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4193 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4194 I915_MASTER_ERROR_INTERRUPT |
4195 I915_USER_INTERRUPT;
4196
4197 if (IS_G4X(dev_priv))
4198 enable_mask |= I915_BSD_USER_INTERRUPT;
4199
4200 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4201
4202 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4203 * just to make the assert_spin_locked check happy. */
4204 spin_lock_irq(&dev_priv->irq_lock);
4205 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4206 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4207 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4208 spin_unlock_irq(&dev_priv->irq_lock);
4209
4210 i915_enable_asle_pipestat(dev_priv);
4211}
4212
4213static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4214{
4215 u32 hotplug_en;
4216
4217 lockdep_assert_held(&dev_priv->irq_lock);
4218
4219 /* Note HDMI and DP share hotplug bits */
4220 /* enable bits are the same for all generations */
4221 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4222 	/* Programming the CRT detection parameters tends to generate
4223 	 * a spurious hotplug event about three seconds later. So just
4224 	 * do it once.
4225 	 */
4226 if (IS_G4X(dev_priv))
4227 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4228 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4229
4230 /* Ignore TV since it's buggy */
4231 i915_hotplug_interrupt_update_locked(dev_priv,
4232 HOTPLUG_INT_EN_MASK |
4233 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4234 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4235 hotplug_en);
4236}
4237
4238static irqreturn_t i965_irq_handler(int irq, void *arg)
4239{
4240 struct drm_i915_private *dev_priv = arg;
4241 irqreturn_t ret = IRQ_NONE;
4242
4243 if (!intel_irqs_enabled(dev_priv))
4244 return IRQ_NONE;
4245
4246 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
4247 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4248
4249 do {
4250 u32 pipe_stats[I915_MAX_PIPES] = {};
4251 u32 eir = 0, eir_stuck = 0;
4252 u32 hotplug_status = 0;
4253 u32 iir;
4254
4255 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4256 if (iir == 0)
4257 break;
4258
4259 ret = IRQ_HANDLED;
4260
4261 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4262 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4263
4264 /* Call regardless, as some status bits might not be
4265 * signalled in iir */
4266 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4267
4268 if (iir & I915_MASTER_ERROR_INTERRUPT)
4269 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4270
4271 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4272
4273 if (iir & I915_USER_INTERRUPT)
4274 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4275 iir);
4276
4277 if (iir & I915_BSD_USER_INTERRUPT)
4278 intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4279 iir >> 25);
4280
4281 if (iir & I915_MASTER_ERROR_INTERRUPT)
4282 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4283
4284 if (hotplug_status)
4285 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4286
4287 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4288 } while (0);
4289
4290 	pmu_irq_stats(dev_priv, ret);
4291
4292 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4293
4294 return ret;
4295}
4296
4297struct intel_hotplug_funcs {
4298 void (*hpd_irq_setup)(struct drm_i915_private *i915);
4299};
4300
4301#define HPD_FUNCS(platform) \
4302static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
4303 .hpd_irq_setup = platform##_hpd_irq_setup, \
4304}
4305
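/*
 * For example, HPD_FUNCS(ilk) below expands to:
 *
 *	static const struct intel_hotplug_funcs ilk_hpd_funcs = {
 *		.hpd_irq_setup = ilk_hpd_irq_setup,
 *	};
 */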
4306HPD_FUNCS(i915);
4307HPD_FUNCS(dg1);
4308HPD_FUNCS(gen11);
4309HPD_FUNCS(bxt);
4310HPD_FUNCS(icp);
4311HPD_FUNCS(spt);
4312HPD_FUNCS(ilk);
4313#undef HPD_FUNCS
4314
4315void intel_hpd_irq_setup(struct drm_i915_private *i915)
4316{
4317 if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
4318 i915->display.funcs.hotplug->hpd_irq_setup(i915);
4319}
4320
4321/**
4322 * intel_irq_init - initializes irq support
4323 * @dev_priv: i915 device instance
4324 *
4325 * This function initializes all the irq support including work items, timers
4326  * and all the vtables. It does not set up the interrupt itself, though.
4327 */
4328void intel_irq_init(struct drm_i915_private *dev_priv)
4329{
4330 int i;
4331
4332 INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4333 for (i = 0; i < MAX_L3_SLICES; ++i)
4334 dev_priv->l3_parity.remap_info[i] = NULL;
4335
4336 	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
4337 if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
4338 to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
4339
4340 if (!HAS_DISPLAY(dev_priv))
4341 return;
4342
4343 intel_hpd_init_pins(dev_priv);
4344
4345 intel_hpd_init_early(dev_priv);
4346
4347 dev_priv->drm.vblank_disable_immediate = true;
4348
4349 /* Most platforms treat the display irq block as an always-on
4350 * power domain. vlv/chv can disable it at runtime and need
4351 * special care to avoid writing any of the display block registers
4352 * outside of the power domain. We defer setting up the display irqs
4353 * in this case to the runtime pm.
4354 */
4355 dev_priv->display_irqs_enabled = true;
4356 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4357 dev_priv->display_irqs_enabled = false;
4358
4359 if (HAS_GMCH(dev_priv)) {
4360 if (I915_HAS_HOTPLUG(dev_priv))
4361 dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
4362 } else {
4363 if (HAS_PCH_DG2(dev_priv))
4364 dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4365 else if (HAS_PCH_DG1(dev_priv))
4366 dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
4367 else if (DISPLAY_VER(dev_priv) >= 11)
4368 dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
4369 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4370 dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
4371 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4372 dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4373 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4374 dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
4375 else
4376 dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
4377 }
4378}
4379
4380/**
4381 * intel_irq_fini - deinitializes IRQ support
4382 * @i915: i915 device instance
4383 *
4384 * This function deinitializes all the IRQ support.
4385 */
4386void intel_irq_fini(struct drm_i915_private *i915)
4387{
4388 int i;
4389
4390 for (i = 0; i < MAX_L3_SLICES; ++i)
4391 kfree(i915->l3_parity.remap_info[i]);
4392}
4393
4394static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4395{
4396 if (HAS_GMCH(dev_priv)) {
4397 if (IS_CHERRYVIEW(dev_priv))
4398 return cherryview_irq_handler;
4399 else if (IS_VALLEYVIEW(dev_priv))
4400 return valleyview_irq_handler;
4401 else if (GRAPHICS_VER(dev_priv) == 4)
4402 return i965_irq_handler;
4403 else if (GRAPHICS_VER(dev_priv) == 3)
4404 return i915_irq_handler;
4405 else
4406 return i8xx_irq_handler;
4407 } else {
4408 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4409 return dg1_irq_handler;
4410 else if (GRAPHICS_VER(dev_priv) >= 11)
4411 return gen11_irq_handler;
4412 else if (GRAPHICS_VER(dev_priv) >= 8)
4413 return gen8_irq_handler;
4414 else
4415 return ilk_irq_handler;
4416 }
4417}
4418
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

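/*
 * Re-enable the interrupt sources the driver actually wants. This is
 * the counterpart of intel_irq_reset() and runs with the interrupt
 * handler already registered, so unmasked sources may fire immediately.
 */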
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves
 * hotplug handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

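/*
 * A minimal sketch of the intended calling sequence over a device's
 * lifetime (illustrative only; the actual call sites live in the
 * driver's probe/remove paths elsewhere):
 *
 *	intel_irq_init(i915);
 *	ret = intel_irq_install(i915);
 *	if (ret)
 *		goto err;
 *	...
 *	intel_irq_uninstall(i915);
 *	intel_irq_fini(i915);
 */
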
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling, and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

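/*
 * A rough sketch of how the two runtime pm helpers above pair up
 * around a suspend/resume cycle (illustrative only):
 *
 *	intel_runtime_pm_disable_interrupts(i915);
 *	... device powered down, then powered back up ...
 *	intel_runtime_pm_enable_interrupts(i915);
 */

/* Report whether the driver-level bookkeeping considers irqs enabled. */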
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}

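/*
 * Wait for any in-flight handler on our interrupt line to complete;
 * synchronize_irq() waits for both hard IRQ and threaded handlers.
 */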
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

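/*
 * Like intel_synchronize_irq(), but synchronize_hardirq() only waits
 * for handlers running in hard IRQ context, not for threaded handlers.
 */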
void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}