   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2023 Intel Corporation
   4 */
   5
   6#include <drm/drm_vblank.h>
   7
   8#include "gt/intel_rps.h"
   9#include "i915_drv.h"
  10#include "i915_irq.h"
  11#include "i915_reg.h"
  12#include "icl_dsi_regs.h"
  13#include "intel_crtc.h"
  14#include "intel_de.h"
  15#include "intel_display_irq.h"
  16#include "intel_display_trace.h"
  17#include "intel_display_types.h"
  18#include "intel_dp_aux.h"
  19#include "intel_dsb.h"
  20#include "intel_fdi_regs.h"
  21#include "intel_fifo_underrun.h"
  22#include "intel_gmbus.h"
  23#include "intel_hotplug_irq.h"
  24#include "intel_pipe_crc_regs.h"
  25#include "intel_pmdemand.h"
  26#include "intel_psr.h"
  27#include "intel_psr_regs.h"
  28
  29static void
  30intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
  31{
  32	struct intel_display *display = &dev_priv->display;
  33	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
  34
  35	drm_crtc_handle_vblank(&crtc->base);
  36}
  37
  38/**
  39 * ilk_update_display_irq - update DEIMR
  40 * @dev_priv: driver private
  41 * @interrupt_mask: mask of interrupt bits to update
  42 * @enabled_irq_mask: mask of interrupt bits to enable
  43 */
  44void ilk_update_display_irq(struct drm_i915_private *dev_priv,
  45			    u32 interrupt_mask, u32 enabled_irq_mask)
  46{
  47	u32 new_val;
  48
  49	lockdep_assert_held(&dev_priv->irq_lock);
  50	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
  51
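	/*
	 * DEIMR semantics: a bit set in the mask register disables the
	 * corresponding interrupt, so bits being enabled must end up
	 * cleared in the new mask value computed below.
	 */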
  52	new_val = dev_priv->irq_mask;
  53	new_val &= ~interrupt_mask;
  54	new_val |= (~enabled_irq_mask & interrupt_mask);
  55
  56	if (new_val != dev_priv->irq_mask &&
  57	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
  58		dev_priv->irq_mask = new_val;
  59		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
  60		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
  61	}
  62}
  63
  64void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
  65{
  66	ilk_update_display_irq(i915, bits, bits);
  67}
  68
  69void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
  70{
  71	ilk_update_display_irq(i915, bits, 0);
  72}
  73
  74/**
  75 * bdw_update_port_irq - update DE port interrupt
  76 * @dev_priv: driver private
  77 * @interrupt_mask: mask of interrupt bits to update
  78 * @enabled_irq_mask: mask of interrupt bits to enable
  79 */
  80void bdw_update_port_irq(struct drm_i915_private *dev_priv,
  81			 u32 interrupt_mask, u32 enabled_irq_mask)
  82{
  83	u32 new_val;
  84	u32 old_val;
  85
  86	lockdep_assert_held(&dev_priv->irq_lock);
  87
  88	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
  89
  90	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
  91		return;
  92
  93	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
  94
  95	new_val = old_val;
  96	new_val &= ~interrupt_mask;
  97	new_val |= (~enabled_irq_mask & interrupt_mask);
  98
  99	if (new_val != old_val) {
 100		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
 101		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
 102	}
 103}
 104
 105/**
 106 * bdw_update_pipe_irq - update DE pipe interrupt
 107 * @dev_priv: driver private
 108 * @pipe: pipe whose interrupt to update
 109 * @interrupt_mask: mask of interrupt bits to update
 110 * @enabled_irq_mask: mask of interrupt bits to enable
 111 */
 112static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
 113				enum pipe pipe, u32 interrupt_mask,
 114				u32 enabled_irq_mask)
 115{
 116	u32 new_val;
 117
 118	lockdep_assert_held(&dev_priv->irq_lock);
 119
 120	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 121
 122	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 123		return;
 124
 125	new_val = dev_priv->display.irq.de_irq_mask[pipe];
 126	new_val &= ~interrupt_mask;
 127	new_val |= (~enabled_irq_mask & interrupt_mask);
 128
 129	if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
 130		dev_priv->display.irq.de_irq_mask[pipe] = new_val;
 131		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
 132				   dev_priv->display.irq.de_irq_mask[pipe]);
 133		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
 134	}
 135}
 136
 137void bdw_enable_pipe_irq(struct drm_i915_private *i915,
 138			 enum pipe pipe, u32 bits)
 139{
 140	bdw_update_pipe_irq(i915, pipe, bits, bits);
 141}
 142
 143void bdw_disable_pipe_irq(struct drm_i915_private *i915,
 144			  enum pipe pipe, u32 bits)
 145{
 146	bdw_update_pipe_irq(i915, pipe, bits, 0);
 147}
 148
 149/**
 150 * ibx_display_interrupt_update - update SDEIMR
 151 * @dev_priv: driver private
 152 * @interrupt_mask: mask of interrupt bits to update
 153 * @enabled_irq_mask: mask of interrupt bits to enable
 154 */
 155void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 156				  u32 interrupt_mask,
 157				  u32 enabled_irq_mask)
 158{
 159	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
 160
 161	sdeimr &= ~interrupt_mask;
 162	sdeimr |= (~enabled_irq_mask & interrupt_mask);
 163
 164	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 165
 166	lockdep_assert_held(&dev_priv->irq_lock);
 167
 168	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 169		return;
 170
 171	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
 172	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
 173}
 174
 175void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
 176{
 177	ibx_display_interrupt_update(i915, bits, bits);
 178}
 179
 180void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
 181{
 182	ibx_display_interrupt_update(i915, bits, 0);
 183}
 184
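/*
 * PIPESTAT carries the status bits in its low 16 bits and the matching
 * interrupt enable bits in the high 16 bits, hence the << 16 below.
 */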
 185u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
 186			      enum pipe pipe)
 187{
 188	u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
 189	u32 enable_mask = status_mask << 16;
 190
 191	lockdep_assert_held(&dev_priv->irq_lock);
 192
 193	if (DISPLAY_VER(dev_priv) < 5)
 194		goto out;
 195
 196	/*
 197	 * On pipe A we don't support the PSR interrupt yet,
 198	 * on pipe B and C the same bit is MBZ (must be zero).
 199	 */
 200	if (drm_WARN_ON_ONCE(&dev_priv->drm,
 201			     status_mask & PIPE_A_PSR_STATUS_VLV))
 202		return 0;
 203	/*
 204	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
 205	 * A the same bit is for perf counters which we don't use either.
 206	 */
 207	if (drm_WARN_ON_ONCE(&dev_priv->drm,
 208			     status_mask & PIPE_B_PSR_STATUS_VLV))
 209		return 0;
 210
 211	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
 212			 SPRITE0_FLIP_DONE_INT_EN_VLV |
 213			 SPRITE1_FLIP_DONE_INT_EN_VLV);
 214	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
 215		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
 216	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
 217		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
 218
 219out:
 220	drm_WARN_ONCE(&dev_priv->drm,
 221		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
 222		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
 223		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
 224		      pipe_name(pipe), enable_mask, status_mask);
 225
 226	return enable_mask;
 227}
 228
 229void i915_enable_pipestat(struct drm_i915_private *dev_priv,
 230			  enum pipe pipe, u32 status_mask)
 231{
 232	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
 233	u32 enable_mask;
 234
 235	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
 236		      "pipe %c: status_mask=0x%x\n",
 237		      pipe_name(pipe), status_mask);
 238
 239	lockdep_assert_held(&dev_priv->irq_lock);
 240	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
 241
 242	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
 243		return;
 244
 245	dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
 246	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
 247
 248	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
 249	intel_uncore_posting_read(&dev_priv->uncore, reg);
 250}
 251
 252void i915_disable_pipestat(struct drm_i915_private *dev_priv,
 253			   enum pipe pipe, u32 status_mask)
 254{
 255	i915_reg_t reg = PIPESTAT(dev_priv, pipe);
 256	u32 enable_mask;
 257
 258	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
 259		      "pipe %c: status_mask=0x%x\n",
 260		      pipe_name(pipe), status_mask);
 261
 262	lockdep_assert_held(&dev_priv->irq_lock);
 263	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
 264
 265	if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
 266		return;
 267
 268	dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
 269	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
 270
 271	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
 272	intel_uncore_posting_read(&dev_priv->uncore, reg);
 273}
 274
 275static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
 276{
 277	struct drm_i915_private *i915 = to_i915(display->drm);
 278
 279	if (IS_I85X(i915))
 280		return true;
 281
 282	if (IS_PINEVIEW(i915))
 283		return true;
 284
 285	return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
 286}
 287
 288/**
 289 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 290 * @dev_priv: i915 device private
 291 */
 292void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
 293{
 294	struct intel_display *display = &dev_priv->display;
 295
 296	if (!intel_opregion_asle_present(display))
 297		return;
 298
 299	if (!i915_has_legacy_blc_interrupt(display))
 300		return;
 301
 302	spin_lock_irq(&dev_priv->irq_lock);
 303
 304	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
 305	if (DISPLAY_VER(dev_priv) >= 4)
 306		i915_enable_pipestat(dev_priv, PIPE_A,
 307				     PIPE_LEGACY_BLC_EVENT_STATUS);
 308
 309	spin_unlock_irq(&dev_priv->irq_lock);
 310}
 311
 312#if IS_ENABLED(CONFIG_DEBUG_FS)
 313static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 314					 enum pipe pipe,
 315					 u32 crc0, u32 crc1,
 316					 u32 crc2, u32 crc3,
 317					 u32 crc4)
 318{
 319	struct intel_display *display = &dev_priv->display;
 320	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
 321	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
 322	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
 323
 324	trace_intel_pipe_crc(crtc, crcs);
 325
 326	spin_lock(&pipe_crc->lock);
 327	/*
 328	 * For some not yet identified reason, the first CRC is
 329	 * bonkers. So let's just wait for the next vblank and read
 330	 * out the buggy result.
 331	 *
 332	 * On GEN8+ sometimes the second CRC is bonkers as well, so
 333	 * don't trust that one either.
 334	 */
 335	if (pipe_crc->skipped <= 0 ||
 336	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
 337		pipe_crc->skipped++;
 338		spin_unlock(&pipe_crc->lock);
 339		return;
 340	}
 341	spin_unlock(&pipe_crc->lock);
 342
 343	drm_crtc_add_crc_entry(&crtc->base, true,
 344			       drm_crtc_accurate_vblank_count(&crtc->base),
 345			       crcs);
 346}
 347#else
 348static inline void
 349display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 350			     enum pipe pipe,
 351			     u32 crc0, u32 crc1,
 352			     u32 crc2, u32 crc3,
 353			     u32 crc4) {}
 354#endif
 355
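/* Deliver the pending page flip completion event for this pipe, if any. */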
 356static void flip_done_handler(struct drm_i915_private *i915,
 357			      enum pipe pipe)
 358{
 359	struct intel_display *display = &i915->display;
 360	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
 361
 362	spin_lock(&i915->drm.event_lock);
 363
 364	if (crtc->flip_done_event) {
 365		trace_intel_crtc_flip_done(crtc);
 366		drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
 367		crtc->flip_done_event = NULL;
 368	}
 369
 370	spin_unlock(&i915->drm.event_lock);
 371}
 372
 373static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 374				     enum pipe pipe)
 375{
 376	display_pipe_crc_irq_handler(dev_priv, pipe,
 377				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_HSW(pipe)),
 378				     0, 0, 0, 0);
 379}
 380
 381static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 382				     enum pipe pipe)
 383{
 384	display_pipe_crc_irq_handler(dev_priv, pipe,
 385				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
 386				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
 387				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
 388				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
 389				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
 390}
 391
 392static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 393				      enum pipe pipe)
 394{
 395	u32 res1, res2;
 396
 397	if (DISPLAY_VER(dev_priv) >= 3)
 398		res1 = intel_uncore_read(&dev_priv->uncore,
 399					 PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
 400	else
 401		res1 = 0;
 402
 403	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
 404		res2 = intel_uncore_read(&dev_priv->uncore,
 405					 PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
 406	else
 407		res2 = 0;
 408
 409	display_pipe_crc_irq_handler(dev_priv, pipe,
 410				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(dev_priv, pipe)),
 411				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
 412				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
 413				     res1, res2);
 414}
 415
 416static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
 417{
 418	enum pipe pipe;
 419
 420	for_each_pipe(dev_priv, pipe) {
 421		intel_uncore_write(&dev_priv->uncore,
 422				   PIPESTAT(dev_priv, pipe),
 423				   PIPESTAT_INT_STATUS_MASK |
 424				   PIPE_FIFO_UNDERRUN_STATUS);
 425
 426		dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
 427	}
 428}
 429
 430void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
 431			   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
 432{
 433	enum pipe pipe;
 434
 435	spin_lock(&dev_priv->irq_lock);
 436
 437	if (!dev_priv->display.irq.display_irqs_enabled) {
 438		spin_unlock(&dev_priv->irq_lock);
 439		return;
 440	}
 441
 442	for_each_pipe(dev_priv, pipe) {
 443		i915_reg_t reg;
 444		u32 status_mask, enable_mask, iir_bit = 0;
 445
 446		/*
 447		 * PIPESTAT bits get signalled even when the interrupt is
 448		 * disabled with the mask bits, and some of the status bits do
 449		 * not generate interrupts at all (like the underrun bit). Hence
 450		 * we need to be careful that we only handle what we want to
 451		 * handle.
 452		 */
 453
 454		/* fifo underruns are filtered in the underrun handler. */
 455		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
 456
 457		switch (pipe) {
 458		default:
 459		case PIPE_A:
 460			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
 461			break;
 462		case PIPE_B:
 463			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 464			break;
 465		case PIPE_C:
 466			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
 467			break;
 468		}
 469		if (iir & iir_bit)
 470			status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
 471
 472		if (!status_mask)
 473			continue;
 474
 475		reg = PIPESTAT(dev_priv, pipe);
 476		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
 477		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
 478
 479		/*
 480		 * Clear the PIPE*STAT regs before the IIR
 481		 *
 482		 * Toggle the enable bits to make sure we get an
 483		 * edge in the ISR pipe event bit if we don't clear
 484		 * all the enabled status bits. Otherwise the edge
 485		 * triggered IIR on i965/g4x wouldn't notice that
 486		 * an interrupt is still pending.
 487		 */
 488		if (pipe_stats[pipe]) {
 489			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
 490			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
 491		}
 492	}
 493	spin_unlock(&dev_priv->irq_lock);
 494}
 495
 496void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 497			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
 498{
 499	struct intel_display *display = &dev_priv->display;
 500	bool blc_event = false;
 501	enum pipe pipe;
 502
 503	for_each_pipe(dev_priv, pipe) {
 504		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
 505			intel_handle_vblank(dev_priv, pipe);
 506
 507		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 508			blc_event = true;
 509
 510		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 511			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 512
 513		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 514			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 515	}
 516
 517	if (blc_event || (iir & I915_ASLE_INTERRUPT))
 518		intel_opregion_asle_intr(display);
 519}
 520
 521void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 522			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
 523{
 524	struct intel_display *display = &dev_priv->display;
 525	bool blc_event = false;
 526	enum pipe pipe;
 527
 528	for_each_pipe(dev_priv, pipe) {
 529		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
 530			intel_handle_vblank(dev_priv, pipe);
 531
 532		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 533			blc_event = true;
 534
 535		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 536			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 537
 538		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 539			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 540	}
 541
 542	if (blc_event || (iir & I915_ASLE_INTERRUPT))
 543		intel_opregion_asle_intr(display);
 544
 545	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
 546		intel_gmbus_irq_handler(display);
 547}
 548
 549void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 550				     u32 pipe_stats[I915_MAX_PIPES])
 551{
 552	struct intel_display *display = &dev_priv->display;
 553	enum pipe pipe;
 554
 555	for_each_pipe(dev_priv, pipe) {
 556		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
 557			intel_handle_vblank(dev_priv, pipe);
 558
 559		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
 560			flip_done_handler(dev_priv, pipe);
 561
 562		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 563			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 564
 565		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 566			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 567	}
 568
 569	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
 570		intel_gmbus_irq_handler(display);
 571}
 572
 573static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 574{
 575	struct intel_display *display = &dev_priv->display;
 576	enum pipe pipe;
 577	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 578
 579	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
 580
 581	if (pch_iir & SDE_AUDIO_POWER_MASK) {
 582		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
 583			       SDE_AUDIO_POWER_SHIFT);
 584		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
 585			port_name(port));
 586	}
 587
 588	if (pch_iir & SDE_AUX_MASK)
 589		intel_dp_aux_irq_handler(display);
 590
 591	if (pch_iir & SDE_GMBUS)
 592		intel_gmbus_irq_handler(display);
 593
 594	if (pch_iir & SDE_AUDIO_HDCP_MASK)
 595		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
 596
 597	if (pch_iir & SDE_AUDIO_TRANS_MASK)
 598		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
 599
 600	if (pch_iir & SDE_POISON)
 601		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
 602
 603	if (pch_iir & SDE_FDI_MASK) {
 604		for_each_pipe(dev_priv, pipe)
 605			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
 606				pipe_name(pipe),
 607				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
 608	}
 609
 610	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
 611		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
 612
 613	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
 614		drm_dbg(&dev_priv->drm,
 615			"PCH transcoder CRC error interrupt\n");
 616
 617	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
 618		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
 619
 620	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
 621		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
 622}
 623
 624static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
 625{
 626	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
 627	enum pipe pipe;
 628
 629	if (err_int & ERR_INT_POISON)
 630		drm_err(&dev_priv->drm, "Poison interrupt\n");
 631
 632	for_each_pipe(dev_priv, pipe) {
 633		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
 634			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 635
 636		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
 637			if (IS_IVYBRIDGE(dev_priv))
 638				ivb_pipe_crc_irq_handler(dev_priv, pipe);
 639			else
 640				hsw_pipe_crc_irq_handler(dev_priv, pipe);
 641		}
 642	}
 643
 644	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
 645}
 646
 647static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
 648{
 649	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
 650	enum pipe pipe;
 651
 652	if (serr_int & SERR_INT_POISON)
 653		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
 654
 655	for_each_pipe(dev_priv, pipe)
 656		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
 657			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
 658
 659	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
 660}
 661
 662static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 663{
 664	struct intel_display *display = &dev_priv->display;
 665	enum pipe pipe;
 666	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 667
 668	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
 669
 670	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
 671		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
 672			       SDE_AUDIO_POWER_SHIFT_CPT);
 673		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
 674			port_name(port));
 675	}
 676
 677	if (pch_iir & SDE_AUX_MASK_CPT)
 678		intel_dp_aux_irq_handler(display);
 679
 680	if (pch_iir & SDE_GMBUS_CPT)
 681		intel_gmbus_irq_handler(display);
 682
 683	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
 684		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
 685
 686	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
 687		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
 688
 689	if (pch_iir & SDE_FDI_MASK_CPT) {
 690		for_each_pipe(dev_priv, pipe)
 691			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
 692				pipe_name(pipe),
 693				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
 694	}
 695
 696	if (pch_iir & SDE_ERROR_CPT)
 697		cpt_serr_int_handler(dev_priv);
 698}
 699
 700void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
 701{
 702	struct intel_display *display = &dev_priv->display;
 703	enum pipe pipe;
 704	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
 705
 706	if (hotplug_trigger)
 707		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
 708
 709	if (de_iir & DE_AUX_CHANNEL_A)
 710		intel_dp_aux_irq_handler(display);
 711
 712	if (de_iir & DE_GSE)
 713		intel_opregion_asle_intr(display);
 714
 715	if (de_iir & DE_POISON)
 716		drm_err(&dev_priv->drm, "Poison interrupt\n");
 717
 718	for_each_pipe(dev_priv, pipe) {
 719		if (de_iir & DE_PIPE_VBLANK(pipe))
 720			intel_handle_vblank(dev_priv, pipe);
 721
 722		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
 723			flip_done_handler(dev_priv, pipe);
 724
 725		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
 726			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 727
 728		if (de_iir & DE_PIPE_CRC_DONE(pipe))
 729			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 730	}
 731
 732	/* check event from PCH */
 733	if (de_iir & DE_PCH_EVENT) {
 734		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
 735
 736		if (HAS_PCH_CPT(dev_priv))
 737			cpt_irq_handler(dev_priv, pch_iir);
 738		else
 739			ibx_irq_handler(dev_priv, pch_iir);
 740
 741		/* should clear the PCH hotplug event before clearing the CPU irq */
 742		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
 743	}
 744
 745	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
 746		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
 747}
 748
 749void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
 750{
 751	struct intel_display *display = &dev_priv->display;
 752	enum pipe pipe;
 753	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
 754
 755	if (hotplug_trigger)
 756		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
 757
 758	if (de_iir & DE_ERR_INT_IVB)
 759		ivb_err_int_handler(dev_priv);
 760
 761	if (de_iir & DE_EDP_PSR_INT_HSW) {
 762		struct intel_encoder *encoder;
 763
 764		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
 765			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 766			u32 psr_iir;
 767
 768			psr_iir = intel_uncore_rmw(&dev_priv->uncore,
 769						   EDP_PSR_IIR, 0, 0);
 770			intel_psr_irq_handler(intel_dp, psr_iir);
 771			break;
 772		}
 773	}
 774
 775	if (de_iir & DE_AUX_CHANNEL_A_IVB)
 776		intel_dp_aux_irq_handler(display);
 777
 778	if (de_iir & DE_GSE_IVB)
 779		intel_opregion_asle_intr(display);
 780
 781	for_each_pipe(dev_priv, pipe) {
 782		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
 783			intel_handle_vblank(dev_priv, pipe);
 784
 785		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
 786			flip_done_handler(dev_priv, pipe);
 787	}
 788
 789	/* check event from PCH */
 790	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
 791		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
 792
 793		cpt_irq_handler(dev_priv, pch_iir);
 794
 795		/* clear the PCH hotplug event before clearing the CPU irq */
 796		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
 797	}
 798}
 799
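/* AUX channel interrupt bits that can appear in the DE port IIR on this platform. */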
 800static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
 801{
 802	u32 mask;
 803
 804	if (DISPLAY_VER(dev_priv) >= 20)
 805		return 0;
 806	else if (DISPLAY_VER(dev_priv) >= 14)
 807		return TGL_DE_PORT_AUX_DDIA |
 808			TGL_DE_PORT_AUX_DDIB;
 809	else if (DISPLAY_VER(dev_priv) >= 13)
 810		return TGL_DE_PORT_AUX_DDIA |
 811			TGL_DE_PORT_AUX_DDIB |
 812			TGL_DE_PORT_AUX_DDIC |
 813			XELPD_DE_PORT_AUX_DDID |
 814			XELPD_DE_PORT_AUX_DDIE |
 815			TGL_DE_PORT_AUX_USBC1 |
 816			TGL_DE_PORT_AUX_USBC2 |
 817			TGL_DE_PORT_AUX_USBC3 |
 818			TGL_DE_PORT_AUX_USBC4;
 819	else if (DISPLAY_VER(dev_priv) >= 12)
 820		return TGL_DE_PORT_AUX_DDIA |
 821			TGL_DE_PORT_AUX_DDIB |
 822			TGL_DE_PORT_AUX_DDIC |
 823			TGL_DE_PORT_AUX_USBC1 |
 824			TGL_DE_PORT_AUX_USBC2 |
 825			TGL_DE_PORT_AUX_USBC3 |
 826			TGL_DE_PORT_AUX_USBC4 |
 827			TGL_DE_PORT_AUX_USBC5 |
 828			TGL_DE_PORT_AUX_USBC6;
 829
 830	mask = GEN8_AUX_CHANNEL_A;
 831	if (DISPLAY_VER(dev_priv) >= 9)
 832		mask |= GEN9_AUX_CHANNEL_B |
 833			GEN9_AUX_CHANNEL_C |
 834			GEN9_AUX_CHANNEL_D;
 835
 836	if (DISPLAY_VER(dev_priv) == 11) {
 837		mask |= ICL_AUX_CHANNEL_F;
 838		mask |= ICL_AUX_CHANNEL_E;
 839	}
 840
 841	return mask;
 842}
 843
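/* Pipe fault bits (planes, cursor, DMC) reported in the DE pipe IIR on this platform. */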
 844static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
 845{
 846	if (DISPLAY_VER(dev_priv) >= 14)
 847		return MTL_PIPEDMC_ATS_FAULT |
 848			MTL_PLANE_ATS_FAULT |
 849			GEN12_PIPEDMC_FAULT |
 850			GEN9_PIPE_CURSOR_FAULT |
 851			GEN11_PIPE_PLANE5_FAULT |
 852			GEN9_PIPE_PLANE4_FAULT |
 853			GEN9_PIPE_PLANE3_FAULT |
 854			GEN9_PIPE_PLANE2_FAULT |
 855			GEN9_PIPE_PLANE1_FAULT;
 856	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
 857		return GEN12_PIPEDMC_FAULT |
 858			GEN9_PIPE_CURSOR_FAULT |
 859			GEN11_PIPE_PLANE5_FAULT |
 860			GEN9_PIPE_PLANE4_FAULT |
 861			GEN9_PIPE_PLANE3_FAULT |
 862			GEN9_PIPE_PLANE2_FAULT |
 863			GEN9_PIPE_PLANE1_FAULT;
 864	else if (DISPLAY_VER(dev_priv) == 12)
 865		return GEN12_PIPEDMC_FAULT |
 866			GEN9_PIPE_CURSOR_FAULT |
 867			GEN11_PIPE_PLANE7_FAULT |
 868			GEN11_PIPE_PLANE6_FAULT |
 869			GEN11_PIPE_PLANE5_FAULT |
 870			GEN9_PIPE_PLANE4_FAULT |
 871			GEN9_PIPE_PLANE3_FAULT |
 872			GEN9_PIPE_PLANE2_FAULT |
 873			GEN9_PIPE_PLANE1_FAULT;
 874	else if (DISPLAY_VER(dev_priv) == 11)
 875		return GEN9_PIPE_CURSOR_FAULT |
 876			GEN11_PIPE_PLANE7_FAULT |
 877			GEN11_PIPE_PLANE6_FAULT |
 878			GEN11_PIPE_PLANE5_FAULT |
 879			GEN9_PIPE_PLANE4_FAULT |
 880			GEN9_PIPE_PLANE3_FAULT |
 881			GEN9_PIPE_PLANE2_FAULT |
 882			GEN9_PIPE_PLANE1_FAULT;
 883	else if (DISPLAY_VER(dev_priv) >= 9)
 884		return GEN9_PIPE_CURSOR_FAULT |
 885			GEN9_PIPE_PLANE4_FAULT |
 886			GEN9_PIPE_PLANE3_FAULT |
 887			GEN9_PIPE_PLANE2_FAULT |
 888			GEN9_PIPE_PLANE1_FAULT;
 889	else
 890		return GEN8_PIPE_CURSOR_FAULT |
 891			GEN8_PIPE_SPRITE_FAULT |
 892			GEN8_PIPE_PRIMARY_FAULT;
 893}
 894
 895static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
 896{
 897	wake_up_all(&dev_priv->display.pmdemand.waitqueue);
 898}
 899
 900static void
 901gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
 902{
 903	struct intel_display *display = &dev_priv->display;
 904	bool found = false;
 905
 906	if (HAS_DBUF_OVERLAP_DETECTION(display)) {
 907		if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
 908			drm_warn(display->drm, "DBuf overlap detected\n");
 909			found = true;
 910		}
 911	}
 912
 913	if (DISPLAY_VER(dev_priv) >= 14) {
 914		if (iir & (XELPDP_PMDEMAND_RSP |
 915			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
 916			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
 917				drm_dbg(&dev_priv->drm,
 918					"Error waiting for Punit PM Demand Response\n");
 919
 920			intel_pmdemand_irq_handler(dev_priv);
 921			found = true;
 922		}
 923
 924		if (iir & XELPDP_RM_TIMEOUT) {
 925			u32 val = intel_uncore_read(&dev_priv->uncore,
 926						    RM_TIMEOUT_REG_CAPTURE);
 927			drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
 928			found = true;
 929		}
 930	} else if (iir & GEN8_DE_MISC_GSE) {
 931		intel_opregion_asle_intr(display);
 932		found = true;
 933	}
 934
 935	if (iir & GEN8_DE_EDP_PSR) {
 936		struct intel_encoder *encoder;
 937		u32 psr_iir;
 938		i915_reg_t iir_reg;
 939
 940		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
 941			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 942
 943			if (DISPLAY_VER(dev_priv) >= 12)
 944				iir_reg = TRANS_PSR_IIR(dev_priv,
 945						        intel_dp->psr.transcoder);
 946			else
 947				iir_reg = EDP_PSR_IIR;
 948
 949			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
 950
 951			if (psr_iir)
 952				found = true;
 953
 954			intel_psr_irq_handler(intel_dp, psr_iir);
 955
 956			/* platforms prior to GEN12 have only one EDP PSR instance */
 957			if (DISPLAY_VER(dev_priv) < 12)
 958				break;
 959		}
 960	}
 961
 962	if (!found)
 963		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
 964}
 965
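/*
 * Handle a DSI TE (tearing effect) interrupt in command mode by forwarding
 * it as a vblank event on the pipe driven by the DSI transcoder.
 */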
 966static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 967					   u32 te_trigger)
 968{
 969	enum pipe pipe = INVALID_PIPE;
 970	enum transcoder dsi_trans;
 971	enum port port;
 972	u32 val;
 973
 974	/*
 975	 * In case of dual link, TE comes from DSI_1;
 976	 * this checks whether dual link is enabled.
 977	 */
 978	val = intel_uncore_read(&dev_priv->uncore,
 979				TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
 980	val &= PORT_SYNC_MODE_ENABLE;
 981
 982	/*
 983	 * if dual link is enabled, then read DSI_0
 984	 * transcoder registers
 985	 */
 986	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
 987						  PORT_A : PORT_B;
 988	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
 989
 990	/* Check if the DSI transcoder is configured in command mode */
 991	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
 992	val = val & OP_MODE_MASK;
 993
 994	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
 995		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
 996		return;
 997	}
 998
 999	/* Get PIPE for handling VBLANK event */
1000	val = intel_uncore_read(&dev_priv->uncore,
1001				TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
1002	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
1003	case TRANS_DDI_EDP_INPUT_A_ON:
1004		pipe = PIPE_A;
1005		break;
1006	case TRANS_DDI_EDP_INPUT_B_ONOFF:
1007		pipe = PIPE_B;
1008		break;
1009	case TRANS_DDI_EDP_INPUT_C_ONOFF:
1010		pipe = PIPE_C;
1011		break;
1012	default:
1013		drm_err(&dev_priv->drm, "Invalid PIPE\n");
1014		return;
1015	}
1016
1017	intel_handle_vblank(dev_priv, pipe);
1018
1019	/* clear TE in dsi IIR */
1020	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
1021	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
1022}
1023
1024static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
1025{
1026	if (DISPLAY_VER(i915) >= 9)
1027		return GEN9_PIPE_PLANE1_FLIP_DONE;
1028	else
1029		return GEN8_PIPE_PRIMARY_FLIP_DONE;
1030}
1031
1032static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
1033{
1034	u32 pica_ier = 0;
1035
1036	*pica_iir = 0;
1037	*pch_iir = intel_de_read(i915, SDEIIR);
1038	if (!*pch_iir)
1039		return;
1040
1041	/*
1042	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
1043	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
1044	 * their flags both in the PICA and SDE IIR.
1045	 */
1046	if (*pch_iir & SDE_PICAINTERRUPT) {
1047		drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
1048
1049		pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
1050		*pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
1051		intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
1052	}
1053
1054	intel_de_write(i915, SDEIIR, *pch_iir);
1055
1056	if (pica_ier)
1057		intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
1058}
1059
1060void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
1061{
1062	struct intel_display *display = &dev_priv->display;
1063	u32 iir;
1064	enum pipe pipe;
1065
1066	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
1067
1068	if (master_ctl & GEN8_DE_MISC_IRQ) {
1069		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
1070		if (iir) {
1071			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
1072			gen8_de_misc_irq_handler(dev_priv, iir);
1073		} else {
1074			drm_err_ratelimited(&dev_priv->drm,
1075					    "The master control interrupt lied (DE MISC)!\n");
1076		}
1077	}
1078
1079	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
1080		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
1081		if (iir) {
1082			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
1083			gen11_hpd_irq_handler(dev_priv, iir);
1084		} else {
1085			drm_err_ratelimited(&dev_priv->drm,
1086					    "The master control interrupt lied, (DE HPD)!\n");
1087		}
1088	}
1089
1090	if (master_ctl & GEN8_DE_PORT_IRQ) {
1091		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
1092		if (iir) {
1093			bool found = false;
1094
1095			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
1096
1097			if (iir & gen8_de_port_aux_mask(dev_priv)) {
1098				intel_dp_aux_irq_handler(display);
1099				found = true;
1100			}
1101
1102			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
1103				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
1104
1105				if (hotplug_trigger) {
1106					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
1107					found = true;
1108				}
1109			} else if (IS_BROADWELL(dev_priv)) {
1110				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
1111
1112				if (hotplug_trigger) {
1113					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1114					found = true;
1115				}
1116			}
1117
1118			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
1119			    (iir & BXT_DE_PORT_GMBUS)) {
1120				intel_gmbus_irq_handler(display);
1121				found = true;
1122			}
1123
1124			if (DISPLAY_VER(dev_priv) >= 11) {
1125				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
1126
1127				if (te_trigger) {
1128					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
1129					found = true;
1130				}
1131			}
1132
1133			if (!found)
1134				drm_err_ratelimited(&dev_priv->drm,
1135						    "Unexpected DE Port interrupt\n");
1136		} else {
1137			drm_err_ratelimited(&dev_priv->drm,
1138					    "The master control interrupt lied (DE PORT)!\n");
1139		}
1140	}
1141
1142	for_each_pipe(dev_priv, pipe) {
1143		u32 fault_errors;
1144
1145		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
1146			continue;
1147
1148		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
1149		if (!iir) {
1150			drm_err_ratelimited(&dev_priv->drm,
1151					    "The master control interrupt lied (DE PIPE)!\n");
1152			continue;
1153		}
1154
1155		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
1156
1157		if (iir & GEN8_PIPE_VBLANK)
1158			intel_handle_vblank(dev_priv, pipe);
1159
1160		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
1161			flip_done_handler(dev_priv, pipe);
1162
1163		if (HAS_DSB(dev_priv)) {
1164			if (iir & GEN12_DSB_INT(INTEL_DSB_0))
1165				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
1166
1167			if (iir & GEN12_DSB_INT(INTEL_DSB_1))
1168				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
1169
1170			if (iir & GEN12_DSB_INT(INTEL_DSB_2))
1171				intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
1172		}
1173
1174		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
1175			hsw_pipe_crc_irq_handler(dev_priv, pipe);
1176
1177		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
1178			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1179
1180		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
1181		if (fault_errors)
1182			drm_err_ratelimited(&dev_priv->drm,
1183					    "Fault errors on pipe %c: 0x%08x\n",
1184					    pipe_name(pipe),
1185					    fault_errors);
1186	}
1187
1188	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
1189	    master_ctl & GEN8_DE_PCH_IRQ) {
1190		u32 pica_iir;
1191
1192		/*
1193		 * FIXME(BDW): Assume for now that the new interrupt handling
1194		 * scheme also closed the SDE interrupt handling race we've seen
1195		 * on older pch-split platforms. But this needs testing.
1196		 */
1197		gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
1198		if (iir) {
1199			if (pica_iir)
1200				xelpdp_pica_irq_handler(dev_priv, pica_iir);
1201
1202			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
1203				icp_irq_handler(dev_priv, iir);
1204			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
1205				spt_irq_handler(dev_priv, iir);
1206			else
1207				cpt_irq_handler(dev_priv, iir);
1208		} else {
1209			/*
1210			 * Like on previous PCH there seems to be something
1211			 * fishy going on with forwarding PCH interrupts.
1212			 */
1213			drm_dbg(&dev_priv->drm,
1214				"The master control interrupt lied (SDE)!\n");
1215		}
1216	}
1217}
1218
1219u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
1220{
1221	u32 iir;
1222
1223	if (!(master_ctl & GEN11_GU_MISC_IRQ))
1224		return 0;
1225
1226	iir = intel_de_read(i915, GEN11_GU_MISC_IIR);
1227	if (likely(iir))
1228		intel_de_write(i915, GEN11_GU_MISC_IIR, iir);
1229
1230	return iir;
1231}
1232
1233void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
1234{
1235	struct intel_display *display = &i915->display;
1236
1237	if (iir & GEN11_GU_MISC_GSE)
1238		intel_opregion_asle_intr(display);
1239}
1240
1241void gen11_display_irq_handler(struct drm_i915_private *i915)
1242{
1243	u32 disp_ctl;
1244
1245	disable_rpm_wakeref_asserts(&i915->runtime_pm);
1246	/*
1247	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
1248	 * for the display related bits.
1249	 */
1250	disp_ctl = intel_de_read(i915, GEN11_DISPLAY_INT_CTL);
1251
1252	intel_de_write(i915, GEN11_DISPLAY_INT_CTL, 0);
1253	gen8_de_irq_handler(i915, disp_ctl);
1254	intel_de_write(i915, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
1255
1256	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1257}
1258
1259static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
1260{
1261	lockdep_assert_held(&i915->drm.vblank_time_lock);
1262
1263	/*
1264	 * Vblank/CRC interrupts fail to wake the device up from C2+.
1265	 * Disabling render clock gating during C-states avoids
1266	 * the problem. There is a small power cost so we do this
1267	 * only when vblank/CRC interrupts are actually enabled.
1268	 */
1269	if (i915->display.irq.vblank_enabled++ == 0)
1270		intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1271}
1272
1273static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
1274{
1275	lockdep_assert_held(&i915->drm.vblank_time_lock);
1276
1277	if (--i915->display.irq.vblank_enabled == 0)
1278		intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1279}
1280
1281void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
1282{
1283	spin_lock_irq(&i915->drm.vblank_time_lock);
1284
1285	if (enable)
1286		i915gm_irq_cstate_wa_enable(i915);
1287	else
1288		i915gm_irq_cstate_wa_disable(i915);
1289
1290	spin_unlock_irq(&i915->drm.vblank_time_lock);
1291}
1292
1293int i8xx_enable_vblank(struct drm_crtc *crtc)
1294{
1295	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1296	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1297	unsigned long irqflags;
1298
1299	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1300	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1301	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1302
1303	return 0;
1304}
1305
1306void i8xx_disable_vblank(struct drm_crtc *crtc)
1307{
1308	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1309	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1310	unsigned long irqflags;
1311
1312	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1313	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1314	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1315}
1316
1317int i915gm_enable_vblank(struct drm_crtc *crtc)
1318{
1319	struct drm_i915_private *i915 = to_i915(crtc->dev);
1320
1321	i915gm_irq_cstate_wa_enable(i915);
1322
1323	return i8xx_enable_vblank(crtc);
1324}
1325
1326void i915gm_disable_vblank(struct drm_crtc *crtc)
1327{
1328	struct drm_i915_private *i915 = to_i915(crtc->dev);
1329
1330	i8xx_disable_vblank(crtc);
1331
1332	i915gm_irq_cstate_wa_disable(i915);
1333}
1334
1335int i965_enable_vblank(struct drm_crtc *crtc)
1336{
1337	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1338	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1339	unsigned long irqflags;
1340
1341	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1342	i915_enable_pipestat(dev_priv, pipe,
1343			     PIPE_START_VBLANK_INTERRUPT_STATUS);
1344	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1345
1346	return 0;
1347}
1348
1349void i965_disable_vblank(struct drm_crtc *crtc)
1350{
1351	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1352	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1353	unsigned long irqflags;
1354
1355	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1356	i915_disable_pipestat(dev_priv, pipe,
1357			      PIPE_START_VBLANK_INTERRUPT_STATUS);
1358	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1359}
1360
1361int ilk_enable_vblank(struct drm_crtc *crtc)
1362{
1363	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1364	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1365	unsigned long irqflags;
1366	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
1367		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1368
1369	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1370	ilk_enable_display_irq(dev_priv, bit);
1371	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1372
1373	/* Even though there is no DMC, frame counter can get stuck when
1374	 * PSR is active as no frames are generated.
1375	 */
1376	if (HAS_PSR(dev_priv))
1377		drm_crtc_vblank_restore(crtc);
1378
1379	return 0;
1380}
1381
1382void ilk_disable_vblank(struct drm_crtc *crtc)
1383{
1384	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1385	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1386	unsigned long irqflags;
1387	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
1388		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1389
1390	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1391	ilk_disable_display_irq(dev_priv, bit);
1392	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1393}
1394
1395static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
1396				   bool enable)
1397{
1398	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
1399	enum port port;
1400
1401	if (!(intel_crtc->mode_flags &
1402	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
1403		return false;
1404
1405	/* for dual link cases we consider TE from slave */
1406	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
1407		port = PORT_B;
1408	else
1409		port = PORT_A;
1410
1411	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
1412			 enable ? 0 : DSI_TE_EVENT);
1413
1414	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
1415
1416	return true;
1417}
1418
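/*
 * Worker scheduled from bdw_enable_vblank()/bdw_disable_vblank() to adjust
 * the allowed DC state based on how many pipes currently need the vblank
 * workaround.
 */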
1419static void intel_display_vblank_dc_work(struct work_struct *work)
1420{
1421	struct intel_display *display =
1422		container_of(work, typeof(*display), irq.vblank_dc_work);
1423	struct drm_i915_private *i915 = to_i915(display->drm);
1424	int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
1425
1426	/*
1427	 * NOTE: intel_display_power_set_target_dc_state is used only by PSR
1428	 * code for DC3CO handling. DC3CO target state is currently disabled in
1429	 * PSR code. If DC3CO is taken into use we need to take that into account
1430	 * here as well.
1431	 */
1432	intel_display_power_set_target_dc_state(i915, vblank_wa_num_pipes ? DC_STATE_DISABLE :
1433						DC_STATE_EN_UPTO_DC6);
1434}
1435
1436int bdw_enable_vblank(struct drm_crtc *_crtc)
1437{
1438	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1439	struct intel_display *display = to_intel_display(crtc);
1440	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1441	enum pipe pipe = crtc->pipe;
1442	unsigned long irqflags;
1443
1444	if (gen11_dsi_configure_te(crtc, true))
1445		return 0;
1446
1447	if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
1448		schedule_work(&display->irq.vblank_dc_work);
1449
1450	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1451	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
1452	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1453
1454	/* Even if there is no DMC, frame counter can get stuck when
1455	 * PSR is active as no frames are generated, so check only for PSR.
1456	 */
1457	if (HAS_PSR(dev_priv))
1458		drm_crtc_vblank_restore(&crtc->base);
1459
1460	return 0;
1461}
1462
1463void bdw_disable_vblank(struct drm_crtc *_crtc)
1464{
1465	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1466	struct intel_display *display = to_intel_display(crtc);
1467	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1468	enum pipe pipe = crtc->pipe;
1469	unsigned long irqflags;
1470
1471	if (gen11_dsi_configure_te(crtc, false))
1472		return;
1473
1474	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1475	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
1476	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1477
1478	if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
1479		schedule_work(&display->irq.vblank_dc_work);
1480}
1481
1482void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
1483{
1484	struct intel_uncore *uncore = &dev_priv->uncore;
1485
1486	if (IS_CHERRYVIEW(dev_priv))
1487		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
1488	else
1489		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
1490
1491	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
1492	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
1493
1494	i9xx_pipestat_irq_reset(dev_priv);
1495
1496	gen2_irq_reset(uncore, VLV_IRQ_REGS);
1497	dev_priv->irq_mask = ~0u;
1498}
1499
1500void i9xx_display_irq_reset(struct drm_i915_private *i915)
1501{
1502	if (I915_HAS_HOTPLUG(i915)) {
1503		i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
1504		intel_uncore_rmw(&i915->uncore,
1505				 PORT_HOTPLUG_STAT(i915), 0, 0);
1506	}
1507
1508	i9xx_pipestat_irq_reset(i915);
1509}
1510
1511void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
1512{
1513	struct intel_uncore *uncore = &dev_priv->uncore;
1514
1515	u32 pipestat_mask;
1516	u32 enable_mask;
1517	enum pipe pipe;
1518
1519	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
1520
1521	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
1522	for_each_pipe(dev_priv, pipe)
1523		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
1524
1525	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
1526		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1527		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1528		I915_LPE_PIPE_A_INTERRUPT |
1529		I915_LPE_PIPE_B_INTERRUPT;
1530
1531	if (IS_CHERRYVIEW(dev_priv))
1532		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
1533			I915_LPE_PIPE_C_INTERRUPT;
1534
1535	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
1536
1537	dev_priv->irq_mask = ~enable_mask;
1538
1539	gen2_irq_init(uncore, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
1540}
1541
1542void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
1543{
1544	struct intel_uncore *uncore = &dev_priv->uncore;
1545	enum pipe pipe;
1546
1547	if (!HAS_DISPLAY(dev_priv))
1548		return;
1549
1550	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
1551	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
1552
1553	for_each_pipe(dev_priv, pipe)
1554		if (intel_display_power_is_enabled(dev_priv,
1555						   POWER_DOMAIN_PIPE(pipe)))
1556			gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
1557
1558	gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
1559	gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
1560}
1561
1562void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
1563{
1564	struct intel_uncore *uncore = &dev_priv->uncore;
1565	enum pipe pipe;
1566	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
1567		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
1568
1569	if (!HAS_DISPLAY(dev_priv))
1570		return;
1571
1572	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
1573
1574	if (DISPLAY_VER(dev_priv) >= 12) {
1575		enum transcoder trans;
1576
1577		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
1578			enum intel_display_power_domain domain;
1579
1580			domain = POWER_DOMAIN_TRANSCODER(trans);
1581			if (!intel_display_power_is_enabled(dev_priv, domain))
1582				continue;
1583
1584			intel_uncore_write(uncore,
1585				           TRANS_PSR_IMR(dev_priv, trans),
1586				           0xffffffff);
1587			intel_uncore_write(uncore,
1588				           TRANS_PSR_IIR(dev_priv, trans),
1589				           0xffffffff);
1590		}
1591	} else {
1592		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
1593		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
1594	}
1595
1596	for_each_pipe(dev_priv, pipe)
1597		if (intel_display_power_is_enabled(dev_priv,
1598						   POWER_DOMAIN_PIPE(pipe)))
1599			gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
1600
1601	gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
1602	gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
1603
1604	if (DISPLAY_VER(dev_priv) >= 14)
1605		gen2_irq_reset(uncore, PICAINTERRUPT_IRQ_REGS);
1606	else
1607		gen2_irq_reset(uncore, GEN11_DE_HPD_IRQ_REGS);
1608
1609	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
1610		gen2_irq_reset(uncore, SDE_IRQ_REGS);
1611}
1612
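/*
 * Re-initialize the DE pipe interrupt registers for the pipes in pipe_mask
 * after their power well has been enabled.
 */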
1613void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
1614				     u8 pipe_mask)
1615{
1616	struct intel_uncore *uncore = &dev_priv->uncore;
1617	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
1618		gen8_de_pipe_flip_done_mask(dev_priv);
1619	enum pipe pipe;
1620
1621	spin_lock_irq(&dev_priv->irq_lock);
1622
1623	if (!intel_irqs_enabled(dev_priv)) {
1624		spin_unlock_irq(&dev_priv->irq_lock);
1625		return;
1626	}
1627
1628	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
1629		gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
1630			      dev_priv->display.irq.de_irq_mask[pipe],
1631			      ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
1632
1633	spin_unlock_irq(&dev_priv->irq_lock);
1634}
1635
1636void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
1637				     u8 pipe_mask)
1638{
1639	struct intel_uncore *uncore = &dev_priv->uncore;
1640	enum pipe pipe;
1641
1642	spin_lock_irq(&dev_priv->irq_lock);
1643
1644	if (!intel_irqs_enabled(dev_priv)) {
1645		spin_unlock_irq(&dev_priv->irq_lock);
1646		return;
1647	}
1648
1649	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
1650		gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
1651
1652	spin_unlock_irq(&dev_priv->irq_lock);
1653
1654	/* make sure we're done processing display irqs */
1655	intel_synchronize_irq(dev_priv);
1656}
1657
1658/*
1659 * SDEIER is also touched by the interrupt handler to work around missed PCH
1660 * interrupts. Hence we can't update it after the interrupt handler is enabled -
1661 * instead we unconditionally enable all PCH interrupt sources here, but then
1662 * only unmask them as needed with SDEIMR.
1663 *
1664 * Note that we currently do this after installing the interrupt handler,
1665 * but before we enable the master interrupt. That should be sufficient
1666 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
1667 * interrupts could still race.
1668 */
1669static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
1670{
1671	struct intel_uncore *uncore = &dev_priv->uncore;
1672	u32 mask;
1673
1674	if (HAS_PCH_NOP(dev_priv))
1675		return;
1676
1677	if (HAS_PCH_IBX(dev_priv))
1678		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
1679	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
1680		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
1681	else
1682		mask = SDE_GMBUS_CPT;
1683
1684	gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
1685}
1686
1687void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
1688{
1689	lockdep_assert_held(&dev_priv->irq_lock);
1690
1691	if (dev_priv->display.irq.display_irqs_enabled)
1692		return;
1693
1694	dev_priv->display.irq.display_irqs_enabled = true;
1695
1696	if (intel_irqs_enabled(dev_priv)) {
1697		vlv_display_irq_reset(dev_priv);
1698		vlv_display_irq_postinstall(dev_priv);
1699	}
1700}
1701
1702void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
1703{
1704	lockdep_assert_held(&dev_priv->irq_lock);
1705
1706	if (!dev_priv->display.irq.display_irqs_enabled)
1707		return;
1708
1709	dev_priv->display.irq.display_irqs_enabled = false;
1710
1711	if (intel_irqs_enabled(dev_priv))
1712		vlv_display_irq_reset(dev_priv);
1713}
1714
1715void ilk_de_irq_postinstall(struct drm_i915_private *i915)
1716{
1717	struct intel_uncore *uncore = &i915->uncore;
1718	u32 display_mask, extra_mask;
1719
1720	if (DISPLAY_VER(i915) >= 7) {
1721		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
1722				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
1723		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
1724			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
1725			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
1726			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
1727			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
1728			      DE_DP_A_HOTPLUG_IVB);
1729	} else {
1730		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1731				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
1732				DE_PIPEA_CRC_DONE | DE_POISON);
1733		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
1734			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
1735			      DE_PLANE_FLIP_DONE(PLANE_A) |
1736			      DE_PLANE_FLIP_DONE(PLANE_B) |
1737			      DE_DP_A_HOTPLUG);
1738	}
1739
1740	if (IS_HASWELL(i915)) {
1741		gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
1742		display_mask |= DE_EDP_PSR_INT_HSW;
1743	}
1744
1745	if (IS_IRONLAKE_M(i915))
1746		extra_mask |= DE_PCU_EVENT;
1747
1748	i915->irq_mask = ~display_mask;
1749
1750	ibx_irq_postinstall(i915);
1751
1752	gen2_irq_init(uncore, DE_IRQ_REGS, i915->irq_mask,
1753		      display_mask | extra_mask);
1754}
1755
1756static void mtp_irq_postinstall(struct drm_i915_private *i915);
1757static void icp_irq_postinstall(struct drm_i915_private *i915);
1758
1759void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
1760{
1761	struct intel_display *display = &dev_priv->display;
1762	struct intel_uncore *uncore = &dev_priv->uncore;
1763
1764	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
1765		GEN8_PIPE_CDCLK_CRC_DONE;
1766	u32 de_pipe_enables;
1767	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
1768	u32 de_port_enables;
1769	u32 de_misc_masked = GEN8_DE_EDP_PSR;
1770	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
1771		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
1772	enum pipe pipe;
1773
1774	if (!HAS_DISPLAY(dev_priv))
1775		return;
1776
1777	if (DISPLAY_VER(dev_priv) >= 14)
1778		mtp_irq_postinstall(dev_priv);
1779	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
1780		icp_irq_postinstall(dev_priv);
1781	else if (HAS_PCH_SPLIT(dev_priv))
1782		ibx_irq_postinstall(dev_priv);
1783
1784	if (DISPLAY_VER(dev_priv) < 11)
1785		de_misc_masked |= GEN8_DE_MISC_GSE;
1786
1787	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
1788		de_port_masked |= BXT_DE_PORT_GMBUS;
1789
1790	if (DISPLAY_VER(dev_priv) >= 14) {
1791		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
1792				  XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
1793	} else if (DISPLAY_VER(dev_priv) >= 11) {
1794		enum port port;
1795
1796		if (intel_bios_is_dsi_present(display, &port))
1797			de_port_masked |= DSI0_TE | DSI1_TE;
1798	}
1799
1800	if (HAS_DBUF_OVERLAP_DETECTION(display))
1801		de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
1802
1803	if (HAS_DSB(dev_priv))
1804		de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
1805			GEN12_DSB_INT(INTEL_DSB_1) |
1806			GEN12_DSB_INT(INTEL_DSB_2);
1807
1808	de_pipe_enables = de_pipe_masked |
1809		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
1810		gen8_de_pipe_flip_done_mask(dev_priv);
1811
1812	de_port_enables = de_port_masked;
1813	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
1814		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
1815	else if (IS_BROADWELL(dev_priv))
1816		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
1817
1818	if (DISPLAY_VER(dev_priv) >= 12) {
1819		enum transcoder trans;
1820
1821		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
1822			enum intel_display_power_domain domain;
1823
1824			domain = POWER_DOMAIN_TRANSCODER(trans);
1825			if (!intel_display_power_is_enabled(dev_priv, domain))
1826				continue;
1827
1828			gen2_assert_iir_is_zero(uncore,
1829						TRANS_PSR_IIR(dev_priv, trans));
1830		}
1831	} else {
1832		gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
1833	}
1834
1835	for_each_pipe(dev_priv, pipe) {
1836		dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
1837
1838		if (intel_display_power_is_enabled(dev_priv,
1839						   POWER_DOMAIN_PIPE(pipe)))
1840			gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
1841				      dev_priv->display.irq.de_irq_mask[pipe],
1842				      de_pipe_enables);
1843	}
1844
1845	gen2_irq_init(uncore, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables);
1846	gen2_irq_init(uncore, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked);
1847
1848	if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
1849		u32 de_hpd_masked = 0;
1850		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
1851				     GEN11_DE_TBT_HOTPLUG_MASK;
1852
1853		gen2_irq_init(uncore, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
1854			      de_hpd_enables);
1855	}
1856}
1857
1858static void mtp_irq_postinstall(struct drm_i915_private *i915)
1859{
1860	struct intel_uncore *uncore = &i915->uncore;
1861	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
1862	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
1863	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
1864			     XELPDP_TBT_HOTPLUG_MASK;
1865
1866	gen2_irq_init(uncore, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
1867		      de_hpd_enables);
1868
1869	gen2_irq_init(uncore, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
1870}
1871
1872static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
1873{
1874	struct intel_uncore *uncore = &dev_priv->uncore;
1875	u32 mask = SDE_GMBUS_ICP;
1876
1877	gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
1878}
1879
1880void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
1881{
1882	if (!HAS_DISPLAY(dev_priv))
1883		return;
1884
1885	gen8_de_irq_postinstall(dev_priv);
1886
1887	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
1888			   GEN11_DISPLAY_IRQ_ENABLE);
1889}
1890
1891void dg1_de_irq_postinstall(struct drm_i915_private *i915)
1892{
1893	if (!HAS_DISPLAY(i915))
1894		return;
1895
1896	gen8_de_irq_postinstall(i915);
1897	intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
1898			   GEN11_DISPLAY_IRQ_ENABLE);
1899}
1900
1901void intel_display_irq_init(struct drm_i915_private *i915)
1902{
1903	i915->drm.vblank_disable_immediate = true;
1904
1905	/*
1906	 * Most platforms treat the display irq block as an always-on power
1907	 * domain. vlv/chv can disable it at runtime and need special care to
1908	 * avoid writing any of the display block registers outside of the power
1909	 * domain. We defer setting up the display irqs in this case to the
1910	 * runtime pm.
1911	 */
1912	i915->display.irq.display_irqs_enabled = true;
1913	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1914		i915->display.irq.display_irqs_enabled = false;
1915
1916	intel_hotplug_irq_init(i915);
1917
1918	INIT_WORK(&i915->display.irq.vblank_dc_work,
1919		  intel_display_vblank_dc_work);
1920}