/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 * @dev: drm device
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

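	/* Assemble a 24-bit frame counter: the high register supplies
	 * bits 23:8 and the low (pixel-count) register bits 7:0.
	 */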
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			     int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

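	/* Note: delay values are inverse to frequency; a smaller delay
	 * means a higher frequency, so max_delay is the numerically
	 * smallest value and min_delay the largest, which is why the
	 * clamping below looks reversed.
	 */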
	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring));

	wake_up_all(&ring->irq_queue);
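	/* Each completed request re-arms the hangcheck timer, so it only
	 * fires when no request completes within a whole period.
	 */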
	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies +
			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps_work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps_lock);
	pm_iir = dev_priv->pm_iir;
	dev_priv->pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->dev->struct_mutex);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->cur_delay + 1;
	else
		new_delay = dev_priv->cur_delay - 1;

	gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * catches the case where we've unsafely cleared
	 * dev_priv->pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by rps_work.
	 */

	spin_lock_irqsave(&dev_priv->rps_lock, flags);
	WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
	dev_priv->pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps_work);
}

static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	u32 vblank_status;
	int vblank = 0;
	bool blc_event = false;

	atomic_inc(&dev_priv->irq_received);

	vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
		PIPE_VBLANK_INTERRUPT_STATUS;

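	/*
	 * Re-read and handle the interrupt sources until none remain
	 * pending; IRQ_NONE is only returned if nothing was asserted on
	 * the first pass, since the IRQ line may be shared.
	 */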
	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
			drm_handle_vblank(dev, 0);
			vblank++;
			intel_finish_page_flip(dev, 0);
		}

		if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
			drm_handle_vblank(dev, 1);
			vblank++;
			intel_finish_page_flip(dev, 1);
		}

		for_each_pipe(pipe)
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

	if (pch_iir & SDE_GMBUS_CPT)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
				queue_work(dev_priv->wq, &dev_priv->hotplug_work);
			cpt_irq_handler(dev, pch_iir);

			/* clear the PCH hotplug event before clearing the CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	page_count = src->base.size / PAGE_SIZE;

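	/* Error capture can run from the interrupt/hangcheck path, so all
	 * allocations below use GFP_ATOMIC and must tolerate failure.
	 */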
	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			void *s;

			drm_clflush_pages(&src->pages[page], 1);

			s = kmap_atomic(src->pages[page]);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&src->pages[page], 1);
		}
		local_irq_restore(flags);

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->seqno = obj->last_rendering_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
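		/* fall through: gen3 also reads the eight 830-style fence
		 * registers below */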
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.gtt_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 dpfl, imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dpfl = I915_READ(VLV_DPFLIPSTAT);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0) {
		dpfl |= PIPEA_VBLANK_INT_EN;
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	} else {
		dpfl |= PIPEB_VBLANK_INT_EN;
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	}
	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 dpfl, imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dpfl = I915_READ(VLV_DPFLIPSTAT);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0) {
		dpfl &= ~PIPEA_VBLANK_INT_EN;
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	} else {
		dpfl &= ~PIPEB_VBLANK_INT_EN;
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	}
	I915_WRITE(VLV_IMR, imr);
	I915_WRITE(VLV_DPFLIPSTAT, dpfl);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
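	/* Writing CTL back with RB_WAIT still set pokes the wait condition
	 * and un-wedges a ring stuck in WAIT_FOR_EVENT (see the comment in
	 * i915_hangcheck_hung()).
	 */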
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->hangcheck_count = 0;
		return;
	}

	if (INTEL_INFO(dev)->gen < 4) {
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}

	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->hangcheck_count = 0;

		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset timer in case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}

/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

1703/*
1704 * Enable digital hotplug on the PCH, and configure the DP short pulse
1705 * duration to 2ms (which is the minimum in the Display Port spec)
1706 *
1707 * This register is the same on all known PCH chips.
1708 */
1709
1710static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1711{
1712	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1713	u32	hotplug;
1714
1715	hotplug = I915_READ(PCH_PORT_HOTPLUG);
1716	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1717	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1718	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1719	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1720	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1721}
1722
1723static int ironlake_irq_postinstall(struct drm_device *dev)
1724{
1725	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1726	/* enable the kinds of interrupts that are always enabled */
1727	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1728			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1729	u32 render_irqs;
1730	u32 hotplug_mask;
1731
1732	dev_priv->irq_mask = ~display_mask;
1733
1734	/* these should always be able to generate an irq */
1735	I915_WRITE(DEIIR, I915_READ(DEIIR));
1736	I915_WRITE(DEIMR, dev_priv->irq_mask);
1737	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1738	POSTING_READ(DEIER);
1739
1740	dev_priv->gt_irq_mask = ~0;
1741
1742	I915_WRITE(GTIIR, I915_READ(GTIIR));
1743	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1744
1745	if (IS_GEN6(dev))
1746		render_irqs =
1747			GT_USER_INTERRUPT |
1748			GEN6_BSD_USER_INTERRUPT |
1749			GEN6_BLITTER_USER_INTERRUPT;
1750	else
1751		render_irqs =
1752			GT_USER_INTERRUPT |
1753			GT_PIPE_NOTIFY |
1754			GT_BSD_USER_INTERRUPT;
1755	I915_WRITE(GTIER, render_irqs);
1756	POSTING_READ(GTIER);
1757
1758	if (HAS_PCH_CPT(dev)) {
1759		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1760				SDE_PORTB_HOTPLUG_CPT |
1761				SDE_PORTC_HOTPLUG_CPT |
1762				SDE_PORTD_HOTPLUG_CPT);
1763	} else {
1764		hotplug_mask = (SDE_CRT_HOTPLUG |
1765				SDE_PORTB_HOTPLUG |
1766				SDE_PORTC_HOTPLUG |
1767				SDE_PORTD_HOTPLUG |
1768				SDE_AUX_MASK);
1769	}
1770
1771	dev_priv->pch_irq_mask = ~hotplug_mask;
1772
1773	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1774	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1775	I915_WRITE(SDEIER, hotplug_mask);
1776	POSTING_READ(SDEIER);
1777
1778	ironlake_enable_pch_hotplug(dev);
1779
1780	if (IS_IRONLAKE_M(dev)) {
1781		/* Clear & enable PCU event interrupts */
1782		I915_WRITE(DEIIR, DE_PCU_EVENT);
1783		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1784		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1785	}
1786
1787	return 0;
1788}
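
/*
 * The IIR/IMR/IER programming above follows the usual two-level gate:
 * a source is reported only when it is enabled in IER and unmasked in
 * IMR. As a hypothetical predicate (sketch, not driver code):
 */
static inline bool __maybe_unused irq_would_fire(u32 source, u32 ier, u32 imr)
{
	return (source & ier & ~imr) != 0;
}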
1789
1790static int ivybridge_irq_postinstall(struct drm_device *dev)
1791{
1792	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1793	/* enable the kinds of interrupts that are always enabled */
1794	u32 display_mask =
1795		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1796		DE_PLANEC_FLIP_DONE_IVB |
1797		DE_PLANEB_FLIP_DONE_IVB |
1798		DE_PLANEA_FLIP_DONE_IVB;
1799	u32 render_irqs;
1800	u32 hotplug_mask;
1801
1802	dev_priv->irq_mask = ~display_mask;
1803
1804	/* these should always be able to generate an irq */
1805	I915_WRITE(DEIIR, I915_READ(DEIIR));
1806	I915_WRITE(DEIMR, dev_priv->irq_mask);
1807	I915_WRITE(DEIER,
1808		   display_mask |
1809		   DE_PIPEC_VBLANK_IVB |
1810		   DE_PIPEB_VBLANK_IVB |
1811		   DE_PIPEA_VBLANK_IVB);
1812	POSTING_READ(DEIER);
1813
1814	dev_priv->gt_irq_mask = ~0;
1815
1816	I915_WRITE(GTIIR, I915_READ(GTIIR));
1817	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1818
1819	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1820		GEN6_BLITTER_USER_INTERRUPT;
1821	I915_WRITE(GTIER, render_irqs);
1822	POSTING_READ(GTIER);
1823
1824	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1825			SDE_PORTB_HOTPLUG_CPT |
1826			SDE_PORTC_HOTPLUG_CPT |
1827			SDE_PORTD_HOTPLUG_CPT);
1828	dev_priv->pch_irq_mask = ~hotplug_mask;
1829
1830	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1831	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1832	I915_WRITE(SDEIER, hotplug_mask);
1833	POSTING_READ(SDEIER);
1834
1835	ironlake_enable_pch_hotplug(dev);
1836
1837	return 0;
1838}
1839
1840static int valleyview_irq_postinstall(struct drm_device *dev)
1841{
1842	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1843	u32 render_irqs;
1844	u32 enable_mask;
1845	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1846	u16 msid;
1847
1848	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1849	enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1850		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1851
1852	dev_priv->irq_mask = ~enable_mask;
1853
1854	dev_priv->pipestat[0] = 0;
1855	dev_priv->pipestat[1] = 0;
1856
1857	/* Hack for broken MSIs on VLV */
1858	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
1859	pci_read_config_word(dev->pdev, 0x98, &msid);
1860	msid &= 0xff; /* mask out delivery bits */
1861	msid |= (1<<14);
1862	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
1863
1864	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
1865	I915_WRITE(VLV_IER, enable_mask);
1866	I915_WRITE(VLV_IIR, 0xffffffff);
1867	I915_WRITE(PIPESTAT(0), 0xffff);
1868	I915_WRITE(PIPESTAT(1), 0xffff);
1869	POSTING_READ(VLV_IER);
1870
1871	I915_WRITE(VLV_IIR, 0xffffffff);
1872	I915_WRITE(VLV_IIR, 0xffffffff);
1873
1874	render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
1875		GT_GEN6_BLT_CS_ERROR_INTERRUPT |
1876		GT_GEN6_BLT_USER_INTERRUPT |
1877		GT_GEN6_BSD_USER_INTERRUPT |
1878		GT_GEN6_BSD_CS_ERROR_INTERRUPT |
1879		GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
1880		GT_PIPE_NOTIFY |
1881		GT_RENDER_CS_ERROR_INTERRUPT |
1882		GT_SYNC_STATUS |
1883		GT_USER_INTERRUPT;
1884
1885	dev_priv->gt_irq_mask = ~render_irqs;
1886
1887	I915_WRITE(GTIIR, I915_READ(GTIIR));
1888	I915_WRITE(GTIIR, I915_READ(GTIIR));
1889	I915_WRITE(GTIMR, 0);
1890	I915_WRITE(GTIER, render_irqs);
1891	POSTING_READ(GTIER);
1892
1893	/* ack & enable invalid PTE error interrupts */
1894#if 0 /* FIXME: add support to irq handler for checking these bits */
1895	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
1896	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
1897#endif
1898
1899	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1900#if 0 /* FIXME: check register definitions; some have moved */
1901	/* Note HDMI and DP share bits */
1902	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1903		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1904	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1905		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1906	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1907		hotplug_en |= HDMID_HOTPLUG_INT_EN;
1908	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1909		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1910	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1911		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1912	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1913		hotplug_en |= CRT_HOTPLUG_INT_EN;
1914		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1915	}
1916#endif
1917
1918	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1919
1920	return 0;
1921}
1922
1923static void valleyview_irq_uninstall(struct drm_device *dev)
1924{
1925	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1926	int pipe;
1927
1928	if (!dev_priv)
1929		return;
1930
1931	for_each_pipe(pipe)
1932		I915_WRITE(PIPESTAT(pipe), 0xffff);
1933
1934	I915_WRITE(HWSTAM, 0xffffffff);
1935	I915_WRITE(PORT_HOTPLUG_EN, 0);
1936	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1937	for_each_pipe(pipe)
1938		I915_WRITE(PIPESTAT(pipe), 0xffff);
1939	I915_WRITE(VLV_IIR, 0xffffffff);
1940	I915_WRITE(VLV_IMR, 0xffffffff);
1941	I915_WRITE(VLV_IER, 0x0);
1942	POSTING_READ(VLV_IER);
1943}
1944
1945static void ironlake_irq_uninstall(struct drm_device *dev)
1946{
1947	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1948
1949	if (!dev_priv)
1950		return;
1951
1952	I915_WRITE(HWSTAM, 0xffffffff);
1953
1954	I915_WRITE(DEIMR, 0xffffffff);
1955	I915_WRITE(DEIER, 0x0);
1956	I915_WRITE(DEIIR, I915_READ(DEIIR));
1957
1958	I915_WRITE(GTIMR, 0xffffffff);
1959	I915_WRITE(GTIER, 0x0);
1960	I915_WRITE(GTIIR, I915_READ(GTIIR));
1961
1962	I915_WRITE(SDEIMR, 0xffffffff);
1963	I915_WRITE(SDEIER, 0x0);
1964	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1965}
1966
1967static void i8xx_irq_preinstall(struct drm_device * dev)
1968{
1969	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1970	int pipe;
1971
1972	atomic_set(&dev_priv->irq_received, 0);
1973
1974	for_each_pipe(pipe)
1975		I915_WRITE(PIPESTAT(pipe), 0);
1976	I915_WRITE16(IMR, 0xffff);
1977	I915_WRITE16(IER, 0x0);
1978	POSTING_READ16(IER);
1979}
1980
1981static int i8xx_irq_postinstall(struct drm_device *dev)
1982{
1983	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1984
1985	dev_priv->pipestat[0] = 0;
1986	dev_priv->pipestat[1] = 0;
1987
1988	I915_WRITE16(EMR,
1989		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
1990
1991	/* Unmask the interrupts that we always want on. */
1992	dev_priv->irq_mask =
1993		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1994		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1995		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1996		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1997		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1998	I915_WRITE16(IMR, dev_priv->irq_mask);
1999
2000	I915_WRITE16(IER,
2001		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2002		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2003		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2004		     I915_USER_INTERRUPT);
2005	POSTING_READ16(IER);
2006
2007	return 0;
2008}
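
/*
 * Note the inverted sense of the EMR write above: a set bit masks that
 * error source, so ~(PAGE_TABLE | MEMORY_REFRESH) unmasks exactly those
 * two and leaves everything else masked. For example (sketch), masking
 * memory-refresh errors again later would set the bit back:
 *
 *	I915_WRITE16(EMR, I915_READ16(EMR) | I915_ERROR_MEMORY_REFRESH);
 */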
2009
2010static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
2011{
2012	struct drm_device *dev = (struct drm_device *) arg;
2013	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2014	u16 iir, new_iir;
2015	u32 pipe_stats[2];
2016	unsigned long irqflags;
2017	int irq_received;
2018	int pipe;
2019	u16 flip_mask =
2020		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2021		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2022
2023	atomic_inc(&dev_priv->irq_received);
2024
2025	iir = I915_READ16(IIR);
2026	if (iir == 0)
2027		return IRQ_NONE;
2028
2029	while (iir & ~flip_mask) {
2030		/* Can't rely on pipestat interrupt bit in iir as it might
2031		 * have been cleared after the pipestat interrupt was received.
2032		 * It doesn't set the bit in iir again, but it still produces
2033		 * interrupts (for non-MSI).
2034		 */
2035		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2036		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2037			i915_handle_error(dev, false);
2038
2039		for_each_pipe(pipe) {
2040			int reg = PIPESTAT(pipe);
2041			pipe_stats[pipe] = I915_READ(reg);
2042
2043			/*
2044			 * Clear the PIPE*STAT regs before the IIR
2045			 */
2046			if (pipe_stats[pipe] & 0x8000ffff) {
2047				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2048					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2049							 pipe_name(pipe));
2050				I915_WRITE(reg, pipe_stats[pipe]);
2051				irq_received = 1;
2052			}
2053		}
2054		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2055
2056		I915_WRITE16(IIR, iir & ~flip_mask);
2057		new_iir = I915_READ16(IIR); /* Flush posted writes */
2058
2059		i915_update_dri1_breadcrumb(dev);
2060
2061		if (iir & I915_USER_INTERRUPT)
2062			notify_ring(dev, &dev_priv->ring[RCS]);
2063
2064		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2065		    drm_handle_vblank(dev, 0)) {
2066			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2067				intel_prepare_page_flip(dev, 0);
2068				intel_finish_page_flip(dev, 0);
2069				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2070			}
2071		}
2072
2073		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2074		    drm_handle_vblank(dev, 1)) {
2075			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2076				intel_prepare_page_flip(dev, 1);
2077				intel_finish_page_flip(dev, 1);
2078				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2079			}
2080		}
2081
2082		iir = new_iir;
2083	}
2084
2085	return IRQ_HANDLED;
2086}
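
/*
 * Stripped of the event handling, the loop above reduces to this
 * ack-then-reread skeleton, which is what keeps edge-triggered MSI
 * delivery from wedging (sketch; iir_loop_skeleton is not a driver
 * function):
 */
static void __maybe_unused iir_loop_skeleton(drm_i915_private_t *dev_priv)
{
	u16 iir, new_iir;

	iir = I915_READ16(IIR);
	while (iir) {
		I915_WRITE16(IIR, iir);		/* ack what we saw */
		new_iir = I915_READ16(IIR);	/* flush + catch stragglers */

		/* ... act on the bits in iir here ... */

		iir = new_iir;			/* loop until it reads back 0 */
	}
}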
2087
2088static void i8xx_irq_uninstall(struct drm_device * dev)
2089{
2090	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2091	int pipe;
2092
2093	for_each_pipe(pipe) {
2094		/* Clear enable bits; then clear status bits */
2095		I915_WRITE(PIPESTAT(pipe), 0);
2096		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2097	}
2098	I915_WRITE16(IMR, 0xffff);
2099	I915_WRITE16(IER, 0x0);
2100	I915_WRITE16(IIR, I915_READ16(IIR));
2101}
2102
2103static void i915_irq_preinstall(struct drm_device * dev)
2104{
2105	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2106	int pipe;
2107
2108	atomic_set(&dev_priv->irq_received, 0);
2109
2110	if (I915_HAS_HOTPLUG(dev)) {
2111		I915_WRITE(PORT_HOTPLUG_EN, 0);
2112		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2113	}
2114
2115	I915_WRITE16(HWSTAM, 0xeffe);
2116	for_each_pipe(pipe)
2117		I915_WRITE(PIPESTAT(pipe), 0);
2118	I915_WRITE(IMR, 0xffffffff);
2119	I915_WRITE(IER, 0x0);
2120	POSTING_READ(IER);
2121}
2122
2123static int i915_irq_postinstall(struct drm_device *dev)
2124{
2125	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2126	u32 enable_mask;
2127
2128	dev_priv->pipestat[0] = 0;
2129	dev_priv->pipestat[1] = 0;
2130
2131	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2132
2133	/* Unmask the interrupts that we always want on. */
2134	dev_priv->irq_mask =
2135		~(I915_ASLE_INTERRUPT |
2136		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2137		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2138		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2139		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2140		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2141
2142	enable_mask =
2143		I915_ASLE_INTERRUPT |
2144		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2145		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2146		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2147		I915_USER_INTERRUPT;
2148
2149	if (I915_HAS_HOTPLUG(dev)) {
2150		/* Enable in IER... */
2151		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2152		/* and unmask in IMR */
2153		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2154	}
2155
2156	I915_WRITE(IMR, dev_priv->irq_mask);
2157	I915_WRITE(IER, enable_mask);
2158	POSTING_READ(IER);
2159
2160	if (I915_HAS_HOTPLUG(dev)) {
2161		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2162
2163		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2164			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2165		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2166			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2167		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2168			hotplug_en |= HDMID_HOTPLUG_INT_EN;
2169		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2170			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2171		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2172			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2173		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2174			hotplug_en |= CRT_HOTPLUG_INT_EN;
2175			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2176		}
2177
2178		/* Ignore TV since it's buggy */
2179
2180		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2181	}
2182
2183	intel_opregion_enable_asle(dev);
2184
2185	return 0;
2186}
2187
2188static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
2189{
2190	struct drm_device *dev = (struct drm_device *) arg;
2191	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2192	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2193	unsigned long irqflags;
2194	u32 flip_mask =
2195		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2196		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2197	u32 flip[2] = {
2198		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2199		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
2200	};
2201	int pipe, ret = IRQ_NONE;
2202
2203	atomic_inc(&dev_priv->irq_received);
2204
2205	iir = I915_READ(IIR);
2206	do {
2207		bool irq_received = (iir & ~flip_mask) != 0;
2208		bool blc_event = false;
2209
2210		/* Can't rely on pipestat interrupt bit in iir as it might
2211		 * have been cleared after the pipestat interrupt was received.
2212		 * It doesn't set the bit in iir again, but it still produces
2213		 * interrupts (for non-MSI).
2214		 */
2215		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2216		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2217			i915_handle_error(dev, false);
2218
2219		for_each_pipe(pipe) {
2220			int reg = PIPESTAT(pipe);
2221			pipe_stats[pipe] = I915_READ(reg);
2222
2223			/* Clear the PIPE*STAT regs before the IIR */
2224			if (pipe_stats[pipe] & 0x8000ffff) {
2225				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2226					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2227							 pipe_name(pipe));
2228				I915_WRITE(reg, pipe_stats[pipe]);
2229				irq_received = true;
2230			}
2231		}
2232		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2233
2234		if (!irq_received)
2235			break;
2236
2237		/* Consume port.  Then clear IIR or we'll miss events */
2238		if ((I915_HAS_HOTPLUG(dev)) &&
2239		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2240			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2241
2242			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2243				  hotplug_status);
2244			if (hotplug_status & dev_priv->hotplug_supported_mask)
2245				queue_work(dev_priv->wq,
2246					   &dev_priv->hotplug_work);
2247
2248			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2249			POSTING_READ(PORT_HOTPLUG_STAT);
2250		}
2251
2252		I915_WRITE(IIR, iir & ~flip_mask);
2253		new_iir = I915_READ(IIR); /* Flush posted writes */
2254
2255		if (iir & I915_USER_INTERRUPT)
2256			notify_ring(dev, &dev_priv->ring[RCS]);
2257
2258		for_each_pipe(pipe) {
2259			int plane = pipe;
2260			if (IS_MOBILE(dev))
2261				plane = !plane;
2262			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2263			    drm_handle_vblank(dev, pipe)) {
2264				if (iir & flip[plane]) {
2265					intel_prepare_page_flip(dev, plane);
2266					intel_finish_page_flip(dev, pipe);
2267					flip_mask &= ~flip[plane];
2268				}
2269			}
2270
2271			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2272				blc_event = true;
2273		}
2274
2275		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2276			intel_opregion_asle_intr(dev);
2277
2278		/* With MSI, interrupts are only generated when iir
2279		 * transitions from zero to nonzero.  If another bit got
2280		 * set while we were handling the existing iir bits, then
2281		 * we would never get another interrupt.
2282		 *
2283		 * This is fine on non-MSI as well, as if we hit this path
2284		 * we avoid exiting the interrupt handler only to generate
2285		 * another one.
2286		 *
2287		 * Note that for MSI this could cause a stray interrupt report
2288		 * if an interrupt landed in the time between writing IIR and
2289		 * the posting read.  This should be rare enough to never
2290		 * trigger the 99% of 100,000 interrupts test for disabling
2291		 * stray interrupts.
2292		 */
2293		ret = IRQ_HANDLED;
2294		iir = new_iir;
2295	} while (iir & ~flip_mask);
2296
2297	i915_update_dri1_breadcrumb(dev);
2298
2299	return ret;
2300}
2301
2302static void i915_irq_uninstall(struct drm_device * dev)
2303{
2304	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2305	int pipe;
2306
2307	if (I915_HAS_HOTPLUG(dev)) {
2308		I915_WRITE(PORT_HOTPLUG_EN, 0);
2309		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2310	}
2311
2312	I915_WRITE16(HWSTAM, 0xffff);
2313	for_each_pipe(pipe) {
2314		/* Clear enable bits; then clear status bits */
2315		I915_WRITE(PIPESTAT(pipe), 0);
2316		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2317	}
2318	I915_WRITE(IMR, 0xffffffff);
2319	I915_WRITE(IER, 0x0);
2320
2321	I915_WRITE(IIR, I915_READ(IIR));
2322}
2323
2324static void i965_irq_preinstall(struct drm_device * dev)
2325{
2326	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2327	int pipe;
2328
2329	atomic_set(&dev_priv->irq_received, 0);
2330
2331	if (I915_HAS_HOTPLUG(dev)) {
2332		I915_WRITE(PORT_HOTPLUG_EN, 0);
2333		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2334	}
2335
2336	I915_WRITE(HWSTAM, 0xeffe);
2337	for_each_pipe(pipe)
2338		I915_WRITE(PIPESTAT(pipe), 0);
2339	I915_WRITE(IMR, 0xffffffff);
2340	I915_WRITE(IER, 0x0);
2341	POSTING_READ(IER);
2342}
2343
2344static int i965_irq_postinstall(struct drm_device *dev)
2345{
2346	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2347	u32 enable_mask;
2348	u32 error_mask;
2349
2350	/* Unmask the interrupts that we always want on. */
2351	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2352			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2353			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2354			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2355			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2356			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2357
2358	enable_mask = ~dev_priv->irq_mask;
2359	enable_mask |= I915_USER_INTERRUPT;
2360
2361	if (IS_G4X(dev))
2362		enable_mask |= I915_BSD_USER_INTERRUPT;
2363
2364	dev_priv->pipestat[0] = 0;
2365	dev_priv->pipestat[1] = 0;
2366
2367	if (I915_HAS_HOTPLUG(dev)) {
2368		/* Enable in IER... */
2369		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2370		/* and unmask in IMR */
2371		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2372	}
2373
2374	/*
2375	 * Enable some error detection; note that the instruction error
2376	 * mask bit is reserved, so we leave it masked.
2377	 */
2378	if (IS_G4X(dev)) {
2379		error_mask = ~(GM45_ERROR_PAGE_TABLE |
2380			       GM45_ERROR_MEM_PRIV |
2381			       GM45_ERROR_CP_PRIV |
2382			       I915_ERROR_MEMORY_REFRESH);
2383	} else {
2384		error_mask = ~(I915_ERROR_PAGE_TABLE |
2385			       I915_ERROR_MEMORY_REFRESH);
2386	}
2387	I915_WRITE(EMR, error_mask);
2388
2389	I915_WRITE(IMR, dev_priv->irq_mask);
2390	I915_WRITE(IER, enable_mask);
2391	POSTING_READ(IER);
2392
2393	if (I915_HAS_HOTPLUG(dev)) {
2394		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2395
2396		/* Note HDMI and DP share bits */
2397		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2398			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2399		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2400			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2401		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2402			hotplug_en |= HDMID_HOTPLUG_INT_EN;
2403		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2404			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2405		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2406			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2407		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2408			hotplug_en |= CRT_HOTPLUG_INT_EN;
2409
2410			/* Programming the CRT detection parameters tends
2411			   to generate a spurious hotplug event about three
2412			   seconds later.  So just do it once.
2413			*/
2414			if (IS_G4X(dev))
2415				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2416			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2417		}
2418
2419		/* Ignore TV since it's buggy */
2420
2421		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2422	}
2423
2424	intel_opregion_enable_asle(dev);
2425
2426	return 0;
2427}
2428
2429static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
2430{
2431	struct drm_device *dev = (struct drm_device *) arg;
2432	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2433	u32 iir, new_iir;
2434	u32 pipe_stats[I915_MAX_PIPES];
2435	unsigned long irqflags;
2436	int irq_received;
2437	int ret = IRQ_NONE, pipe;
2438
2439	atomic_inc(&dev_priv->irq_received);
2440
2441	iir = I915_READ(IIR);
2442
2443	for (;;) {
2444		bool blc_event = false;
2445
2446		irq_received = iir != 0;
2447
2448		/* Can't rely on pipestat interrupt bit in iir as it might
2449		 * have been cleared after the pipestat interrupt was received.
2450		 * It doesn't set the bit in iir again, but it still produces
2451		 * interrupts (for non-MSI).
2452		 */
2453		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2454		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2455			i915_handle_error(dev, false);
2456
2457		for_each_pipe(pipe) {
2458			int reg = PIPESTAT(pipe);
2459			pipe_stats[pipe] = I915_READ(reg);
2460
2461			/*
2462			 * Clear the PIPE*STAT regs before the IIR
2463			 */
2464			if (pipe_stats[pipe] & 0x8000ffff) {
2465				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2466					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2467							 pipe_name(pipe));
2468				I915_WRITE(reg, pipe_stats[pipe]);
2469				irq_received = 1;
2470			}
2471		}
2472		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2473
2474		if (!irq_received)
2475			break;
2476
2477		ret = IRQ_HANDLED;
2478
2479		/* Consume port.  Then clear IIR or we'll miss events */
2480		if ((I915_HAS_HOTPLUG(dev)) &&
2481		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2482			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2483
2484			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2485				  hotplug_status);
2486			if (hotplug_status & dev_priv->hotplug_supported_mask)
2487				queue_work(dev_priv->wq,
2488					   &dev_priv->hotplug_work);
2489
2490			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2491			I915_READ(PORT_HOTPLUG_STAT);
2492		}
2493
2494		I915_WRITE(IIR, iir);
2495		new_iir = I915_READ(IIR); /* Flush posted writes */
2496
2497		if (iir & I915_USER_INTERRUPT)
2498			notify_ring(dev, &dev_priv->ring[RCS]);
2499		if (iir & I915_BSD_USER_INTERRUPT)
2500			notify_ring(dev, &dev_priv->ring[VCS]);
2501
2502		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2503			intel_prepare_page_flip(dev, 0);
2504
2505		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2506			intel_prepare_page_flip(dev, 1);
2507
2508		for_each_pipe(pipe) {
2509			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2510			    drm_handle_vblank(dev, pipe)) {
2511				i915_pageflip_stall_check(dev, pipe);
2512				intel_finish_page_flip(dev, pipe);
2513			}
2514
2515			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2516				blc_event = true;
2517		}
2518
2519
2520		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2521			intel_opregion_asle_intr(dev);
2522
2523		/* With MSI, interrupts are only generated when iir
2524		 * transitions from zero to nonzero.  If another bit got
2525		 * set while we were handling the existing iir bits, then
2526		 * we would never get another interrupt.
2527		 *
2528		 * This is fine on non-MSI as well, as if we hit this path
2529		 * we avoid exiting the interrupt handler only to generate
2530		 * another one.
2531		 *
2532		 * Note that for MSI this could cause a stray interrupt report
2533		 * if an interrupt landed in the time between writing IIR and
2534		 * the posting read.  This should be rare enough to never
2535		 * trigger the 99% of 100,000 interrupts test for disabling
2536		 * stray interrupts.
2537		 */
2538		iir = new_iir;
2539	}
2540
2541	i915_update_dri1_breadcrumb(dev);
2542
2543	return ret;
2544}
2545
2546static void i965_irq_uninstall(struct drm_device * dev)
2547{
2548	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2549	int pipe;
2550
2551	if (!dev_priv)
2552		return;
2553
2554	if (I915_HAS_HOTPLUG(dev)) {
2555		I915_WRITE(PORT_HOTPLUG_EN, 0);
2556		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2557	}
2558
2559	I915_WRITE(HWSTAM, 0xffffffff);
2560	for_each_pipe(pipe)
2561		I915_WRITE(PIPESTAT(pipe), 0);
2562	I915_WRITE(IMR, 0xffffffff);
2563	I915_WRITE(IER, 0x0);
2564
2565	for_each_pipe(pipe)
2566		I915_WRITE(PIPESTAT(pipe),
2567			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2568	I915_WRITE(IIR, I915_READ(IIR));
2569}
2570
2571void intel_irq_init(struct drm_device *dev)
2572{
2573	struct drm_i915_private *dev_priv = dev->dev_private;
2574
2575	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2576	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2577	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
2578
2579	dev->driver->get_vblank_counter = i915_get_vblank_counter;
2580	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2581	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2582		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2583		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2584	}
2585
2586	if (drm_core_check_feature(dev, DRIVER_MODESET))
2587		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2588	else
2589		dev->driver->get_vblank_timestamp = NULL;
2590	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2591
2592	if (IS_VALLEYVIEW(dev)) {
2593		dev->driver->irq_handler = valleyview_irq_handler;
2594		dev->driver->irq_preinstall = valleyview_irq_preinstall;
2595		dev->driver->irq_postinstall = valleyview_irq_postinstall;
2596		dev->driver->irq_uninstall = valleyview_irq_uninstall;
2597		dev->driver->enable_vblank = valleyview_enable_vblank;
2598		dev->driver->disable_vblank = valleyview_disable_vblank;
2599	} else if (IS_IVYBRIDGE(dev)) {
2600		/* Share pre & uninstall handlers with ILK/SNB */
2601		dev->driver->irq_handler = ivybridge_irq_handler;
2602		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2603		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2604		dev->driver->irq_uninstall = ironlake_irq_uninstall;
2605		dev->driver->enable_vblank = ivybridge_enable_vblank;
2606		dev->driver->disable_vblank = ivybridge_disable_vblank;
2607	} else if (IS_HASWELL(dev)) {
2608		/* Share interrupts handling with IVB */
2609		dev->driver->irq_handler = ivybridge_irq_handler;
2610		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2611		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2612		dev->driver->irq_uninstall = ironlake_irq_uninstall;
2613		dev->driver->enable_vblank = ivybridge_enable_vblank;
2614		dev->driver->disable_vblank = ivybridge_disable_vblank;
2615	} else if (HAS_PCH_SPLIT(dev)) {
2616		dev->driver->irq_handler = ironlake_irq_handler;
2617		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2618		dev->driver->irq_postinstall = ironlake_irq_postinstall;
2619		dev->driver->irq_uninstall = ironlake_irq_uninstall;
2620		dev->driver->enable_vblank = ironlake_enable_vblank;
2621		dev->driver->disable_vblank = ironlake_disable_vblank;
2622	} else {
2623		if (INTEL_INFO(dev)->gen == 2) {
2624			dev->driver->irq_preinstall = i8xx_irq_preinstall;
2625			dev->driver->irq_postinstall = i8xx_irq_postinstall;
2626			dev->driver->irq_handler = i8xx_irq_handler;
2627			dev->driver->irq_uninstall = i8xx_irq_uninstall;
2628		} else if (INTEL_INFO(dev)->gen == 3) {
2629			dev->driver->irq_preinstall = i915_irq_preinstall;
2630			dev->driver->irq_postinstall = i915_irq_postinstall;
2631			dev->driver->irq_uninstall = i915_irq_uninstall;
2632			dev->driver->irq_handler = i915_irq_handler;
2633		} else {
2634			dev->driver->irq_preinstall = i965_irq_preinstall;
2635			dev->driver->irq_postinstall = i965_irq_postinstall;
2636			dev->driver->irq_uninstall = i965_irq_uninstall;
2637			dev->driver->irq_handler = i965_irq_handler;
2638		}
2639		dev->driver->enable_vblank = i915_enable_vblank;
2640		dev->driver->disable_vblank = i915_disable_vblank;
2641	}
2642}
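
/*
 * For context: the DRM core of this era consumes the hooks filled in
 * above roughly as follows (condensed from drm_irq_install(); locking
 * and error handling trimmed, so treat this as a sketch rather than
 * the exact core code):
 *
 *	dev->driver->irq_preinstall(dev);
 *	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
 *			  IRQF_SHARED, dev->driver->name, dev);
 *	if (ret == 0)
 *		dev->driver->irq_postinstall(dev);
 */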
v6.2
  31#include <linux/slab.h>
  32#include <linux/sysrq.h>
  33
  34#include <drm/drm_drv.h>
  35
  36#include "display/icl_dsi_regs.h"
  37#include "display/intel_de.h"
  38#include "display/intel_display_trace.h"
  39#include "display/intel_display_types.h"
  40#include "display/intel_fifo_underrun.h"
  41#include "display/intel_hotplug.h"
  42#include "display/intel_lpe_audio.h"
  43#include "display/intel_psr.h"
  44
  45#include "gt/intel_breadcrumbs.h"
  46#include "gt/intel_gt.h"
  47#include "gt/intel_gt_irq.h"
  48#include "gt/intel_gt_pm_irq.h"
  49#include "gt/intel_gt_regs.h"
  50#include "gt/intel_rps.h"
  51
  52#include "i915_driver.h"
  53#include "i915_drv.h"
  54#include "i915_irq.h"
  55#include "intel_pm.h"
  56
  57/**
  58 * DOC: interrupt handling
  59 *
  60 * These functions provide the basic support for enabling and disabling the
  61 * interrupt handling support. There's a lot more functionality in i915_irq.c
  62 * and related files, but that will be described in separate chapters.
  63 */
  64
  65/*
  66 * Interrupt statistic for PMU. Increments the counter only if the
  67 * interrupt originated from the GPU so interrupts from a device which
  68 * shares the interrupt line are not accounted.
  69 */
  70static inline void pmu_irq_stats(struct drm_i915_private *i915,
  71				 irqreturn_t res)
  72{
  73	if (unlikely(res != IRQ_HANDLED))
  74		return;
  75
  76	/*
  77	 * A clever compiler translates that into INC. A not so clever one
  78	 * should at least prevent store tearing.
  79	 */
  80	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
  81}
  82
  83typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
  84typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
  85				    enum hpd_pin pin);
  86
  87static const u32 hpd_ilk[HPD_NUM_PINS] = {
  88	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
  89};
  90
  91static const u32 hpd_ivb[HPD_NUM_PINS] = {
  92	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
  93};
  94
  95static const u32 hpd_bdw[HPD_NUM_PINS] = {
  96	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
  97};
  98
  99static const u32 hpd_ibx[HPD_NUM_PINS] = {
 100	[HPD_CRT] = SDE_CRT_HOTPLUG,
 101	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
 102	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
 103	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
 104	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
 105};
 106
 107static const u32 hpd_cpt[HPD_NUM_PINS] = {
 108	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
 109	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
 110	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
 111	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
 112	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
 113};
 114
 115static const u32 hpd_spt[HPD_NUM_PINS] = {
 116	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
 117	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
 118	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
 119	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
 120	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
 121};
 122
 123static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
 124	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
 125	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
 126	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
 127	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
 128	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
 129	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
 130};
 131
 132static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
 133	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
 134	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
 135	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
 136	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
 137	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
 138	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
 139};
 140
 141static const u32 hpd_status_i915[HPD_NUM_PINS] = {
 142	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
 143	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
 144	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
 145	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
 146	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
 147	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
 148};
 149
 150static const u32 hpd_bxt[HPD_NUM_PINS] = {
 151	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
 152	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
 153	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
 154};
 155
 156static const u32 hpd_gen11[HPD_NUM_PINS] = {
 157	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
 158	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
 159	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
 160	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
 161	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
 162	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
 163};
 164
 165static const u32 hpd_icp[HPD_NUM_PINS] = {
 166	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
 167	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
 168	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
 169	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
 170	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
 171	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
 172	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
 173	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
 174	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
 175};
 176
 177static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
 178	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
 179	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
 180	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
 181	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
 182	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
 183};
 184
 185static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
 186{
 187	struct intel_hotplug *hpd = &dev_priv->display.hotplug;
 188
 189	if (HAS_GMCH(dev_priv)) {
 190		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
 191		    IS_CHERRYVIEW(dev_priv))
 192			hpd->hpd = hpd_status_g4x;
 193		else
 194			hpd->hpd = hpd_status_i915;
 195		return;
 196	}
 197
 198	if (DISPLAY_VER(dev_priv) >= 11)
 199		hpd->hpd = hpd_gen11;
 200	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 201		hpd->hpd = hpd_bxt;
 202	else if (DISPLAY_VER(dev_priv) >= 8)
 203		hpd->hpd = hpd_bdw;
 204	else if (DISPLAY_VER(dev_priv) >= 7)
 205		hpd->hpd = hpd_ivb;
 206	else
 207		hpd->hpd = hpd_ilk;
 208
 209	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
 210	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
 211		return;
 212
 213	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
 214		hpd->pch_hpd = hpd_sde_dg1;
 215	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 216		hpd->pch_hpd = hpd_icp;
 217	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
 218		hpd->pch_hpd = hpd_spt;
 219	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
 220		hpd->pch_hpd = hpd_cpt;
 221	else if (HAS_PCH_IBX(dev_priv))
 222		hpd->pch_hpd = hpd_ibx;
 223	else
 224		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
 225}
 226
 227static void
 228intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
 229{
 230	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
 231
 232	drm_crtc_handle_vblank(&crtc->base);
 233}
 234
 235void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
 236		    i915_reg_t iir, i915_reg_t ier)
 237{
 238	intel_uncore_write(uncore, imr, 0xffffffff);
 239	intel_uncore_posting_read(uncore, imr);
 240
 241	intel_uncore_write(uncore, ier, 0);
 242
 243	/* IIR can theoretically queue up two events. Be paranoid. */
 244	intel_uncore_write(uncore, iir, 0xffffffff);
 245	intel_uncore_posting_read(uncore, iir);
 246	intel_uncore_write(uncore, iir, 0xffffffff);
 247	intel_uncore_posting_read(uncore, iir);
 248}
 249
 250static void gen2_irq_reset(struct intel_uncore *uncore)
 251{
 252	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
 253	intel_uncore_posting_read16(uncore, GEN2_IMR);
 254
 255	intel_uncore_write16(uncore, GEN2_IER, 0);
 256
 257	/* IIR can theoretically queue up two events. Be paranoid. */
 258	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
 259	intel_uncore_posting_read16(uncore, GEN2_IIR);
 260	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
 261	intel_uncore_posting_read16(uncore, GEN2_IIR);
 262}
 263
 264/*
 265 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 266 */
 267static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
 268{
 269	u32 val = intel_uncore_read(uncore, reg);
 270
 271	if (val == 0)
 272		return;
 273
 274	drm_WARN(&uncore->i915->drm, 1,
 275		 "Interrupt register 0x%x is not zero: 0x%08x\n",
 276		 i915_mmio_reg_offset(reg), val);
 277	intel_uncore_write(uncore, reg, 0xffffffff);
 278	intel_uncore_posting_read(uncore, reg);
 279	intel_uncore_write(uncore, reg, 0xffffffff);
 280	intel_uncore_posting_read(uncore, reg);
 281}
 282
 283static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
 284{
 285	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
 286
 287	if (val == 0)
 288		return;
 289
 290	drm_WARN(&uncore->i915->drm, 1,
 291		 "Interrupt register 0x%x is not zero: 0x%08x\n",
 292		 i915_mmio_reg_offset(GEN2_IIR), val);
 293	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
 294	intel_uncore_posting_read16(uncore, GEN2_IIR);
 295	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
 296	intel_uncore_posting_read16(uncore, GEN2_IIR);
 297}
 298
 299void gen3_irq_init(struct intel_uncore *uncore,
 300		   i915_reg_t imr, u32 imr_val,
 301		   i915_reg_t ier, u32 ier_val,
 302		   i915_reg_t iir)
 303{
 304	gen3_assert_iir_is_zero(uncore, iir);
 305
 306	intel_uncore_write(uncore, ier, ier_val);
 307	intel_uncore_write(uncore, imr, imr_val);
 308	intel_uncore_posting_read(uncore, imr);
 309}
 310
 311static void gen2_irq_init(struct intel_uncore *uncore,
 312			  u32 imr_val, u32 ier_val)
 313{
 314	gen2_assert_iir_is_zero(uncore);
 315
 316	intel_uncore_write16(uncore, GEN2_IER, ier_val);
 317	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
 318	intel_uncore_posting_read16(uncore, GEN2_IMR);
 319}
 320
 321/* For display hotplug interrupt */
 322static inline void
 323i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
 324				     u32 mask,
 325				     u32 bits)
 326{
 327	lockdep_assert_held(&dev_priv->irq_lock);
 328	drm_WARN_ON(&dev_priv->drm, bits & ~mask);
 329
 330	intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
 331}
 332
 333/**
 334 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 335 * @dev_priv: driver private
 336 * @mask: bits to update
 337 * @bits: bits to enable
 338 * NOTE: the HPD enable bits are modified both inside and outside
  339 * of an interrupt context. To avoid read-modify-write cycles
  340 * interfering, these bits are protected by a spinlock. Since this
 341 * function is usually not called from a context where the lock is
 342 * held already, this function acquires the lock itself. A non-locking
 343 * version is also available.
 344 */
 345void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
 346				   u32 mask,
 347				   u32 bits)
 348{
 349	spin_lock_irq(&dev_priv->irq_lock);
 350	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
 351	spin_unlock_irq(&dev_priv->irq_lock);
 352}
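
/*
 * Typical use (sketch): enable just the CRT hotplug detect bit from
 * process context, letting the helper take irq_lock itself:
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * and i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN, 0)
 * clears it again.
 */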
 353
 354/**
 355 * ilk_update_display_irq - update DEIMR
 356 * @dev_priv: driver private
 357 * @interrupt_mask: mask of interrupt bits to update
 358 * @enabled_irq_mask: mask of interrupt bits to enable
 359 */
 360static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
 361				   u32 interrupt_mask, u32 enabled_irq_mask)
 362{
 363	u32 new_val;
 364
 365	lockdep_assert_held(&dev_priv->irq_lock);
 366	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 367
 368	new_val = dev_priv->irq_mask;
 369	new_val &= ~interrupt_mask;
 370	new_val |= (~enabled_irq_mask & interrupt_mask);
 371
 372	if (new_val != dev_priv->irq_mask &&
 373	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
 374		dev_priv->irq_mask = new_val;
 375		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
 376		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
 377	}
 378}
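
/*
 * Worked example (sketch): interrupt_mask = 0b0110 with
 * enabled_irq_mask = 0b0010 asks to enable bit 1 and disable bit 2
 * while leaving every other bit of DEIMR alone:
 *
 *	new_val = (old & ~0b0110) | (~0b0010 & 0b0110)
 *	        = (old & ~0b0110) | 0b0100
 *
 * so bit 1 ends up unmasked (0 = enabled), bit 2 masked (1), and the
 * rest keeps its previous state.
 */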
 379
 380void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
 381{
 382	ilk_update_display_irq(i915, bits, bits);
 383}
 384
 385void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
 386{
 387	ilk_update_display_irq(i915, bits, 0);
 388}
 389
 390/**
 391 * bdw_update_port_irq - update DE port interrupt
 392 * @dev_priv: driver private
 393 * @interrupt_mask: mask of interrupt bits to update
 394 * @enabled_irq_mask: mask of interrupt bits to enable
 395 */
 396static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
 397				u32 interrupt_mask,
 398				u32 enabled_irq_mask)
 399{
 400	u32 new_val;
 401	u32 old_val;
 402
 403	lockdep_assert_held(&dev_priv->irq_lock);
 404
 405	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 406
 407	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 408		return;
 409
 410	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
 411
 412	new_val = old_val;
 413	new_val &= ~interrupt_mask;
 414	new_val |= (~enabled_irq_mask & interrupt_mask);
 415
 416	if (new_val != old_val) {
 417		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
 418		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
 419	}
 420}
 421
 422/**
 423 * bdw_update_pipe_irq - update DE pipe interrupt
 424 * @dev_priv: driver private
 425 * @pipe: pipe whose interrupt to update
 426 * @interrupt_mask: mask of interrupt bits to update
 427 * @enabled_irq_mask: mask of interrupt bits to enable
 428 */
 429static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
 430				enum pipe pipe, u32 interrupt_mask,
 431				u32 enabled_irq_mask)
 432{
 433	u32 new_val;
 434
 435	lockdep_assert_held(&dev_priv->irq_lock);
 436
 437	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 438
 439	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 440		return;
 441
 442	new_val = dev_priv->de_irq_mask[pipe];
 443	new_val &= ~interrupt_mask;
 444	new_val |= (~enabled_irq_mask & interrupt_mask);
 445
 446	if (new_val != dev_priv->de_irq_mask[pipe]) {
 447		dev_priv->de_irq_mask[pipe] = new_val;
 448		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
 449		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
 450	}
 451}
 452
 453void bdw_enable_pipe_irq(struct drm_i915_private *i915,
 454			 enum pipe pipe, u32 bits)
 455{
 456	bdw_update_pipe_irq(i915, pipe, bits, bits);
 457}
 458
 459void bdw_disable_pipe_irq(struct drm_i915_private *i915,
 460			  enum pipe pipe, u32 bits)
 461{
 462	bdw_update_pipe_irq(i915, pipe, bits, 0);
 463}
 464
 465/**
 466 * ibx_display_interrupt_update - update SDEIMR
 467 * @dev_priv: driver private
 468 * @interrupt_mask: mask of interrupt bits to update
 469 * @enabled_irq_mask: mask of interrupt bits to enable
 470 */
 471static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 472					 u32 interrupt_mask,
 473					 u32 enabled_irq_mask)
 474{
 475	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
 476	sdeimr &= ~interrupt_mask;
 477	sdeimr |= (~enabled_irq_mask & interrupt_mask);
 478
 479	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 480
 481	lockdep_assert_held(&dev_priv->irq_lock);
 482
 483	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 484		return;
 485
 486	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
 487	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
 488}
 489
 490void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
 491{
 492	ibx_display_interrupt_update(i915, bits, bits);
 493}
 494
 495void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
 496{
 497	ibx_display_interrupt_update(i915, bits, 0);
 498}
 499
 500u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
 501			      enum pipe pipe)
 502{
 503	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
 504	u32 enable_mask = status_mask << 16;
 505
 506	lockdep_assert_held(&dev_priv->irq_lock);
 507
 508	if (DISPLAY_VER(dev_priv) < 5)
 509		goto out;
 510
 511	/*
 512	 * On pipe A we don't support the PSR interrupt yet,
 513	 * on pipe B and C the same bit MBZ.
 514	 */
 515	if (drm_WARN_ON_ONCE(&dev_priv->drm,
 516			     status_mask & PIPE_A_PSR_STATUS_VLV))
 517		return 0;
 518	/*
 519	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
 520	 * A the same bit is for perf counters which we don't use either.
 521	 */
 522	if (drm_WARN_ON_ONCE(&dev_priv->drm,
 523			     status_mask & PIPE_B_PSR_STATUS_VLV))
 524		return 0;
 525
 526	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
 527			 SPRITE0_FLIP_DONE_INT_EN_VLV |
 528			 SPRITE1_FLIP_DONE_INT_EN_VLV);
 529	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
 530		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
 531	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
 532		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
 533
 534out:
 535	drm_WARN_ONCE(&dev_priv->drm,
 536		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
 537		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
 538		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
 539		      pipe_name(pipe), enable_mask, status_mask);
 540
 541	return enable_mask;
 542}
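
/*
 * The shift above works because PIPESTAT pairs each status bit in the
 * low 16 bits with its enable bit 16 positions up: e.g. (sketch) a
 * status bit at position 2 is armed by bit 18, so status_mask << 16 is
 * the default enable mask. The VLV sprite/PSR cases handled above are
 * the exceptions to that 1:1 pairing.
 */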
 543
 544void i915_enable_pipestat(struct drm_i915_private *dev_priv,
 545			  enum pipe pipe, u32 status_mask)
 546{
 547	i915_reg_t reg = PIPESTAT(pipe);
 548	u32 enable_mask;
 549
 550	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
 551		      "pipe %c: status_mask=0x%x\n",
 552		      pipe_name(pipe), status_mask);
 553
 554	lockdep_assert_held(&dev_priv->irq_lock);
 555	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
 556
 557	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
 558		return;
 559
 560	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
 561	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
 562
 563	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
 564	intel_uncore_posting_read(&dev_priv->uncore, reg);
 565}
 566
 567void i915_disable_pipestat(struct drm_i915_private *dev_priv,
 568			   enum pipe pipe, u32 status_mask)
 569{
 570	i915_reg_t reg = PIPESTAT(pipe);
 571	u32 enable_mask;
 572
 573	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
 574		      "pipe %c: status_mask=0x%x\n",
 575		      pipe_name(pipe), status_mask);
 576
 577	lockdep_assert_held(&dev_priv->irq_lock);
 578	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
 579
 580	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
 581		return;
 582
 583	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
 584	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
 585
 586	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
 587	intel_uncore_posting_read(&dev_priv->uncore, reg);
 588}
 589
 590static bool i915_has_asle(struct drm_i915_private *dev_priv)
 591{
 592	if (!dev_priv->display.opregion.asle)
 593		return false;
 594
 595	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
 596}
 597
 598/**
 599 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 600 * @dev_priv: i915 device private
 601 */
 602static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
 603{
 604	if (!i915_has_asle(dev_priv))
 605		return;
 606
 607	spin_lock_irq(&dev_priv->irq_lock);
 608
 609	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
 610	if (DISPLAY_VER(dev_priv) >= 4)
 611		i915_enable_pipestat(dev_priv, PIPE_A,
 612				     PIPE_LEGACY_BLC_EVENT_STATUS);
 613
 614	spin_unlock_irq(&dev_priv->irq_lock);
 615}
 616
 617/*
 618 * This timing diagram depicts the video signal in and
 619 * around the vertical blanking period.
 620 *
 621 * Assumptions about the fictitious mode used in this example:
 622 *  vblank_start >= 3
 623 *  vsync_start = vblank_start + 1
 624 *  vsync_end = vblank_start + 2
 625 *  vtotal = vblank_start + 3
 626 *
 627 *           start of vblank:
 628 *           latch double buffered registers
 629 *           increment frame counter (ctg+)
 630 *           generate start of vblank interrupt (gen4+)
 631 *           |
 632 *           |          frame start:
 633 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 634 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 635 *           |          |
 636 *           |          |  start of vsync:
 637 *           |          |  generate vsync interrupt
 638 *           |          |  |
 639 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 640 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 641 * ----va---> <-----------------vb--------------------> <--------va-------------
 642 *       |          |       <----vs----->                     |
 643 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 644 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 645 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 646 *       |          |                                         |
 647 *       last visible pixel                                   first visible pixel
 648 *                  |                                         increment frame counter (gen3/4)
 649 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 650 *
 651 * x  = horizontal active
 652 * _  = horizontal blanking
 653 * hs = horizontal sync
 654 * va = vertical active
 655 * vb = vertical blanking
 656 * vs = vertical sync
 657 * vbs = vblank_start (number)
 658 *
 659 * Summary:
 660 * - most events happen at the start of horizontal sync
 661 * - frame start happens at the start of horizontal blank, 1-4 lines
 662 *   (depending on PIPECONF settings) after the start of vblank
 663 * - gen3/4 pixel and frame counter are synchronized with the start
 664 *   of horizontal active on the first line of vertical active
 665 */
 666
 667/* Called from drm generic code, passed a 'crtc', which
 668 * we use as a pipe index
 669 */
 670u32 i915_get_vblank_counter(struct drm_crtc *crtc)
 671{
 672	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 673	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
 674	const struct drm_display_mode *mode = &vblank->hwmode;
 675	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 676	i915_reg_t high_frame, low_frame;
 677	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 678	unsigned long irqflags;
 679
 680	/*
 681	 * On i965gm TV output the frame counter only works up to
 682	 * the point when we enable the TV encoder. After that the
 683	 * frame counter ceases to work and reads zero. We need a
 684	 * vblank wait before enabling the TV encoder and so we
 685	 * have to enable vblank interrupts while the frame counter
 686	 * is still in a working state. However the core vblank code
 687	 * does not like us returning non-zero frame counter values
 688	 * when we've told it that we don't have a working frame
 689	 * counter. Thus we must stop non-zero values leaking out.
 690	 */
 691	if (!vblank->max_vblank_count)
 692		return 0;
 693
 694	htotal = mode->crtc_htotal;
 695	hsync_start = mode->crtc_hsync_start;
 696	vbl_start = mode->crtc_vblank_start;
 697	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 698		vbl_start = DIV_ROUND_UP(vbl_start, 2);
 699
 700	/* Convert to pixel count */
 701	vbl_start *= htotal;
 702
 703	/* Start of vblank event occurs at start of hsync */
 704	vbl_start -= htotal - hsync_start;
 705
 706	high_frame = PIPEFRAME(pipe);
 707	low_frame = PIPEFRAMEPIXEL(pipe);
 708
 709	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 710
 711	/*
 712	 * High & low register fields aren't synchronized, so make sure
 713	 * we get a low value that's stable across two reads of the high
 714	 * register.
 715	 */
 716	do {
 717		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
 718		low   = intel_de_read_fw(dev_priv, low_frame);
 719		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
 720	} while (high1 != high2);
 721
 722	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 723
 724	high1 >>= PIPE_FRAME_HIGH_SHIFT;
 725	pixel = low & PIPE_PIXEL_MASK;
 726	low >>= PIPE_FRAME_LOW_SHIFT;
 727
 728	/*
 729	 * The frame counter increments at beginning of active.
 730	 * Cook up a vblank counter by also checking the pixel
 731	 * counter against vblank start.
 732	 */
 733	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
 734}
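/*
 * A worked example of the cook-up above, with made-up register values:
 * if high1 = 0x12 and low = 0x34 after their shifts, the raw frame
 * counter is (0x12 << 8) | 0x34 = 0x1234.  Since the hardware counter
 * only increments at the start of active, but the vblank counter is
 * expected to tick at the start of vblank, (pixel >= vbl_start) adds
 * the missing +1 whenever the pixel counter is already past the
 * (hsync adjusted) vblank start, giving 0x1235 in that case.
 */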
 735
 736u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
 737{
 738	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 739	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
 740	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 741
 742	if (!vblank->max_vblank_count)
 743		return 0;
 744
 745	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
 746}
 747
 748static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
 749{
 750	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 751	struct drm_vblank_crtc *vblank =
 752		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
 753	const struct drm_display_mode *mode = &vblank->hwmode;
 754	u32 htotal = mode->crtc_htotal;
 755	u32 clock = mode->crtc_clock;
 756	u32 scan_prev_time, scan_curr_time, scan_post_time;
 757
 758	/*
 759	 * To avoid the race condition where we might cross into the
 760	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
 761	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
 762	 * during the same frame.
 763	 */
 764	do {
 765		/*
 766		 * This field provides read back of the display
 767		 * pipe frame time stamp. The time stamp value
 768		 * is sampled at every start of vertical blank.
 769		 */
 770		scan_prev_time = intel_de_read_fw(dev_priv,
 771						  PIPE_FRMTMSTMP(crtc->pipe));
 772
 773		/*
 774		 * The TIMESTAMP_CTR register has the current
 775		 * time stamp value.
 776		 */
 777		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
 778
 779		scan_post_time = intel_de_read_fw(dev_priv,
 780						  PIPE_FRMTMSTMP(crtc->pipe));
 781	} while (scan_post_time != scan_prev_time);
 782
 783	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
 784				   clock), 1000 * htotal);
 785}
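/*
 * Unit check for the conversion above, assuming the timestamp registers
 * tick at 1 MHz and crtc_clock is in kHz (illustrative numbers): for a
 * 1080p-like mode with clock = 148500 and htotal = 2200, a delta of
 * 1000 ticks (1 ms) yields 1000 * 148500 / (1000 * 2200) = 67 whole
 * scanlines, i.e. roughly 14.8 us per line.
 */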
 786
 787/*
 788 * On certain encoders on certain platforms, the pipe
 789 * scanline register will not work to get the scanline,
 790 * either because the timings are driven from the PORT or
 791 * because of issues with scanline register updates.
 792 * This function instead uses the framestamp and current
 793 * timestamp registers to calculate the scanline.
 794 */
 795static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
 796{
 797	struct drm_vblank_crtc *vblank =
 798		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
 799	const struct drm_display_mode *mode = &vblank->hwmode;
 800	u32 vblank_start = mode->crtc_vblank_start;
 801	u32 vtotal = mode->crtc_vtotal;
 802	u32 scanline;
 803
 804	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
 805	scanline = min(scanline, vtotal - 1);
 806	scanline = (scanline + vblank_start) % vtotal;
 807
 808	return scanline;
 809}
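/*
 * The modular shift above anchors the count to the frame timestamp:
 * PIPE_FRMTMSTMP is sampled at the start of vblank, so zero elapsed
 * scanlines corresponds to vblank_start.  E.g. (illustrative numbers)
 * with vtotal = 1125, vblank_start = 1080 and 50 scanlines elapsed,
 * the current scanline is (50 + 1080) % 1125 = 5.
 */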
 810
 811/*
 812 * intel_de_read_fw() is used below only for fast reads of the display
 813 * block; there is no need for forcewake etc.
 814 */
 815static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 816{
 817	struct drm_device *dev = crtc->base.dev;
 818	struct drm_i915_private *dev_priv = to_i915(dev);
 819	const struct drm_display_mode *mode;
 820	struct drm_vblank_crtc *vblank;
 821	enum pipe pipe = crtc->pipe;
 822	int position, vtotal;
 823
 824	if (!crtc->active)
 825		return 0;
 826
 827	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
 828	mode = &vblank->hwmode;
 829
 830	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
 831		return __intel_get_crtc_scanline_from_timestamp(crtc);
 832
 833	vtotal = mode->crtc_vtotal;
 834	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 835		vtotal /= 2;
 836
 837	position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
 838
 839	/*
 840	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
 841	 * read it just before the start of vblank.  So try it again
 842	 * so we don't accidentally end up spanning a vblank frame
 843	 * increment, causing the pipe_update_end() code to squawk at us.
 844	 *
 845	 * The nature of this problem means we can't simply check the ISR
 846	 * bit and return the vblank start value; nor can we use the scanline
 847	 * debug register in the transcoder as it appears to have the same
 848	 * problem.  We may need to extend this to include other platforms,
 849	 * but so far testing only shows the problem on HSW.
 850	 */
 851	if (HAS_DDI(dev_priv) && !position) {
 852		int i, temp;
 853
 854		for (i = 0; i < 100; i++) {
 855			udelay(1);
 856			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
 857			if (temp != position) {
 858				position = temp;
 859				break;
 860			}
 861		}
 862	}
 863
 864	/*
 865	 * See update_scanline_offset() for the details on the
 866	 * scanline_offset adjustment.
 867	 */
 868	return (position + crtc->scanline_offset) % vtotal;
 869}
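/*
 * A worked example of the offset correction (illustrative): if the
 * hardware counter lags the logical scanline by two lines, as in the
 * gen3+ row of the timing diagram above, scanline_offset = 2 and a
 * raw read of vtotal - 1 maps to ((vtotal - 1) + 2) % vtotal = 1.
 */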
 870
 871static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
 872				     bool in_vblank_irq,
 873				     int *vpos, int *hpos,
 874				     ktime_t *stime, ktime_t *etime,
 875				     const struct drm_display_mode *mode)
 876{
 877	struct drm_device *dev = _crtc->dev;
 878	struct drm_i915_private *dev_priv = to_i915(dev);
 879	struct intel_crtc *crtc = to_intel_crtc(_crtc);
 880	enum pipe pipe = crtc->pipe;
 881	int position;
 882	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
 883	unsigned long irqflags;
 884	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
 885		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
 886		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
 887
 888	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
 889		drm_dbg(&dev_priv->drm,
 890			"trying to get scanoutpos for disabled "
 891			"pipe %c\n", pipe_name(pipe));
 892		return false;
 893	}
 894
 895	htotal = mode->crtc_htotal;
 896	hsync_start = mode->crtc_hsync_start;
 897	vtotal = mode->crtc_vtotal;
 898	vbl_start = mode->crtc_vblank_start;
 899	vbl_end = mode->crtc_vblank_end;
 900
 901	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
 902		vbl_start = DIV_ROUND_UP(vbl_start, 2);
 903		vbl_end /= 2;
 904		vtotal /= 2;
 905	}
 906
 907	/*
 908	 * Lock uncore.lock, as we will do multiple timing critical raw
 909	 * register reads, potentially with preemption disabled, so the
 910	 * following code must not block on uncore.lock.
 911	 */
 912	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 913
 914	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
 915
 916	/* Get optional system timestamp before query. */
 917	if (stime)
 918		*stime = ktime_get();
 919
 920	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
 921		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
 922
 923		position = __intel_get_crtc_scanline(crtc);
 924
 925		/*
 926		 * Already exiting vblank? If so, shift our position
 927		 * so it looks like we're already approaching the full
 928		 * vblank end. This should make the generated timestamp
 929		 * more or less match when the active portion will start.
 930		 */
 931		if (position >= vbl_start && scanlines < position)
 932			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
 933	} else if (use_scanline_counter) {
 934		/* No obvious pixelcount register. Only query vertical
 935		 * scanout position from Display scan line register.
 936		 */
 937		position = __intel_get_crtc_scanline(crtc);
 938	} else {
 939		/* Have access to pixelcount since start of frame.
 940		 * We can split this into vertical and horizontal
 941		 * scanout position.
 942		 */
 943		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 944
 945		/* convert to pixel counts */
 946		vbl_start *= htotal;
 947		vbl_end *= htotal;
 948		vtotal *= htotal;
 949
 950		/*
 951		 * In interlaced modes, the pixel counter counts all pixels,
 952		 * so one field will have htotal more pixels. In order to avoid
 953		 * the reported position from jumping backwards when the pixel
 954		 * counter is beyond the length of the shorter field, just
 955		 * clamp the position to the length of the shorter field. This
 956		 * matches how the scanline counter based position works since
 957		 * the scanline counter doesn't count the two half lines.
 958		 */
 959		if (position >= vtotal)
 960			position = vtotal - 1;
 961
 962		/*
 963		 * Start of vblank interrupt is triggered at start of hsync,
 964		 * just prior to the first active line of vblank. However we
 965		 * consider lines to start at the leading edge of horizontal
 966		 * active. So, should we get here before we've crossed into
 967		 * the horizontal active of the first line in vblank, we would
 968		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
 969		 * always add htotal-hsync_start to the current pixel position.
 970		 */
 971		position = (position + htotal - hsync_start) % vtotal;
 972	}
 973
 974	/* Get optional system timestamp after query. */
 975	if (etime)
 976		*etime = ktime_get();
 977
 978	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
 979
 980	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 981
 982	/*
 983	 * While in vblank, position will be negative
 984	 * counting up towards 0 at vbl_end. And outside
 985	 * vblank, position will be positive counting
 986	 * up since vbl_end.
 987	 */
 988	if (position >= vbl_start)
 989		position -= vbl_end;
 990	else
 991		position += vtotal - vbl_end;
 992
 993	if (use_scanline_counter) {
 994		*vpos = position;
 995		*hpos = 0;
 996	} else {
 997		*vpos = position / htotal;
 998		*hpos = position - (*vpos * htotal);
 999	}
1000
1001	return true;
1002}
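/*
 * A worked example of the final normalization (illustrative numbers):
 * with vtotal = 1125, vbl_start = 1080 and vbl_end = 1120, a scanline
 * of 1100 lies inside vblank and becomes 1100 - 1120 = -20 (20 lines
 * before the end of vblank), while a scanline of 100 lies in the
 * active area and becomes 100 + (1125 - 1120) = 105 (105 lines since
 * the end of vblank).
 */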
1003
1004bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
1005				     ktime_t *vblank_time, bool in_vblank_irq)
1006{
1007	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
1008		crtc, max_error, vblank_time, in_vblank_irq,
1009		i915_get_crtc_scanoutpos);
1010}
1011
1012int intel_get_crtc_scanline(struct intel_crtc *crtc)
1013{
1014	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1015	unsigned long irqflags;
1016	int position;
1017
1018	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1019	position = __intel_get_crtc_scanline(crtc);
1020	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1021
1022	return position;
1023}
1024
1025/**
1026 * ivb_parity_work - Workqueue called when a parity error interrupt
1027 * occurred.
1028 * @work: workqueue struct
1029 *
1030 * Doesn't actually do anything except notify userspace. As a consequence of
1031 * this event, userspace should try to remap the bad rows since statistically
1032 * the same row is likely to go bad again.
1033 */
1034static void ivb_parity_work(struct work_struct *work)
1035{
1036	struct drm_i915_private *dev_priv =
1037		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1038	struct intel_gt *gt = to_gt(dev_priv);
1039	u32 error_status, row, bank, subbank;
1040	char *parity_event[6];
1041	u32 misccpctl;
1042	u8 slice = 0;
1043
1044	/* We must turn off DOP level clock gating to access the L3 registers.
1045	 * In order to prevent a get/put style interface, acquire struct mutex
1046	 * any time we access those registers.
1047	 */
1048	mutex_lock(&dev_priv->drm.struct_mutex);
1049
1050	/* If we've screwed up tracking, just let the interrupt fire again */
1051	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
1052		goto out;
1053
1054	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
1055				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
1056	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1057
1058	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1059		i915_reg_t reg;
1060
1061		slice--;
1062		if (drm_WARN_ON_ONCE(&dev_priv->drm,
1063				     slice >= NUM_L3_SLICES(dev_priv)))
1064			break;
1065
1066		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1067
1068		reg = GEN7_L3CDERRST1(slice);
1069
1070		error_status = intel_uncore_read(&dev_priv->uncore, reg);
1071		row = GEN7_PARITY_ERROR_ROW(error_status);
1072		bank = GEN7_PARITY_ERROR_BANK(error_status);
1073		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1074
1075		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1076		intel_uncore_posting_read(&dev_priv->uncore, reg);
1077
1078		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1079		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1080		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1081		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1082		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1083		parity_event[5] = NULL;
1084
1085		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1086				   KOBJ_CHANGE, parity_event);
1087
1088		drm_dbg(&dev_priv->drm,
1089			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1090			slice, row, bank, subbank);
1091
1092		kfree(parity_event[4]);
1093		kfree(parity_event[3]);
1094		kfree(parity_event[2]);
1095		kfree(parity_event[1]);
1096	}
1097
1098	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
1099
1100out:
1101	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1102	spin_lock_irq(gt->irq_lock);
1103	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
1104	spin_unlock_irq(gt->irq_lock);
1105
1106	mutex_unlock(&dev_priv->drm.struct_mutex);
1107}
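/*
 * The ffs() loop above drains one slice per iteration.  E.g. with
 * which_slice = 0x5 (slices 0 and 2 pending, illustrative value):
 * ffs() returns 1, so slice 0 is handled and its bit cleared leaving
 * 0x4; ffs() then returns 3, so slice 2 is handled; the next ffs()
 * returns 0 and the loop terminates.
 */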
1108
1109static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1110{
1111	switch (pin) {
1112	case HPD_PORT_TC1:
1113	case HPD_PORT_TC2:
1114	case HPD_PORT_TC3:
1115	case HPD_PORT_TC4:
1116	case HPD_PORT_TC5:
1117	case HPD_PORT_TC6:
1118		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
1119	default:
1120		return false;
1121	}
1122}
1123
1124static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1125{
1126	switch (pin) {
1127	case HPD_PORT_A:
1128		return val & PORTA_HOTPLUG_LONG_DETECT;
1129	case HPD_PORT_B:
1130		return val & PORTB_HOTPLUG_LONG_DETECT;
1131	case HPD_PORT_C:
1132		return val & PORTC_HOTPLUG_LONG_DETECT;
1133	default:
1134		return false;
1135	}
1136}
1137
1138static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1139{
1140	switch (pin) {
1141	case HPD_PORT_A:
1142	case HPD_PORT_B:
1143	case HPD_PORT_C:
1144	case HPD_PORT_D:
1145		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
1146	default:
1147		return false;
1148	}
1149}
1150
1151static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1152{
1153	switch (pin) {
1154	case HPD_PORT_TC1:
1155	case HPD_PORT_TC2:
1156	case HPD_PORT_TC3:
1157	case HPD_PORT_TC4:
1158	case HPD_PORT_TC5:
1159	case HPD_PORT_TC6:
1160		return val & ICP_TC_HPD_LONG_DETECT(pin);
1161	default:
1162		return false;
1163	}
1164}
1165
1166static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1167{
1168	switch (pin) {
1169	case HPD_PORT_E:
1170		return val & PORTE_HOTPLUG_LONG_DETECT;
1171	default:
1172		return false;
1173	}
1174}
1175
1176static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1177{
1178	switch (pin) {
1179	case HPD_PORT_A:
1180		return val & PORTA_HOTPLUG_LONG_DETECT;
1181	case HPD_PORT_B:
1182		return val & PORTB_HOTPLUG_LONG_DETECT;
1183	case HPD_PORT_C:
1184		return val & PORTC_HOTPLUG_LONG_DETECT;
1185	case HPD_PORT_D:
1186		return val & PORTD_HOTPLUG_LONG_DETECT;
1187	default:
1188		return false;
1189	}
1190}
1191
1192static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1193{
1194	switch (pin) {
1195	case HPD_PORT_A:
1196		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1197	default:
1198		return false;
1199	}
1200}
1201
1202static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1203{
1204	switch (pin) {
1205	case HPD_PORT_B:
1206		return val & PORTB_HOTPLUG_LONG_DETECT;
1207	case HPD_PORT_C:
1208		return val & PORTC_HOTPLUG_LONG_DETECT;
1209	case HPD_PORT_D:
1210		return val & PORTD_HOTPLUG_LONG_DETECT;
1211	default:
1212		return false;
1213	}
1214}
1215
1216static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1217{
1218	switch (pin) {
1219	case HPD_PORT_B:
1220		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1221	case HPD_PORT_C:
1222		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1223	case HPD_PORT_D:
1224		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1225	default:
1226		return false;
1227	}
1228}
1229
1230/*
1231 * Get a bit mask of pins that have triggered, and which ones may be long.
1232 * This can be called multiple times with the same masks to accumulate
1233 * hotplug detection results from several registers.
1234 *
1235 * Note that the caller is expected to zero out the masks initially.
1236 */
1237static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1238			       u32 *pin_mask, u32 *long_mask,
1239			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1240			       const u32 hpd[HPD_NUM_PINS],
1241			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1242{
1243	enum hpd_pin pin;
1244
1245	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1246
1247	for_each_hpd_pin(pin) {
1248		if ((hpd[pin] & hotplug_trigger) == 0)
1249			continue;
1250
1251		*pin_mask |= BIT(pin);
1252
1253		if (long_pulse_detect(pin, dig_hotplug_reg))
1254			*long_mask |= BIT(pin);
1255	}
1256
1257	drm_dbg(&dev_priv->drm,
1258		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1259		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1260
1261}
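/*
 * One accumulation step sketched with hypothetical values: if
 * hpd[HPD_PORT_B] is the only entry intersecting hotplug_trigger,
 * the loop sets BIT(HPD_PORT_B) in *pin_mask, and also in *long_mask
 * when long_pulse_detect() reports a long pulse for that pin.
 * Calling this again with another trigger/register pair ORs further
 * pins into the same masks.
 */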
1262
1263static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
1264				  const u32 hpd[HPD_NUM_PINS])
1265{
1266	struct intel_encoder *encoder;
1267	u32 enabled_irqs = 0;
1268
1269	for_each_intel_encoder(&dev_priv->drm, encoder)
1270		if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
1271			enabled_irqs |= hpd[encoder->hpd_pin];
1272
1273	return enabled_irqs;
1274}
1275
1276static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
1277				  const u32 hpd[HPD_NUM_PINS])
1278{
1279	struct intel_encoder *encoder;
1280	u32 hotplug_irqs = 0;
1281
1282	for_each_intel_encoder(&dev_priv->drm, encoder)
1283		hotplug_irqs |= hpd[encoder->hpd_pin];
1284
1285	return hotplug_irqs;
1286}
1287
1288static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
1289				     hotplug_enables_func hotplug_enables)
1290{
1291	struct intel_encoder *encoder;
1292	u32 hotplug = 0;
1293
1294	for_each_intel_encoder(&i915->drm, encoder)
1295		hotplug |= hotplug_enables(i915, encoder->hpd_pin);
1296
1297	return hotplug;
1298}
1299
1300static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1301{
1302	wake_up_all(&dev_priv->display.gmbus.wait_queue);
1303}
1304
1305static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1306{
1307	wake_up_all(&dev_priv->display.gmbus.wait_queue);
1308}
1309
1310#if defined(CONFIG_DEBUG_FS)
1311static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1312					 enum pipe pipe,
1313					 u32 crc0, u32 crc1,
1314					 u32 crc2, u32 crc3,
1315					 u32 crc4)
1316{
1317	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
1318	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1319	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1320
1321	trace_intel_pipe_crc(crtc, crcs);
1322
1323	spin_lock(&pipe_crc->lock);
1324	/*
1325	 * For some not yet identified reason, the first CRC is
1326	 * bonkers. So let's just wait for the next vblank and read
1327	 * out the buggy result.
1328	 *
1329	 * On GEN8+ sometimes the second CRC is bonkers as well, so
1330	 * don't trust that one either.
1331	 */
1332	if (pipe_crc->skipped <= 0 ||
1333	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1334		pipe_crc->skipped++;
1335		spin_unlock(&pipe_crc->lock);
1336		return;
1337	}
1338	spin_unlock(&pipe_crc->lock);
1339
1340	drm_crtc_add_crc_entry(&crtc->base, true,
1341				drm_crtc_accurate_vblank_count(&crtc->base),
1342				crcs);
1343}
1344#else
1345static inline void
1346display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1347			     enum pipe pipe,
1348			     u32 crc0, u32 crc1,
1349			     u32 crc2, u32 crc3,
1350			     u32 crc4) {}
1351#endif
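/*
 * Net effect of the skip logic above, as the comment in the handler
 * explains: the first CRC after enabling is dropped (skipped goes
 * 0 -> 1), and on gen8+ the second one is dropped as well (1 -> 2);
 * only subsequent values reach drm_crtc_add_crc_entry().
 */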
1352
1353static void flip_done_handler(struct drm_i915_private *i915,
1354			      enum pipe pipe)
1355{
1356	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
1357	struct drm_crtc_state *crtc_state = crtc->base.state;
1358	struct drm_pending_vblank_event *e = crtc_state->event;
1359	struct drm_device *dev = &i915->drm;
1360	unsigned long irqflags;
1361
1362	spin_lock_irqsave(&dev->event_lock, irqflags);
1363
1364	crtc_state->event = NULL;
1365
1366	drm_crtc_send_vblank_event(&crtc->base, e);
1367
1368	spin_unlock_irqrestore(&dev->event_lock, irqflags);
1369}
1370
1371static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1372				     enum pipe pipe)
1373{
1374	display_pipe_crc_irq_handler(dev_priv, pipe,
1375				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1376				     0, 0, 0, 0);
1377}
1378
1379static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1380				     enum pipe pipe)
1381{
1382	display_pipe_crc_irq_handler(dev_priv, pipe,
1383				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1384				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
1385				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
1386				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
1387				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
1388}
1389
1390static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1391				      enum pipe pipe)
1392{
1393	u32 res1, res2;
1394
1395	if (DISPLAY_VER(dev_priv) >= 3)
1396		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
1397	else
1398		res1 = 0;
1399
1400	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
1401		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
1402	else
1403		res2 = 0;
1404
1405	display_pipe_crc_irq_handler(dev_priv, pipe,
1406				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
1407				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
1408				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
1409				     res1, res2);
1410}
1411
1412static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1413{
1414	enum pipe pipe;
1415
1416	for_each_pipe(dev_priv, pipe) {
1417		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
1418			   PIPESTAT_INT_STATUS_MASK |
1419			   PIPE_FIFO_UNDERRUN_STATUS);
1420
1421		dev_priv->pipestat_irq_mask[pipe] = 0;
1422	}
1423}
1424
1425static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1426				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1427{
1428	enum pipe pipe;
1429
1430	spin_lock(&dev_priv->irq_lock);
1431
1432	if (!dev_priv->display_irqs_enabled) {
1433		spin_unlock(&dev_priv->irq_lock);
1434		return;
1435	}
1436
1437	for_each_pipe(dev_priv, pipe) {
1438		i915_reg_t reg;
1439		u32 status_mask, enable_mask, iir_bit = 0;
1440
1441		/*
1442		 * PIPESTAT bits get signalled even when the interrupt is
1443		 * disabled with the mask bits, and some of the status bits do
1444		 * not generate interrupts at all (like the underrun bit). Hence
1445		 * we need to be careful that we only handle what we want to
1446		 * handle.
1447		 */
1448
1449		/* fifo underruns are filtered in the underrun handler. */
1450		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1451
1452		switch (pipe) {
1453		default:
1454		case PIPE_A:
1455			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1456			break;
1457		case PIPE_B:
1458			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1459			break;
1460		case PIPE_C:
1461			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1462			break;
1463		}
1464		if (iir & iir_bit)
1465			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1466
1467		if (!status_mask)
1468			continue;
1469
1470		reg = PIPESTAT(pipe);
1471		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1472		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1473
1474		/*
1475		 * Clear the PIPE*STAT regs before the IIR
1476		 *
1477		 * Toggle the enable bits to make sure we get an
1478		 * edge in the ISR pipe event bit if we don't clear
1479		 * all the enabled status bits. Otherwise the edge
1480		 * triggered IIR on i965/g4x wouldn't notice that
1481		 * an interrupt is still pending.
1482		 */
1483		if (pipe_stats[pipe]) {
1484			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1485			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1486		}
1487	}
1488	spin_unlock(&dev_priv->irq_lock);
1489}
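/*
 * The double write above is the edge trick from the comment: the first
 * write clears the latched status bits (and, as a side effect, writes
 * 0 to the enable bits), the second restores enable_mask.  That
 * off-then-on toggle regenerates a 0->1 edge on the ISR pipe event bit
 * if any enabled status bit is still pending, which the edge triggered
 * IIR on i965/g4x would otherwise never notice.
 */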
1490
1491static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1492				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1493{
1494	enum pipe pipe;
1495
1496	for_each_pipe(dev_priv, pipe) {
1497		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1498			intel_handle_vblank(dev_priv, pipe);
1499
1500		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1501			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1502
1503		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1504			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1505	}
1506}
1507
1508static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1509				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1510{
1511	bool blc_event = false;
1512	enum pipe pipe;
1513
1514	for_each_pipe(dev_priv, pipe) {
1515		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1516			intel_handle_vblank(dev_priv, pipe);
1517
1518		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1519			blc_event = true;
1520
1521		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1522			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1523
1524		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1525			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1526	}
1527
1528	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1529		intel_opregion_asle_intr(dev_priv);
1530}
1531
1532static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1533				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1534{
1535	bool blc_event = false;
1536	enum pipe pipe;
1537
1538	for_each_pipe(dev_priv, pipe) {
1539		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1540			intel_handle_vblank(dev_priv, pipe);
1541
1542		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1543			blc_event = true;
1544
1545		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1546			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1547
1548		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1549			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1550	}
1551
1552	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1553		intel_opregion_asle_intr(dev_priv);
1554
1555	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1556		gmbus_irq_handler(dev_priv);
1557}
1558
1559static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1560					    u32 pipe_stats[I915_MAX_PIPES])
1561{
1562	enum pipe pipe;
1563
1564	for_each_pipe(dev_priv, pipe) {
1565		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1566			intel_handle_vblank(dev_priv, pipe);
1567
1568		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1569			flip_done_handler(dev_priv, pipe);
1570
1571		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1572			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1573
1574		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1575			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1576	}
1577
1578	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1579		gmbus_irq_handler(dev_priv);
1580}
1581
1582static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1583{
1584	u32 hotplug_status = 0, hotplug_status_mask;
1585	int i;
1586
1587	if (IS_G4X(dev_priv) ||
1588	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1589		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1590			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1591	else
1592		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1593
1594	/*
1595	 * We absolutely have to clear all the pending interrupt
1596	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1597	 * interrupt bit won't have an edge, and the i965/g4x
1598	 * edge triggered IIR will not notice that an interrupt
1599	 * is still pending. We can't use PORT_HOTPLUG_EN to
1600	 * guarantee the edge as the act of toggling the enable
1601	 * bits can itself generate a new hotplug interrupt :(
1602	 */
1603	for (i = 0; i < 10; i++) {
1604		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1605
1606		if (tmp == 0)
1607			return hotplug_status;
1608
1609		hotplug_status |= tmp;
1610		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1611	}
1612
1613	drm_WARN_ONCE(&dev_priv->drm, 1,
1614		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1615		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1616
1617	return hotplug_status;
1618}
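/*
 * The loop above is a bounded ack-until-clear: each read/write-back
 * pass clears the status bits observed so far, but a new hotplug pulse
 * may latch a bit between the read and the write, so the register is
 * re-read until it comes back zero, giving up (with the WARN) after 10
 * attempts and returning whatever was gathered.
 */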
1619
1620static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1621				 u32 hotplug_status)
1622{
1623	u32 pin_mask = 0, long_mask = 0;
1624	u32 hotplug_trigger;
1625
1626	if (IS_G4X(dev_priv) ||
1627	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1628		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1629	else
1630		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1631
1632	if (hotplug_trigger) {
1633		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1634				   hotplug_trigger, hotplug_trigger,
1635				   dev_priv->display.hotplug.hpd,
1636				   i9xx_port_hotplug_long_detect);
1637
1638		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1639	}
1640
1641	if ((IS_G4X(dev_priv) ||
1642	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1643	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1644		dp_aux_irq_handler(dev_priv);
1645}
1646
1647static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1648{
1649	struct drm_i915_private *dev_priv = arg;
1650	irqreturn_t ret = IRQ_NONE;
1651
1652	if (!intel_irqs_enabled(dev_priv))
1653		return IRQ_NONE;
1654
1655	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1656	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1657
1658	do {
1659		u32 iir, gt_iir, pm_iir;
1660		u32 pipe_stats[I915_MAX_PIPES] = {};
1661		u32 hotplug_status = 0;
1662		u32 ier = 0;
1663
1664		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1665		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1666		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1667
1668		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1669			break;
1670
1671		ret = IRQ_HANDLED;
1672
1673		/*
1674		 * Theory on interrupt generation, based on empirical evidence:
1675		 *
1676		 * x = ((VLV_IIR & VLV_IER) ||
1677		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1678		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1679		 *
1680		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1681		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1682		 * guarantee the CPU interrupt will be raised again even if we
1683		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1684		 * bits this time around.
1685		 */
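		/*
		 * Concretely (illustrative): if another GT interrupt latches
		 * while this handler is still running, x never drops to 0 and
		 * no new edge would be generated.  Zeroing VLV_MASTER_IER and
		 * VLV_IER here forces x to 0, so restoring them below yields
		 * a fresh 0->1 edge, and thus a new CPU interrupt, for
		 * anything still pending.
		 */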
1686		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1687		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1688
1689		if (gt_iir)
1690			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1691		if (pm_iir)
1692			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1693
1694		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1695			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1696
1697		/* Call regardless, as some status bits might not be
1698		 * signalled in iir */
1699		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1700
1701		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1702			   I915_LPE_PIPE_B_INTERRUPT))
1703			intel_lpe_audio_irq_handler(dev_priv);
1704
1705		/*
1706		 * VLV_IIR is single buffered, and reflects the level
1707		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1708		 */
1709		if (iir)
1710			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1711
1712		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1713		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1714
1715		if (gt_iir)
1716			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1717		if (pm_iir)
1718			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1719
1720		if (hotplug_status)
1721			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1722
1723		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1724	} while (0);
1725
1726	pmu_irq_stats(dev_priv, ret);
1727
1728	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1729
1730	return ret;
1731}
1732
1733static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1734{
1735	struct drm_i915_private *dev_priv = arg;
1736	irqreturn_t ret = IRQ_NONE;
1737
1738	if (!intel_irqs_enabled(dev_priv))
1739		return IRQ_NONE;
1740
1741	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1742	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1743
1744	do {
1745		u32 master_ctl, iir;
1746		u32 pipe_stats[I915_MAX_PIPES] = {};
1747		u32 hotplug_status = 0;
1748		u32 ier = 0;
1749
1750		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1751		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1752
1753		if (master_ctl == 0 && iir == 0)
1754			break;
1755
1756		ret = IRQ_HANDLED;
1757
1758		/*
1759		 * Theory on interrupt generation, based on empirical evidence:
1760		 *
1761		 * x = ((VLV_IIR & VLV_IER) ||
1762		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1763		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1764		 *
1765		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1766		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1767		 * guarantee the CPU interrupt will be raised again even if we
1768		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1769		 * bits this time around.
1770		 */
1771		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1772		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1773
1774		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1775
1776		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1777			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1778
1779		/* Call regardless, as some status bits might not be
1780		 * signalled in iir */
1781		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1782
1783		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1784			   I915_LPE_PIPE_B_INTERRUPT |
1785			   I915_LPE_PIPE_C_INTERRUPT))
1786			intel_lpe_audio_irq_handler(dev_priv);
1787
1788		/*
1789		 * VLV_IIR is single buffered, and reflects the level
1790		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1791		 */
1792		if (iir)
1793			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1794
1795		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1796		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1797
1798		if (hotplug_status)
1799			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1800
1801		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1802	} while (0);
1803
1804	pmu_irq_stats(dev_priv, ret);
1805
1806	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1807
1808	return ret;
1809}
1810
1811static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1812				u32 hotplug_trigger)
1813{
1814	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1815
1816	/*
1817	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1818	 * unless we touch the hotplug register, even if hotplug_trigger is
1819	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1820	 * errors.
1821	 */
1822	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1823	if (!hotplug_trigger) {
1824		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1825			PORTD_HOTPLUG_STATUS_MASK |
1826			PORTC_HOTPLUG_STATUS_MASK |
1827			PORTB_HOTPLUG_STATUS_MASK;
1828		dig_hotplug_reg &= ~mask;
1829	}
1830
1831	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1832	if (!hotplug_trigger)
1833		return;
1834
1835	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1836			   hotplug_trigger, dig_hotplug_reg,
1837			   dev_priv->display.hotplug.pch_hpd,
1838			   pch_port_hotplug_long_detect);
1839
1840	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1841}
1842
1843static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1844{
1845	enum pipe pipe;
1846	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1847
1848	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1849
1850	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1851		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1852			       SDE_AUDIO_POWER_SHIFT);
1853		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1854			port_name(port));
1855	}
1856
1857	if (pch_iir & SDE_AUX_MASK)
1858		dp_aux_irq_handler(dev_priv);
1859
1860	if (pch_iir & SDE_GMBUS)
1861		gmbus_irq_handler(dev_priv);
1862
1863	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1864		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1865
1866	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1867		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1868
1869	if (pch_iir & SDE_POISON)
1870		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1871
1872	if (pch_iir & SDE_FDI_MASK) {
1873		for_each_pipe(dev_priv, pipe)
1874			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1875				pipe_name(pipe),
1876				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1877	}
1878
1879	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1880		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1881
1882	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1883		drm_dbg(&dev_priv->drm,
1884			"PCH transcoder CRC error interrupt\n");
1885
1886	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1887		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1888
1889	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1890		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1891}
1892
1893static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1894{
1895	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1896	enum pipe pipe;
1897
1898	if (err_int & ERR_INT_POISON)
1899		drm_err(&dev_priv->drm, "Poison interrupt\n");
1900
1901	for_each_pipe(dev_priv, pipe) {
1902		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1903			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1904
1905		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1906			if (IS_IVYBRIDGE(dev_priv))
1907				ivb_pipe_crc_irq_handler(dev_priv, pipe);
1908			else
1909				hsw_pipe_crc_irq_handler(dev_priv, pipe);
1910		}
1911	}
1912
1913	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1914}
1915
1916static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1917{
1918	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1919	enum pipe pipe;
1920
1921	if (serr_int & SERR_INT_POISON)
1922		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1923
1924	for_each_pipe(dev_priv, pipe)
1925		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1926			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1927
1928	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1929}
1930
1931static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1932{
1933	enum pipe pipe;
1934	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1935
1936	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1937
1938	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1939		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1940			       SDE_AUDIO_POWER_SHIFT_CPT);
1941		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1942			port_name(port));
1943	}
1944
1945	if (pch_iir & SDE_AUX_MASK_CPT)
1946		dp_aux_irq_handler(dev_priv);
1947
1948	if (pch_iir & SDE_GMBUS_CPT)
1949		gmbus_irq_handler(dev_priv);
1950
1951	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1952		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1953
1954	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1955		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1956
1957	if (pch_iir & SDE_FDI_MASK_CPT) {
1958		for_each_pipe(dev_priv, pipe)
1959			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1960				pipe_name(pipe),
1961				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1962	}
1963
1964	if (pch_iir & SDE_ERROR_CPT)
1965		cpt_serr_int_handler(dev_priv);
1966}
1967
1968static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1969{
1970	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1971	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1972	u32 pin_mask = 0, long_mask = 0;
1973
1974	if (ddi_hotplug_trigger) {
1975		u32 dig_hotplug_reg;
1976
1977		/* Locking due to DSI native GPIO sequences */
1978		spin_lock(&dev_priv->irq_lock);
1979		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
1980		spin_unlock(&dev_priv->irq_lock);
1981
1982		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1983				   ddi_hotplug_trigger, dig_hotplug_reg,
1984				   dev_priv->display.hotplug.pch_hpd,
1985				   icp_ddi_port_hotplug_long_detect);
1986	}
1987
1988	if (tc_hotplug_trigger) {
1989		u32 dig_hotplug_reg;
1990
1991		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
1992
1993		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1994				   tc_hotplug_trigger, dig_hotplug_reg,
1995				   dev_priv->display.hotplug.pch_hpd,
1996				   icp_tc_port_hotplug_long_detect);
1997	}
1998
1999	if (pin_mask)
2000		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2001
2002	if (pch_iir & SDE_GMBUS_ICP)
2003		gmbus_irq_handler(dev_priv);
2004}
2005
2006static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2007{
2008	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2009		~SDE_PORTE_HOTPLUG_SPT;
2010	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2011	u32 pin_mask = 0, long_mask = 0;
2012
2013	if (hotplug_trigger) {
2014		u32 dig_hotplug_reg;
2015
2016		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
2017
2018		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2019				   hotplug_trigger, dig_hotplug_reg,
2020				   dev_priv->display.hotplug.pch_hpd,
2021				   spt_port_hotplug_long_detect);
2022	}
2023
2024	if (hotplug2_trigger) {
2025		u32 dig_hotplug_reg;
2026
2027		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
2028
2029		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2030				   hotplug2_trigger, dig_hotplug_reg,
2031				   dev_priv->display.hotplug.pch_hpd,
2032				   spt_port_hotplug2_long_detect);
2033	}
2034
2035	if (pin_mask)
2036		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2037
2038	if (pch_iir & SDE_GMBUS_CPT)
2039		gmbus_irq_handler(dev_priv);
2040}
2041
2042static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2043				u32 hotplug_trigger)
2044{
2045	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2046
2047	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
2048
2049	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2050			   hotplug_trigger, dig_hotplug_reg,
2051			   dev_priv->display.hotplug.hpd,
2052			   ilk_port_hotplug_long_detect);
2053
2054	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2055}
2056
2057static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2058				    u32 de_iir)
2059{
2060	enum pipe pipe;
2061	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2062
2063	if (hotplug_trigger)
2064		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2065
2066	if (de_iir & DE_AUX_CHANNEL_A)
2067		dp_aux_irq_handler(dev_priv);
2068
2069	if (de_iir & DE_GSE)
2070		intel_opregion_asle_intr(dev_priv);
2071
2072	if (de_iir & DE_POISON)
2073		drm_err(&dev_priv->drm, "Poison interrupt\n");
2074
2075	for_each_pipe(dev_priv, pipe) {
2076		if (de_iir & DE_PIPE_VBLANK(pipe))
2077			intel_handle_vblank(dev_priv, pipe);
2078
2079		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2080			flip_done_handler(dev_priv, pipe);
2081
2082		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2083			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2084
2085		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2086			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2087	}
2088
2089	/* check event from PCH */
2090	if (de_iir & DE_PCH_EVENT) {
2091		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2092
2093		if (HAS_PCH_CPT(dev_priv))
2094			cpt_irq_handler(dev_priv, pch_iir);
2095		else
2096			ibx_irq_handler(dev_priv, pch_iir);
2097
2098		/* should clear PCH hotplug event before clear CPU irq */
2099		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2100	}
2101
2102	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
2103		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
2104}
2105
2106static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2107				    u32 de_iir)
2108{
2109	enum pipe pipe;
2110	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2111
2112	if (hotplug_trigger)
2113		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2114
2115	if (de_iir & DE_ERR_INT_IVB)
2116		ivb_err_int_handler(dev_priv);
2117
2118	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2119		dp_aux_irq_handler(dev_priv);
2120
2121	if (de_iir & DE_GSE_IVB)
2122		intel_opregion_asle_intr(dev_priv);
2123
2124	for_each_pipe(dev_priv, pipe) {
2125		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2126			intel_handle_vblank(dev_priv, pipe);
2127
2128		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2129			flip_done_handler(dev_priv, pipe);
2130	}
2131
2132	/* check event from PCH */
2133	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2134		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2135
2136		cpt_irq_handler(dev_priv, pch_iir);
2137
2138		/* clear PCH hotplug event before clear CPU irq */
2139		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2140	}
2141}
2142
2143/*
2144 * To handle irqs with the minimum potential races with fresh interrupts, we:
2145 * 1 - Disable Master Interrupt Control.
2146 * 2 - Find the source(s) of the interrupt.
2147 * 3 - Clear the Interrupt Identity bits (IIR).
2148 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2149 * 5 - Re-enable Master Interrupt Control.
2150 */
2151static irqreturn_t ilk_irq_handler(int irq, void *arg)
2152{
2153	struct drm_i915_private *i915 = arg;
2154	void __iomem * const regs = i915->uncore.regs;
2155	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2156	irqreturn_t ret = IRQ_NONE;
2157
2158	if (unlikely(!intel_irqs_enabled(i915)))
2159		return IRQ_NONE;
2160
2161	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2162	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2163
2164	/* disable master interrupt before clearing iir  */
2165	de_ier = raw_reg_read(regs, DEIER);
2166	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2167
2168	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2169	 * interrupts will be stored on its back queue, and then we'll be
2170	 * able to process them after we restore SDEIER (as soon as we restore
2171	 * it, we'll get an interrupt if SDEIIR still has something to process
2172	 * due to its back queue). */
2173	if (!HAS_PCH_NOP(i915)) {
2174		sde_ier = raw_reg_read(regs, SDEIER);
2175		raw_reg_write(regs, SDEIER, 0);
2176	}
2177
2178	/* Find, clear, then process each source of interrupt */
2179
2180	gt_iir = raw_reg_read(regs, GTIIR);
2181	if (gt_iir) {
2182		raw_reg_write(regs, GTIIR, gt_iir);
2183		if (GRAPHICS_VER(i915) >= 6)
2184			gen6_gt_irq_handler(to_gt(i915), gt_iir);
2185		else
2186			gen5_gt_irq_handler(to_gt(i915), gt_iir);
2187		ret = IRQ_HANDLED;
2188	}
2189
2190	de_iir = raw_reg_read(regs, DEIIR);
2191	if (de_iir) {
2192		raw_reg_write(regs, DEIIR, de_iir);
2193		if (DISPLAY_VER(i915) >= 7)
2194			ivb_display_irq_handler(i915, de_iir);
2195		else
2196			ilk_display_irq_handler(i915, de_iir);
2197		ret = IRQ_HANDLED;
2198	}
2199
2200	if (GRAPHICS_VER(i915) >= 6) {
2201		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2202		if (pm_iir) {
2203			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2204			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
2205			ret = IRQ_HANDLED;
2206		}
2207	}
2208
2209	raw_reg_write(regs, DEIER, de_ier);
2210	if (sde_ier)
2211		raw_reg_write(regs, SDEIER, sde_ier);
2212
2213	pmu_irq_stats(i915, ret);
2214
2215	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2216	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2217
2218	return ret;
2219}
2220
2221static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2222				u32 hotplug_trigger)
2223{
2224	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2225
2226	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
2227
2228	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2229			   hotplug_trigger, dig_hotplug_reg,
2230			   dev_priv->display.hotplug.hpd,
2231			   bxt_port_hotplug_long_detect);
2232
2233	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2234}
2235
2236static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2237{
2238	u32 pin_mask = 0, long_mask = 0;
2239	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2240	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2241
2242	if (trigger_tc) {
2243		u32 dig_hotplug_reg;
2244
2245		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
2246
2247		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2248				   trigger_tc, dig_hotplug_reg,
2249				   dev_priv->display.hotplug.hpd,
2250				   gen11_port_hotplug_long_detect);
2251	}
2252
2253	if (trigger_tbt) {
2254		u32 dig_hotplug_reg;
2255
2256		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
2257
2258		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2259				   trigger_tbt, dig_hotplug_reg,
2260				   dev_priv->display.hotplug.hpd,
2261				   gen11_port_hotplug_long_detect);
2262	}
2263
2264	if (pin_mask)
2265		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2266	else
2267		drm_err(&dev_priv->drm,
2268			"Unexpected DE HPD interrupt 0x%08x\n", iir);
2269}
2270
2271static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2272{
2273	u32 mask;
2274
2275	if (DISPLAY_VER(dev_priv) >= 13)
2276		return TGL_DE_PORT_AUX_DDIA |
2277			TGL_DE_PORT_AUX_DDIB |
2278			TGL_DE_PORT_AUX_DDIC |
2279			XELPD_DE_PORT_AUX_DDID |
2280			XELPD_DE_PORT_AUX_DDIE |
2281			TGL_DE_PORT_AUX_USBC1 |
2282			TGL_DE_PORT_AUX_USBC2 |
2283			TGL_DE_PORT_AUX_USBC3 |
2284			TGL_DE_PORT_AUX_USBC4;
2285	else if (DISPLAY_VER(dev_priv) >= 12)
2286		return TGL_DE_PORT_AUX_DDIA |
2287			TGL_DE_PORT_AUX_DDIB |
2288			TGL_DE_PORT_AUX_DDIC |
2289			TGL_DE_PORT_AUX_USBC1 |
2290			TGL_DE_PORT_AUX_USBC2 |
2291			TGL_DE_PORT_AUX_USBC3 |
2292			TGL_DE_PORT_AUX_USBC4 |
2293			TGL_DE_PORT_AUX_USBC5 |
2294			TGL_DE_PORT_AUX_USBC6;
2295
2296
2297	mask = GEN8_AUX_CHANNEL_A;
2298	if (DISPLAY_VER(dev_priv) >= 9)
2299		mask |= GEN9_AUX_CHANNEL_B |
2300			GEN9_AUX_CHANNEL_C |
2301			GEN9_AUX_CHANNEL_D;
2302
2303	if (DISPLAY_VER(dev_priv) == 11) {
2304		mask |= ICL_AUX_CHANNEL_F;
2305		mask |= ICL_AUX_CHANNEL_E;
2306	}
2307
2308	return mask;
2309}
2310
2311static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2312{
2313	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2314		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2315	else if (DISPLAY_VER(dev_priv) >= 11)
2316		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2317	else if (DISPLAY_VER(dev_priv) >= 9)
2318		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2319	else
2320		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2321}
2322
2323static void
2324gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2325{
2326	bool found = false;
2327
2328	if (iir & GEN8_DE_MISC_GSE) {
2329		intel_opregion_asle_intr(dev_priv);
2330		found = true;
2331	}
2332
2333	if (iir & GEN8_DE_EDP_PSR) {
2334		struct intel_encoder *encoder;
2335		u32 psr_iir;
2336		i915_reg_t iir_reg;
2337
2338		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2339			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2340
2341			if (DISPLAY_VER(dev_priv) >= 12)
2342				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2343			else
2344				iir_reg = EDP_PSR_IIR;
2345
2346			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
2347
2348			if (psr_iir)
2349				found = true;
2350
2351			intel_psr_irq_handler(intel_dp, psr_iir);
2352
2353			/* prior to GEN12 there is only one EDP PSR */
2354			if (DISPLAY_VER(dev_priv) < 12)
2355				break;
2356		}
2357	}
2358
2359	if (!found)
2360		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2361}
2362
2363static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2364					   u32 te_trigger)
2365{
2366	enum pipe pipe = INVALID_PIPE;
2367	enum transcoder dsi_trans;
2368	enum port port;
2369	u32 val, tmp;
2370
2371	/*
2372	 * In case of dual link, TE comes from DSI_1;
2373	 * this is to check if dual link is enabled.
2374	 */
2375	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2376	val &= PORT_SYNC_MODE_ENABLE;
2377
2378	/*
2379	 * if dual link is enabled, then read DSI_0
2380	 * transcoder registers
2381	 */
2382	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2383						  PORT_A : PORT_B;
2384	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2385
2386	/* Check if DSI configured in command mode */
2387	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2388	val = val & OP_MODE_MASK;
2389
2390	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2391		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2392		return;
2393	}
2394
2395	/* Get PIPE for handling VBLANK event */
2396	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2397	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2398	case TRANS_DDI_EDP_INPUT_A_ON:
2399		pipe = PIPE_A;
2400		break;
2401	case TRANS_DDI_EDP_INPUT_B_ONOFF:
2402		pipe = PIPE_B;
2403		break;
2404	case TRANS_DDI_EDP_INPUT_C_ONOFF:
2405		pipe = PIPE_C;
2406		break;
2407	default:
2408		drm_err(&dev_priv->drm, "Invalid PIPE\n");
2409		return;
2410	}
2411
2412	intel_handle_vblank(dev_priv, pipe);
2413
2414	/* clear TE in dsi IIR */
2415	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2416	tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2417}
2418
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
{
	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;

	if (DISPLAY_VER(dev_priv) >= 13)
		mask |= XELPD_PIPE_SOFT_UNDERRUN |
			XELPD_PIPE_HARD_UNDERRUN;

	return mask;
}

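/*
 * Main display engine interrupt dispatch for gen8+: for each child unit
 * flagged in the master control register (misc, HPD, port, per-pipe, PCH),
 * read and ack its IIR and fan the events out to the individual handlers.
 */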
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
2467				"The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (DISPLAY_VER(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		}
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
			flip_done_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

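/* Disable the master interrupt and return the latched level indications. */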
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate a new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

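/*
 * Top-level gen8 interrupt handler: disable the master interrupt, service
 * the GT first, then the display (with RPM wakeref asserts suppressed), and
 * finally re-enable the master bit so new interrupts can be raised.
 */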
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
	void __iomem * const regs = i915->uncore.regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}

static void
gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(i915);
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate a new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

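/* Service the display half of the gen11 master interrupt via the gen8 DE path. */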
static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

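/*
 * DG1 adds a per-tile master interrupt register on top of the gen11 scheme;
 * only tile 0 is supported for now.
 */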
static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}

static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = gt->uncore->regs;
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	dg1_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

/*
 * Called from drm generic code, passed 'crtc' which
 * we use as a pipe index.
 */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}

int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, the frame counter can get stuck when
	 * PSR is active, as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

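/*
 * Enable/disable the DSI TE interrupt for the transcoder driving this CRTC.
 * Returns false if the CRTC does not use DSI TE at all, so the caller can
 * fall back to regular vblank interrupts.
 */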
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum port port;

	if (!(intel_crtc->mode_flags &
	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
			 enable ? 0 : DSI_TE_EVENT);

	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);

	return true;
}

int bdw_enable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, true))
		return 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, the frame counter can get stuck when
	 * PSR is active, as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(&crtc->base);

	return 0;
}

/*
 * Called from drm generic code, passed 'crtc' which
 * we use as a pipe index.
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->vblank_enabled == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void bdw_disable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, false))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

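/* Reset the south display engine (PCH) interrupt registers. */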
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	dev_priv->irq_mask = ~0u;

	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(uncore->regs);

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(dev_priv);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	dg1_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

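/*
 * Re-initialize the per-pipe DE interrupt registers when a display power
 * well comes back up; the pre-disable hook below resets them before the
 * power well goes down.
 */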
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 extra_ier = GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(to_gt(dev_priv));

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

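/*
 * The *_hotplug_enables() callbacks below map an HPD pin to the platform
 * specific hotplug enable bits; intel_hpd_hotplug_enables() uses them to
 * build the full hotplug control register value.
 */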
static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		/*
		 * When CPU and PCH are on the same package, port A
		 * HPD must be enabled in both north and south.
		 */
		return HAS_PCH_LPT_LP(i915) ?
			PORTA_HOTPLUG_ENABLE : 0;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE |
			PORTB_PULSE_DURATION_2ms;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE |
			PORTC_PULSE_DURATION_2ms;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE |
			PORTD_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
			 PORTA_HOTPLUG_ENABLE |
			 PORTB_HOTPLUG_ENABLE |
			 PORTC_HOTPLUG_ENABLE |
			 PORTD_HOTPLUG_ENABLE |
			 PORTB_PULSE_DURATION_MASK |
			 PORTC_PULSE_DURATION_MASK |
			 PORTD_PULSE_DURATION_MASK,
			 intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
				   enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
				  enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return ICP_TC_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
			 intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
}

static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
			 ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
			 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
			 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
			 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
			 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
			 ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
			 intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);

	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_ddi_hpd_detection_setup(dev_priv);
	icp_tc_hpd_detection_setup(dev_priv);
}

static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
				 enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return GEN11_HOTPLUG_CTL_ENABLE(pin);
	default:
		return 0;
	}
}

static void dg1_hpd_invert(struct drm_i915_private *i915)
{
	u32 val = (INVERT_DDIA_HPD |
		   INVERT_DDIB_HPD |
		   INVERT_DDIC_HPD |
		   INVERT_DDID_HPD);
	intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
}

static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	dg1_hpd_invert(dev_priv);
	icp_hpd_irq_setup(dev_priv);
}

static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
}

static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);

	intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
			 ~enabled_irqs & hotplug_irqs);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);

	gen11_tc_hpd_detection_setup(dev_priv);
	gen11_tbt_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv);
}

static u32 spt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return PORTA_HOTPLUG_ENABLE;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE;
	default:
		return 0;
	}
}

static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
				enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_E:
		return PORTE_HOTPLUG_ENABLE;
	default:
		return 0;
	}
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
				 CHASSIS_CLK_REQ_DURATION(0xf));
	}

	/* Enable digital hotplug on the PCH */
	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
			 PORTA_HOTPLUG_ENABLE |
			 PORTB_HOTPLUG_ENABLE |
			 PORTC_HOTPLUG_ENABLE |
			 PORTD_HOTPLUG_ENABLE,
			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));

	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return DIGITAL_PORTA_HOTPLUG_ENABLE |
			DIGITAL_PORTA_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
			 DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
			 intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);

	if (DISPLAY_VER(dev_priv) >= 8)
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	else
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	u32 hotplug;

	switch (pin) {
	case HPD_PORT_A:
		hotplug = PORTA_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
			hotplug |= BXT_DDIA_HPD_INVERT;
		return hotplug;
	case HPD_PORT_B:
		hotplug = PORTB_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
			hotplug |= BXT_DDIB_HPD_INVERT;
		return hotplug;
	case HPD_PORT_C:
		hotplug = PORTC_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
			hotplug |= BXT_DDIC_HPD_INVERT;
		return hotplug;
	default:
		return 0;
	}
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
			 PORTA_HOTPLUG_ENABLE |
			 PORTB_HOTPLUG_ENABLE |
			 PORTC_HOTPLUG_ENABLE |
			 BXT_DDI_HPD_INVERT_MASK,
			 intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	bxt_hpd_detection_setup(dev_priv);
}

3594/*
3595 * SDEIER is also touched by the interrupt handler to work around missed PCH
3596 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3597 * instead we unconditionally enable all PCH interrupt sources here, but then
3598 * only unmask them as needed with SDEIMR.
3599 *
3600 * Note that we currently do this after installing the interrupt handler,
3601 * but before we enable the master interrupt. That should be sufficient
3602 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3603 * interrupts could still race.
3604 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (GRAPHICS_VER(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

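/*
 * Program the gen8+ display engine interrupt registers: per-pipe, port,
 * misc and (gen11+) HPD masks, taking the display version and the state of
 * the display power domains into account.
 */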
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (DISPLAY_VER(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (DISPLAY_VER(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	if (HAS_DISPLAY(dev_priv)) {
		icp_irq_postinstall(dev_priv);
		gen8_de_irq_postinstall(dev_priv);
		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
				   GEN11_DISPLAY_IRQ_ENABLE);
	}

	dg1_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	gen2_irq_reset(uncore);
	dev_priv->irq_mask = ~0u;
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to keep the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

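/*
 * Read and clear the error identity register (EIR). Bits that refuse to
 * clear are masked off in EMR so they can't keep the master error interrupt
 * permanently asserted; see the comment in the function body.
 */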
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

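/*
 * Legacy gen2 interrupt handler: a single 16-bit IIR covers pipe events,
 * user interrupts and the master error bit, so ack everything up front and
 * then dispatch to the individual handlers.
 */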
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
					  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to keep the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	intel_uncore_write(uncore, EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

4202	/* Interrupt setup is already guaranteed to be single-threaded; this is
4203	 * just to make the assert_spin_locked check happy. */
4204	spin_lock_irq(&dev_priv->irq_lock);
4205	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4206	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4207	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4208	spin_unlock_irq(&dev_priv->irq_lock);
4209
4210	i915_enable_asle_pipestat(dev_priv);
4211}
4212
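/*
 * Program the hotplug detection enables for gmch platforms; the
 * caller must hold dev_priv->irq_lock, as asserted below.
 */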
4213static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4214{
4215	u32 hotplug_en;
4216
4217	lockdep_assert_held(&dev_priv->irq_lock);
4218
4219	/* Note HDMI and DP share hotplug bits */
4220	/* enable bits are the same for all generations */
4221	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4222	/* Programming the CRT detection parameters tends to generate
4223	 * a spurious hotplug event about three seconds later. So just
4224	 * do it once.
4225	 */
4226	if (IS_G4X(dev_priv))
4227		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4228	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4229
4230	/* Ignore TV since it's buggy */
4231	i915_hotplug_interrupt_update_locked(dev_priv,
4232					     HOTPLUG_INT_EN_MASK |
4233					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4234					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4235					     hotplug_en);
4236}
4237
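/*
 * Gen4 variant of the interrupt handler above: hotplug is always
 * present, and on G4x user interrupts for the BSD ring arrive in the
 * high bits of IIR.
 */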
4238static irqreturn_t i965_irq_handler(int irq, void *arg)
4239{
4240	struct drm_i915_private *dev_priv = arg;
4241	irqreturn_t ret = IRQ_NONE;
4242
4243	if (!intel_irqs_enabled(dev_priv))
4244		return IRQ_NONE;
4245
4246	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4247	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4248
4249	do {
4250		u32 pipe_stats[I915_MAX_PIPES] = {};
4251		u32 eir = 0, eir_stuck = 0;
4252		u32 hotplug_status = 0;
4253		u32 iir;
4254
4255		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4256		if (iir == 0)
4257			break;
4258
4259		ret = IRQ_HANDLED;
4260
4261		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4262			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4263
4264		/* Call regardless, as some status bits might not be
4265		 * signalled in iir */
4266		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4267
4268		if (iir & I915_MASTER_ERROR_INTERRUPT)
4269			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4270
4271		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4272
4273		if (iir & I915_USER_INTERRUPT)
4274			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4275					    iir);
4276
4277		if (iir & I915_BSD_USER_INTERRUPT)
4278			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4279					    iir >> 25);
4280
4281		if (iir & I915_MASTER_ERROR_INTERRUPT)
4282			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4283
4284		if (hotplug_status)
4285			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4286
4287		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4288	} while (0);
4289
4290	pmu_irq_stats(dev_priv, ret);
4291
4292	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4293
4294	return ret;
4295}
4296
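/*
 * Per-platform hotplug vtable; HPD_FUNCS() stamps out one instance
 * per hpd_irq_setup() implementation, selected in intel_irq_init().
 */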
4297struct intel_hotplug_funcs {
4298	void (*hpd_irq_setup)(struct drm_i915_private *i915);
4299};
4300
4301#define HPD_FUNCS(platform)					 \
4302static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
4303	.hpd_irq_setup = platform##_hpd_irq_setup,		 \
4304}
4305
4306HPD_FUNCS(i915);
4307HPD_FUNCS(dg1);
4308HPD_FUNCS(gen11);
4309HPD_FUNCS(bxt);
4310HPD_FUNCS(icp);
4311HPD_FUNCS(spt);
4312HPD_FUNCS(ilk);
4313#undef HPD_FUNCS
4314
4315void intel_hpd_irq_setup(struct drm_i915_private *i915)
4316{
4317	if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
4318		i915->display.funcs.hotplug->hpd_irq_setup(i915);
4319}
4320
4321/**
4322 * intel_irq_init - initializes irq support
4323 * @dev_priv: i915 device instance
4324 *
4325 * This function initializes all the irq support, including work items,
4326 * timers and all the vtables. It does not set up the interrupt itself.
4327 */
4328void intel_irq_init(struct drm_i915_private *dev_priv)
4329{
4330	int i;
4331
4332	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4333	for (i = 0; i < MAX_L3_SLICES; ++i)
4334		dev_priv->l3_parity.remap_info[i] = NULL;
4335
4336	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the PM reg */
4337	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
4338		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
4339
4340	if (!HAS_DISPLAY(dev_priv))
4341		return;
4342
4343	intel_hpd_init_pins(dev_priv);
4344
4345	intel_hpd_init_early(dev_priv);
4346
4347	dev_priv->drm.vblank_disable_immediate = true;
4348
4349	/* Most platforms treat the display irq block as an always-on
4350	 * power domain. vlv/chv can disable it at runtime and need
4351	 * special care to avoid writing any of the display block registers
4352	 * outside of the power domain. In that case we defer setting up the
4353	 * display irqs to the runtime pm code.
4354	 */
4355	dev_priv->display_irqs_enabled = true;
4356	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4357		dev_priv->display_irqs_enabled = false;
4358
4359	if (HAS_GMCH(dev_priv)) {
4360		if (I915_HAS_HOTPLUG(dev_priv))
4361			dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
4362	} else {
4363		if (HAS_PCH_DG2(dev_priv))
4364			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4365		else if (HAS_PCH_DG1(dev_priv))
4366			dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
4367		else if (DISPLAY_VER(dev_priv) >= 11)
4368			dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
4369		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4370			dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
4371		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4372			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4373		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4374			dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
4375		else
4376			dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
4377	}
4378}
4379
4380/**
4381 * intel_irq_fini - deinitializes IRQ support
4382 * @i915: i915 device instance
4383 *
4384 * This function deinitializes all the IRQ support.
4385 */
4386void intel_irq_fini(struct drm_i915_private *i915)
4387{
4388	int i;
4389
4390	for (i = 0; i < MAX_L3_SLICES; ++i)
4391		kfree(i915->l3_parity.remap_info[i]);
4392}
4393
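/*
 * Select the top-level interrupt handler for the platform: gmch
 * platforms are distinguished by graphics version, the rest by
 * graphics IP version.
 */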
4394static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4395{
4396	if (HAS_GMCH(dev_priv)) {
4397		if (IS_CHERRYVIEW(dev_priv))
4398			return cherryview_irq_handler;
4399		else if (IS_VALLEYVIEW(dev_priv))
4400			return valleyview_irq_handler;
4401		else if (GRAPHICS_VER(dev_priv) == 4)
4402			return i965_irq_handler;
4403		else if (GRAPHICS_VER(dev_priv) == 3)
4404			return i915_irq_handler;
4405		else
4406			return i8xx_irq_handler;
4407	} else {
4408		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4409			return dg1_irq_handler;
4410		else if (GRAPHICS_VER(dev_priv) >= 11)
4411			return gen11_irq_handler;
4412		else if (GRAPHICS_VER(dev_priv) >= 8)
4413			return gen8_irq_handler;
4414		else
4415			return ilk_irq_handler;
4416	}
4417}
4418
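/*
 * Platform dispatch for the irq reset path, mirroring the handler
 * selection above.
 */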
4419static void intel_irq_reset(struct drm_i915_private *dev_priv)
4420{
4421	if (HAS_GMCH(dev_priv)) {
4422		if (IS_CHERRYVIEW(dev_priv))
4423			cherryview_irq_reset(dev_priv);
4424		else if (IS_VALLEYVIEW(dev_priv))
4425			valleyview_irq_reset(dev_priv);
4426		else if (GRAPHICS_VER(dev_priv) == 4)
4427			i965_irq_reset(dev_priv);
4428		else if (GRAPHICS_VER(dev_priv) == 3)
4429			i915_irq_reset(dev_priv);
4430		else
4431			i8xx_irq_reset(dev_priv);
4432	} else {
4433		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4434			dg1_irq_reset(dev_priv);
4435		else if (GRAPHICS_VER(dev_priv) >= 11)
4436			gen11_irq_reset(dev_priv);
4437		else if (GRAPHICS_VER(dev_priv) >= 8)
4438			gen8_irq_reset(dev_priv);
4439		else
4440			ilk_irq_reset(dev_priv);
4441	}
4442}
4443
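/*
 * Platform dispatch for the postinstall (re-enable) path, again
 * mirroring the handler selection above.
 */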
4444static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4445{
4446	if (HAS_GMCH(dev_priv)) {
4447		if (IS_CHERRYVIEW(dev_priv))
4448			cherryview_irq_postinstall(dev_priv);
4449		else if (IS_VALLEYVIEW(dev_priv))
4450			valleyview_irq_postinstall(dev_priv);
4451		else if (GRAPHICS_VER(dev_priv) == 4)
4452			i965_irq_postinstall(dev_priv);
4453		else if (GRAPHICS_VER(dev_priv) == 3)
4454			i915_irq_postinstall(dev_priv);
4455		else
4456			i8xx_irq_postinstall(dev_priv);
4457	} else {
4458		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4459			dg1_irq_postinstall(dev_priv);
4460		else if (GRAPHICS_VER(dev_priv) >= 11)
4461			gen11_irq_postinstall(dev_priv);
4462		else if (GRAPHICS_VER(dev_priv) >= 8)
4463			gen8_irq_postinstall(dev_priv);
4464		else
4465			ilk_irq_postinstall(dev_priv);
4466	}
4467}
4468
4469/**
4470 * intel_irq_install - enables the hardware interrupt
4471 * @dev_priv: i915 device instance
4472 *
4473 * This function enables hardware interrupt handling, but leaves hotplug
4474 * handling disabled. It is called after intel_irq_init().
4475 *
4476 * In the driver load and resume code we need working interrupts in a few
4477 * places but don't want to deal with the hassle of concurrent probe and
4478 * hotplug workers. Hence the split into a two-stage approach.
4479 */
4480int intel_irq_install(struct drm_i915_private *dev_priv)
4481{
4482	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4483	int ret;
4484
4485	/*
4486	 * We enable some interrupt sources in our postinstall hooks, so mark
4487	 * interrupts as enabled _before_ actually enabling them to avoid
4488	 * special cases in our ordering checks.
4489	 */
4490	dev_priv->runtime_pm.irqs_enabled = true;
4491
4492	dev_priv->irq_enabled = true;
4493
4494	intel_irq_reset(dev_priv);
4495
4496	ret = request_irq(irq, intel_irq_handler(dev_priv),
4497			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4498	if (ret < 0) {
4499		dev_priv->irq_enabled = false;
4500		return ret;
4501	}
4502
4503	intel_irq_postinstall(dev_priv);
4504
4505	return ret;
4506}
4507
4508/**
4509 * intel_irq_uninstall - finalizes all irq handling
4510 * @dev_priv: i915 device instance
4511 *
4512 * This stops interrupt and hotplug handling, and unregisters and frees all
4513 * resources acquired in the init functions.
4514 */
4515void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4516{
4517	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4518
4519	/*
4520	 * FIXME we can get called twice during driver probe
4521	 * error handling as well as during driver remove due to
4522	 * intel_modeset_driver_remove() calling us out of sequence.
4523	 * Would be nice if it didn't do that...
4524	 */
4525	if (!dev_priv->irq_enabled)
4526		return;
4527
4528	dev_priv->irq_enabled = false;
4529
4530	intel_irq_reset(dev_priv);
4531
4532	free_irq(irq, dev_priv);
4533
4534	intel_hpd_cancel_work(dev_priv);
4535	dev_priv->runtime_pm.irqs_enabled = false;
4536}
4537
4538/**
4539 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4540 * @dev_priv: i915 device instance
4541 *
4542 * This function is used to disable interrupts at runtime, both in the runtime
4543 * pm and the system suspend/resume code.
4544 */
4545void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4546{
4547	intel_irq_reset(dev_priv);
4548	dev_priv->runtime_pm.irqs_enabled = false;
4549	intel_synchronize_irq(dev_priv);
4550}
4551
4552/**
4553 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4554 * @dev_priv: i915 device instance
4555 *
4556 * This function is used to enable interrupts at runtime, both in the runtime
4557 * pm and the system suspend/resume code.
4558 */
4559void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4560{
4561	dev_priv->runtime_pm.irqs_enabled = true;
4562	intel_irq_reset(dev_priv);
4563	intel_irq_postinstall(dev_priv);
4564}
4565
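/* Checked by the irq handlers above to bail out once irqs are disabled. */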
4566bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4567{
4568	return dev_priv->runtime_pm.irqs_enabled;
4569}
4570
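/*
 * synchronize_irq() also waits for any threaded handler to complete,
 * whereas synchronize_hardirq() only waits for hard irq context and
 * may return while a threaded handler is still running.
 */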
4571void intel_synchronize_irq(struct drm_i915_private *i915)
4572{
4573	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4574}
4575
4576void intel_synchronize_hardirq(struct drm_i915_private *i915)
4577{
4578	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4579}