Linux Audio

Check our new training course

Loading...
v3.1
   1/*
   2 * Copyright © 2006-2007 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *	Eric Anholt <eric@anholt.net>
  25 */
  26
  27#include <linux/cpufreq.h>
  28#include <linux/module.h>
  29#include <linux/input.h>
  30#include <linux/i2c.h>
  31#include <linux/kernel.h>
  32#include <linux/slab.h>
  33#include <linux/vgaarb.h>
 
  34#include "drmP.h"
  35#include "intel_drv.h"
  36#include "i915_drm.h"
  37#include "i915_drv.h"
  38#include "i915_trace.h"
  39#include "drm_dp_helper.h"
  40
  41#include "drm_crtc_helper.h"
 
  42
/* True when any encoder on the local "crtc" variable is an eDP panel.
 * NOTE: this macro captures a variable named "crtc" from the call site. */
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
  49
typedef struct {
    /* given values */
    int n;
    int m1, m2;
    int p1, p2;
    /* derived values */
    int	dot;	/* resulting dot clock, same units as mode->clock (kHz) */
    int	vco;	/* VCO frequency derived from refclk, m and n */
    int	m;	/* effective m divider computed from m1/m2 */
    int	p;	/* effective post divider: p1 * p2 */
} intel_clock_t;

/* Inclusive [min, max] range for a single PLL divider. */
typedef struct {
    int	min, max;
} intel_range_t;

/* p2 selection rule: p2_slow below dot_limit, p2_fast at or above it. */
typedef struct {
    int	dot_limit;
    int	p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;
/* Per-platform/per-output PLL divider limits plus the search routine
 * used to pick dividers within those limits. */
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t	    p2;
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
		      int, int, intel_clock_t *);
};
  79
/* FDI */
#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */

/* Forward declarations of the per-platform PLL divider search routines,
 * referenced by the limit tables below. */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock);

static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock);

static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock);
  96
  97static inline u32 /* units of 100MHz */
  98intel_fdi_link_freq(struct drm_device *dev)
  99{
 100	if (IS_GEN5(dev)) {
 101		struct drm_i915_private *dev_priv = dev->dev_private;
 102		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
 103	} else
 104		return 27;
 105}
 106
/* Gen2 (i8xx) PLL limits for DVO outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3+ (i9xx) PLL limits for SDVO/other digital outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3+ (i9xx) PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
 162
 163
/* G4x PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x PLL limits for HDMI (also used for analog). */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x PLL limits for single-channel LVDS (dot_limit 0: p2 is always 14). */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x PLL limits for dual-channel LVDS (dot_limit 0: p2 is always 7). */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x PLL limits for DisplayPort; find_pll picks from fixed divider sets. */
static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

/* Pineview PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Pineview PLL limits for LVDS panels (see SDVO table for m1/m2 notes). */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
 267
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Single-channel LVDS, 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Dual-channel LVDS, 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2,.max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2,.max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};
 343
/* Ironlake DisplayPort; find_pll picks from fixed divider sets. */
static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};
 357
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 358static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 359						int refclk)
 360{
 361	struct drm_device *dev = crtc->dev;
 362	struct drm_i915_private *dev_priv = dev->dev_private;
 363	const intel_limit_t *limit;
 364
 365	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 366		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
 367		    LVDS_CLKB_POWER_UP) {
 368			/* LVDS dual channel */
 369			if (refclk == 100000)
 370				limit = &intel_limits_ironlake_dual_lvds_100m;
 371			else
 372				limit = &intel_limits_ironlake_dual_lvds;
 373		} else {
 374			if (refclk == 100000)
 375				limit = &intel_limits_ironlake_single_lvds_100m;
 376			else
 377				limit = &intel_limits_ironlake_single_lvds;
 378		}
 379	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
 380			HAS_eDP)
 381		limit = &intel_limits_ironlake_display_port;
 382	else
 383		limit = &intel_limits_ironlake_dac;
 384
 385	return limit;
 386}
 387
 388static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
 389{
 390	struct drm_device *dev = crtc->dev;
 391	struct drm_i915_private *dev_priv = dev->dev_private;
 392	const intel_limit_t *limit;
 393
 394	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 395		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
 396		    LVDS_CLKB_POWER_UP)
 397			/* LVDS with dual channel */
 398			limit = &intel_limits_g4x_dual_channel_lvds;
 399		else
 400			/* LVDS with dual channel */
 401			limit = &intel_limits_g4x_single_channel_lvds;
 402	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
 403		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
 404		limit = &intel_limits_g4x_hdmi;
 405	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
 406		limit = &intel_limits_g4x_sdvo;
 407	} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
 408		limit = &intel_limits_g4x_display_port;
 409	} else /* The option is for other outputs */
 410		limit = &intel_limits_i9xx_sdvo;
 411
 412	return limit;
 413}
 414
 415static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
 416{
 417	struct drm_device *dev = crtc->dev;
 418	const intel_limit_t *limit;
 419
 420	if (HAS_PCH_SPLIT(dev))
 421		limit = intel_ironlake_limit(crtc, refclk);
 422	else if (IS_G4X(dev)) {
 423		limit = intel_g4x_limit(crtc);
 424	} else if (IS_PINEVIEW(dev)) {
 425		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 426			limit = &intel_limits_pineview_lvds;
 427		else
 428			limit = &intel_limits_pineview_sdvo;
 429	} else if (!IS_GEN2(dev)) {
 430		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 431			limit = &intel_limits_i9xx_lvds;
 432		else
 433			limit = &intel_limits_i9xx_sdvo;
 434	} else {
 435		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 436			limit = &intel_limits_i8xx_lvds;
 437		else
 438			limit = &intel_limits_i8xx_dvo;
 439	}
 440	return limit;
 441}
 442
 443/* m1 is reserved as 0 in Pineview, n is a ring counter */
 444static void pineview_clock(int refclk, intel_clock_t *clock)
 445{
 446	clock->m = clock->m2 + 2;
 447	clock->p = clock->p1 * clock->p2;
 448	clock->vco = refclk * clock->m / clock->n;
 449	clock->dot = clock->vco / clock->p;
 450}
 451
 452static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
 453{
 454	if (IS_PINEVIEW(dev)) {
 455		pineview_clock(refclk, clock);
 456		return;
 457	}
 458	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
 459	clock->p = clock->p1 * clock->p2;
 460	clock->vco = refclk * clock->m / (clock->n + 2);
 461	clock->dot = clock->vco / clock->p;
 462}
 463
 464/**
 465 * Returns whether any output on the specified pipe is of the specified type
 466 */
 467bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
 468{
 469	struct drm_device *dev = crtc->dev;
 470	struct drm_mode_config *mode_config = &dev->mode_config;
 471	struct intel_encoder *encoder;
 472
 473	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 474		if (encoder->base.crtc == crtc && encoder->type == type)
 475			return true;
 476
 477	return false;
 478}
 479
/* NOTE: expands to a "return false" from the *calling* function. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.  Each divider (and each derived value) must fall
 * inside the inclusive ranges of @limit.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid ("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid ("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid ("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid ("m1 out of range\n");
	/* Pineview keeps m1 at 0 (single combined m divider), so the
	 * m1 > m2 requirement does not apply there. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid ("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid ("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid ("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid ("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid ("dot out of range\n");

	return true;
}
 514
/*
 * Exhaustively search the divider ranges in @limit for the combination
 * whose computed dot clock is closest to @target (kHz), storing the
 * winner in @best_clock.  Returns true if any valid combination beat
 * the initial error bound.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;	/* worst acceptable error: any match improves it */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* p2 is picked from the target dot clock alone. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset (best_clock, 0, sizeof (*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					/* Keep the candidate closest to target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only moves below its initial value when a match was kept. */
	return (err != target);
}
 577
/*
 * G4x/Ironlake divider search: like intel_find_best_PLL but iterates in
 * hardware-preferred order (small n, large m1/m2/p1) and accepts the
 * first candidates within ~0.585% of @target, tightening max_n as
 * matches are found.  Returns true if any candidate qualified.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		/* Trust the panel's current single/dual channel state. */
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Don't revisit larger n values. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
 641
 642static bool
 643intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 644			   int target, int refclk, intel_clock_t *best_clock)
 
 645{
 646	struct drm_device *dev = crtc->dev;
 647	intel_clock_t clock;
 648
 649	if (target < 200000) {
 650		clock.n = 1;
 651		clock.p1 = 2;
 652		clock.p2 = 10;
 653		clock.m1 = 12;
 654		clock.m2 = 9;
 655	} else {
 656		clock.n = 2;
 657		clock.p1 = 1;
 658		clock.p2 = 10;
 659		clock.m1 = 14;
 660		clock.m2 = 8;
 661	}
 662	intel_clock(dev, refclk, &clock);
 663	memcpy(best_clock, &clock, sizeof(intel_clock_t));
 664	return true;
 665}
 666
 667/* DisplayPort has only two frequencies, 162MHz and 270MHz */
 668static bool
 669intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 670		      int target, int refclk, intel_clock_t *best_clock)
 
 671{
 672	intel_clock_t clock;
 673	if (target < 200000) {
 674		clock.p1 = 2;
 675		clock.p2 = 10;
 676		clock.n = 2;
 677		clock.m1 = 23;
 678		clock.m2 = 8;
 679	} else {
 680		clock.p1 = 1;
 681		clock.p2 = 10;
 682		clock.n = 1;
 683		clock.m1 = 14;
 684		clock.m2 = 2;
 685	}
 686	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
 687	clock.p = (clock.p1 * clock.p2);
 688	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
 689	clock.vco = 0;
 690	memcpy(best_clock, &clock, sizeof(intel_clock_t));
 691	return true;
 692}
 693
 
 
 
 
 
 
 
 
 
 
 
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.  Polls the pipe status register; gives up after 50ms.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
 729
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle: poll the scanline
		 * register until two reads 5ms apart match, or 100ms pass. */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
 773
 774static const char *state_string(bool enabled)
 775{
 776	return enabled ? "on" : "off";
 777}
 778
 779/* Only for pre-ILK configs */
 780static void assert_pll(struct drm_i915_private *dev_priv,
 781		       enum pipe pipe, bool state)
 782{
 783	int reg;
 784	u32 val;
 785	bool cur_state;
 786
 787	reg = DPLL(pipe);
 788	val = I915_READ(reg);
 789	cur_state = !!(val & DPLL_VCO_ENABLE);
 790	WARN(cur_state != state,
 791	     "PLL state assertion failure (expected %s, current %s)\n",
 792	     state_string(state), state_string(cur_state));
 793}
 794#define assert_pll_enabled(d, p) assert_pll(d, p, true)
 795#define assert_pll_disabled(d, p) assert_pll(d, p, false)
 796
 797/* For ILK+ */
 798static void assert_pch_pll(struct drm_i915_private *dev_priv,
 799			   enum pipe pipe, bool state)
 
 
 800{
 801	int reg;
 802	u32 val;
 803	bool cur_state;
 804
 805	reg = PCH_DPLL(pipe);
 806	val = I915_READ(reg);
 
 
 
 
 
 
 
 
 807	cur_state = !!(val & DPLL_VCO_ENABLE);
 808	WARN(cur_state != state,
 809	     "PCH PLL state assertion failure (expected %s, current %s)\n",
 810	     state_string(state), state_string(cur_state));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 811}
 812#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
 813#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
 814
 815static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 816			  enum pipe pipe, bool state)
 817{
 818	int reg;
 819	u32 val;
 820	bool cur_state;
 821
 822	reg = FDI_TX_CTL(pipe);
 823	val = I915_READ(reg);
 824	cur_state = !!(val & FDI_TX_ENABLE);
 
 
 
 
 
 
 
 825	WARN(cur_state != state,
 826	     "FDI TX state assertion failure (expected %s, current %s)\n",
 827	     state_string(state), state_string(cur_state));
 828}
 829#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
 830#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
 831
 832static void assert_fdi_rx(struct drm_i915_private *dev_priv,
 833			  enum pipe pipe, bool state)
 834{
 835	int reg;
 836	u32 val;
 837	bool cur_state;
 838
 839	reg = FDI_RX_CTL(pipe);
 840	val = I915_READ(reg);
 841	cur_state = !!(val & FDI_RX_ENABLE);
 
 
 
 
 
 842	WARN(cur_state != state,
 843	     "FDI RX state assertion failure (expected %s, current %s)\n",
 844	     state_string(state), state_string(cur_state));
 845}
 846#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
 847#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
 848
 849static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
 850				      enum pipe pipe)
 851{
 852	int reg;
 853	u32 val;
 854
 855	/* ILK FDI PLL is always enabled */
 856	if (dev_priv->info->gen == 5)
 857		return;
 858
 
 
 
 
 859	reg = FDI_TX_CTL(pipe);
 860	val = I915_READ(reg);
 861	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
 862}
 863
 864static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
 865				      enum pipe pipe)
 866{
 867	int reg;
 868	u32 val;
 869
 
 
 
 
 870	reg = FDI_RX_CTL(pipe);
 871	val = I915_READ(reg);
 872	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
 873}
 874
 875static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 876				  enum pipe pipe)
 877{
 878	int pp_reg, lvds_reg;
 879	u32 val;
 880	enum pipe panel_pipe = PIPE_A;
 881	bool locked = true;
 882
 883	if (HAS_PCH_SPLIT(dev_priv->dev)) {
 884		pp_reg = PCH_PP_CONTROL;
 885		lvds_reg = PCH_LVDS;
 886	} else {
 887		pp_reg = PP_CONTROL;
 888		lvds_reg = LVDS;
 889	}
 890
 891	val = I915_READ(pp_reg);
 892	if (!(val & PANEL_POWER_ON) ||
 893	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
 894		locked = false;
 895
 896	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
 897		panel_pipe = PIPE_B;
 898
 899	WARN(panel_pipe == pipe && locked,
 900	     "panel assertion failure, pipe %c regs locked\n",
 901	     pipe_name(pipe));
 902}
 903
 904static void assert_pipe(struct drm_i915_private *dev_priv,
 905			enum pipe pipe, bool state)
 906{
 907	int reg;
 908	u32 val;
 909	bool cur_state;
 910
 
 
 
 
 911	reg = PIPECONF(pipe);
 912	val = I915_READ(reg);
 913	cur_state = !!(val & PIPECONF_ENABLE);
 914	WARN(cur_state != state,
 915	     "pipe %c assertion failure (expected %s, current %s)\n",
 916	     pipe_name(pipe), state_string(state), state_string(cur_state));
 917}
 918#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 919#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 920
 921static void assert_plane_enabled(struct drm_i915_private *dev_priv,
 922				 enum plane plane)
 923{
 924	int reg;
 925	u32 val;
 
 926
 927	reg = DSPCNTR(plane);
 928	val = I915_READ(reg);
 929	WARN(!(val & DISPLAY_PLANE_ENABLE),
 930	     "plane %c assertion failure, should be active but is disabled\n",
 931	     plane_name(plane));
 
 932}
 933
 
 
 
 934static void assert_planes_disabled(struct drm_i915_private *dev_priv,
 935				   enum pipe pipe)
 936{
 937	int reg, i;
 938	u32 val;
 939	int cur_pipe;
 940
 941	/* Planes are fixed to pipes on ILK+ */
 942	if (HAS_PCH_SPLIT(dev_priv->dev))
 
 
 
 
 
 943		return;
 
 944
 945	/* Need to check both planes against the pipe */
 946	for (i = 0; i < 2; i++) {
 947		reg = DSPCNTR(i);
 948		val = I915_READ(reg);
 949		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
 950			DISPPLANE_SEL_PIPE_SHIFT;
 951		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
 952		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
 953		     plane_name(i), pipe_name(pipe));
 954	}
 955}
 956
 957static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
 958{
 959	u32 val;
 960	bool enabled;
 961
 
 
 
 
 
 962	val = I915_READ(PCH_DREF_CONTROL);
 963	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
 964			    DREF_SUPERSPREAD_SOURCE_MASK));
 965	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
 966}
 967
 968static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
 969				       enum pipe pipe)
 970{
 971	int reg;
 972	u32 val;
 973	bool enabled;
 974
 975	reg = TRANSCONF(pipe);
 976	val = I915_READ(reg);
 977	enabled = !!(val & TRANS_ENABLE);
 978	WARN(enabled,
 979	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
 980	     pipe_name(pipe));
 981}
 982
 983static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
 984			    enum pipe pipe, u32 port_sel, u32 val)
 985{
 986	if ((val & DP_PORT_EN) == 0)
 987		return false;
 988
 989	if (HAS_PCH_CPT(dev_priv->dev)) {
 990		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
 991		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
 992		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
 993			return false;
 994	} else {
 995		if ((val & DP_PIPE_MASK) != (pipe << 30))
 996			return false;
 997	}
 998	return true;
 999}
1000
1001static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1002			      enum pipe pipe, u32 val)
1003{
1004	if ((val & PORT_ENABLE) == 0)
1005		return false;
1006
1007	if (HAS_PCH_CPT(dev_priv->dev)) {
1008		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1009			return false;
1010	} else {
1011		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1012			return false;
1013	}
1014	return true;
1015}
1016
1017static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1018			      enum pipe pipe, u32 val)
1019{
1020	if ((val & LVDS_PORT_EN) == 0)
1021		return false;
1022
1023	if (HAS_PCH_CPT(dev_priv->dev)) {
1024		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1025			return false;
1026	} else {
1027		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1028			return false;
1029	}
1030	return true;
1031}
1032
1033static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1034			      enum pipe pipe, u32 val)
1035{
1036	if ((val & ADPA_DAC_ENABLE) == 0)
1037		return false;
1038	if (HAS_PCH_CPT(dev_priv->dev)) {
1039		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1040			return false;
1041	} else {
1042		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1043			return false;
1044	}
1045	return true;
1046}
1047
1048static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1049				   enum pipe pipe, int reg, u32 port_sel)
1050{
1051	u32 val = I915_READ(reg);
1052	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1053	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1054	     reg, pipe_name(pipe));
1055}
1056
1057static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1058				     enum pipe pipe, int reg)
1059{
1060	u32 val = I915_READ(reg);
1061	WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1062	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1063	     reg, pipe_name(pipe));
1064}
1065
1066static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1067				      enum pipe pipe)
1068{
1069	int reg;
1070	u32 val;
1071
1072	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1073	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1074	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1075
1076	reg = PCH_ADPA;
1077	val = I915_READ(reg);
1078	WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1079	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1080	     pipe_name(pipe));
1081
1082	reg = PCH_LVDS;
1083	val = I915_READ(reg);
1084	WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1085	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1086	     pipe_name(pipe));
1087
1088	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1089	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1090	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1091}
1092
1093/**
1094 * intel_enable_pll - enable a PLL
1095 * @dev_priv: i915 private structure
1096 * @pipe: pipe PLL to enable
1097 *
1098 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1099 * make sure the PLL reg is writable first though, since the panel write
1100 * protect mechanism may be enabled.
1101 *
1102 * Note!  This is for pre-ILK only.
1103 */
1104static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1105{
1106	int reg;
1107	u32 val;
1108
1109	/* No really, not for ILK+ */
1110	BUG_ON(dev_priv->info->gen >= 5);
1111
1112	/* PLL is protected by panel, make sure we can write it */
1113	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1114		assert_panel_unlocked(dev_priv, pipe);
1115
1116	reg = DPLL(pipe);
1117	val = I915_READ(reg);
1118	val |= DPLL_VCO_ENABLE;
1119
1120	/* We do this three times for luck */
1121	I915_WRITE(reg, val);
1122	POSTING_READ(reg);
1123	udelay(150); /* wait for warmup */
1124	I915_WRITE(reg, val);
1125	POSTING_READ(reg);
1126	udelay(150); /* wait for warmup */
1127	I915_WRITE(reg, val);
1128	POSTING_READ(reg);
1129	udelay(150); /* wait for warmup */
1130}
1131
1132/**
1133 * intel_disable_pll - disable a PLL
1134 * @dev_priv: i915 private structure
1135 * @pipe: pipe PLL to disable
1136 *
1137 * Disable the PLL for @pipe, making sure the pipe is off first.
1138 *
1139 * Note!  This is for pre-ILK only.
1140 */
1141static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1142{
1143	int reg;
1144	u32 val;
1145
1146	/* Don't disable pipe A or pipe A PLLs if needed */
1147	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1148		return;
1149
1150	/* Make sure the pipe isn't still relying on us */
1151	assert_pipe_disabled(dev_priv, pipe);
1152
1153	reg = DPLL(pipe);
1154	val = I915_READ(reg);
1155	val &= ~DPLL_VCO_ENABLE;
1156	I915_WRITE(reg, val);
1157	POSTING_READ(reg);
1158}
1159
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1160/**
1161 * intel_enable_pch_pll - enable PCH PLL
1162 * @dev_priv: i915 private structure
1163 * @pipe: pipe PLL to enable
1164 *
1165 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1166 * drives the transcoder clock.
1167 */
1168static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1169				 enum pipe pipe)
1170{
 
 
1171	int reg;
1172	u32 val;
1173
1174	/* PCH only available on ILK+ */
1175	BUG_ON(dev_priv->info->gen < 5);
 
 
 
 
 
 
 
 
 
 
1176
1177	/* PCH refclock must be enabled first */
1178	assert_pch_refclk_enabled(dev_priv);
1179
1180	reg = PCH_DPLL(pipe);
 
 
 
 
 
 
 
1181	val = I915_READ(reg);
1182	val |= DPLL_VCO_ENABLE;
1183	I915_WRITE(reg, val);
1184	POSTING_READ(reg);
1185	udelay(200);
 
 
1186}
1187
1188static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1189				  enum pipe pipe)
1190{
 
 
1191	int reg;
1192	u32 val;
1193
1194	/* PCH only available on ILK+ */
1195	BUG_ON(dev_priv->info->gen < 5);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1196
1197	/* Make sure transcoder isn't still depending on us */
1198	assert_transcoder_disabled(dev_priv, pipe);
1199
1200	reg = PCH_DPLL(pipe);
1201	val = I915_READ(reg);
1202	val &= ~DPLL_VCO_ENABLE;
1203	I915_WRITE(reg, val);
1204	POSTING_READ(reg);
1205	udelay(200);
 
 
1206}
1207
/*
 * Enable the PCH transcoder for @pipe.  The PCH DPLL and both FDI
 * directions must already be running: the transcoder is clocked by the
 * PCH PLL and is fed pixel data over FDI.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
	}
	I915_WRITE(reg, val | TRANS_ENABLE);
	/* Wait up to 100ms for the hardware to report the enabled state */
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
1239
/*
 * Disable the PCH transcoder for @pipe.  FDI and all PCH ports must be
 * off first, since they depend on the transcoder running.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state (up to 50ms) */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder\n");
}
1261
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* Nothing to do if the pipe is already running */
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	/* Wait for the first vblank so callers know the pipe is live */
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1306
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* Nothing to do if the pipe is already off */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
1343
1344/*
1345 * Plane regs are double buffered, going from enabled->disabled needs a
1346 * trigger in order to latch.  The display address reg provides this.
1347 */
1348static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1349				      enum plane plane)
1350{
1351	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1352	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1353}
1354
/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Nothing to do if the plane is already on */
	if (val & DISPLAY_PLANE_ENABLE)
		return;

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	/* Plane regs are double buffered; flush to latch the enable */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1381
/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Nothing to do if the plane is already off */
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	/* Plane regs are double buffered; flush to latch the disable */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1405
1406static void disable_pch_dp(struct drm_i915_private *dev_priv,
1407			   enum pipe pipe, int reg, u32 port_sel)
1408{
1409	u32 val = I915_READ(reg);
1410	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1411		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1412		I915_WRITE(reg, val & ~DP_PORT_EN);
1413	}
1414}
1415
1416static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1417			     enum pipe pipe, int reg)
1418{
1419	u32 val = I915_READ(reg);
1420	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1421		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1422			      reg, pipe);
1423		I915_WRITE(reg, val & ~PORT_ENABLE);
1424	}
1425}
1426
1427/* Disable any ports connected to this transcoder */
1428static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1429				    enum pipe pipe)
1430{
1431	u32 reg, val;
1432
1433	val = I915_READ(PCH_PP_CONTROL);
1434	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1435
1436	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1437	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1438	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1439
1440	reg = PCH_ADPA;
1441	val = I915_READ(reg);
1442	if (adpa_pipe_enabled(dev_priv, val, pipe))
1443		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1444
1445	reg = PCH_LVDS;
1446	val = I915_READ(reg);
1447	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1448		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1449		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1450		POSTING_READ(reg);
1451		udelay(100);
1452	}
1453
1454	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1455	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1456	disable_pch_hdmi(dev_priv, pipe, HDMID);
1457}
1458
1459static void i8xx_disable_fbc(struct drm_device *dev)
1460{
1461	struct drm_i915_private *dev_priv = dev->dev_private;
1462	u32 fbc_ctl;
1463
1464	/* Disable compression */
1465	fbc_ctl = I915_READ(FBC_CONTROL);
1466	if ((fbc_ctl & FBC_CTL_EN) == 0)
1467		return;
1468
1469	fbc_ctl &= ~FBC_CTL_EN;
1470	I915_WRITE(FBC_CONTROL, fbc_ctl);
1471
1472	/* Wait for compressing bit to clear */
1473	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1474		DRM_DEBUG_KMS("FBC idle timed out\n");
1475		return;
1476	}
1477
1478	DRM_DEBUG_KMS("disabled FBC\n");
1479}
1480
/*
 * Enable 8xx-style framebuffer compression for the framebuffer bound to
 * @crtc.  @interval is the periodic recompression interval programmed
 * into FBC_CONTROL.
 */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/* Compressed buffer pitch, clamped to the framebuffer pitch */
	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitch < cfb_pitch)
		cfb_pitch = fb->pitch;

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	/* NOTE(review): 0x2fff is a non-contiguous mask for an interval
	 * field (0x3fff would be contiguous) -- confirm against the spec. */
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
1523
1524static bool i8xx_fbc_enabled(struct drm_device *dev)
1525{
1526	struct drm_i915_private *dev_priv = dev->dev_private;
1527
1528	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1529}
1530
/*
 * Enable G4x-style (DPFC) framebuffer compression for the framebuffer
 * bound to @crtc, recompressing at the given @interval.
 */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Plane select, self-refresh and 1x compression limit, plus the
	 * fence register used to detect CPU writes to the scanout. */
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1557
1558static void g4x_disable_fbc(struct drm_device *dev)
1559{
1560	struct drm_i915_private *dev_priv = dev->dev_private;
1561	u32 dpfc_ctl;
1562
1563	/* Disable compression */
1564	dpfc_ctl = I915_READ(DPFC_CONTROL);
1565	if (dpfc_ctl & DPFC_CTL_EN) {
1566		dpfc_ctl &= ~DPFC_CTL_EN;
1567		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1568
1569		DRM_DEBUG_KMS("disabled FBC\n");
1570	}
1571}
1572
1573static bool g4x_fbc_enabled(struct drm_device *dev)
1574{
1575	struct drm_i915_private *dev_priv = dev->dev_private;
1576
1577	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1578}
1579
/*
 * Make the blitter notify FBC of its writes on SNB by toggling
 * GEN6_BLITTER_FBC_NOTIFY in the blitter ECOSKPD register.  The
 * register appears to use a mask-in-the-high-bits write protocol
 * (bit << GEN6_BLITTER_LOCK_SHIFT unmasks the low bit) -- the mask bit
 * is set, then the value, then the mask cleared; order matters.
 * Register access is wrapped in force wake so the GT is awake.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
1599
/*
 * Enable ILK/SNB framebuffer compression for the framebuffer bound to
 * @crtc, recompressing at the given @interval.  On SNB the CPU fence is
 * additionally programmed and the blitter is set up to notify FBC.
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Preserve only the reserved bits, then program plane and limit */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		/* SNB also wants the CPU fence and blitter notification */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1637
1638static void ironlake_disable_fbc(struct drm_device *dev)
1639{
1640	struct drm_i915_private *dev_priv = dev->dev_private;
1641	u32 dpfc_ctl;
1642
1643	/* Disable compression */
1644	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1645	if (dpfc_ctl & DPFC_CTL_EN) {
1646		dpfc_ctl &= ~DPFC_CTL_EN;
1647		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1648
1649		DRM_DEBUG_KMS("disabled FBC\n");
1650	}
1651}
1652
1653static bool ironlake_fbc_enabled(struct drm_device *dev)
1654{
1655	struct drm_i915_private *dev_priv = dev->dev_private;
1656
1657	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1658}
1659
1660bool intel_fbc_enabled(struct drm_device *dev)
1661{
1662	struct drm_i915_private *dev_priv = dev->dev_private;
1663
1664	if (!dev_priv->display.fbc_enabled)
1665		return false;
1666
1667	return dev_priv->display.fbc_enabled(dev);
1668}
1669
/*
 * Delayed-work handler performing the FBC enable scheduled by
 * intel_enable_fbc().  Under struct_mutex it re-checks that this work
 * item is still the current one and that the crtc still shows the same
 * framebuffer before touching the hardware, then records the enabled
 * configuration in dev_priv.  Frees the work item in all cases.
 */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			/* Remember what configuration FBC was enabled for */
			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
1698
/*
 * Cancel any pending FBC enable scheduled by intel_enable_fbc().  If
 * the delayed work had not started yet we free it here; if it is
 * already running, clearing dev_priv->fbc_work makes the handler bail
 * out and free itself.
 */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
1721
/*
 * Schedule FBC enabling for @crtc with the given recompression
 * @interval.  The hardware enable happens ~50ms later from a delayed
 * work item; if the work item cannot be allocated, FBC is enabled
 * synchronously instead.  Any previously scheduled enable is cancelled
 * first.
 */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		/* No memory for deferral: enable immediately instead */
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
1761
1762void intel_disable_fbc(struct drm_device *dev)
1763{
1764	struct drm_i915_private *dev_priv = dev->dev_private;
1765
1766	intel_cancel_fbc_work(dev_priv);
1767
1768	if (!dev_priv->display.disable_fbc)
1769		return;
1770
1771	dev_priv->display.disable_fbc(dev);
1772	dev_priv->cfb_plane = -1;
1773}
1774
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/* Module parameter: <0 means per-chip default (on for gen6+) */
	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 5)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	/* Worst case: no compression, so fb must fit the stolen buffer */
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
1942
/* Pin @obj into the GTT as a display surface and install a fence for
 * tiled scan-out.  Returns 0 on success or a negative errno; on failure
 * the object is left unpinned.  Signal interruption is suppressed for
 * the duration so a modeset cannot be aborted halfway.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	/* Pick the GTT alignment a scan-out surface needs for this
	 * tiling mode on this hardware generation. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Non-interruptible while we touch display state; restored on
	 * every exit path below. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
1998
 
 
 
 
 
 
/* Program the primary plane registers on pre-Ironlake hardware: pixel
 * format, tiling flag (gen4+), stride and scan-out base.  Assumes the
 * framebuffer object is already pinned and fenced.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* Only planes A and B exist on these parts. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	/* The tiled-surface bit only exists on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitch);
	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ splits the base (DSPSURF) from the (x, y) pan
		 * offsets; the surface address latches on vblank. */
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return 0;
}
2071
/* Program the primary plane registers on Ironlake+: pixel format,
 * tiling, mandatory trickle-feed disable, stride and base.  Uses the
 * split DSPSURF + DSPTILEOFF/DSPADDR addressing scheme exclusively.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* Only planes A and B are handled here. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		/* only 5:6:5 is accepted at 16bpp here (no 1:5:5:5) */
		if (fb->depth != 16)
			return -EINVAL;

		dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else
			return -EINVAL;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitch);
	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}
2148
2149/* Assume fb object is pinned & idle & fenced and just update base pointers */
2150static int
2151intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2152			   int x, int y, enum mode_set_atomic state)
2153{
2154	struct drm_device *dev = crtc->dev;
2155	struct drm_i915_private *dev_priv = dev->dev_private;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2156	int ret;
2157
2158	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2159	if (ret)
2160		return ret;
2161
2162	intel_update_fbc(dev);
2163	intel_increase_pllclock(crtc);
 
 
 
 
 
 
 
 
 
2164
2165	return 0;
2166}
2167
/* Pin the new framebuffer, wait out pending flips/rendering on the old
 * one, atomically switch the scan-out base, then unpin the old buffer
 * after a vblank.  Also mirrors the pan offsets into the legacy SAREA
 * for DRI1 clients.  Returns 0 on success or a negative errno.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("no plane for crtc\n");
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(crtc->fb)->obj,
					 NULL);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	if (old_fb) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;

		/* Wait for outstanding page flips against the old
		 * scanout to finish (or for the GPU to wedge). */
		wait_event(dev_priv->pending_flip_queue,
			   atomic_read(&dev_priv->mm.wedged) ||
			   atomic_read(&obj->pending_flip) == 0);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		ret = i915_gem_object_finish_gpu(obj);
		(void) ret;
	}

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
					 LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	if (old_fb) {
		/* Let the new base latch on a vblank before pulling the
		 * old buffer out from under the scanout. */
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	/* Keep the DRI1 SAREA pan coordinates in sync. */
	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}
2255
/* Select the eDP (DP_A) PLL frequency for the target @clock (kHz):
 * 160MHz link for clocks under 200MHz (with the documented workaround
 * register sequence), 270MHz otherwise.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* give the PLL time to settle */
	udelay(500);
}
2292
/* Switch the FDI link from a training pattern to the normal pixel
 * stream on both the CPU TX and PCH RX ends, with enhanced framing.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a different train-select field layout */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2333
2334static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2335{
2336	struct drm_i915_private *dev_priv = dev->dev_private;
2337	u32 flags = I915_READ(SOUTH_CHICKEN1);
2338
2339	flags |= FDI_PHASE_SYNC_OVR(pipe);
2340	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2341	flags |= FDI_PHASE_SYNC_EN(pipe);
2342	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2343	POSTING_READ(SOUTH_CHICKEN1);
2344}
2345
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19; /* lane count field */
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	/* Poll IIR for bit lock to confirm pattern 1 trained. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll IIR for symbol lock to confirm pattern 2 trained. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
2442
2443static const int snb_b_fdi_train_param [] = {
2444	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2445	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2446	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2447	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2448};
2449
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19; /* lane count field */
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* Step through the voltage/emphasis table until bit lock. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Step through the voltage/emphasis table until symbol lock. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2573
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19; /* lane count field */
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* Try each voltage/emphasis level until bit lock is reported. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		/* re-read IIR in case the lock bit landed just now */
		if (temp & FDI_RX_BIT_LOCK ||
		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Try each voltage/emphasis level until symbol lock is reported. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2685
/* Enable the PCH-side FDI receive PLL and the (always-on) CPU-side FDI
 * transmit PLL, observing the required warm-up delays between steps.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* clear lane count and BPC fields before re-programming them */
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2726
2727static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2728{
2729	struct drm_i915_private *dev_priv = dev->dev_private;
2730	u32 flags = I915_READ(SOUTH_CHICKEN1);
2731
2732	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2733	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2734	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2735	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2736	POSTING_READ(SOUTH_CHICKEN1);
2737}
2738static void ironlake_fdi_disable(struct drm_crtc *crtc)
2739{
2740	struct drm_device *dev = crtc->dev;
2741	struct drm_i915_private *dev_priv = dev->dev_private;
2742	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2743	int pipe = intel_crtc->pipe;
2744	u32 reg, temp;
2745
2746	/* disable CPU FDI tx and PCH FDI rx */
2747	reg = FDI_TX_CTL(pipe);
2748	temp = I915_READ(reg);
2749	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2750	POSTING_READ(reg);
2751
2752	reg = FDI_RX_CTL(pipe);
2753	temp = I915_READ(reg);
2754	temp &= ~(0x7 << 16);
2755	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2756	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2757
2758	POSTING_READ(reg);
2759	udelay(100);
2760
2761	/* Ironlake workaround, disable clock pointer after downing FDI */
2762	if (HAS_PCH_IBX(dev)) {
2763		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2764		I915_WRITE(FDI_RX_CHICKEN(pipe),
2765			   I915_READ(FDI_RX_CHICKEN(pipe) &
2766				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2767	} else if (HAS_PCH_CPT(dev)) {
2768		cpt_phase_pointer_disable(dev, pipe);
2769	}
2770
2771	/* still set train pattern 1 */
2772	reg = FDI_TX_CTL(pipe);
2773	temp = I915_READ(reg);
2774	temp &= ~FDI_LINK_TRAIN_NONE;
2775	temp |= FDI_LINK_TRAIN_PATTERN_1;
2776	I915_WRITE(reg, temp);
2777
2778	reg = FDI_RX_CTL(pipe);
2779	temp = I915_READ(reg);
2780	if (HAS_PCH_CPT(dev)) {
2781		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2782		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2783	} else {
2784		temp &= ~FDI_LINK_TRAIN_NONE;
2785		temp |= FDI_LINK_TRAIN_PATTERN_1;
2786	}
2787	/* BPC in FDI rx is consistent with that in PIPECONF */
2788	temp &= ~(0x07 << 16);
2789	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2790	I915_WRITE(reg, temp);
2791
2792	POSTING_READ(reg);
2793	udelay(100);
2794}
2795
2796/*
2797 * When we disable a pipe, we need to clear any pending scanline wait events
2798 * to avoid hanging the ring, which we assume we are waiting on.
2799 */
2800static void intel_clear_scanline_wait(struct drm_device *dev)
2801{
2802	struct drm_i915_private *dev_priv = dev->dev_private;
2803	struct intel_ring_buffer *ring;
2804	u32 tmp;
2805
2806	if (IS_GEN2(dev))
2807		/* Can't break the hang on i8xx */
2808		return;
2809
2810	ring = LP_RING(dev_priv);
2811	tmp = I915_READ_CTL(ring);
2812	if (tmp & RING_WAIT)
2813		I915_WRITE_CTL(ring, tmp);
2814}
2815
2816static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2817{
2818	struct drm_i915_gem_object *obj;
2819	struct drm_i915_private *dev_priv;
2820
2821	if (crtc->fb == NULL)
2822		return;
2823
2824	obj = to_intel_framebuffer(crtc->fb)->obj;
2825	dev_priv = crtc->dev->dev_private;
2826	wait_event(dev_priv->pending_flip_queue,
2827		   atomic_read(&obj->pending_flip) == 0);
2828}
2829
2830static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2831{
2832	struct drm_device *dev = crtc->dev;
2833	struct drm_mode_config *mode_config = &dev->mode_config;
2834	struct intel_encoder *encoder;
2835
2836	/*
2837	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2838	 * must be driven by its own crtc; no sharing is possible.
2839	 */
2840	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2841		if (encoder->base.crtc != crtc)
2842			continue;
2843
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2844		switch (encoder->type) {
2845		case INTEL_OUTPUT_EDP:
2846			if (!intel_encoder_is_pch_edp(&encoder->base))
2847				return false;
2848			continue;
2849		}
2850	}
2851
2852	return true;
2853}
2854
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2855/*
2856 * Enable PCH resources required for PCH ports:
2857 *   - PCH PLLs
2858 *   - FDI training & RX/TX
2859 *   - update transcoder timings
2860 *   - DP transcoding bits
2861 *   - transcoder
2862 */
2863static void ironlake_pch_enable(struct drm_crtc *crtc)
2864{
2865	struct drm_device *dev = crtc->dev;
2866	struct drm_i915_private *dev_priv = dev->dev_private;
2867	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2868	int pipe = intel_crtc->pipe;
2869	u32 reg, temp;
2870
 
 
2871	/* For PCH output, training FDI link */
2872	dev_priv->display.fdi_link_train(crtc);
2873
2874	intel_enable_pch_pll(dev_priv, pipe);
 
 
 
 
 
 
2875
2876	if (HAS_PCH_CPT(dev)) {
2877		/* Be sure PCH DPLL SEL is set */
2878		temp = I915_READ(PCH_DPLL_SEL);
2879		if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
2880			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2881		else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
2882			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2883		I915_WRITE(PCH_DPLL_SEL, temp);
2884	}
2885
2886	/* set transcoder timing, panel must allow it */
2887	assert_panel_unlocked(dev_priv, pipe);
2888	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2889	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2890	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
2891
2892	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2893	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2894	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
 
2895
2896	intel_fdi_normal_train(crtc);
 
2897
2898	/* For PCH DP, enable TRANS_DP_CTL */
2899	if (HAS_PCH_CPT(dev) &&
2900	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
 
2901		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2902		reg = TRANS_DP_CTL(pipe);
2903		temp = I915_READ(reg);
2904		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2905			  TRANS_DP_SYNC_MASK |
2906			  TRANS_DP_BPC_MASK);
2907		temp |= (TRANS_DP_OUTPUT_ENABLE |
2908			 TRANS_DP_ENH_FRAMING);
2909		temp |= bpc << 9; /* same format but at 11:9 */
2910
2911		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2912			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2913		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2914			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2915
2916		switch (intel_trans_dp_port_sel(crtc)) {
2917		case PCH_DP_B:
2918			temp |= TRANS_DP_PORT_SEL_B;
2919			break;
2920		case PCH_DP_C:
2921			temp |= TRANS_DP_PORT_SEL_C;
2922			break;
2923		case PCH_DP_D:
2924			temp |= TRANS_DP_PORT_SEL_D;
2925			break;
2926		default:
2927			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2928			temp |= TRANS_DP_PORT_SEL_B;
2929			break;
2930		}
2931
2932		I915_WRITE(reg, temp);
2933	}
2934
2935	intel_enable_transcoder(dev_priv, pipe);
2936}
2937
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Full Ironlake CRTC power-up sequence: watermarks, LVDS port, FDI
 * PLLs, panel fitter, LUT, pipe, plane and finally (for PCH outputs)
 * the PCH resources.  Idempotent via intel_crtc->active.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 temp;
	bool is_pch_port;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	/* LVDS port must be powered before enabling anything else. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		temp = I915_READ(PCH_LVDS);
		if ((temp & LVDS_PORT_EN) == 0)
			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
	}

	is_pch_port = intel_crtc_driving_pch(crtc);

	if (is_pch_port)
		ironlake_fdi_pll_enable(crtc);
	else
		ironlake_fdi_disable(crtc);

	/* Enable panel fitting for LVDS */
	if (dev_priv->pch_pf_size &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
	}

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_enable_pipe(dev_priv, pipe, is_pch_port);
	intel_enable_plane(dev_priv, plane, pipe);

	if (is_pch_port)
		ironlake_pch_enable(crtc);

	/* Re-evaluate FBC now that the pipe/plane configuration changed. */
	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_crtc_update_cursor(crtc, true);
}
2997
/*
 * Tear down a PCH-split pipe in roughly the reverse order of
 * ironlake_crtc_enable(): plane, pipe, panel fitter, FDI, PCH port,
 * transcoder, and finally the PCH DPLL and FDI clocks.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;

	/* Already off; nothing to do. */
	if (!intel_crtc->active)
		return;

	/* Quiesce pending page flips and vblank users before touching HW. */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_update_cursor(crtc, false);

	intel_disable_plane(dev_priv, plane, pipe);

	/* FBC must not stay enabled for a plane that is going away. */
	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	/* Disable PF */
	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	ironlake_fdi_disable(crtc);

	/* This is a horrible layering violation; we should be doing this in
	 * the connector/encoder ->prepare instead, but we don't always have
	 * enough information there about the config to know whether it will
	 * actually be necessary or just cause undesired flicker.
	 */
	intel_disable_pch_ports(dev_priv, pipe);

	intel_disable_transcoder(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* disable TRANS_DP_CTL */
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
		temp |= TRANS_DP_PORT_SEL_NONE;
		I915_WRITE(reg, temp);

		/* disable DPLL_SEL */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
			break;
		case 1:
			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			break;
		case 2:
			/* FIXME: manage transcoder PLLs? */
			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
			break;
		default:
			BUG(); /* wtf */
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* disable PCH DPLL */
	intel_disable_pch_pll(dev_priv, pipe);

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	/* struct_mutex protects FBC and scanline-wait state. */
	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	intel_clear_scanline_wait(dev);
	mutex_unlock(&dev->struct_mutex);
}
3095
3096static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3097{
3098	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3099	int pipe = intel_crtc->pipe;
3100	int plane = intel_crtc->plane;
3101
3102	/* XXX: When our outputs are all unaware of DPMS modes other than off
3103	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3104	 */
3105	switch (mode) {
3106	case DRM_MODE_DPMS_ON:
3107	case DRM_MODE_DPMS_STANDBY:
3108	case DRM_MODE_DPMS_SUSPEND:
3109		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3110		ironlake_crtc_enable(crtc);
3111		break;
3112
3113	case DRM_MODE_DPMS_OFF:
3114		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3115		ironlake_crtc_disable(crtc);
3116		break;
3117	}
3118}
3119
 
 
 
 
 
 
3120static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3121{
3122	if (!enable && intel_crtc->overlay) {
3123		struct drm_device *dev = intel_crtc->base.dev;
3124		struct drm_i915_private *dev_priv = dev->dev_private;
3125
3126		mutex_lock(&dev->struct_mutex);
3127		dev_priv->mm.interruptible = false;
3128		(void) intel_overlay_switch_off(intel_crtc->overlay);
3129		dev_priv->mm.interruptible = true;
3130		mutex_unlock(&dev->struct_mutex);
3131	}
3132
3133	/* Let userspace switch the overlay on again. In most cases userspace
3134	 * has to recompute where to put it anyway.
3135	 */
3136}
3137
/*
 * Power up a gen2-gen4 pipe: DPLL first, then pipe, then plane, in
 * that order.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	/* Nothing to do if the pipe is already running. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}
3163
/*
 * Tear down a gen2-gen4 pipe in the reverse order of
 * i9xx_crtc_enable(): plane, pipe, then DPLL.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	/* Already off; nothing to do. */
	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);

	/* FBC must not stay enabled for a plane that is going away. */
	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
	intel_clear_scanline_wait(dev);
}
3193
3194static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3195{
3196	/* XXX: When our outputs are all unaware of DPMS modes other than off
3197	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3198	 */
3199	switch (mode) {
3200	case DRM_MODE_DPMS_ON:
3201	case DRM_MODE_DPMS_STANDBY:
3202	case DRM_MODE_DPMS_SUSPEND:
3203		i9xx_crtc_enable(crtc);
3204		break;
3205	case DRM_MODE_DPMS_OFF:
3206		i9xx_crtc_disable(crtc);
3207		break;
3208	}
3209}
3210
 
 
 
 
3211/**
3212 * Sets the power management mode of the pipe and plane.
3213 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	/* No-op when we are already in the requested DPMS state. */
	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	/* Platform-specific hook: i9xx_crtc_dpms() or ironlake_crtc_dpms(). */
	dev_priv->display.dpms(crtc, mode);

	/* The remainder only mirrors the new state into the legacy SAREA
	 * for old DRI userspace; skip it when there is no master. */
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		/* The SAREA layout only describes pipes A and B. */
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}
3253
3254static void intel_crtc_disable(struct drm_crtc *crtc)
3255{
3256	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3257	struct drm_device *dev = crtc->dev;
 
3258
3259	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
 
 
 
3260
3261	if (crtc->fb) {
3262		mutex_lock(&dev->struct_mutex);
3263		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
3264		mutex_unlock(&dev->struct_mutex);
3265	}
3266}
3267
3268/* Prepare for a mode set.
3269 *
3270 * Note we could be a lot smarter here.  We need to figure out which outputs
3271 * will be enabled, which disabled (in short, how the config will changes)
3272 * and perform the minimum necessary steps to accomplish that, e.g. updating
3273 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3274 * panel fitting is in the proper state, etc.
3275 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	/* CRTC helper ->prepare: shut the pipe down before a mode set. */
	i9xx_crtc_disable(crtc);
}
3280
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	/* CRTC helper ->commit: bring the pipe back up after a mode set. */
	i9xx_crtc_enable(crtc);
}
3285
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	/* CRTC helper ->prepare: shut the pipe down before a mode set. */
	ironlake_crtc_disable(crtc);
}
3290
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	/* CRTC helper ->commit: bring the pipe back up after a mode set. */
	ironlake_crtc_enable(crtc);
}
3295
3296void intel_encoder_prepare (struct drm_encoder *encoder)
3297{
3298	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3299	/* lvds has its own version of prepare see intel_lvds_prepare */
3300	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3301}
3302
3303void intel_encoder_commit (struct drm_encoder *encoder)
3304{
3305	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 
 
 
3306	/* lvds has its own version of commit see intel_lvds_commit */
3307	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 
 
 
3308}
3309
/* Tear down the DRM encoder state and free the wrapping intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
3317
3318static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3319				  struct drm_display_mode *mode,
3320				  struct drm_display_mode *adjusted_mode)
3321{
3322	struct drm_device *dev = crtc->dev;
3323
3324	if (HAS_PCH_SPLIT(dev)) {
3325		/* FDI link clock is fixed at 2.7G */
3326		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3327			return false;
3328	}
3329
3330	/* XXX some encoders set the crtcinfo, others don't.
3331	 * Obviously we need some form of conflict resolution here...
3332	 */
3333	if (adjusted_mode->crtc_htotal == 0)
3334		drm_mode_set_crtcinfo(adjusted_mode, 0);
3335
3336	return true;
3337}
3338
 
 
 
 
 
/* i945: fixed display clock of 400000 (presumably kHz — matches siblings). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
3343
/* i915: fixed display clock of 333000. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
3348
/* Fallback for other i9xx chipsets: fixed display clock of 200000. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
3353
3354static int i915gm_get_display_clock_speed(struct drm_device *dev)
3355{
3356	u16 gcfgc = 0;
3357
3358	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3359
3360	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3361		return 133000;
3362	else {
3363		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3364		case GC_DISPLAY_CLOCK_333_MHZ:
3365			return 333000;
3366		default:
3367		case GC_DISPLAY_CLOCK_190_200_MHZ:
3368			return 190000;
3369		}
3370	}
3371}
3372
/* i865: fixed display clock of 266000. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
3377
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	/* NOTE(review): hpllcc is never read from the chipset here, so the
	 * switch always sees 0 — confirm whether a config-space read of the
	 * HPLL control register was intended. */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3397
/* i830: fixed display clock of 133000. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
3402
/* FDI link M/N divider values computed by ironlake_compute_m_n(). */
struct fdi_m_n {
	u32        tu;		/* transfer unit size (defaults to 64) */
	u32        gmch_m;	/* data M: bits_per_pixel * pixel_clock */
	u32        gmch_n;	/* data N: link_clock * nlanes * 8 */
	u32        link_m;	/* link M: pixel clock */
	u32        link_n;	/* link N: link clock */
};
3410
/*
 * Shift numerator and denominator right together until both fit in
 * 24 bits, preserving the num/den ratio (up to truncation).
 */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}
3419
/*
 * Compute the FDI data (gmch) and link M/N ratios for the given pixel
 * clock and link clock, reducing each ratio to fit the 24-bit fields.
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	/* Data ratio: pixel data rate vs. total FDI bandwidth. */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	/* Link ratio: pixel clock vs. link clock. */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3435
3436
/* Per-platform FIFO/watermark parameters consumed by intel_calculate_wm(). */
struct intel_watermark_params {
	unsigned long fifo_size;	/* FIFO size, in cachelines */
	unsigned long max_wm;		/* largest programmable watermark */
	unsigned long default_wm;	/* fallback when the computed wm is <= 0 */
	unsigned long guard_size;	/* extra entries kept in reserve */
	unsigned long cacheline_size;	/* bytes per FIFO cacheline */
};
3444
3445/* Pineview has different values for various configs */
3446static const struct intel_watermark_params pineview_display_wm = {
3447	PINEVIEW_DISPLAY_FIFO,
3448	PINEVIEW_MAX_WM,
3449	PINEVIEW_DFT_WM,
3450	PINEVIEW_GUARD_WM,
3451	PINEVIEW_FIFO_LINE_SIZE
3452};
3453static const struct intel_watermark_params pineview_display_hplloff_wm = {
3454	PINEVIEW_DISPLAY_FIFO,
3455	PINEVIEW_MAX_WM,
3456	PINEVIEW_DFT_HPLLOFF_WM,
3457	PINEVIEW_GUARD_WM,
3458	PINEVIEW_FIFO_LINE_SIZE
3459};
3460static const struct intel_watermark_params pineview_cursor_wm = {
3461	PINEVIEW_CURSOR_FIFO,
3462	PINEVIEW_CURSOR_MAX_WM,
3463	PINEVIEW_CURSOR_DFT_WM,
3464	PINEVIEW_CURSOR_GUARD_WM,
3465	PINEVIEW_FIFO_LINE_SIZE,
3466};
3467static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3468	PINEVIEW_CURSOR_FIFO,
3469	PINEVIEW_CURSOR_MAX_WM,
3470	PINEVIEW_CURSOR_DFT_WM,
3471	PINEVIEW_CURSOR_GUARD_WM,
3472	PINEVIEW_FIFO_LINE_SIZE
3473};
3474static const struct intel_watermark_params g4x_wm_info = {
3475	G4X_FIFO_SIZE,
3476	G4X_MAX_WM,
3477	G4X_MAX_WM,
3478	2,
3479	G4X_FIFO_LINE_SIZE,
3480};
3481static const struct intel_watermark_params g4x_cursor_wm_info = {
3482	I965_CURSOR_FIFO,
3483	I965_CURSOR_MAX_WM,
3484	I965_CURSOR_DFT_WM,
3485	2,
3486	G4X_FIFO_LINE_SIZE,
3487};
3488static const struct intel_watermark_params i965_cursor_wm_info = {
3489	I965_CURSOR_FIFO,
3490	I965_CURSOR_MAX_WM,
3491	I965_CURSOR_DFT_WM,
3492	2,
3493	I915_FIFO_LINE_SIZE,
3494};
3495static const struct intel_watermark_params i945_wm_info = {
3496	I945_FIFO_SIZE,
3497	I915_MAX_WM,
3498	1,
3499	2,
3500	I915_FIFO_LINE_SIZE
3501};
3502static const struct intel_watermark_params i915_wm_info = {
3503	I915_FIFO_SIZE,
3504	I915_MAX_WM,
3505	1,
3506	2,
3507	I915_FIFO_LINE_SIZE
3508};
3509static const struct intel_watermark_params i855_wm_info = {
3510	I855GM_FIFO_SIZE,
3511	I915_MAX_WM,
3512	1,
3513	2,
3514	I830_FIFO_LINE_SIZE
3515};
3516static const struct intel_watermark_params i830_wm_info = {
3517	I830_FIFO_SIZE,
3518	I915_MAX_WM,
3519	1,
3520	2,
3521	I830_FIFO_LINE_SIZE
3522};
3523
3524static const struct intel_watermark_params ironlake_display_wm_info = {
3525	ILK_DISPLAY_FIFO,
3526	ILK_DISPLAY_MAXWM,
3527	ILK_DISPLAY_DFTWM,
3528	2,
3529	ILK_FIFO_LINE_SIZE
3530};
3531static const struct intel_watermark_params ironlake_cursor_wm_info = {
3532	ILK_CURSOR_FIFO,
3533	ILK_CURSOR_MAXWM,
3534	ILK_CURSOR_DFTWM,
3535	2,
3536	ILK_FIFO_LINE_SIZE
3537};
3538static const struct intel_watermark_params ironlake_display_srwm_info = {
3539	ILK_DISPLAY_SR_FIFO,
3540	ILK_DISPLAY_MAX_SRWM,
3541	ILK_DISPLAY_DFT_SRWM,
3542	2,
3543	ILK_FIFO_LINE_SIZE
3544};
3545static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3546	ILK_CURSOR_SR_FIFO,
3547	ILK_CURSOR_MAX_SRWM,
3548	ILK_CURSOR_DFT_SRWM,
3549	2,
3550	ILK_FIFO_LINE_SIZE
3551};
3552
3553static const struct intel_watermark_params sandybridge_display_wm_info = {
3554	SNB_DISPLAY_FIFO,
3555	SNB_DISPLAY_MAXWM,
3556	SNB_DISPLAY_DFTWM,
3557	2,
3558	SNB_FIFO_LINE_SIZE
3559};
3560static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3561	SNB_CURSOR_FIFO,
3562	SNB_CURSOR_MAXWM,
3563	SNB_CURSOR_DFTWM,
3564	2,
3565	SNB_FIFO_LINE_SIZE
3566};
3567static const struct intel_watermark_params sandybridge_display_srwm_info = {
3568	SNB_DISPLAY_SR_FIFO,
3569	SNB_DISPLAY_MAX_SRWM,
3570	SNB_DISPLAY_DFT_SRWM,
3571	2,
3572	SNB_FIFO_LINE_SIZE
3573};
3574static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3575	SNB_CURSOR_SR_FIFO,
3576	SNB_CURSOR_MAX_SRWM,
3577	SNB_CURSOR_DFT_SRWM,
3578	2,
3579	SNB_FIFO_LINE_SIZE
3580};
3581
3582
3583/**
3584 * intel_calculate_wm - calculate watermark level
3585 * @clock_in_khz: pixel clock
3586 * @wm: chip FIFO params
3587 * @pixel_size: display pixel size
3588 * @latency_ns: memory latency for the platform
3589 *
3590 * Calculate the watermark level (the level at which the display plane will
3591 * start fetching from memory again).  Each chip has a different display
3592 * FIFO size and allocation, so the caller needs to figure that out and pass
3593 * in the correct intel_watermark_params structure.
3594 *
3595 * As the pixel clock runs, the FIFO will be drained at a rate that depends
3596 * on the pixel size.  When it reaches the watermark level, it'll start
3597 * fetching FIFO line sized based chunks from memory until the FIFO fills
3598 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3599 * will occur, and a display engine hang could result.
3600 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	/* Convert from bytes to FIFO cachelines, rounding up. */
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	/* Whatever the FIFO does not need for latency can sit above the
	 * watermark level. */
	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
3632
/* One row of the CxSR (self-refresh) latency lookup table; latencies
 * are matched by desktop/mobile, DDR3 flag, FSB and memory frequency. */
struct cxsr_latency {
	int is_desktop;				/* 1 = desktop, 0 = mobile */
	int is_ddr3;				/* 1 = DDR3, 0 = DDR2 */
	unsigned long fsb_freq;			/* FSB frequency, MHz */
	unsigned long mem_freq;			/* memory frequency, MHz */
	unsigned long display_sr;		/* display self-refresh latency */
	unsigned long display_hpll_disable;	/* display latency, HPLL off */
	unsigned long cursor_sr;		/* cursor self-refresh latency */
	unsigned long cursor_hpll_disable;	/* cursor latency, HPLL off */
};
3643
static const struct cxsr_latency cxsr_latency_table[] = {
	/* Columns: {desktop, ddr3, fsb MHz, mem MHz,
	 *           display_sr, display_hpll, cursor_sr, cursor_hpll} */
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3681
3682static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3683							 int is_ddr3,
3684							 int fsb,
3685							 int mem)
3686{
3687	const struct cxsr_latency *latency;
3688	int i;
3689
3690	if (fsb == 0 || mem == 0)
3691		return NULL;
3692
3693	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3694		latency = &cxsr_latency_table[i];
3695		if (is_desktop == latency->is_desktop &&
3696		    is_ddr3 == latency->is_ddr3 &&
3697		    fsb == latency->fsb_freq && mem == latency->mem_freq)
3698			return latency;
3699	}
3700
3701	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3702
3703	return NULL;
3704}
3705
/* Turn off Pineview self-refresh (CxSR). */
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3713
3714/*
3715 * Latency for FIFO fetches is dependent on several factors:
3716 *   - memory configuration (speed, channels)
3717 *   - chipset
3718 *   - current MCH state
3719 * It can be fairly high in some situations, so here we assume a fairly
3720 * pessimal value.  It's a tradeoff between extra memory fetches (if we
3721 * set this value too high, the FIFO will fetch frequently to stay full)
3722 * and power consumption (set it too low to save power and we might see
3723 * FIFO underruns and display "flicker").
3724 *
3725 * A value of 5us seems to be a good balance; safe for very low end
3726 * platforms but not overly aggressive on lower latency configs.
3727 */
static const int latency_ns = 5000; /* assumed FIFO fetch latency, in ns */
3729
3730static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3731{
3732	struct drm_i915_private *dev_priv = dev->dev_private;
3733	uint32_t dsparb = I915_READ(DSPARB);
3734	int size;
3735
3736	size = dsparb & 0x7f;
3737	if (plane)
3738		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3739
3740	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3741		      plane ? "B" : "A", size);
3742
3743	return size;
3744}
3745
3746static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3747{
3748	struct drm_i915_private *dev_priv = dev->dev_private;
3749	uint32_t dsparb = I915_READ(DSPARB);
3750	int size;
3751
3752	size = dsparb & 0x1ff;
3753	if (plane)
3754		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3755	size >>= 1; /* Convert to cachelines */
3756
3757	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3758		      plane ? "B" : "A", size);
3759
3760	return size;
3761}
3762
3763static int i845_get_fifo_size(struct drm_device *dev, int plane)
3764{
3765	struct drm_i915_private *dev_priv = dev->dev_private;
3766	uint32_t dsparb = I915_READ(DSPARB);
3767	int size;
3768
3769	size = dsparb & 0x7f;
3770	size >>= 2; /* Convert to cachelines */
3771
3772	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3773		      plane ? "B" : "A",
3774		      size);
3775
3776	return size;
3777}
3778
3779static int i830_get_fifo_size(struct drm_device *dev, int plane)
3780{
3781	struct drm_i915_private *dev_priv = dev->dev_private;
3782	uint32_t dsparb = I915_READ(DSPARB);
3783	int size;
3784
3785	size = dsparb & 0x7f;
3786	size >>= 1; /* Convert to cachelines */
3787
3788	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3789		      plane ? "B" : "A", size);
3790
3791	return size;
3792}
3793
/*
 * Return the one enabled crtc that has a framebuffer attached, or NULL
 * when none — or more than one — such crtc exists.
 */
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->enabled && crtc->fb) {
			/* A second active crtc disqualifies the config. */
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
3808
/*
 * Program the Pineview self-refresh (CxSR) watermarks.  Self-refresh is
 * only enabled in a single-pipe configuration with a known latency
 * table entry; otherwise it is turned off.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): the display (not cursor) fifo_size is passed
		 * here — confirm this is intentional for the cursor wm. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
3877
/*
 * Compute the normal (non-self-refresh) plane and cursor watermarks for
 * one pipe.  Returns false — with the guard-size minimums stored — when
 * the pipe is disabled or has no framebuffer.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;	/* assumed non-zero for an enabled mode */
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	/* Pad for a TLB miss when the line does not fill the FIFO. */
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	/* Round the latency up to whole scanlines. */
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size; /* 64: presumed cursor width */
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
3928
3929/*
3930 * Check the wm result.
3931 *
3932 * If any calculated watermark values is larger than the maximum value that
3933 * can be programmed into the associated watermark register, that watermark
3934 * must be disabled.
3935 */
3936static bool g4x_check_srwm(struct drm_device *dev,
3937			   int display_wm, int cursor_wm,
3938			   const struct intel_watermark_params *display,
3939			   const struct intel_watermark_params *cursor)
3940{
3941	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
3942		      display_wm, cursor_wm);
3943
3944	if (display_wm > display->max_wm) {
3945		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
3946			      display_wm, display->max_wm);
3947		return false;
3948	}
3949
3950	if (cursor_wm > cursor->max_wm) {
3951		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
3952			      cursor_wm, cursor->max_wm);
3953		return false;
3954	}
3955
3956	if (!(display_wm || cursor_wm)) {
3957		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
3958		return false;
3959	}
3960
3961	return true;
3962}
3963
/*
 * Compute the self-refresh watermarks for the given plane's pipe and
 * validate them via g4x_check_srwm().  Returns false when SR should be
 * left disabled (zero latency or out-of-range results).
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	/* Zero latency: self-refresh cannot be used. */
	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;	/* assumed non-zero for an active mode */
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* Round the latency up to whole scanlines. */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
4009
/* True when exactly one plane bit is set in @mask. */
#define single_plane_enabled(mask) is_power_of_2(mask)

/*
 * Compute and program all G4x watermarks.  Self-refresh watermarks are
 * only enabled when exactly one plane is active.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of active planes */

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4062
/*
 * i965_update_wm - compute and program FIFO watermarks on i965-class hardware.
 *
 * The normal (non-SR) watermarks are fixed at 8 entries on these parts; only
 * the self-refresh display and cursor watermarks are calculated, and only
 * when a single CRTC is enabled.  On Crestline, self-refresh is enabled or
 * disabled accordingly via FW_BLC_SELF.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* fallback SR watermark if no single plane */
	int cursor_sr = 16;	/* fallback cursor SR watermark */

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		/* the SR field in DSPFW1 is 9 bits wide */
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* cursor is a fixed 64 pixel wide surface */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
					  i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... non-SR plane/cursor watermarks forced to 8 */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4127
/*
 * i9xx_update_wm - compute and program FIFO watermarks for gen2/gen3 parts.
 *
 * Calculates plane A/B watermarks from the per-chipset watermark tables,
 * then (on parts with FW_BLC self-refresh) the SR watermark when exactly
 * one plane is enabled.  Self-refresh is disabled before touching the
 * watermarks and re-enabled at the end only in the single-plane case.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	/* 'enabled' tracks the single enabled crtc, NULL if zero or both */
	struct drm_crtc *crtc, *enabled = NULL;

	/* pick the watermark table for this chipset generation */
	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;	/* both pipes active: no SR */
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		/* program the SR fifo threshold (field widths differ) */
		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* re-enable self-refresh only in the single-plane case */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
4238
4239static void i830_update_wm(struct drm_device *dev)
4240{
4241	struct drm_i915_private *dev_priv = dev->dev_private;
4242	struct drm_crtc *crtc;
4243	uint32_t fwater_lo;
4244	int planea_wm;
4245
4246	crtc = single_enabled_crtc(dev);
4247	if (crtc == NULL)
4248		return;
4249
4250	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4251				       dev_priv->display.get_fifo_size(dev, 0),
4252				       crtc->fb->bits_per_pixel / 8,
4253				       latency_ns);
4254	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4255	fwater_lo |= (3<<8) | planea_wm;
4256
4257	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4258
4259	I915_WRITE(FW_BLC, fwater_lo);
4260}
4261
4262#define ILK_LP0_PLANE_LATENCY		700
4263#define ILK_LP0_CURSOR_LATENCY		1300
4264
4265/*
4266 * Check the wm result.
4267 *
4268 * If any calculated watermark values is larger than the maximum value that
4269 * can be programmed into the associated watermark register, that watermark
4270 * must be disabled.
4271 */
4272static bool ironlake_check_srwm(struct drm_device *dev, int level,
4273				int fbc_wm, int display_wm, int cursor_wm,
4274				const struct intel_watermark_params *display,
4275				const struct intel_watermark_params *cursor)
4276{
4277	struct drm_i915_private *dev_priv = dev->dev_private;
4278
4279	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4280		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4281
4282	if (fbc_wm > SNB_FBC_MAX_SRWM) {
4283		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4284			      fbc_wm, SNB_FBC_MAX_SRWM, level);
4285
4286		/* fbc has it's own way to disable FBC WM */
4287		I915_WRITE(DISP_ARB_CTL,
4288			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4289		return false;
4290	}
4291
4292	if (display_wm > display->max_wm) {
4293		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4294			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
4295		return false;
4296	}
4297
4298	if (cursor_wm > cursor->max_wm) {
4299		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4300			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4301		return false;
4302	}
4303
4304	if (!(fbc_wm || display_wm || cursor_wm)) {
4305		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4306		return false;
4307	}
4308
4309	return true;
4310}
4311
4312/*
4313 * Compute watermark values of WM[1-3],
4314 */
4315static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4316				  int latency_ns,
4317				  const struct intel_watermark_params *display,
4318				  const struct intel_watermark_params *cursor,
4319				  int *fbc_wm, int *display_wm, int *cursor_wm)
4320{
4321	struct drm_crtc *crtc;
4322	unsigned long line_time_us;
4323	int hdisplay, htotal, pixel_size, clock;
4324	int line_count, line_size;
4325	int small, large;
4326	int entries;
4327
4328	if (!latency_ns) {
4329		*fbc_wm = *display_wm = *cursor_wm = 0;
4330		return false;
4331	}
4332
4333	crtc = intel_get_crtc_for_plane(dev, plane);
4334	hdisplay = crtc->mode.hdisplay;
4335	htotal = crtc->mode.htotal;
4336	clock = crtc->mode.clock;
4337	pixel_size = crtc->fb->bits_per_pixel / 8;
4338
4339	line_time_us = (htotal * 1000) / clock;
4340	line_count = (latency_ns / line_time_us + 1000) / 1000;
4341	line_size = hdisplay * pixel_size;
4342
4343	/* Use the minimum of the small and large buffer method for primary */
4344	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4345	large = line_count * line_size;
4346
4347	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4348	*display_wm = entries + display->guard_size;
4349
4350	/*
4351	 * Spec says:
4352	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4353	 */
4354	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4355
4356	/* calculate the self-refresh watermark for display cursor */
4357	entries = line_count * pixel_size * 64;
4358	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4359	*cursor_wm = entries + cursor->guard_size;
4360
4361	return ironlake_check_srwm(dev, level,
4362				   *fbc_wm, *display_wm, *cursor_wm,
4363				   display, cursor);
4364}
4365
/*
 * ironlake_update_wm - compute and program FIFO watermarks on Ironlake.
 *
 * Programs the WM0 (normal) watermarks for both pipes, then WM1 and WM2
 * self-refresh watermarks when exactly one plane is enabled.  WM3 is not
 * programmed on ILK (no latency data for that power state).
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of pipes with a valid WM0 */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	/* convert the bitmask into the plane index for the SR computation */
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
4448
/*
 * sandybridge_update_wm - compute and program FIFO watermarks on Sandybridge.
 *
 * Programs the WM0 (normal) watermarks for both pipes, then all three
 * self-refresh levels (WM1-3) when exactly one plane is enabled.  The
 * levels must be enabled in ascending order, so the function bails out as
 * soon as one level's computation fails its range check.
 */
static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of pipes with a valid WM0 */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB support 3 levels of watermark.
	 *
	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	/* convert the bitmask into the plane index for the SR computation */
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
4544
4545/**
4546 * intel_update_watermarks - update FIFO watermark values based on current modes
4547 *
4548 * Calculate watermark values for the various WM regs based on current mode
4549 * and plane configuration.
4550 *
4551 * There are several cases to deal with here:
4552 *   - normal (i.e. non-self-refresh)
4553 *   - self-refresh (SR) mode
4554 *   - lines are large relative to FIFO size (buffer can hold up to 2)
4555 *   - lines are small relative to FIFO size (buffer can hold more than 2
4556 *     lines), so need to account for TLB latency
4557 *
4558 *   The normal calculation is:
4559 *     watermark = dotclock * bytes per pixel * latency
4560 *   where latency is platform & configuration dependent (we assume pessimal
4561 *   values here).
4562 *
4563 *   The SR calculation is:
4564 *     watermark = (trunc(latency/line time)+1) * surface width *
4565 *       bytes per pixel
4566 *   where
4567 *     line time = htotal / dotclock
4568 *     surface width = hdisplay for normal plane and 64 for cursor
4569 *   and latency is assumed to be high, as above.
4570 *
4571 * The final value programmed to the register should always be rounded up,
4572 * and include an extra 2 entries to account for clock crossings.
4573 *
4574 * We don't use the sprite, so we can ignore that.  And on Crestline we have
4575 * to set the non-SR watermarks to 8.
4576 */
4577static void intel_update_watermarks(struct drm_device *dev)
4578{
4579	struct drm_i915_private *dev_priv = dev->dev_private;
4580
4581	if (dev_priv->display.update_wm)
4582		dev_priv->display.update_wm(dev);
4583}
4584
4585static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4586{
4587	return dev_priv->lvds_use_ssc && i915_panel_use_ssc
 
 
4588		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4589}
4590
4591/**
4592 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4593 * @crtc: CRTC structure
 
4594 *
4595 * A pipe may be connected to one or more outputs.  Based on the depth of the
4596 * attached framebuffer, choose a good color depth to use on the pipe.
4597 *
4598 * If possible, match the pipe depth to the fb depth.  In some cases, this
4599 * isn't ideal, because the connected output supports a lesser or restricted
4600 * set of depths.  Resolve that here:
4601 *    LVDS typically supports only 6bpc, so clamp down in that case
4602 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4603 *    Displays may support a restricted set as well, check EDID and clamp as
4604 *      appropriate.
 
4605 *
4606 * RETURNS:
4607 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4608 * true if they don't match).
4609 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	/* display_bpc starts unconstrained; each output clamps it down */
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* A3 power state distinguishes 24 vs 18-bit panels */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			/* 9-11bpc are promoted to 12, everything else snaps
			 * to 8; there is no 10bpc HDMI mode */
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	display_bpc = min(display_bpc, bpc);

	/* NOTE(review): the message prints the fb-derived bpc as "pipe bpc",
	 * but the value actually programmed below is display_bpc — confirm
	 * intent before relying on this debug output */
	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
			 bpc, display_bpc);

	/* pipe_bpp is bits per pixel, i.e. 3 components of display_bpc each */
	*pipe_bpp = display_bpc * 3;

	/* dithering is needed whenever the display can't show the fb's full
	 * precision, i.e. whenever the two values diverge */
	return display_bpc != bpc;
}
4721
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4722static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4723			      struct drm_display_mode *mode,
4724			      struct drm_display_mode *adjusted_mode,
4725			      int x, int y,
4726			      struct drm_framebuffer *old_fb)
4727{
4728	struct drm_device *dev = crtc->dev;
4729	struct drm_i915_private *dev_priv = dev->dev_private;
4730	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4731	int pipe = intel_crtc->pipe;
4732	int plane = intel_crtc->plane;
4733	int refclk, num_connectors = 0;
4734	intel_clock_t clock, reduced_clock;
4735	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4736	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
4737	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4738	struct drm_mode_config *mode_config = &dev->mode_config;
4739	struct intel_encoder *encoder;
4740	const intel_limit_t *limit;
4741	int ret;
4742	u32 temp;
4743	u32 lvds_sync = 0;
4744
4745	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4746		if (encoder->base.crtc != crtc)
4747			continue;
4748
4749		switch (encoder->type) {
4750		case INTEL_OUTPUT_LVDS:
4751			is_lvds = true;
4752			break;
4753		case INTEL_OUTPUT_SDVO:
4754		case INTEL_OUTPUT_HDMI:
4755			is_sdvo = true;
4756			if (encoder->needs_tv_clock)
4757				is_tv = true;
4758			break;
4759		case INTEL_OUTPUT_DVO:
4760			is_dvo = true;
4761			break;
4762		case INTEL_OUTPUT_TVOUT:
4763			is_tv = true;
4764			break;
4765		case INTEL_OUTPUT_ANALOG:
4766			is_crt = true;
4767			break;
4768		case INTEL_OUTPUT_DISPLAYPORT:
4769			is_dp = true;
4770			break;
4771		}
4772
4773		num_connectors++;
4774	}
4775
4776	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4777		refclk = dev_priv->lvds_ssc_freq * 1000;
4778		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4779			      refclk / 1000);
4780	} else if (!IS_GEN2(dev)) {
4781		refclk = 96000;
4782	} else {
4783		refclk = 48000;
4784	}
4785
4786	/*
4787	 * Returns a set of divisors for the desired target clock with the given
4788	 * refclk, or FALSE.  The returned values represent the clock equation:
4789	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4790	 */
4791	limit = intel_limit(crtc, refclk);
4792	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
 
4793	if (!ok) {
4794		DRM_ERROR("Couldn't find PLL settings for mode!\n");
4795		return -EINVAL;
4796	}
4797
4798	/* Ensure that the cursor is valid for the new mode before changing... */
4799	intel_crtc_update_cursor(crtc, true);
4800
4801	if (is_lvds && dev_priv->lvds_downclock_avail) {
 
 
 
 
 
 
4802		has_reduced_clock = limit->find_pll(limit, crtc,
4803						    dev_priv->lvds_downclock,
4804						    refclk,
 
4805						    &reduced_clock);
4806		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
4807			/*
4808			 * If the different P is found, it means that we can't
4809			 * switch the display clock by using the FP0/FP1.
4810			 * In such case we will disable the LVDS downclock
4811			 * feature.
4812			 */
4813			DRM_DEBUG_KMS("Different P is found for "
4814				      "LVDS clock/downclock\n");
4815			has_reduced_clock = 0;
4816		}
4817	}
4818	/* SDVO TV has fixed PLL values depend on its clock range,
4819	   this mirrors vbios setting. */
4820	if (is_sdvo && is_tv) {
4821		if (adjusted_mode->clock >= 100000
4822		    && adjusted_mode->clock < 140500) {
4823			clock.p1 = 2;
4824			clock.p2 = 10;
4825			clock.n = 3;
4826			clock.m1 = 16;
4827			clock.m2 = 8;
4828		} else if (adjusted_mode->clock >= 140500
4829			   && adjusted_mode->clock <= 200000) {
4830			clock.p1 = 1;
4831			clock.p2 = 10;
4832			clock.n = 6;
4833			clock.m1 = 12;
4834			clock.m2 = 8;
4835		}
4836	}
4837
4838	if (IS_PINEVIEW(dev)) {
4839		fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
4840		if (has_reduced_clock)
4841			fp2 = (1 << reduced_clock.n) << 16 |
4842				reduced_clock.m1 << 8 | reduced_clock.m2;
4843	} else {
4844		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4845		if (has_reduced_clock)
4846			fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4847				reduced_clock.m2;
4848	}
4849
4850	dpll = DPLL_VGA_MODE_DIS;
4851
4852	if (!IS_GEN2(dev)) {
4853		if (is_lvds)
4854			dpll |= DPLLB_MODE_LVDS;
4855		else
4856			dpll |= DPLLB_MODE_DAC_SERIAL;
4857		if (is_sdvo) {
4858			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4859			if (pixel_multiplier > 1) {
4860				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4861					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4862			}
4863			dpll |= DPLL_DVO_HIGH_SPEED;
4864		}
4865		if (is_dp)
4866			dpll |= DPLL_DVO_HIGH_SPEED;
4867
4868		/* compute bitmask from p1 value */
4869		if (IS_PINEVIEW(dev))
4870			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4871		else {
4872			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4873			if (IS_G4X(dev) && has_reduced_clock)
4874				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4875		}
4876		switch (clock.p2) {
4877		case 5:
4878			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4879			break;
4880		case 7:
4881			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4882			break;
4883		case 10:
4884			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4885			break;
4886		case 14:
4887			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4888			break;
4889		}
4890		if (INTEL_INFO(dev)->gen >= 4)
4891			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4892	} else {
4893		if (is_lvds) {
4894			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4895		} else {
4896			if (clock.p1 == 2)
4897				dpll |= PLL_P1_DIVIDE_BY_TWO;
4898			else
4899				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4900			if (clock.p2 == 4)
4901				dpll |= PLL_P2_DIVIDE_BY_4;
4902		}
4903	}
4904
4905	if (is_sdvo && is_tv)
4906		dpll |= PLL_REF_INPUT_TVCLKINBC;
4907	else if (is_tv)
4908		/* XXX: just matching BIOS for now */
4909		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
4910		dpll |= 3;
4911	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4912		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4913	else
4914		dpll |= PLL_REF_INPUT_DREFCLK;
 
 
4915
4916	/* setup pipeconf */
4917	pipeconf = I915_READ(PIPECONF(pipe));
4918
4919	/* Set up the display plane register */
4920	dspcntr = DISPPLANE_GAMMA_ENABLE;
4921
4922	/* Ironlake's plane is forced to pipe, bit 24 is to
4923	   enable color space conversion */
4924	if (pipe == 0)
4925		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4926	else
4927		dspcntr |= DISPPLANE_SEL_PIPE_B;
4928
4929	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4930		/* Enable pixel doubling when the dot clock is > 90% of the (display)
4931		 * core speed.
4932		 *
4933		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4934		 * pipe == 0 check?
4935		 */
4936		if (mode->clock >
4937		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4938			pipeconf |= PIPECONF_DOUBLE_WIDE;
4939		else
4940			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4941	}
4942
4943	dpll |= DPLL_VCO_ENABLE;
4944
4945	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4946	drm_mode_debug_printmodeline(mode);
4947
4948	I915_WRITE(FP0(pipe), fp);
4949	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4950
4951	POSTING_READ(DPLL(pipe));
4952	udelay(150);
4953
4954	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
4955	 * This is an exception to the general rule that mode_set doesn't turn
4956	 * things on.
4957	 */
4958	if (is_lvds) {
4959		temp = I915_READ(LVDS);
4960		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4961		if (pipe == 1) {
4962			temp |= LVDS_PIPEB_SELECT;
4963		} else {
4964			temp &= ~LVDS_PIPEB_SELECT;
4965		}
4966		/* set the corresponsding LVDS_BORDER bit */
4967		temp |= dev_priv->lvds_border_bits;
4968		/* Set the B0-B3 data pairs corresponding to whether we're going to
4969		 * set the DPLLs for dual-channel mode or not.
4970		 */
4971		if (clock.p2 == 7)
4972			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4973		else
4974			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4975
4976		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4977		 * appropriately here, but we need to look more thoroughly into how
4978		 * panels behave in the two modes.
4979		 */
4980		/* set the dithering flag on LVDS as needed */
4981		if (INTEL_INFO(dev)->gen >= 4) {
4982			if (dev_priv->lvds_dither)
4983				temp |= LVDS_ENABLE_DITHER;
4984			else
4985				temp &= ~LVDS_ENABLE_DITHER;
4986		}
4987		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4988			lvds_sync |= LVDS_HSYNC_POLARITY;
4989		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4990			lvds_sync |= LVDS_VSYNC_POLARITY;
4991		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
4992		    != lvds_sync) {
4993			char flags[2] = "-+";
4994			DRM_INFO("Changing LVDS panel from "
4995				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
4996				 flags[!(temp & LVDS_HSYNC_POLARITY)],
4997				 flags[!(temp & LVDS_VSYNC_POLARITY)],
4998				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
4999				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5000			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5001			temp |= lvds_sync;
5002		}
5003		I915_WRITE(LVDS, temp);
5004	}
5005
5006	if (is_dp) {
5007		intel_dp_set_m_n(crtc, mode, adjusted_mode);
5008	}
5009
5010	I915_WRITE(DPLL(pipe), dpll);
5011
5012	/* Wait for the clocks to stabilize. */
5013	POSTING_READ(DPLL(pipe));
5014	udelay(150);
5015
5016	if (INTEL_INFO(dev)->gen >= 4) {
5017		temp = 0;
5018		if (is_sdvo) {
5019			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5020			if (temp > 1)
5021				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5022			else
5023				temp = 0;
5024		}
5025		I915_WRITE(DPLL_MD(pipe), temp);
5026	} else {
5027		/* The pixel multiplier can only be updated once the
5028		 * DPLL is enabled and the clocks are stable.
5029		 *
5030		 * So write it again.
5031		 */
5032		I915_WRITE(DPLL(pipe), dpll);
5033	}
5034
5035	intel_crtc->lowfreq_avail = false;
5036	if (is_lvds && has_reduced_clock && i915_powersave) {
5037		I915_WRITE(FP1(pipe), fp2);
5038		intel_crtc->lowfreq_avail = true;
5039		if (HAS_PIPE_CXSR(dev)) {
5040			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5041			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5042		}
5043	} else {
5044		I915_WRITE(FP1(pipe), fp);
5045		if (HAS_PIPE_CXSR(dev)) {
5046			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5047			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5048		}
5049	}
5050
5051	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 
 
5052		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5053		/* the chip adds 2 halflines automatically */
5054		adjusted_mode->crtc_vdisplay -= 1;
5055		adjusted_mode->crtc_vtotal -= 1;
5056		adjusted_mode->crtc_vblank_start -= 1;
5057		adjusted_mode->crtc_vblank_end -= 1;
5058		adjusted_mode->crtc_vsync_end -= 1;
5059		adjusted_mode->crtc_vsync_start -= 1;
5060	} else
5061		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
 
 
 
 
 
5062
5063	I915_WRITE(HTOTAL(pipe),
5064		   (adjusted_mode->crtc_hdisplay - 1) |
5065		   ((adjusted_mode->crtc_htotal - 1) << 16));
5066	I915_WRITE(HBLANK(pipe),
5067		   (adjusted_mode->crtc_hblank_start - 1) |
5068		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
5069	I915_WRITE(HSYNC(pipe),
5070		   (adjusted_mode->crtc_hsync_start - 1) |
5071		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
5072
5073	I915_WRITE(VTOTAL(pipe),
5074		   (adjusted_mode->crtc_vdisplay - 1) |
5075		   ((adjusted_mode->crtc_vtotal - 1) << 16));
5076	I915_WRITE(VBLANK(pipe),
5077		   (adjusted_mode->crtc_vblank_start - 1) |
5078		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
5079	I915_WRITE(VSYNC(pipe),
5080		   (adjusted_mode->crtc_vsync_start - 1) |
5081		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
5082
5083	/* pipesrc and dspsize control the size that is scaled from,
5084	 * which should always be the user's requested size.
5085	 */
5086	I915_WRITE(DSPSIZE(plane),
5087		   ((mode->vdisplay - 1) << 16) |
5088		   (mode->hdisplay - 1));
5089	I915_WRITE(DSPPOS(plane), 0);
5090	I915_WRITE(PIPESRC(pipe),
5091		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5092
5093	I915_WRITE(PIPECONF(pipe), pipeconf);
5094	POSTING_READ(PIPECONF(pipe));
5095	intel_enable_pipe(dev_priv, pipe, false);
5096
5097	intel_wait_for_vblank(dev, pipe);
5098
5099	I915_WRITE(DSPCNTR(plane), dspcntr);
5100	POSTING_READ(DSPCNTR(plane));
5101	intel_enable_plane(dev_priv, plane, pipe);
5102
5103	ret = intel_pipe_set_base(crtc, x, y, old_fb);
5104
5105	intel_update_watermarks(dev);
5106
5107	return ret;
5108}
5109
/*
 * Program the PCH display reference clock (PCH_DREF_CONTROL) to match the
 * set of currently enabled outputs.
 *
 * Walks every enabled CRTC looking for LVDS/eDP encoders, then enables the
 * non-spread source and the SSC source, and — when a panel wants
 * spread-spectrum — SSC1 plus the CPU/PCH source-output selection.
 *
 * Each register write is followed by a posting read and a 200us delay so
 * the clock can settle before the next step; the ordering of these writes
 * is part of the hardware programming sequence and must not be changed.
 */
static void ironlake_update_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_encoder *has_edp_encoder = NULL;
	u32 temp;
	bool has_lvds = false;
	/* NOTE(review): has_lvds is collected below but never consumed in
	 * this function — presumably kept for a future refclk decision;
	 * confirm before removing. */

	/* We need to take the global config into account */
	list_for_each_entry(crtc, &mode_config->crtc_list, head) {
		if (!crtc->enabled)
			continue;

		list_for_each_entry(encoder, &mode_config->encoder_list,
				    base.head) {
			if (encoder->base.crtc != crtc)
				continue;

			switch (encoder->type) {
			case INTEL_OUTPUT_LVDS:
				has_lvds = true;
				/* fall through — NOTE(review): an LVDS
				 * encoder is also recorded as the "eDP"
				 * encoder here; confirm this is intended. */
			case INTEL_OUTPUT_EDP:
				has_edp_encoder = encoder;
				break;
			}
		}
	}

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
	temp |= DREF_NONSPREAD_SOURCE_ENABLE;
	temp &= ~DREF_SSC_SOURCE_MASK;
	temp |= DREF_SSC_SOURCE_ENABLE;
	I915_WRITE(PCH_DREF_CONTROL, temp);

	POSTING_READ(PCH_DREF_CONTROL);
	udelay(200);

	if (has_edp_encoder) {
		if (intel_panel_use_ssc(dev_priv)) {
			/* Turn on the SSC1 modulator first, then let it
			 * settle before routing it to the output below. */
			temp |= DREF_SSC1_ENABLE;
			I915_WRITE(PCH_DREF_CONTROL, temp);

			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			if (intel_panel_use_ssc(dev_priv))
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			/* Enable SSC on PCH eDP if needed */
			if (intel_panel_use_ssc(dev_priv)) {
				DRM_ERROR("enabling SSC on PCH\n");
				temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
			}
		}
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
5184
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * ironlake_crtc_mode_set - full mode set for Ironlake/PCH-split platforms.
 *
 * Classifies the encoders on this CRTC, computes PLL divisors and FDI link
 * M/N values, then programs the PCH DPLL, LVDS port, pipe timings and the
 * display plane.  The register writes below form a hardware programming
 * sequence; their order (including the posting reads and udelays) matters.
 *
 * Returns 0 on success or a negative errno (e.g. -EINVAL when no PLL
 * divisors fit the requested mode).
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct intel_encoder *has_edp_encoder = NULL;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	struct fdi_m_n m_n = {0};
	u32 temp;
	u32 lvds_sync = 0;
	int target_clock, pixel_multiplier, lane, link_bw, factor;
	unsigned int pipe_bpp;
	bool dither;

	/* Classify every encoder driven by this CRTC; the output type
	 * determines refclk choice, PLL mode and LVDS handling below. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_edp_encoder = encoder;
			break;
		}

		num_connectors++;
	}

	/* Reference clock: panel SSC clock for a lone LVDS output,
	 * otherwise 120MHz (96MHz for CPU-attached eDP). */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else {
		refclk = 96000;
		if (!has_edp_encoder ||
		    intel_encoder_is_pch_edp(&has_edp_encoder->base))
			refclk = 120000; /* 120Mhz refclk */
	}

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &reduced_clock);
		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
			/*
			 * If the different P is found, it means that we can't
			 * switch the display clock by using the FP0/FP1.
			 * In such case we will disable the LVDS downclock
			 * feature.
			 */
			DRM_DEBUG_KMS("Different P is found for "
				      "LVDS clock/downclock\n");
			has_reduced_clock = 0;
		}
	}
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder,
				      &lane, &link_bw);
	} else {
		/* [e]DP over FDI requires target mode clock
		   instead of link clock */
		/* NOTE(review): has_edp_encoder may be NULL here; this relies
		 * on intel_encoder_is_pch_edp() tolerating &NULL->base (base
		 * being the first member) — confirm. */
		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
			target_clock = mode->clock;
		else
			target_clock = adjusted_mode->clock;

		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}

	/* determine panel color depth */
	temp = I915_READ(PIPECONF(pipe));
	temp &= ~PIPE_BPC_MASK;
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
	switch (pipe_bpp) {
	case 18:
		temp |= PIPE_6BPC;
		break;
	case 24:
		temp |= PIPE_8BPC;
		break;
	case 30:
		temp |= PIPE_10BPC;
		break;
	case 36:
		temp |= PIPE_12BPC;
		break;
	default:
		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
			pipe_bpp);
		temp |= PIPE_8BPC;
		pipe_bpp = 24;
		break;
	}

	intel_crtc->bpp = pipe_bpp;
	I915_WRITE(PIPECONF(pipe), temp);

	if (!lane) {
		/*
		 * Account for spread spectrum to avoid
		 * oversubscribing the link. Max center spread
		 * is 2.5%; use 5% for safety's sake.
		 */
		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
		lane = bps / (link_bw * 8) + 1;
	}

	intel_crtc->fdi_lanes = lane;

	if (pixel_multiplier > 1)
		link_bw *= pixel_multiplier;
	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
			     &m_n);

	ironlake_update_pch_refclk(dev);

	/* FP0/FP1 divisor words: n/m1/m2 packed into one register value. */
	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	if (has_reduced_clock)
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
			reduced_clock.m2;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
			factor = 25;
	} else if (is_sdvo && is_tv)
		factor = 20;

	if (clock.m < factor * clock.n)
		fp |= FP_CB_TUNE;

	/* Build the DPLL control word: mode, pixel multiplier, P1/P2
	 * post-dividers and reference-input selection. */
	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	/* NOTE(review): same possibly-NULL has_edp_encoder dereference
	 * pattern as above — confirm intel_encoder_is_pch_edp() is safe. */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* PCH eDP needs FDI, but CPU eDP does not */
	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		/* Write divisors and the DPLL word with VCO still off;
		 * the PLL is only enabled further down. */
		I915_WRITE(PCH_FP0(pipe), fp);
		I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

		POSTING_READ(PCH_DPLL(pipe));
		udelay(150);
	}

	/* enable transcoder DPLL */
	if (HAS_PCH_CPT(dev)) {
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
			break;
		case 1:
			temp |=	TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
			break;
		case 2:
			/* FIXME: manage transcoder PLLs? */
			temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
			break;
		default:
			BUG();
		}
		I915_WRITE(PCH_DPLL_SEL, temp);

		POSTING_READ(PCH_DPLL_SEL);
		udelay(150);
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(PCH_LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			if (HAS_PCH_CPT(dev))
				temp |= PORT_TRANS_B_SEL_CPT;
			else
				temp |= LVDS_PIPEB_SELECT;
		} else {
			if (HAS_PCH_CPT(dev))
				temp &= ~PORT_TRANS_SEL_MASK;
			else
				temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			/* flags[] is used only as a 2-entry lookup table;
			 * the missing NUL terminator is deliberate. */
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(PCH_LVDS, temp);
	}

	pipeconf &= ~PIPECONF_DITHER_EN;
	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
	if ((is_lvds && dev_priv->lvds_dither) || dither) {
		pipeconf |= PIPECONF_DITHER_EN;
		pipeconf |= PIPECONF_DITHER_TYPE_ST1;
	}
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* For non-DP output, clear any trans DP clock recovery setting.*/
		I915_WRITE(TRANSDATA_M1(pipe), 0);
		I915_WRITE(TRANSDATA_N1(pipe), 0);
		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
	}

	if (!has_edp_encoder ||
	    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		I915_WRITE(PCH_DPLL(pipe), dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(PCH_DPLL(pipe));
		udelay(150);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(PCH_DPLL(pipe), dpll);
	}

	intel_crtc->lowfreq_avail = false;
	if (is_lvds && has_reduced_clock && i915_powersave) {
		I915_WRITE(PCH_FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
		if (HAS_PIPE_CXSR(dev)) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		}
	} else {
		I915_WRITE(PCH_FP1(pipe), fp);
		if (HAS_PIPE_CXSR(dev)) {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vdisplay -= 1;
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_start -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		adjusted_mode->crtc_vsync_end -= 1;
		adjusted_mode->crtc_vsync_start -= 1;
	} else
		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

	/* Pipe timing registers take (value - 1), low/high halves packed. */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_vblank(dev, pipe);

	if (IS_GEN5(dev)) {
		/* enable address swizzle for tiling buffer */
		temp = I915_READ(DISP_ARB_CTL);
		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
	}

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
5655
5656static int intel_crtc_mode_set(struct drm_crtc *crtc,
5657			       struct drm_display_mode *mode,
5658			       struct drm_display_mode *adjusted_mode,
5659			       int x, int y,
5660			       struct drm_framebuffer *old_fb)
5661{
5662	struct drm_device *dev = crtc->dev;
5663	struct drm_i915_private *dev_priv = dev->dev_private;
5664	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5665	int pipe = intel_crtc->pipe;
5666	int ret;
5667
5668	drm_vblank_pre_modeset(dev, pipe);
5669
5670	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5671					      x, y, old_fb);
5672
5673	drm_vblank_post_modeset(dev, pipe);
5674
5675	intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
 
 
 
5676
5677	return ret;
5678}
5679
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5680/** Loads the palette/gamma unit for the CRTC with the prepared values */
5681void intel_crtc_load_lut(struct drm_crtc *crtc)
5682{
5683	struct drm_device *dev = crtc->dev;
5684	struct drm_i915_private *dev_priv = dev->dev_private;
5685	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5686	int palreg = PALETTE(intel_crtc->pipe);
5687	int i;
5688
5689	/* The clocks have to be on to load the palette. */
5690	if (!crtc->enabled)
5691		return;
5692
5693	/* use legacy palette for Ironlake */
5694	if (HAS_PCH_SPLIT(dev))
5695		palreg = LGC_PALETTE(intel_crtc->pipe);
5696
5697	for (i = 0; i < 256; i++) {
5698		I915_WRITE(palreg + 4 * i,
5699			   (intel_crtc->lut_r[i] << 16) |
5700			   (intel_crtc->lut_g[i] << 8) |
5701			   intel_crtc->lut_b[i]);
5702	}
5703}
5704
5705static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
5706{
5707	struct drm_device *dev = crtc->dev;
5708	struct drm_i915_private *dev_priv = dev->dev_private;
5709	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5710	bool visible = base != 0;
5711	u32 cntl;
5712
5713	if (intel_crtc->cursor_visible == visible)
5714		return;
5715
5716	cntl = I915_READ(_CURACNTR);
5717	if (visible) {
5718		/* On these chipsets we can only modify the base whilst
5719		 * the cursor is disabled.
5720		 */
5721		I915_WRITE(_CURABASE, base);
5722
5723		cntl &= ~(CURSOR_FORMAT_MASK);
5724		/* XXX width must be 64, stride 256 => 0x00 << 28 */
5725		cntl |= CURSOR_ENABLE |
5726			CURSOR_GAMMA_ENABLE |
5727			CURSOR_FORMAT_ARGB;
5728	} else
5729		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
5730	I915_WRITE(_CURACNTR, cntl);
5731
5732	intel_crtc->cursor_visible = visible;
5733}
5734
5735static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
5736{
5737	struct drm_device *dev = crtc->dev;
5738	struct drm_i915_private *dev_priv = dev->dev_private;
5739	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5740	int pipe = intel_crtc->pipe;
5741	bool visible = base != 0;
5742
5743	if (intel_crtc->cursor_visible != visible) {
5744		uint32_t cntl = I915_READ(CURCNTR(pipe));
5745		if (base) {
5746			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
5747			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5748			cntl |= pipe << 28; /* Connect to correct pipe */
5749		} else {
5750			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5751			cntl |= CURSOR_MODE_DISABLE;
5752		}
5753		I915_WRITE(CURCNTR(pipe), cntl);
5754
5755		intel_crtc->cursor_visible = visible;
5756	}
5757	/* and commit changes on next vblank */
5758	I915_WRITE(CURBASE(pipe), base);
5759}
5760
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5761/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
5762static void intel_crtc_update_cursor(struct drm_crtc *crtc,
5763				     bool on)
5764{
5765	struct drm_device *dev = crtc->dev;
5766	struct drm_i915_private *dev_priv = dev->dev_private;
5767	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5768	int pipe = intel_crtc->pipe;
5769	int x = intel_crtc->cursor_x;
5770	int y = intel_crtc->cursor_y;
5771	u32 base, pos;
5772	bool visible;
5773
5774	pos = 0;
5775
5776	if (on && crtc->enabled && crtc->fb) {
5777		base = intel_crtc->cursor_addr;
5778		if (x > (int) crtc->fb->width)
5779			base = 0;
5780
5781		if (y > (int) crtc->fb->height)
5782			base = 0;
5783	} else
5784		base = 0;
5785
5786	if (x < 0) {
5787		if (x + intel_crtc->cursor_width < 0)
5788			base = 0;
5789
5790		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
5791		x = -x;
5792	}
5793	pos |= x << CURSOR_X_SHIFT;
5794
5795	if (y < 0) {
5796		if (y + intel_crtc->cursor_height < 0)
5797			base = 0;
5798
5799		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
5800		y = -y;
5801	}
5802	pos |= y << CURSOR_Y_SHIFT;
5803
5804	visible = base != 0;
5805	if (!visible && !intel_crtc->cursor_visible)
5806		return;
5807
5808	I915_WRITE(CURPOS(pipe), pos);
5809	if (IS_845G(dev) || IS_I865G(dev))
5810		i845_update_cursor(crtc, base);
5811	else
5812		i9xx_update_cursor(crtc, base);
5813
5814	if (visible)
5815		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
 
 
5816}
5817
5818static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5819				 struct drm_file *file,
5820				 uint32_t handle,
5821				 uint32_t width, uint32_t height)
5822{
5823	struct drm_device *dev = crtc->dev;
5824	struct drm_i915_private *dev_priv = dev->dev_private;
5825	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5826	struct drm_i915_gem_object *obj;
5827	uint32_t addr;
5828	int ret;
5829
5830	DRM_DEBUG_KMS("\n");
5831
5832	/* if we want to turn off the cursor ignore width and height */
5833	if (!handle) {
5834		DRM_DEBUG_KMS("cursor off\n");
5835		addr = 0;
5836		obj = NULL;
5837		mutex_lock(&dev->struct_mutex);
5838		goto finish;
5839	}
5840
5841	/* Currently we only support 64x64 cursors */
5842	if (width != 64 || height != 64) {
5843		DRM_ERROR("we currently only support 64x64 cursors\n");
5844		return -EINVAL;
5845	}
5846
5847	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
5848	if (&obj->base == NULL)
5849		return -ENOENT;
5850
5851	if (obj->base.size < width * height * 4) {
5852		DRM_ERROR("buffer is to small\n");
5853		ret = -ENOMEM;
5854		goto fail;
5855	}
5856
5857	/* we only need to pin inside GTT if cursor is non-phy */
5858	mutex_lock(&dev->struct_mutex);
5859	if (!dev_priv->info->cursor_needs_physical) {
5860		if (obj->tiling_mode) {
5861			DRM_ERROR("cursor cannot be tiled\n");
5862			ret = -EINVAL;
5863			goto fail_locked;
5864		}
5865
5866		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
5867		if (ret) {
5868			DRM_ERROR("failed to move cursor bo into the GTT\n");
5869			goto fail_locked;
5870		}
5871
5872		ret = i915_gem_object_put_fence(obj);
5873		if (ret) {
5874			DRM_ERROR("failed to release fence for cursor");
5875			goto fail_unpin;
5876		}
5877
5878		addr = obj->gtt_offset;
5879	} else {
5880		int align = IS_I830(dev) ? 16 * 1024 : 256;
5881		ret = i915_gem_attach_phys_object(dev, obj,
5882						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
5883						  align);
5884		if (ret) {
5885			DRM_ERROR("failed to attach phys object\n");
5886			goto fail_locked;
5887		}
5888		addr = obj->phys_obj->handle->busaddr;
5889	}
5890
5891	if (IS_GEN2(dev))
5892		I915_WRITE(CURSIZE, (height << 12) | width);
5893
5894 finish:
5895	if (intel_crtc->cursor_bo) {
5896		if (dev_priv->info->cursor_needs_physical) {
5897			if (intel_crtc->cursor_bo != obj)
5898				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
5899		} else
5900			i915_gem_object_unpin(intel_crtc->cursor_bo);
5901		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
5902	}
5903
5904	mutex_unlock(&dev->struct_mutex);
5905
5906	intel_crtc->cursor_addr = addr;
5907	intel_crtc->cursor_bo = obj;
5908	intel_crtc->cursor_width = width;
5909	intel_crtc->cursor_height = height;
5910
5911	intel_crtc_update_cursor(crtc, true);
5912
5913	return 0;
5914fail_unpin:
5915	i915_gem_object_unpin(obj);
5916fail_locked:
5917	mutex_unlock(&dev->struct_mutex);
5918fail:
5919	drm_gem_object_unreference_unlocked(&obj->base);
5920	return ret;
5921}
5922
5923static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
5924{
5925	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5926
5927	intel_crtc->cursor_x = x;
5928	intel_crtc->cursor_y = y;
5929
5930	intel_crtc_update_cursor(crtc, true);
5931
5932	return 0;
5933}
5934
5935/** Sets the color ramps on behalf of RandR */
5936void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5937				 u16 blue, int regno)
5938{
5939	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5940
5941	intel_crtc->lut_r[regno] = red >> 8;
5942	intel_crtc->lut_g[regno] = green >> 8;
5943	intel_crtc->lut_b[regno] = blue >> 8;
5944}
5945
5946void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
5947			     u16 *blue, int regno)
5948{
5949	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5950
5951	*red = intel_crtc->lut_r[regno] << 8;
5952	*green = intel_crtc->lut_g[regno] << 8;
5953	*blue = intel_crtc->lut_b[regno] << 8;
5954}
5955
5956static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
5957				 u16 *blue, uint32_t start, uint32_t size)
5958{
5959	int end = (start + size > 256) ? 256 : start + size, i;
5960	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5961
5962	for (i = start; i < end; i++) {
5963		intel_crtc->lut_r[i] = red[i] >> 8;
5964		intel_crtc->lut_g[i] = green[i] >> 8;
5965		intel_crtc->lut_b[i] = blue[i] >> 8;
5966	}
5967
5968	intel_crtc_load_lut(crtc);
5969}
5970
5971/**
5972 * Get a pipe with a simple mode set on it for doing load-based monitor
5973 * detection.
5974 *
5975 * It will be up to the load-detect code to adjust the pipe as appropriate for
5976 * its requirements.  The pipe will be connected to no other encoders.
5977 *
5978 * Currently this code will only succeed if there is a pipe with no encoders
5979 * configured for it.  In the future, it could choose to temporarily disable
5980 * some outputs to free up a pipe for its use.
5981 *
5982 * \return crtc, or NULL if no pipes are available.
5983 */
5984
/* VESA 640x480x72Hz mode to set on the pipe when no explicit mode is
 * supplied for load-based monitor detection. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
5990
5991static struct drm_framebuffer *
5992intel_framebuffer_create(struct drm_device *dev,
5993			 struct drm_mode_fb_cmd *mode_cmd,
5994			 struct drm_i915_gem_object *obj)
5995{
5996	struct intel_framebuffer *intel_fb;
5997	int ret;
5998
5999	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6000	if (!intel_fb) {
6001		drm_gem_object_unreference_unlocked(&obj->base);
6002		return ERR_PTR(-ENOMEM);
6003	}
6004
6005	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6006	if (ret) {
6007		drm_gem_object_unreference_unlocked(&obj->base);
6008		kfree(intel_fb);
6009		return ERR_PTR(ret);
6010	}
6011
6012	return &intel_fb->base;
6013}
6014
6015static u32
6016intel_framebuffer_pitch_for_width(int width, int bpp)
6017{
6018	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6019	return ALIGN(pitch, 64);
6020}
6021
6022static u32
6023intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6024{
6025	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6026	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6027}
6028
6029static struct drm_framebuffer *
6030intel_framebuffer_create_for_mode(struct drm_device *dev,
6031				  struct drm_display_mode *mode,
6032				  int depth, int bpp)
6033{
6034	struct drm_i915_gem_object *obj;
6035	struct drm_mode_fb_cmd mode_cmd;
6036
6037	obj = i915_gem_alloc_object(dev,
6038				    intel_framebuffer_size_for_mode(mode, bpp));
6039	if (obj == NULL)
6040		return ERR_PTR(-ENOMEM);
6041
6042	mode_cmd.width = mode->hdisplay;
6043	mode_cmd.height = mode->vdisplay;
6044	mode_cmd.depth = depth;
6045	mode_cmd.bpp = bpp;
6046	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
6047
6048	return intel_framebuffer_create(dev, &mode_cmd, obj);
6049}
6050
6051static struct drm_framebuffer *
6052mode_fits_in_fbdev(struct drm_device *dev,
6053		   struct drm_display_mode *mode)
6054{
6055	struct drm_i915_private *dev_priv = dev->dev_private;
6056	struct drm_i915_gem_object *obj;
6057	struct drm_framebuffer *fb;
6058
6059	if (dev_priv->fbdev == NULL)
6060		return NULL;
6061
6062	obj = dev_priv->fbdev->ifb.obj;
6063	if (obj == NULL)
6064		return NULL;
6065
6066	fb = &dev_priv->fbdev->ifb.base;
6067	if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
6068							  fb->bits_per_pixel))
6069		return NULL;
6070
6071	if (obj->base.size < mode->vdisplay * fb->pitch)
6072		return NULL;
6073
6074	return fb;
6075}
6076
/*
 * intel_get_load_detect_pipe - acquire a running pipe for load detection
 * @intel_encoder: encoder to drive during detection
 * @connector: connector being probed
 * @mode: mode to program, or NULL to use the default 640x480 detect mode
 * @old: out-param recording state for intel_release_load_detect_pipe()
 *
 * Returns true if the encoder ends up attached to a powered-up CRTC,
 * false if no CRTC or framebuffer could be obtained.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;	/* bit index into encoder->possible_crtcs */

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	/* NOTE(review): if the temporary fb allocation failed, both crtc->fb
	 * and old->release_fb hold an ERR_PTR at this point; we bail below
	 * before either is dereferenced. */
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
6192
/*
 * intel_release_load_detect_pipe - undo intel_get_load_detect_pipe()
 * @intel_encoder: encoder used for detection
 * @connector: connector that was probed
 * @old: state recorded by intel_get_load_detect_pipe()
 *
 * If the pipe was borrowed temporarily, detach the connector, turn the
 * now-unused functions back off and free any detection framebuffer.
 * Otherwise just restore the original DPMS state of the crtc/encoder.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		connector->encoder = NULL;
		/* Shuts down the crtc/encoder we temporarily enabled. */
		drm_helper_disable_unused_functions(dev);

		/* Free the framebuffer created just for detection, if any. */
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
6223
/* Returns the clock of the currently programmed mode of the given pipe.
 *
 * Reconstructs the dot clock by decoding the DPLL and FP divisor
 * registers and feeding them, together with the assumed reference
 * clock, through intel_clock().  Returns 0 if the DPLL mode field is
 * unrecognised.
 */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* FPA1 selects the alternate (FP1) divisor register. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N one-hot, hence the ffs(). */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: infer LVDS from the port-enable bit on pipe B. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
6310
6311/** Returns the currently programmed mode of the given pipe. */
6312struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6313					     struct drm_crtc *crtc)
6314{
6315	struct drm_i915_private *dev_priv = dev->dev_private;
6316	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6317	int pipe = intel_crtc->pipe;
6318	struct drm_display_mode *mode;
6319	int htot = I915_READ(HTOTAL(pipe));
6320	int hsync = I915_READ(HSYNC(pipe));
6321	int vtot = I915_READ(VTOTAL(pipe));
6322	int vsync = I915_READ(VSYNC(pipe));
6323
6324	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6325	if (!mode)
6326		return NULL;
6327
6328	mode->clock = intel_crtc_clock_get(dev, crtc);
6329	mode->hdisplay = (htot & 0xffff) + 1;
6330	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
6331	mode->hsync_start = (hsync & 0xffff) + 1;
6332	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
6333	mode->vdisplay = (vtot & 0xffff) + 1;
6334	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
6335	mode->vsync_start = (vsync & 0xffff) + 1;
6336	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
6337
6338	drm_mode_set_name(mode);
6339	drm_mode_set_crtcinfo(mode, 0);
6340
6341	return mode;
6342}
6343
6344#define GPU_IDLE_TIMEOUT 500 /* ms */
6345
6346/* When this timer fires, we've been idle for awhile */
6347static void intel_gpu_idle_timer(unsigned long arg)
6348{
6349	struct drm_device *dev = (struct drm_device *)arg;
6350	drm_i915_private_t *dev_priv = dev->dev_private;
6351
6352	if (!list_empty(&dev_priv->mm.active_list)) {
6353		/* Still processing requests, so just re-arm the timer. */
6354		mod_timer(&dev_priv->idle_timer, jiffies +
6355			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
6356		return;
6357	}
6358
6359	dev_priv->busy = false;
6360	queue_work(dev_priv->wq, &dev_priv->idle_work);
6361}
6362
6363#define CRTC_IDLE_TIMEOUT 1000 /* ms */
6364
/*
 * Per-CRTC idle timer: if the scanout buffer is no longer being used by
 * the GPU, mark the CRTC idle and schedule the idle work (which may
 * downclock the pipe); otherwise re-arm and check again later.
 */
static void intel_crtc_idle_timer(unsigned long arg)
{
	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
	struct drm_crtc *crtc = &intel_crtc->base;
	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
	struct intel_framebuffer *intel_fb;

	/* NOTE(review): the NULL check below only guards a NULL crtc->fb if
	 * 'base' is the first member of struct intel_framebuffer (so the
	 * container_of in to_intel_framebuffer is a no-op) — confirm. */
	intel_fb = to_intel_framebuffer(crtc->fb);
	if (intel_fb && intel_fb->obj->active) {
		/* The framebuffer is still being accessed by the GPU. */
		mod_timer(&intel_crtc->idle_timer, jiffies +
			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
		return;
	}

	intel_crtc->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
6383
/*
 * Switch an LVDS pipe back to its full (non-downclocked) dot clock and
 * arm the per-CRTC idle timer that will downclock it again after
 * CRTC_IDLE_TIMEOUT of inactivity.  No-op on PCH-split hardware or when
 * no LVDS downclock is available.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);

		/* Clearing FPA1 selects the full-rate FP0 divisors; wait a
		 * vblank for the change to take effect before verifying. */
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

	/* Schedule downclock */
	mod_timer(&intel_crtc->idle_timer, jiffies +
		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
6423
/*
 * Drop an idle LVDS pipe to its reduced (downclocked) dot clock by
 * selecting the alternate FP1 divisors.  Invoked from the idle work;
 * no-op on PCH-split hardware or when no LVDS downclock is available.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll = I915_READ(dpll_reg);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
			   PANEL_UNLOCK_REGS);

		/* Setting FPA1 selects the reduced-rate FP1 divisors. */
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

}
6462
6463/**
6464 * intel_idle_update - adjust clocks for idleness
6465 * @work: work struct
6466 *
6467 * Either the GPU or display (or both) went idle.  Check the busy status
6468 * here and adjust the CRTC and GPU clocks as necessary.
6469 */
6470static void intel_idle_update(struct work_struct *work)
6471{
6472	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
6473						    idle_work);
6474	struct drm_device *dev = dev_priv->dev;
6475	struct drm_crtc *crtc;
6476	struct intel_crtc *intel_crtc;
6477
6478	if (!i915_powersave)
6479		return;
6480
6481	mutex_lock(&dev->struct_mutex);
6482
6483	i915_update_gfx_val(dev_priv);
6484
6485	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6486		/* Skip inactive CRTCs */
6487		if (!crtc->fb)
6488			continue;
6489
6490		intel_crtc = to_intel_crtc(crtc);
6491		if (!intel_crtc->busy)
6492			intel_decrease_pllclock(crtc);
6493	}
6494
6495
6496	mutex_unlock(&dev->struct_mutex);
6497}
6498
6499/**
6500 * intel_mark_busy - mark the GPU and possibly the display busy
6501 * @dev: drm device
6502 * @obj: object we're operating on
6503 *
6504 * Callers can use this function to indicate that the GPU is busy processing
6505 * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
6506 * buffer), we'll also mark the display as busy, so we know to increase its
6507 * clock frequency.
6508 */
6509void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
6510{
6511	drm_i915_private_t *dev_priv = dev->dev_private;
6512	struct drm_crtc *crtc = NULL;
6513	struct intel_framebuffer *intel_fb;
6514	struct intel_crtc *intel_crtc;
6515
6516	if (!drm_core_check_feature(dev, DRIVER_MODESET))
6517		return;
6518
6519	if (!dev_priv->busy)
 
6520		dev_priv->busy = true;
6521	else
6522		mod_timer(&dev_priv->idle_timer, jiffies +
6523			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
6524
 
 
 
6525	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6526		if (!crtc->fb)
6527			continue;
6528
6529		intel_crtc = to_intel_crtc(crtc);
6530		intel_fb = to_intel_framebuffer(crtc->fb);
6531		if (intel_fb->obj == obj) {
6532			if (!intel_crtc->busy) {
6533				/* Non-busy -> busy, upclock */
6534				intel_increase_pllclock(crtc);
6535				intel_crtc->busy = true;
6536			} else {
6537				/* Busy -> busy, put off timer */
6538				mod_timer(&intel_crtc->idle_timer, jiffies +
6539					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
6540			}
6541		}
6542	}
6543}
6544
6545static void intel_crtc_destroy(struct drm_crtc *crtc)
6546{
6547	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6548	struct drm_device *dev = crtc->dev;
6549	struct intel_unpin_work *work;
6550	unsigned long flags;
6551
6552	spin_lock_irqsave(&dev->event_lock, flags);
6553	work = intel_crtc->unpin_work;
6554	intel_crtc->unpin_work = NULL;
6555	spin_unlock_irqrestore(&dev->event_lock, flags);
6556
6557	if (work) {
6558		cancel_work_sync(&work->work);
6559		kfree(work);
6560	}
6561
6562	drm_crtc_cleanup(crtc);
6563
6564	kfree(intel_crtc);
6565}
6566
/*
 * Deferred completion for a page flip: unpin the old framebuffer, drop
 * the references taken when the flip was queued, give FBC a chance to
 * re-evaluate, and free the work item.  Runs from the workqueue so it
 * can take struct_mutex.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);

	mutex_lock(&work->dev->struct_mutex);
	i915_gem_object_unpin(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(work->dev);
	mutex_unlock(&work->dev->struct_mutex);
	kfree(work);
}
6581
/*
 * Common vblank-time completion for a queued page flip on @crtc:
 * delivers the userspace vblank event (correcting the timestamp when
 * we ran ahead of the vblank bookkeeping), releases the vblank
 * reference, clears the pending-flip bit on the old object and hands
 * the unpin/unreference work off to the workqueue.  Takes
 * dev->event_lock itself (irqsave).
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;
	struct drm_pending_vblank_event *e;
	struct timeval tnow, tvbl;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	do_gettimeofday(&tnow);

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	if (work == NULL || !work->pending) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	intel_crtc->unpin_work = NULL;

	if (work->event) {
		e = work->event;
		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);

		/* Called before vblank count and timestamps have
		 * been updated for the vblank interval of flip
		 * completion? Need to increment vblank count and
		 * add one videorefresh duration to returned timestamp
		 * to account for this. We assume this happened if we
		 * get called over 0.9 frame durations after the last
		 * timestamped vblank.
		 *
		 * This calculation can not be used with vrefresh rates
		 * below 5Hz (10Hz to be on the safe side) without
		 * promoting to 64 integers.
		 */
		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
		    9 * crtc->framedur_ns) {
			e->event.sequence++;
			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
					     crtc->framedur_ns);
		}

		e->event.tv_sec = tvbl.tv_sec;
		e->event.tv_usec = tvbl.tv_usec;

		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	obj = work->old_fb_obj;

	/* Clear this plane's pending-flip bit; wake anyone waiting for
	 * the old buffer once no plane still has a flip pending on it. */
	atomic_clear_mask(1 << intel_crtc->plane,
			  &obj->pending_flip.counter);
	if (atomic_read(&obj->pending_flip) == 0)
		wake_up(&dev_priv->pending_flip_queue);

	schedule_work(&work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
6654
6655void intel_finish_page_flip(struct drm_device *dev, int pipe)
6656{
6657	drm_i915_private_t *dev_priv = dev->dev_private;
6658	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
6659
6660	do_intel_finish_page_flip(dev, crtc);
6661}
6662
6663void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
6664{
6665	drm_i915_private_t *dev_priv = dev->dev_private;
6666	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
6667
6668	do_intel_finish_page_flip(dev, crtc);
6669}
6670
6671void intel_prepare_page_flip(struct drm_device *dev, int plane)
6672{
6673	drm_i915_private_t *dev_priv = dev->dev_private;
6674	struct intel_crtc *intel_crtc =
6675		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
6676	unsigned long flags;
6677
6678	spin_lock_irqsave(&dev->event_lock, flags);
6679	if (intel_crtc->unpin_work) {
6680		if ((++intel_crtc->unpin_work->pending) > 1)
6681			DRM_ERROR("Prepared flip multiple times\n");
6682	} else {
6683		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
6684	}
6685	spin_unlock_irqrestore(&dev->event_lock, flags);
6686}
6687
6688static int intel_gen2_queue_flip(struct drm_device *dev,
6689				 struct drm_crtc *crtc,
6690				 struct drm_framebuffer *fb,
6691				 struct drm_i915_gem_object *obj)
6692{
6693	struct drm_i915_private *dev_priv = dev->dev_private;
6694	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6695	unsigned long offset;
6696	u32 flip_mask;
 
6697	int ret;
6698
6699	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6700	if (ret)
6701		goto out;
6702
6703	/* Offset into the new buffer for cases of shared fbs between CRTCs */
6704	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6705
6706	ret = BEGIN_LP_RING(6);
6707	if (ret)
6708		goto out;
6709
6710	/* Can't queue multiple flips, so wait for the previous
6711	 * one to finish before executing the next.
6712	 */
6713	if (intel_crtc->plane)
6714		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6715	else
6716		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6717	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
6718	OUT_RING(MI_NOOP);
6719	OUT_RING(MI_DISPLAY_FLIP |
6720		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6721	OUT_RING(fb->pitch);
6722	OUT_RING(obj->gtt_offset + offset);
6723	OUT_RING(MI_NOOP);
6724	ADVANCE_LP_RING();
6725out:
 
 
 
 
6726	return ret;
6727}
6728
6729static int intel_gen3_queue_flip(struct drm_device *dev,
6730				 struct drm_crtc *crtc,
6731				 struct drm_framebuffer *fb,
6732				 struct drm_i915_gem_object *obj)
6733{
6734	struct drm_i915_private *dev_priv = dev->dev_private;
6735	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6736	unsigned long offset;
6737	u32 flip_mask;
 
6738	int ret;
6739
6740	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6741	if (ret)
6742		goto out;
6743
6744	/* Offset into the new buffer for cases of shared fbs between CRTCs */
6745	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6746
6747	ret = BEGIN_LP_RING(6);
6748	if (ret)
6749		goto out;
6750
6751	if (intel_crtc->plane)
6752		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6753	else
6754		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6755	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
6756	OUT_RING(MI_NOOP);
6757	OUT_RING(MI_DISPLAY_FLIP_I915 |
6758		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6759	OUT_RING(fb->pitch);
6760	OUT_RING(obj->gtt_offset + offset);
6761	OUT_RING(MI_NOOP);
6762
6763	ADVANCE_LP_RING();
6764out:
 
 
 
 
6765	return ret;
6766}
6767
6768static int intel_gen4_queue_flip(struct drm_device *dev,
6769				 struct drm_crtc *crtc,
6770				 struct drm_framebuffer *fb,
6771				 struct drm_i915_gem_object *obj)
6772{
6773	struct drm_i915_private *dev_priv = dev->dev_private;
6774	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6775	uint32_t pf, pipesrc;
 
6776	int ret;
6777
6778	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6779	if (ret)
6780		goto out;
6781
6782	ret = BEGIN_LP_RING(4);
6783	if (ret)
6784		goto out;
6785
6786	/* i965+ uses the linear or tiled offsets from the
6787	 * Display Registers (which do not change across a page-flip)
6788	 * so we need only reprogram the base address.
6789	 */
6790	OUT_RING(MI_DISPLAY_FLIP |
6791		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6792	OUT_RING(fb->pitch);
6793	OUT_RING(obj->gtt_offset | obj->tiling_mode);
6794
6795	/* XXX Enabling the panel-fitter across page-flip is so far
6796	 * untested on non-native modes, so ignore it for now.
6797	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6798	 */
6799	pf = 0;
6800	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6801	OUT_RING(pf | pipesrc);
6802	ADVANCE_LP_RING();
6803out:
 
 
 
 
6804	return ret;
6805}
6806
6807static int intel_gen6_queue_flip(struct drm_device *dev,
6808				 struct drm_crtc *crtc,
6809				 struct drm_framebuffer *fb,
6810				 struct drm_i915_gem_object *obj)
6811{
6812	struct drm_i915_private *dev_priv = dev->dev_private;
6813	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
6814	uint32_t pf, pipesrc;
6815	int ret;
6816
6817	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6818	if (ret)
6819		goto out;
6820
6821	ret = BEGIN_LP_RING(4);
6822	if (ret)
6823		goto out;
6824
6825	OUT_RING(MI_DISPLAY_FLIP |
6826		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6827	OUT_RING(fb->pitch | obj->tiling_mode);
6828	OUT_RING(obj->gtt_offset);
6829
6830	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
 
 
 
 
 
 
 
 
 
 
 
6831	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6832	OUT_RING(pf | pipesrc);
6833	ADVANCE_LP_RING();
6834out:
 
 
 
 
6835	return ret;
6836}
6837
6838/*
6839 * On gen7 we currently use the blit ring because (in early silicon at least)
6840 * the render ring doesn't give us interrpts for page flip completion, which
6841 * means clients will hang after the first flip is queued.  Fortunately the
6842 * blit ring generates interrupts properly, so use it instead.
6843 */
6844static int intel_gen7_queue_flip(struct drm_device *dev,
6845				 struct drm_crtc *crtc,
6846				 struct drm_framebuffer *fb,
6847				 struct drm_i915_gem_object *obj)
6848{
6849	struct drm_i915_private *dev_priv = dev->dev_private;
6850	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6851	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 
6852	int ret;
6853
6854	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6855	if (ret)
6856		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6857
6858	ret = intel_ring_begin(ring, 4);
6859	if (ret)
6860		goto out;
6861
6862	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
6863	intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
6864	intel_ring_emit(ring, (obj->gtt_offset));
6865	intel_ring_emit(ring, (MI_NOOP));
6866	intel_ring_advance(ring);
6867out:
 
 
 
 
6868	return ret;
6869}
6870
/*
 * Fallback queue_flip hook for generations without a flip
 * implementation above: reject the page flip outright.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj)
{
	return -ENODEV;
}
6878
/*
 * .page_flip hook: queue an asynchronous flip of @crtc to @fb.
 *
 * Allocates an intel_unpin_work item (completed later by
 * do_intel_finish_page_flip / intel_unpin_work_fn), takes references on
 * the old and new scanout objects, blocks rendering to the old buffer
 * via its pending_flip mask, and hands off to the per-generation
 * display.queue_flip implementation.  Returns 0 on success, -EBUSY if a
 * flip is already pending on this CRTC, or a negative errno.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->dev = crtc->dev;
	intel_fb = to_intel_framebuffer(crtc->fb);
	work->old_fb_obj = intel_fb->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* From here on, intel_fb refers to the NEW framebuffer. */
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto cleanup_objs;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	/* Block clients from rendering to the new back buffer until
	 * the flip occurs and the object is no longer visible.
	 */
	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;

	intel_disable_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

	/* NOTE(review): on the error paths below crtc->fb is left pointing
	 * at the new framebuffer rather than restored to the old one —
	 * confirm callers tolerate this. */
cleanup_pending:
	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	kfree(work);

	return ret;
}
6964
/*
 * Repair plane/pipe routing left behind by pre-boot firmware so that
 * our later modeset teardown disables planes and pipes in a consistent
 * order.  Only relevant on non-PCH-split hardware.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	/* The !! collapses the pipe-select field to 0/1, so this comparison
	 * assumes exactly two pipes — presumably true for every non-PCH
	 * platform reaching here; confirm if more pipes are ever added. */
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
7000
7001static void intel_crtc_reset(struct drm_crtc *crtc)
7002{
7003	struct drm_device *dev = crtc->dev;
7004	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7005
7006	/* Reset flags back to the 'unknown' status so that they
7007	 * will be correctly set on the initial modeset.
7008	 */
7009	intel_crtc->dpms_mode = -1;
7010
7011	/* We need to fix up any BIOS configuration that conflicts with
7012	 * our expectations.
7013	 */
7014	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
7015}
7016
/* CRTC helper vtable.  Deliberately NOT const: .prepare and .commit are
 * filled in at runtime by intel_crtc_init() depending on whether the
 * device is PCH-split (Ironlake+) or not.
 */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
7026
/* Core DRM CRTC operations exposed by every intel_crtc. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
7036
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Allocate and register the CRTC for @pipe: set up the identity gamma
 * LUT, establish the pipe<->plane mapping (swapped on mobile gen3 for
 * FBC), select the PCH-split vs i9xx helper callbacks and arm the idle
 * timer used for dynamic clock management.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* One allocation covers the CRTC plus its connector pointer array. */
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* Each plane/pipe slot must be claimed exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	/* Patch the shared helper vtable for this device generation. */
	if (HAS_PCH_SPLIT(dev)) {
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
		    (unsigned long)intel_crtc);
}
7088
7089int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7090				struct drm_file *file)
7091{
7092	drm_i915_private_t *dev_priv = dev->dev_private;
7093	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7094	struct drm_mode_object *drmmode_obj;
7095	struct intel_crtc *crtc;
7096
7097	if (!dev_priv) {
7098		DRM_ERROR("called with no initialization\n");
7099		return -EINVAL;
7100	}
7101
7102	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7103			DRM_MODE_OBJECT_CRTC);
7104
7105	if (!drmmode_obj) {
7106		DRM_ERROR("no such CRTC id\n");
7107		return -EINVAL;
7108	}
7109
7110	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
7111	pipe_from_crtc_id->pipe = crtc->pipe;
7112
7113	return 0;
7114}
7115
7116static int intel_encoder_clones(struct drm_device *dev, int type_mask)
7117{
7118	struct intel_encoder *encoder;
7119	int index_mask = 0;
7120	int entry = 0;
7121
7122	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7123		if (type_mask & encoder->clone_mask)
7124			index_mask |= (1 << entry);
7125		entry++;
7126	}
7127
7128	return index_mask;
7129}
7130
7131static bool has_edp_a(struct drm_device *dev)
7132{
7133	struct drm_i915_private *dev_priv = dev->dev_private;
7134
7135	if (!IS_MOBILE(dev))
7136		return false;
7137
7138	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
7139		return false;
7140
7141	if (IS_GEN5(dev) &&
7142	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
7143		return false;
7144
7145	return true;
7146}
7147
/*
 * Probe and register every display output on the device.  The probe
 * order below is deliberate (eDP before CRT, SDVO before the HDMI/DP
 * ports it multiplexes with); do not reorder without checking each
 * dependency.  Finishes by computing crtc/clone masks and disabling
 * everything before KMS takes over.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds = false;

	if (IS_MOBILE(dev) && !IS_I830(dev))
		has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		/* Register eDP ports first, before CRT below. */
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		/* Port D as plain DP only if it was not claimed as eDP. */
		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	/* Expand each encoder's crtc/clone masks into DRM bitmasks. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);
}
7254
7255static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
7256{
7257	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7258
7259	drm_framebuffer_cleanup(fb);
7260	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
7261
7262	kfree(intel_fb);
7263}
7264
7265static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7266						struct drm_file *file,
7267						unsigned int *handle)
7268{
7269	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7270	struct drm_i915_gem_object *obj = intel_fb->obj;
7271
7272	return drm_gem_handle_create(file, &obj->base, handle);
7273}
7274
/* Framebuffer operations for user-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
7279
7280int intel_framebuffer_init(struct drm_device *dev,
7281			   struct intel_framebuffer *intel_fb,
7282			   struct drm_mode_fb_cmd *mode_cmd,
7283			   struct drm_i915_gem_object *obj)
7284{
7285	int ret;
7286
7287	if (obj->tiling_mode == I915_TILING_Y)
7288		return -EINVAL;
7289
7290	if (mode_cmd->pitch & 63)
7291		return -EINVAL;
7292
7293	switch (mode_cmd->bpp) {
7294	case 8:
7295	case 16:
7296		/* Only pre-ILK can handle 5:5:5 */
7297		if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
7298			return -EINVAL;
 
 
 
7299		break;
7300
7301	case 24:
7302	case 32:
 
7303		break;
7304	default:
 
 
7305		return -EINVAL;
7306	}
7307
7308	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
7309	if (ret) {
7310		DRM_ERROR("framebuffer init failed %d\n", ret);
7311		return ret;
7312	}
7313
7314	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
7315	intel_fb->obj = obj;
7316	return 0;
7317}
7318
7319static struct drm_framebuffer *
7320intel_user_framebuffer_create(struct drm_device *dev,
7321			      struct drm_file *filp,
7322			      struct drm_mode_fb_cmd *mode_cmd)
7323{
7324	struct drm_i915_gem_object *obj;
7325
7326	obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
 
7327	if (&obj->base == NULL)
7328		return ERR_PTR(-ENOENT);
7329
7330	return intel_framebuffer_create(dev, mode_cmd, obj);
7331}
7332
/* Mode-config entry points: user fb creation and fbdev hotplug polling. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};
7337
7338static struct drm_i915_gem_object *
7339intel_alloc_context_page(struct drm_device *dev)
7340{
7341	struct drm_i915_gem_object *ctx;
7342	int ret;
7343
7344	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
7345
7346	ctx = i915_gem_alloc_object(dev, 4096);
7347	if (!ctx) {
7348		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
7349		return NULL;
7350	}
7351
7352	ret = i915_gem_object_pin(ctx, 4096, true);
7353	if (ret) {
7354		DRM_ERROR("failed to pin power context: %d\n", ret);
7355		goto err_unref;
7356	}
7357
7358	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
7359	if (ret) {
7360		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
7361		goto err_unpin;
7362	}
7363
7364	return ctx;
7365
7366err_unpin:
7367	i915_gem_object_unpin(ctx);
7368err_unref:
7369	drm_gem_object_unreference(&ctx->base);
7370	mutex_unlock(&dev->struct_mutex);
7371	return NULL;
7372}
7373
/*
 * Request a new render P-state @val via the MEMSWCTL mailbox.  Returns
 * false (without writing) when the previous command is still pending,
 * true once the change-frequency command has been issued.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Issue the change-frequency command with the requested state. */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	/* Set the status bit to latch the command. */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
7395
/*
 * Enable Ironlake dynamic render P-state (DRPS) management: program
 * evaluation intervals and thresholds, derive the min/max/start
 * frequencies from MEMMODECTL fuses, switch to software mode and step
 * to the start frequency.  Also snapshots the counters used later for
 * power accounting (last_count1/2, last_time1/2).
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Voltage for the start frequency, from the PXVFREQ table. */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Hand frequency control over to software. */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Baseline the busy/GFX counters for later power estimation. */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->last_time2);
}
7459
/*
 * Disable Ironlake DRPS: mask/ack its interrupts, return the hardware
 * to the start frequency recorded by ironlake_enable_drps(), and latch
 * the final mailbox command.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);

}
7480
7481void gen6_set_rps(struct drm_device *dev, u8 val)
7482{
7483	struct drm_i915_private *dev_priv = dev->dev_private;
7484	u32 swreq;
7485
7486	swreq = (val & 0x3ff) << 25;
7487	I915_WRITE(GEN6_RPNSWREQ, swreq);
7488}
7489
/*
 * Disable gen6 RPS: cancel the software frequency request, mask and
 * disable all PM interrupts, clear the cached pm_iir under rps_lock,
 * then ack anything still latched in PMIIR.
 */
void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);

	/* rps_lock protects the pm_iir cache shared with the IRQ path. */
	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	/* Write back PMIIR to itself to ack any pending bits. */
	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
7504
7505static unsigned long intel_pxfreq(u32 vidfreq)
7506{
7507	unsigned long freq;
7508	int div = (vidfreq & 0x3f0000) >> 16;
7509	int post = (vidfreq & 0x3000) >> 12;
7510	int pre = (vidfreq & 0x7);
7511
7512	if (!pre)
7513		return 0;
7514
7515	freq = ((div * 133333) / ((1<<post) * pre));
7516
7517	return freq;
7518}
7519
/*
 * Program the energy monitor (EMON): event energy weights, per-P-state
 * weights derived from the PXVFREQ table, and the experimentally
 * determined magic register values, then enable PMON and cache the
 * LCFUSE correction factor in dev_priv->corr.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		/* val should fit in a byte; log (but still truncate) if not. */
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte-weights into four 32-bit PXW registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
7590
/*
 * Bring up gen6 RPS (render P-states) and optionally RC6: program the
 * RC sleep thresholds, the turbo request/limit registers, negotiate the
 * frequency table with the PCU mailbox, probe for overclock support,
 * and finally enable the PM interrupts.  Takes struct_mutex and a
 * forcewake reference for the duration.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	/* Per-ring max-idle timeouts. */
	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* RC6 participation is a module option. */
	if (i915_enable_rc6)
		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE;

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_USE_NORMAL_FREQ |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Push the min-frequency table through the PCU mailbox. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	/* pm_iir is shared with the interrupt handler; update under lock. */
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7714
/*
 * Program the PCU's GPU-frequency -> ring-frequency table.  For each
 * GPU frequency between max_delay and min_delay we pick an IA reference
 * frequency (scaled down from the CPU's max) and write the pair through
 * the pcode mailbox.  Holds struct_mutex across the writes.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		/* On timeout, skip this entry but keep programming the rest. */
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7767
/*
 * Ironlake clock-gating setup: disable the gating units required for
 * FBC and CxSR, program the self-refresh and FBC chicken bits from the
 * hardware documentation, and zero the LP watermarks.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
7836
/*
 * Sandybridge clock-gating setup: program the self-refresh/FBC chicken
 * bits, zero the LP watermarks, and disable trickle-feed on every
 * display plane.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on each plane (bit14 above) and flush. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7880
/*
 * Ivybridge clock-gating setup: zero the LP watermarks, program the
 * VRHUNIT gating bits, and disable trickle-feed on every display plane.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7902
/*
 * G4x clock-gating setup: disable gating on the render/overlay units
 * listed below; GM45 additionally needs the DSS unit ungated.
 */
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
7920
/* Crestline (965GM) clock gating: only the RCC unit stays ungated. */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}
7931
/* Broadwater (965G) clock gating: ungate the listed render units. */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
7943
/* Gen3 clock gating: enable PLL-off in D3 plus GFX and dot clock gating. */
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);
}
7953
/* i85x clock gating: keep the SV unit ungated. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
7960
/* i830 clock gating: keep the overlay unit ungated. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
7967
/* Ibex Peak PCH clock gating setup. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7979
/* Cougar Point PCH clock gating setup. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
7997
/*
 * Release the RC6 context pages if allocated.  Each object must be
 * unpinned before the reference is dropped; the pointers are cleared
 * so the function is idempotent.
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
8014
/*
 * Disable RC6 on Ironlake: if a power context is armed (PWRCTXA
 * non-zero), force the GPU out of RC6 via RSTDBYCTL, clear the power
 * context address, restore RSTDBYCTL, then free the context pages.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
8034
8035static int ironlake_setup_rc6(struct drm_device *dev)
8036{
8037	struct drm_i915_private *dev_priv = dev->dev_private;
8038
8039	if (dev_priv->renderctx == NULL)
8040		dev_priv->renderctx = intel_alloc_context_page(dev);
8041	if (!dev_priv->renderctx)
8042		return -ENOMEM;
8043
8044	if (dev_priv->pwrctx == NULL)
8045		dev_priv->pwrctx = intel_alloc_context_page(dev);
8046	if (!dev_priv->pwrctx) {
8047		ironlake_teardown_rc6(dev);
8048		return -ENOMEM;
8049	}
8050
8051	return 0;
8052}
8053
8054void ironlake_enable_rc6(struct drm_device *dev)
8055{
8056	struct drm_i915_private *dev_priv = dev->dev_private;
8057	int ret;
8058
8059	/* rc6 disabled by default due to repeated reports of hanging during
8060	 * boot and resume.
8061	 */
8062	if (!i915_enable_rc6)
8063		return;
8064
8065	mutex_lock(&dev->struct_mutex);
8066	ret = ironlake_setup_rc6(dev);
8067	if (ret) {
8068		mutex_unlock(&dev->struct_mutex);
8069		return;
8070	}
8071
8072	/*
8073	 * GPU can automatically power down the render unit if given a page
8074	 * to save state.
8075	 */
8076	ret = BEGIN_LP_RING(6);
8077	if (ret) {
8078		ironlake_teardown_rc6(dev);
8079		mutex_unlock(&dev->struct_mutex);
8080		return;
8081	}
8082
8083	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8084	OUT_RING(MI_SET_CONTEXT);
8085	OUT_RING(dev_priv->renderctx->gtt_offset |
8086		 MI_MM_SPACE_GTT |
8087		 MI_SAVE_EXT_STATE_EN |
8088		 MI_RESTORE_EXT_STATE_EN |
8089		 MI_RESTORE_INHIBIT);
8090	OUT_RING(MI_SUSPEND_FLUSH);
8091	OUT_RING(MI_NOOP);
8092	OUT_RING(MI_FLUSH);
8093	ADVANCE_LP_RING();
8094
8095	/*
8096	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8097	 * does an implicit flush, combined with MI_FLUSH above, it should be
8098	 * safe to assume that renderctx is valid
8099	 */
8100	ret = intel_wait_ring_idle(LP_RING(dev_priv));
8101	if (ret) {
8102		DRM_ERROR("failed to enable ironlake power power savings\n");
8103		ironlake_teardown_rc6(dev);
8104		mutex_unlock(&dev->struct_mutex);
8105		return;
8106	}
8107
8108	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8109	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8110	mutex_unlock(&dev->struct_mutex);
8111}
8112
/*
 * Apply the per-GPU clock gating setup, then the PCH-specific hook
 * when one exists.  Both function pointers are assigned in
 * intel_init_display(); init_clock_gating is always set there,
 * init_pch_clock_gating only on PCH-split parts.
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
8122
8123/* Set up chip specific display functions */
8124static void intel_init_display(struct drm_device *dev)
8125{
8126	struct drm_i915_private *dev_priv = dev->dev_private;
8127
8128	/* We always want a DPMS function */
8129	if (HAS_PCH_SPLIT(dev)) {
8130		dev_priv->display.dpms = ironlake_crtc_dpms;
8131		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
 
8132		dev_priv->display.update_plane = ironlake_update_plane;
8133	} else {
8134		dev_priv->display.dpms = i9xx_crtc_dpms;
8135		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
 
8136		dev_priv->display.update_plane = i9xx_update_plane;
8137	}
8138
8139	if (I915_HAS_FBC(dev)) {
8140		if (HAS_PCH_SPLIT(dev)) {
8141			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
8142			dev_priv->display.enable_fbc = ironlake_enable_fbc;
8143			dev_priv->display.disable_fbc = ironlake_disable_fbc;
8144		} else if (IS_GM45(dev)) {
8145			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
8146			dev_priv->display.enable_fbc = g4x_enable_fbc;
8147			dev_priv->display.disable_fbc = g4x_disable_fbc;
8148		} else if (IS_CRESTLINE(dev)) {
8149			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
8150			dev_priv->display.enable_fbc = i8xx_enable_fbc;
8151			dev_priv->display.disable_fbc = i8xx_disable_fbc;
8152		}
8153		/* 855GM needs testing */
8154	}
8155
8156	/* Returns the core display clock speed */
8157	if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
 
 
 
8158		dev_priv->display.get_display_clock_speed =
8159			i945_get_display_clock_speed;
8160	else if (IS_I915G(dev))
8161		dev_priv->display.get_display_clock_speed =
8162			i915_get_display_clock_speed;
8163	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
8164		dev_priv->display.get_display_clock_speed =
8165			i9xx_misc_get_display_clock_speed;
8166	else if (IS_I915GM(dev))
8167		dev_priv->display.get_display_clock_speed =
8168			i915gm_get_display_clock_speed;
8169	else if (IS_I865G(dev))
8170		dev_priv->display.get_display_clock_speed =
8171			i865_get_display_clock_speed;
8172	else if (IS_I85X(dev))
8173		dev_priv->display.get_display_clock_speed =
8174			i855_get_display_clock_speed;
8175	else /* 852, 830 */
8176		dev_priv->display.get_display_clock_speed =
8177			i830_get_display_clock_speed;
8178
8179	/* For FIFO watermark updates */
8180	if (HAS_PCH_SPLIT(dev)) {
8181		if (HAS_PCH_IBX(dev))
8182			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
8183		else if (HAS_PCH_CPT(dev))
8184			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
8185
8186		if (IS_GEN5(dev)) {
8187			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
8188				dev_priv->display.update_wm = ironlake_update_wm;
8189			else {
8190				DRM_DEBUG_KMS("Failed to get proper latency. "
8191					      "Disable CxSR\n");
8192				dev_priv->display.update_wm = NULL;
8193			}
8194			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
8195			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
8196		} else if (IS_GEN6(dev)) {
8197			if (SNB_READ_WM0_LATENCY()) {
8198				dev_priv->display.update_wm = sandybridge_update_wm;
8199			} else {
8200				DRM_DEBUG_KMS("Failed to read display plane latency. "
8201					      "Disable CxSR\n");
8202				dev_priv->display.update_wm = NULL;
8203			}
8204			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
8205			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
8206		} else if (IS_IVYBRIDGE(dev)) {
8207			/* FIXME: detect B0+ stepping and use auto training */
8208			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
8209			if (SNB_READ_WM0_LATENCY()) {
8210				dev_priv->display.update_wm = sandybridge_update_wm;
8211			} else {
8212				DRM_DEBUG_KMS("Failed to read display plane latency. "
8213					      "Disable CxSR\n");
8214				dev_priv->display.update_wm = NULL;
8215			}
8216			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
8217
8218		} else
8219			dev_priv->display.update_wm = NULL;
8220	} else if (IS_PINEVIEW(dev)) {
8221		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
8222					    dev_priv->is_ddr3,
8223					    dev_priv->fsb_freq,
8224					    dev_priv->mem_freq)) {
8225			DRM_INFO("failed to find known CxSR latency "
8226				 "(found ddr%s fsb freq %d, mem freq %d), "
8227				 "disabling CxSR\n",
8228				 (dev_priv->is_ddr3 == 1) ? "3": "2",
8229				 dev_priv->fsb_freq, dev_priv->mem_freq);
8230			/* Disable CxSR and never update its watermark again */
8231			pineview_disable_cxsr(dev);
8232			dev_priv->display.update_wm = NULL;
8233		} else
8234			dev_priv->display.update_wm = pineview_update_wm;
8235		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
8236	} else if (IS_G4X(dev)) {
8237		dev_priv->display.update_wm = g4x_update_wm;
8238		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
8239	} else if (IS_GEN4(dev)) {
8240		dev_priv->display.update_wm = i965_update_wm;
8241		if (IS_CRESTLINE(dev))
8242			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
8243		else if (IS_BROADWATER(dev))
8244			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
8245	} else if (IS_GEN3(dev)) {
8246		dev_priv->display.update_wm = i9xx_update_wm;
8247		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
8248		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
8249	} else if (IS_I865G(dev)) {
8250		dev_priv->display.update_wm = i830_update_wm;
8251		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
8252		dev_priv->display.get_fifo_size = i830_get_fifo_size;
8253	} else if (IS_I85X(dev)) {
8254		dev_priv->display.update_wm = i9xx_update_wm;
8255		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
8256		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
8257	} else {
8258		dev_priv->display.update_wm = i830_update_wm;
8259		dev_priv->display.init_clock_gating = i830_init_clock_gating;
8260		if (IS_845G(dev))
8261			dev_priv->display.get_fifo_size = i845_get_fifo_size;
8262		else
8263			dev_priv->display.get_fifo_size = i830_get_fifo_size;
8264	}
8265
8266	/* Default just returns -ENODEV to indicate unsupported */
8267	dev_priv->display.queue_flip = intel_default_queue_flip;
8268
8269	switch (INTEL_INFO(dev)->gen) {
8270	case 2:
8271		dev_priv->display.queue_flip = intel_gen2_queue_flip;
8272		break;
8273
8274	case 3:
8275		dev_priv->display.queue_flip = intel_gen3_queue_flip;
8276		break;
8277
8278	case 4:
8279	case 5:
8280		dev_priv->display.queue_flip = intel_gen4_queue_flip;
8281		break;
8282
8283	case 6:
8284		dev_priv->display.queue_flip = intel_gen6_queue_flip;
8285		break;
8286	case 7:
8287		dev_priv->display.queue_flip = intel_gen7_queue_flip;
8288		break;
8289	}
8290}
8291
8292/*
8293 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
8294 * resume, or other times.  This quirk makes sure that's the case for
8295 * affected systems.
8296 */
8297static void quirk_pipea_force (struct drm_device *dev)
8298{
8299	struct drm_i915_private *dev_priv = dev->dev_private;
8300
8301	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
8302	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
8303}
8304
8305/*
8306 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
8307 */
8308static void quirk_ssc_force_disable(struct drm_device *dev)
8309{
8310	struct drm_i915_private *dev_priv = dev->dev_private;
8311	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
 
 
 
 
 
 
 
 
 
 
 
 
8312}
8313
/* One entry in the PCI-id keyed quirk table below. */
struct intel_quirk {
	int device;		/* PCI device id of the affected GPU */
	int subsystem_vendor;	/* board subsystem vendor id, or PCI_ANY_ID */
	int subsystem_device;	/* board subsystem device id, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* run when all ids match */
};
8320
8321struct intel_quirk intel_quirks[] = {
8322	/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
8323	{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
8324	/* HP Mini needs pipe A force quirk (LP: #322104) */
8325	{ 0x27ae,0x103c, 0x361a, quirk_pipea_force },
8326
8327	/* Thinkpad R31 needs pipe A force quirk */
8328	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
8329	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
8330	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
8331
8332	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
8333	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
8334	/* ThinkPad X40 needs pipe A force quirk */
8335
8336	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
8337	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
8338
8339	/* 855 & before need to leave pipe A & dpll A up */
8340	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
8341	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
8342
8343	/* Lenovo U160 cannot use SSC on LVDS */
8344	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
8345
8346	/* Sony Vaio Y cannot use SSC on LVDS */
8347	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 
 
 
8348};
8349
8350static void intel_init_quirks(struct drm_device *dev)
8351{
8352	struct pci_dev *d = dev->pdev;
8353	int i;
8354
8355	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
8356		struct intel_quirk *q = &intel_quirks[i];
8357
8358		if (d->device == q->device &&
8359		    (d->subsystem_vendor == q->subsystem_vendor ||
8360		     q->subsystem_vendor == PCI_ANY_ID) &&
8361		    (d->subsystem_device == q->subsystem_device ||
8362		     q->subsystem_device == PCI_ANY_ID))
8363			q->hook(dev);
8364	}
8365}
8366
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* The VGA control register lives at a different offset on PCH parts. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	/*
	 * Borrow the legacy VGA I/O ports from the arbiter and set the
	 * screen-off bit (bit 5) in sequencer register SR01.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(1, VGA_SR_INDEX);	/* select SR01 */
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);	/* give the screen-off time to take effect */

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the write */
}
8389
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * First-stage modeset bring-up: register the mode config, pick the
 * per-chip display vtable, create the crtcs/outputs and enable the
 * power-saving features that do not need GEM (those happen later in
 * intel_modeset_gem_init()).
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = (void *)&intel_mode_funcs;

	/* Apply board quirks before the rest of the setup runs. */
	intel_init_quirks(dev);

	intel_init_display(dev);

	/* Maximum framebuffer dimensions grow with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
	}

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	intel_init_clock_gating(dev);

	/* Power saving: DRPS/EMON on Ironlake-M, RPS on gen6/7. */
	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	}

	/* GPU-idle tracking; torn down again in intel_modeset_cleanup(). */
	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
		    (unsigned long)dev);
}
8445
/*
 * Modeset setup that must wait until GEM is initialized: RC6 needs
 * GEM-backed context pages (see ironlake_setup_rc6()), and the
 * overlay is set up here too.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

	intel_setup_overlay(dev);
}
8453
/*
 * Undo intel_modeset_init()/intel_modeset_gem_init().  Order matters:
 * stop polling, disable the power-saving features under struct_mutex,
 * kill interrupts before mode-object teardown, then flush and stop all
 * deferred work before freeing the crtcs.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	drm_kms_helper_poll_fini(dev);
	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	/* Bump the PLL clock back up on every crtc that still has a fb. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (IS_GEN6(dev) || IS_GEN7(dev))
		gen6_disable_rps(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Disable the irq before mode object teardown, for the irq might
	 * enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* Shut off idle work before the crtcs get freed. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		del_timer_sync(&intel_crtc->idle_timer);
	}
	del_timer_sync(&dev_priv->idle_timer);
	cancel_work_sync(&dev_priv->idle_work);

	drm_mode_config_cleanup(dev);
}
8505
8506/*
8507 * Return which encoder is currently attached for connector.
8508 */
8509struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
8510{
8511	return &intel_attached_encoder(connector)->base;
8512}
8513
8514void intel_connector_attach_encoder(struct intel_connector *connector,
8515				    struct intel_encoder *encoder)
8516{
8517	connector->encoder = encoder;
8518	drm_mode_connector_attach_encoder(&connector->base,
8519					  &encoder->base);
8520}
8521
8522/*
8523 * set vga decode state - true == enable VGA decode
8524 */
8525int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
8526{
8527	struct drm_i915_private *dev_priv = dev->dev_private;
8528	u16 gmch_ctrl;
8529
8530	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
8531	if (state)
8532		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
8533	else
8534		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
8535	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
8536	return 0;
8537}
8538
8539#ifdef CONFIG_DEBUG_FS
8540#include <linux/seq_file.h>
8541
/* Snapshot of the display registers for both pipes, taken on GPU error. */
struct intel_display_error_state {
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): never written by the capture path below */
	} cursor[2];

	struct intel_pipe_error_state {
		u32 conf;
		u32 source;

		/* Timing registers */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;	/* gen4+ only */
		u32 tile_offset;	/* gen4+ only */
	} plane[2];
};
8572
8573struct intel_display_error_state *
8574intel_display_capture_error_state(struct drm_device *dev)
8575{
8576        drm_i915_private_t *dev_priv = dev->dev_private;
8577	struct intel_display_error_state *error;
8578	int i;
8579
8580	error = kmalloc(sizeof(*error), GFP_ATOMIC);
8581	if (error == NULL)
8582		return NULL;
8583
8584	for (i = 0; i < 2; i++) {
8585		error->cursor[i].control = I915_READ(CURCNTR(i));
8586		error->cursor[i].position = I915_READ(CURPOS(i));
8587		error->cursor[i].base = I915_READ(CURBASE(i));
8588
8589		error->plane[i].control = I915_READ(DSPCNTR(i));
8590		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
8591		error->plane[i].size = I915_READ(DSPSIZE(i));
8592		error->plane[i].pos= I915_READ(DSPPOS(i));
8593		error->plane[i].addr = I915_READ(DSPADDR(i));
8594		if (INTEL_INFO(dev)->gen >= 4) {
8595			error->plane[i].surface = I915_READ(DSPSURF(i));
8596			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
8597		}
8598
8599		error->pipe[i].conf = I915_READ(PIPECONF(i));
8600		error->pipe[i].source = I915_READ(PIPESRC(i));
8601		error->pipe[i].htotal = I915_READ(HTOTAL(i));
8602		error->pipe[i].hblank = I915_READ(HBLANK(i));
8603		error->pipe[i].hsync = I915_READ(HSYNC(i));
8604		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
8605		error->pipe[i].vblank = I915_READ(VBLANK(i));
8606		error->pipe[i].vsync = I915_READ(VSYNC(i));
8607	}
8608
8609	return error;
8610}
8611
/*
 * Dump a display snapshot taken by intel_display_capture_error_state()
 * into a seq_file, one Pipe/Plane/Cursor group per pipe.
 */
void
intel_display_print_error_state(struct seq_file *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	/* Same fixed two-pipe layout as the capture side. */
	for (i = 0; i < 2; i++) {
		seq_printf(m, "Pipe [%d]:\n", i);
		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

		seq_printf(m, "Plane [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		seq_printf(m, "Cursor [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}
}
8647#endif
v3.5.6
   1/*
   2 * Copyright © 2006-2007 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *	Eric Anholt <eric@anholt.net>
  25 */
  26
  27#include <linux/dmi.h>
  28#include <linux/module.h>
  29#include <linux/input.h>
  30#include <linux/i2c.h>
  31#include <linux/kernel.h>
  32#include <linux/slab.h>
  33#include <linux/vgaarb.h>
  34#include <drm/drm_edid.h>
  35#include "drmP.h"
  36#include "intel_drv.h"
  37#include "i915_drm.h"
  38#include "i915_drv.h"
  39#include "i915_trace.h"
  40#include "drm_dp_helper.h"
 
  41#include "drm_crtc_helper.h"
  42#include <linux/dma_remapping.h>
  43
  44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
  45
  46bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 
  47static void intel_increase_pllclock(struct drm_crtc *crtc);
  48static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
  49
/* PLL parameters: the raw divider values plus the clocks derived from them. */
typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int	dot;
	int	vco;
	int	m;
	int	p;
} intel_clock_t;

/* Inclusive [min, max] range for a single PLL parameter. */
typedef struct {
	int	min, max;
} intel_range_t;

/* Two-valued p2 divider; dot_limit selects which value applies
 * (see the find_pll implementations). */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;
/* Per-platform/output PLL limits plus the matching search strategy. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			int, int, intel_clock_t *, intel_clock_t *);
};
  79
  80/* FDI */
  81#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
  82
  83static bool
  84intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
  85		    int target, int refclk, intel_clock_t *match_clock,
  86		    intel_clock_t *best_clock);
  87static bool
  88intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
  89			int target, int refclk, intel_clock_t *match_clock,
  90			intel_clock_t *best_clock);
  91
  92static bool
  93intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
  94		      int target, int refclk, intel_clock_t *match_clock,
  95		      intel_clock_t *best_clock);
  96static bool
  97intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
  98			   int target, int refclk, intel_clock_t *match_clock,
  99			   intel_clock_t *best_clock);
 100
 101static inline u32 /* units of 100MHz */
 102intel_fdi_link_freq(struct drm_device *dev)
 103{
 104	if (IS_GEN5(dev)) {
 105		struct drm_i915_private *dev_priv = dev->dev_private;
 106		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
 107	} else
 108		return 27;
 109}
 110
/* Gen2 (i8xx) PLL limits, DVO outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) PLL limits, LVDS. */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3 (i9xx) PLL limits, SDVO/DAC outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3 (i9xx) PLL limits, LVDS. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
 166
 167
/* G4x PLL limits, SDVO. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x PLL limits, HDMI/DAC. */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x PLL limits, single-channel LVDS. */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x PLL limits, dual-channel LVDS. */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x PLL limits, DisplayPort (fixed link rates, dedicated finder). */
static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};
 241
/* Pineview PLL limits, SDVO/DAC. */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Pineview PLL limits, LVDS. */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
 271
 272/* Ironlake / Sandybridge
 273 *
 274 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 275 * the range value for them is (actual_value - 2).
 276 */
 277static const intel_limit_t intel_limits_ironlake_dac = {
 278	.dot = { .min = 25000, .max = 350000 },
 279	.vco = { .min = 1760000, .max = 3510000 },
 280	.n = { .min = 1, .max = 5 },
 281	.m = { .min = 79, .max = 127 },
 282	.m1 = { .min = 12, .max = 22 },
 283	.m2 = { .min = 5, .max = 9 },
 284	.p = { .min = 5, .max = 80 },
 285	.p1 = { .min = 1, .max = 8 },
 286	.p2 = { .dot_limit = 225000,
 287		.p2_slow = 10, .p2_fast = 5 },
 288	.find_pll = intel_g4x_find_best_PLL,
 289};
 290
 291static const intel_limit_t intel_limits_ironlake_single_lvds = {
 292	.dot = { .min = 25000, .max = 350000 },
 293	.vco = { .min = 1760000, .max = 3510000 },
 294	.n = { .min = 1, .max = 3 },
 295	.m = { .min = 79, .max = 118 },
 296	.m1 = { .min = 12, .max = 22 },
 297	.m2 = { .min = 5, .max = 9 },
 298	.p = { .min = 28, .max = 112 },
 299	.p1 = { .min = 2, .max = 8 },
 300	.p2 = { .dot_limit = 225000,
 301		.p2_slow = 14, .p2_fast = 14 },
 302	.find_pll = intel_g4x_find_best_PLL,
 303};
 304
 305static const intel_limit_t intel_limits_ironlake_dual_lvds = {
 306	.dot = { .min = 25000, .max = 350000 },
 307	.vco = { .min = 1760000, .max = 3510000 },
 308	.n = { .min = 1, .max = 3 },
 309	.m = { .min = 79, .max = 127 },
 310	.m1 = { .min = 12, .max = 22 },
 311	.m2 = { .min = 5, .max = 9 },
 312	.p = { .min = 14, .max = 56 },
 313	.p1 = { .min = 2, .max = 8 },
 314	.p2 = { .dot_limit = 225000,
 315		.p2_slow = 7, .p2_fast = 7 },
 316	.find_pll = intel_g4x_find_best_PLL,
 317};
 318
/* LVDS limits for a 100MHz refclk, single-channel variant. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};
 333
/* LVDS limits for a 100MHz refclk, dual-channel variant. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};
 347
/* DisplayPort/eDP limits; DP actually uses fixed divisor sets, see
 * intel_find_pll_ironlake_dp(). */
static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};
 361
/*
 * Read a register through the DPIO sideband interface.
 *
 * Returns the register value, or 0 if the sideband bus never went idle /
 * never completed — so a return of 0 is ambiguous between "register reads
 * zero" and "transaction timed out" (an error is logged in the latter case).
 */
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
	unsigned long flags;
	u32 val = 0;

	/* Serialize all sideband traffic; waits below are atomic-safe. */
	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		goto out_unlock;
	}

	/* Kick off the read transaction... */
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
		   DPIO_BYTE);
	/* ...and wait for completion before fetching the data register. */
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO read wait timed out\n");
		goto out_unlock;
	}
	val = I915_READ(DPIO_DATA);

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
	return val;
}
 386
/* Reset the DPIO block by pulsing DPIO_CTL low, then high. */
static void vlv_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Reset the DPIO config */
	I915_WRITE(DPIO_CTL, 0);
	POSTING_READ(DPIO_CTL);	/* posting reads flush each write to the HW */
	I915_WRITE(DPIO_CTL, 1);
	POSTING_READ(DPIO_CTL);
}
 397
/* DMI match callback: log that the quirk forces dual-link LVDS.
 * Returning non-zero makes dmi_check_system() count this as a match. */
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
	return 1;
}
 403
/* Machines known to need dual-link LVDS regardless of what the
 * registers/VBT report; consulted by is_dual_link_lvds(). */
static const struct dmi_system_id intel_dual_link_lvds[] = {
	{
		.callback = intel_dual_link_lvds_callback,
		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
		},
	},
	{ }	/* terminating entry */
};
 415
 416static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
 417			      unsigned int reg)
 418{
 419	unsigned int val;
 420
 421	/* use the module option value if specified */
 422	if (i915_lvds_channel_mode > 0)
 423		return i915_lvds_channel_mode == 2;
 424
 425	if (dmi_check_system(intel_dual_link_lvds))
 426		return true;
 427
 428	if (dev_priv->lvds_val)
 429		val = dev_priv->lvds_val;
 430	else {
 431		/* BIOS should set the proper LVDS register value at boot, but
 432		 * in reality, it doesn't set the value when the lid is closed;
 433		 * we need to check "the value to be set" in VBT when LVDS
 434		 * register is uninitialized.
 435		 */
 436		val = I915_READ(reg);
 437		if (!(val & ~LVDS_DETECTED))
 438			val = dev_priv->bios_lvds_val;
 439		dev_priv->lvds_val = val;
 440	}
 441	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
 442}
 443
 444static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 445						int refclk)
 446{
 447	struct drm_device *dev = crtc->dev;
 448	struct drm_i915_private *dev_priv = dev->dev_private;
 449	const intel_limit_t *limit;
 450
 451	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 452		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
 
 453			/* LVDS dual channel */
 454			if (refclk == 100000)
 455				limit = &intel_limits_ironlake_dual_lvds_100m;
 456			else
 457				limit = &intel_limits_ironlake_dual_lvds;
 458		} else {
 459			if (refclk == 100000)
 460				limit = &intel_limits_ironlake_single_lvds_100m;
 461			else
 462				limit = &intel_limits_ironlake_single_lvds;
 463		}
 464	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
 465			HAS_eDP)
 466		limit = &intel_limits_ironlake_display_port;
 467	else
 468		limit = &intel_limits_ironlake_dac;
 469
 470	return limit;
 471}
 472
/* Pick the PLL divisor limit table for a G4X crtc by output type. */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, LVDS))

			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
 498
 499static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
 500{
 501	struct drm_device *dev = crtc->dev;
 502	const intel_limit_t *limit;
 503
 504	if (HAS_PCH_SPLIT(dev))
 505		limit = intel_ironlake_limit(crtc, refclk);
 506	else if (IS_G4X(dev)) {
 507		limit = intel_g4x_limit(crtc);
 508	} else if (IS_PINEVIEW(dev)) {
 509		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 510			limit = &intel_limits_pineview_lvds;
 511		else
 512			limit = &intel_limits_pineview_sdvo;
 513	} else if (!IS_GEN2(dev)) {
 514		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 515			limit = &intel_limits_i9xx_lvds;
 516		else
 517			limit = &intel_limits_i9xx_sdvo;
 518	} else {
 519		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 520			limit = &intel_limits_i8xx_lvds;
 521		else
 522			limit = &intel_limits_i8xx_dvo;
 523	}
 524	return limit;
 525}
 526
 527/* m1 is reserved as 0 in Pineview, n is a ring counter */
 528static void pineview_clock(int refclk, intel_clock_t *clock)
 529{
 530	clock->m = clock->m2 + 2;
 531	clock->p = clock->p1 * clock->p2;
 532	clock->vco = refclk * clock->m / clock->n;
 533	clock->dot = clock->vco / clock->p;
 534}
 535
 536static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
 537{
 538	if (IS_PINEVIEW(dev)) {
 539		pineview_clock(refclk, clock);
 540		return;
 541	}
 542	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
 543	clock->p = clock->p1 * clock->p2;
 544	clock->vco = refclk * clock->m / (clock->n + 2);
 545	clock->dot = clock->vco / clock->p;
 546}
 547
 548/**
 549 * Returns whether any output on the specified pipe is of the specified type
 550 */
 551bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
 552{
 553	struct drm_device *dev = crtc->dev;
 554	struct drm_mode_config *mode_config = &dev->mode_config;
 555	struct intel_encoder *encoder;
 556
 557	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 558		if (encoder->base.crtc == crtc && encoder->type == type)
 559			return true;
 560
 561	return false;
 562}
 563
/* Reject the candidate: returns false from the enclosing function.
 * The debug print is intentionally compiled out. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	/* Each raw divisor must lie within the table's [min, max] range. */
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* Pineview keeps m1 at 0, so the m1 > m2 constraint doesn't apply. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
 598
/*
 * Exhaustively search the divisor ranges in @limit for the combination
 * whose resulting dot clock is closest to @target, storing it in
 * @best_clock.  Returns true iff at least one valid combination was found.
 * @match_clock, when non-NULL, constrains candidates to the same post
 * divider (p).
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;	/* worst acceptable error; improved below */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if (is_dual_link_lvds(dev_priv, LVDS))

			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Otherwise the target dot clock decides slow vs fast p2. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					/* Derive m/p/vco/dot for this set. */
					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* Keep the candidate closest to target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
 664
/*
 * G4X/Ironlake PLL search.  Unlike intel_find_best_PLL(), only accepts
 * candidates within ~0.585% of @target and iterates from large divisors
 * downwards per hardware preference.  Returns true iff a candidate
 * within tolerance was found (stored in @best_clock).
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* The LVDS control register lives in the PCH on split designs. */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		/* Trust the panel's current channel-B power state for p2. */
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Don't bother with larger n. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
 732
 733static bool
 734intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 735			   int target, int refclk, intel_clock_t *match_clock,
 736			   intel_clock_t *best_clock)
 737{
 738	struct drm_device *dev = crtc->dev;
 739	intel_clock_t clock;
 740
 741	if (target < 200000) {
 742		clock.n = 1;
 743		clock.p1 = 2;
 744		clock.p2 = 10;
 745		clock.m1 = 12;
 746		clock.m2 = 9;
 747	} else {
 748		clock.n = 2;
 749		clock.p1 = 1;
 750		clock.p2 = 10;
 751		clock.m1 = 14;
 752		clock.m2 = 8;
 753	}
 754	intel_clock(dev, refclk, &clock);
 755	memcpy(best_clock, &clock, sizeof(intel_clock_t));
 756	return true;
 757}
 758
 759/* DisplayPort has only two frequencies, 162MHz and 270MHz */
 760static bool
 761intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 762		      int target, int refclk, intel_clock_t *match_clock,
 763		      intel_clock_t *best_clock)
 764{
 765	intel_clock_t clock;
 766	if (target < 200000) {
 767		clock.p1 = 2;
 768		clock.p2 = 10;
 769		clock.n = 2;
 770		clock.m1 = 23;
 771		clock.m2 = 8;
 772	} else {
 773		clock.p1 = 1;
 774		clock.p2 = 10;
 775		clock.n = 1;
 776		clock.m1 = 14;
 777		clock.m2 = 2;
 778	}
 779	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
 780	clock.p = (clock.p1 * clock.p2);
 781	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
 782	clock.vco = 0;
 783	memcpy(best_clock, &clock, sizeof(intel_clock_t));
 784	return true;
 785}
 786
/* Wait for a vblank on @pipe by watching the frame counter advance
 * (up to 50ms); used on gen5+ instead of the PIPESTAT poll. */
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPEFRAME(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
 797
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Gen5+ polls the frame counter instead of PIPESTAT. */
	if (INTEL_INFO(dev)->gen >= 5) {
		ironlake_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
 838
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* The scanline field width differs between gen2 and gen3+. */
		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			mdelay(5);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
 887
 888static const char *state_string(bool enabled)
 889{
 890	return enabled ? "on" : "off";
 891}
 892
/* Only for pre-ILK configs */
/* Assert that pipe @pipe's DPLL VCO enable bit matches @state. */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
 910
/* For ILK+ */
/* Assert that @pll's enable bit matches @state, and (on CPT, when @crtc
 * is given) that the PLL is routed to @crtc's transcoder.  LPT has no
 * shared PCH PLLs, so the check is skipped there. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   struct intel_pch_pll *pll,
			   struct intel_crtc *crtc,
			   bool state)

{
	u32 val;
	bool cur_state;

	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
		return;
	}

	if (WARN (!pll,
		  "asserting PCH PLL %s with no PLL\n", state_string(state)))
		return;

	val = I915_READ(pll->pll_reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
	     pll->pll_reg, state_string(state), state_string(cur_state), val);

	/* Make sure the selected PLL is correctly attached to the transcoder */
	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);
		/* cur_state reused here as "is PLL B" (bit value expected
		 * in the per-pipe select field). */
		cur_state = pll->pll_reg == _PCH_DPLL_B;
		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
			  "PLL[%d] not attached to this transcoder %d: %08x\n",
			  cur_state, crtc->pipe, pch_dpll)) {
			/* NOTE(review): this shifts 'val' (the PLL register)
			 * rather than 'pch_dpll' (the select register) —
			 * looks suspicious; confirm which was intended. */
			cur_state = !!(val >> (4*crtc->pipe + 3));
			WARN(cur_state != state,
			     "PLL[%d] not %s on this transcoder %d: %08x\n",
			     pll->pll_reg == _PCH_DPLL_B,
			     state_string(state),
			     crtc->pipe,
			     val);
		}
	}
}
#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 956
/* Assert that the FDI transmitter (DDI function on Haswell) for @pipe
 * matches @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (IS_HASWELL(dev_priv->dev)) {
		/* On Haswell, DDI is used instead of FDI_TX_CTL */
		reg = DDI_FUNC_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
 980
 981static void assert_fdi_rx(struct drm_i915_private *dev_priv,
 982			  enum pipe pipe, bool state)
 983{
 984	int reg;
 985	u32 val;
 986	bool cur_state;
 987
 988	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
 989			DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
 990			return;
 991	} else {
 992		reg = FDI_RX_CTL(pipe);
 993		val = I915_READ(reg);
 994		cur_state = !!(val & FDI_RX_ENABLE);
 995	}
 996	WARN(cur_state != state,
 997	     "FDI RX state assertion failure (expected %s, current %s)\n",
 998	     state_string(state), state_string(cur_state));
 999}
1000#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1001#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1002
/* Assert that the FDI TX PLL for @pipe is running; no-op on ILK (always
 * on) and Haswell (handled by the DDI ports). */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (IS_HASWELL(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1021
/* Assert that the FDI RX PLL for @pipe is running (Haswell only has
 * FDI behind pipe A, so other pipes just log an error). */
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
		DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
		return;
	}
	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}
1036
/* Assert that the panel-power-sequencer registers driving @pipe are not
 * write-protected (needed before touching the pipe's DPLL). */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* PP/LVDS registers moved into the PCH on split designs. */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Regs are unlocked when the panel is off or the unlock key is set. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	/* Figure out which pipe the panel is actually attached to. */
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1065
/* Assert that @pipe's PIPECONF enable bit matches @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
 
 
1084
/* Assert that display plane @plane's enable bit matches @state. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1102
/* Assert that no display plane is feeding @pipe (all planes on ILK+,
 * where planes are fixed to pipes; otherwise any plane selecting it). */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DISPLAY_PLANE_ENABLE),
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1131
/* Assert that at least one PCH reference clock source is enabled
 * (skipped on LPT, which has no PCH refclk). */
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
		return;
	}

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
1147
/* Assert that the PCH transcoder for @pipe is disabled. */
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1162
1163static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1164			    enum pipe pipe, u32 port_sel, u32 val)
1165{
1166	if ((val & DP_PORT_EN) == 0)
1167		return false;
1168
1169	if (HAS_PCH_CPT(dev_priv->dev)) {
1170		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1171		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1172		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1173			return false;
1174	} else {
1175		if ((val & DP_PIPE_MASK) != (pipe << 30))
1176			return false;
1177	}
1178	return true;
1179}
1180
1181static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1182			      enum pipe pipe, u32 val)
1183{
1184	if ((val & PORT_ENABLE) == 0)
1185		return false;
1186
1187	if (HAS_PCH_CPT(dev_priv->dev)) {
1188		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1189			return false;
1190	} else {
1191		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1192			return false;
1193	}
1194	return true;
1195}
1196
1197static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1198			      enum pipe pipe, u32 val)
1199{
1200	if ((val & LVDS_PORT_EN) == 0)
1201		return false;
1202
1203	if (HAS_PCH_CPT(dev_priv->dev)) {
1204		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1205			return false;
1206	} else {
1207		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1208			return false;
1209	}
1210	return true;
1211}
1212
1213static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1214			      enum pipe pipe, u32 val)
1215{
1216	if ((val & ADPA_DAC_ENABLE) == 0)
1217		return false;
1218	if (HAS_PCH_CPT(dev_priv->dev)) {
1219		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1220			return false;
1221	} else {
1222		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1223			return false;
1224	}
1225	return true;
1226}
1227
/* Assert that PCH DP port @reg is not enabled on transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1236
/* Assert that PCH HDMI port @reg is not enabled on transcoder @pipe. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1245
/* Assert that no PCH port (DP B/C/D, VGA, LVDS, HDMI B/C/D) is still
 * enabled on transcoder @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}
1272
/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1311
/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);	/* flush the write before returning */
}
1339
/* SBI access */
/* Write @value to sideband register @reg via the SBI interface.
 * Errors are logged but not propagated to the caller. */
static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
{
	unsigned long flags;

	/* dpio_lock serializes all sideband (SBI and DPIO) traffic. */
	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		goto out_unlock;
	}

	/* Address goes in the upper 16 bits of SBI_ADDR. */
	I915_WRITE(SBI_ADDR,
			(reg << 16));
	I915_WRITE(SBI_DATA,
			value);
	I915_WRITE(SBI_CTL_STAT,
			SBI_BUSY |
			SBI_CTL_OP_CRWR);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
1370
1371static u32
1372intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
1373{
1374	unsigned long flags;
1375	u32 value;
1376
1377	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1378	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1379				100)) {
1380		DRM_ERROR("timeout waiting for SBI to become ready\n");
1381		goto out_unlock;
1382	}
1383
1384	I915_WRITE(SBI_ADDR,
1385			(reg << 16));
1386	I915_WRITE(SBI_CTL_STAT,
1387			SBI_BUSY |
1388			SBI_CTL_OP_CRRD);
1389
1390	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1391				100)) {
1392		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1393		goto out_unlock;
1394	}
1395
1396	value = I915_READ(SBI_DATA);
1397
1398out_unlock:
1399	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1400	return value;
1401}
1402
/**
 * intel_enable_pch_pll - enable a PCH PLL
 * @intel_crtc: CRTC whose assigned PCH PLL should be enabled
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.  PLLs are shared and reference counted:
 * only the first active user actually turns the hardware on.
 */
static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll;
	int reg;
	u32 val;

	/* PCH PLLs only available on ILK, SNB and IVB */
	BUG_ON(dev_priv->info->gen < 5);
	pll = intel_crtc->pch_pll;
	/* This CRTC may not have a shared PLL assigned at all. */
	if (pll == NULL)
		return;

	/* A zero refcount means the PLL was never obtained for this CRTC. */
	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	/* Count this user; if another CRTC already turned the PLL on just
	 * sanity-check the hardware state and return. */
	if (pll->active++ && pll->on) {
		assert_pch_pll_enabled(dev_priv, pll, NULL);
		return;
	}

	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);

	reg = pll->pll_reg;
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200); /* let the PLL settle */

	pll->on = true;
}
1450
1451static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
 
1452{
1453	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1454	struct intel_pch_pll *pll = intel_crtc->pch_pll;
1455	int reg;
1456	u32 val;
1457
1458	/* PCH only available on ILK+ */
1459	BUG_ON(dev_priv->info->gen < 5);
1460	if (pll == NULL)
1461	       return;
1462
1463	if (WARN_ON(pll->refcount == 0))
1464		return;
1465
1466	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1467		      pll->pll_reg, pll->active, pll->on,
1468		      intel_crtc->base.base.id);
1469
1470	if (WARN_ON(pll->active == 0)) {
1471		assert_pch_pll_disabled(dev_priv, pll, NULL);
1472		return;
1473	}
1474
1475	if (--pll->active) {
1476		assert_pch_pll_enabled(dev_priv, pll, NULL);
1477		return;
1478	}
1479
1480	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1481
1482	/* Make sure transcoder isn't still depending on us */
1483	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1484
1485	reg = pll->pll_reg;
1486	val = I915_READ(reg);
1487	val &= ~DPLL_VCO_ENABLE;
1488	I915_WRITE(reg, val);
1489	POSTING_READ(reg);
1490	udelay(200);
1491
1492	pll->on = false;
1493}
1494
1495static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1496				    enum pipe pipe)
1497{
1498	int reg;
1499	u32 val, pipeconf_val;
1500	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1501
1502	/* PCH only available on ILK+ */
1503	BUG_ON(dev_priv->info->gen < 5);
1504
1505	/* Make sure PCH DPLL is enabled */
1506	assert_pch_pll_enabled(dev_priv,
1507			       to_intel_crtc(crtc)->pch_pll,
1508			       to_intel_crtc(crtc));
1509
1510	/* FDI must be feeding us bits for PCH ports */
1511	assert_fdi_tx_enabled(dev_priv, pipe);
1512	assert_fdi_rx_enabled(dev_priv, pipe);
1513
1514	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1515		DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
1516		return;
1517	}
1518	reg = TRANSCONF(pipe);
1519	val = I915_READ(reg);
1520	pipeconf_val = I915_READ(PIPECONF(pipe));
1521
1522	if (HAS_PCH_IBX(dev_priv->dev)) {
1523		/*
1524		 * make the BPC in transcoder be consistent with
1525		 * that in pipeconf reg.
1526		 */
1527		val &= ~PIPE_BPC_MASK;
1528		val |= pipeconf_val & PIPE_BPC_MASK;
1529	}
1530
1531	val &= ~TRANS_INTERLACE_MASK;
1532	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1533		if (HAS_PCH_IBX(dev_priv->dev) &&
1534		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1535			val |= TRANS_LEGACY_INTERLACED_ILK;
1536		else
1537			val |= TRANS_INTERLACED;
1538	else
1539		val |= TRANS_PROGRESSIVE;
1540
1541	I915_WRITE(reg, val | TRANS_ENABLE);
1542	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1543		DRM_ERROR("failed to enable transcoder %d\n", pipe);
1544}
1545
1546static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1547				     enum pipe pipe)
1548{
1549	int reg;
1550	u32 val;
1551
1552	/* FDI relies on the transcoder */
1553	assert_fdi_tx_disabled(dev_priv, pipe);
1554	assert_fdi_rx_disabled(dev_priv, pipe);
1555
1556	/* Ports must be off as well */
1557	assert_pch_ports_disabled(dev_priv, pipe);
1558
1559	reg = TRANSCONF(pipe);
1560	val = I915_READ(reg);
1561	val &= ~TRANS_ENABLE;
1562	I915_WRITE(reg, val);
1563	/* wait for PCH transcoder off, transcoder state */
1564	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1565		DRM_ERROR("failed to disable transcoder %d\n", pipe);
1566}
1567
1568/**
1569 * intel_enable_pipe - enable a pipe, asserting requirements
1570 * @dev_priv: i915 private structure
1571 * @pipe: pipe to enable
1572 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1573 *
1574 * Enable @pipe, making sure that various hardware specific requirements
1575 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1576 *
1577 * @pipe should be %PIPE_A or %PIPE_B.
1578 *
1579 * Will wait until the pipe is actually running (i.e. first vblank) before
1580 * returning.
1581 */
1582static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1583			      bool pch_port)
1584{
1585	int reg;
1586	u32 val;
1587
1588	/*
1589	 * A pipe without a PLL won't actually be able to drive bits from
1590	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1591	 * need the check.
1592	 */
1593	if (!HAS_PCH_SPLIT(dev_priv->dev))
1594		assert_pll_enabled(dev_priv, pipe);
1595	else {
1596		if (pch_port) {
1597			/* if driving the PCH, we need FDI enabled */
1598			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1599			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1600		}
1601		/* FIXME: assert CPU port conditions for SNB+ */
1602	}
1603
1604	reg = PIPECONF(pipe);
1605	val = I915_READ(reg);
1606	if (val & PIPECONF_ENABLE)
1607		return;
1608
1609	I915_WRITE(reg, val | PIPECONF_ENABLE);
1610	intel_wait_for_vblank(dev_priv->dev, pipe);
1611}
1612
1613/**
1614 * intel_disable_pipe - disable a pipe, asserting requirements
1615 * @dev_priv: i915 private structure
1616 * @pipe: pipe to disable
1617 *
1618 * Disable @pipe, making sure that various hardware specific requirements
1619 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1620 *
1621 * @pipe should be %PIPE_A or %PIPE_B.
1622 *
1623 * Will wait until the pipe has shut down before returning.
1624 */
1625static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1626			       enum pipe pipe)
1627{
1628	int reg;
1629	u32 val;
1630
1631	/*
1632	 * Make sure planes won't keep trying to pump pixels to us,
1633	 * or we might hang the display.
1634	 */
1635	assert_planes_disabled(dev_priv, pipe);
1636
1637	/* Don't disable pipe A or pipe A PLLs if needed */
1638	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1639		return;
1640
1641	reg = PIPECONF(pipe);
1642	val = I915_READ(reg);
1643	if ((val & PIPECONF_ENABLE) == 0)
1644		return;
1645
1646	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1647	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1648}
1649
1650/*
1651 * Plane regs are double buffered, going from enabled->disabled needs a
1652 * trigger in order to latch.  The display address reg provides this.
1653 */
1654void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1655				      enum plane plane)
1656{
1657	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1658	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1659}
1660
1661/**
1662 * intel_enable_plane - enable a display plane on a given pipe
1663 * @dev_priv: i915 private structure
1664 * @plane: plane to enable
1665 * @pipe: pipe being fed
1666 *
1667 * Enable @plane on @pipe, making sure that @pipe is running first.
1668 */
1669static void intel_enable_plane(struct drm_i915_private *dev_priv,
1670			       enum plane plane, enum pipe pipe)
1671{
1672	int reg;
1673	u32 val;
1674
1675	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1676	assert_pipe_enabled(dev_priv, pipe);
1677
1678	reg = DSPCNTR(plane);
1679	val = I915_READ(reg);
1680	if (val & DISPLAY_PLANE_ENABLE)
1681		return;
1682
1683	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1684	intel_flush_display_plane(dev_priv, plane);
1685	intel_wait_for_vblank(dev_priv->dev, pipe);
1686}
1687
1688/**
1689 * intel_disable_plane - disable a display plane
1690 * @dev_priv: i915 private structure
1691 * @plane: plane to disable
1692 * @pipe: pipe consuming the data
1693 *
1694 * Disable @plane; should be an independent operation.
1695 */
1696static void intel_disable_plane(struct drm_i915_private *dev_priv,
1697				enum plane plane, enum pipe pipe)
1698{
1699	int reg;
1700	u32 val;
1701
1702	reg = DSPCNTR(plane);
1703	val = I915_READ(reg);
1704	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1705		return;
1706
1707	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1708	intel_flush_display_plane(dev_priv, plane);
1709	intel_wait_for_vblank(dev_priv->dev, pipe);
1710}
1711
1712static void disable_pch_dp(struct drm_i915_private *dev_priv,
1713			   enum pipe pipe, int reg, u32 port_sel)
1714{
1715	u32 val = I915_READ(reg);
1716	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1717		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1718		I915_WRITE(reg, val & ~DP_PORT_EN);
1719	}
1720}
1721
1722static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1723			     enum pipe pipe, int reg)
1724{
1725	u32 val = I915_READ(reg);
1726	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1727		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1728			      reg, pipe);
1729		I915_WRITE(reg, val & ~PORT_ENABLE);
1730	}
1731}
1732
/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 reg, val;

	/* Unlock the panel-protected registers before touching the ports. */
	val = I915_READ(PCH_PP_CONTROL);
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

	/* DisplayPort B/C/D */
	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	/* Analog CRT */
	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (adpa_pipe_enabled(dev_priv, pipe, val))
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

	/* LVDS: flush the disable and give the port time to settle. */
	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
		POSTING_READ(reg);
		udelay(100);
	}

	/* HDMI B/C/D */
	disable_pch_hdmi(dev_priv, pipe, HDMIB);
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
	disable_pch_hdmi(dev_priv, pipe, HDMID);
}
1764
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * intel_pin_and_fence_fb_obj - pin a GEM object for scanout and fence it
 * @dev: DRM device
 * @obj: object backing the framebuffer
 * @pipelined: ring to pipeline the pin against, or NULL
 *
 * Pins @obj into the display plane with the tiling-appropriate alignment
 * and installs a fence register.  Returns 0 on success or a negative
 * errno; on failure the object is left unpinned.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	/* Alignment requirements depend on tiling and hardware generation. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Scanout setup must not be aborted by signals; restore the flag
	 * on every exit path below. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
1820
/* Release the fence and pin obtained by intel_pin_and_fence_fb_obj();
 * the fence must go before the pin. */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin(obj);
}
1826
/*
 * i9xx_update_plane - program primary plane registers (pre-ILK path)
 * @crtc: CRTC owning the plane
 * @fb: framebuffer to scan out
 * @x: horizontal panning offset in pixels
 * @y: vertical panning offset in lines
 *
 * Programs pixel format, tiling, stride and base/offset registers for
 * the plane.  Returns 0 on success, -EINVAL for an unsupported plane
 * index or color depth.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* Only planes 0 and 1 exist on this hardware path. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	/* Tiling control bit only exists on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	/* Panning offset in bytes from the surface base. */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ splits surface base and tile/linear offset. */
		I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return 0;
}
1899
/*
 * ironlake_update_plane - program primary plane registers (ILK+ path)
 * @crtc: CRTC owning the plane
 * @fb: framebuffer to scan out
 * @x: horizontal panning offset in pixels
 * @y: vertical panning offset in lines
 *
 * Like i9xx_update_plane() but for ILK+: supports plane 2, rejects
 * depth/bpp combinations this hardware can't scan out, and always
 * disables trickle feed.  Returns 0 or -EINVAL.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	/* ILK+ has up to three planes. */
	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		/* Only 5:6:5 is supported at 16 bpp here. */
		if (fb->depth != 16)
			return -EINVAL;

		dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else
			return -EINVAL;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	/* Panning offset in bytes from the surface base. */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}
1977
1978/* Assume fb object is pinned & idle & fenced and just update base pointers */
1979static int
1980intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1981			   int x, int y, enum mode_set_atomic state)
1982{
1983	struct drm_device *dev = crtc->dev;
1984	struct drm_i915_private *dev_priv = dev->dev_private;
1985
1986	if (dev_priv->display.disable_fbc)
1987		dev_priv->display.disable_fbc(dev);
1988	intel_increase_pllclock(crtc);
1989
1990	return dev_priv->display.update_plane(crtc, fb, x, y);
1991}
1992
/*
 * intel_finish_fb - wait for all GPU activity on an old framebuffer
 * @old_fb: framebuffer being replaced
 *
 * Blocks until pending page flips on the backing object have completed
 * (or the GPU is wedged), then retires any GPU access to it so it can
 * be safely unpinned.  Returns the result of the GPU finish.
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Wait for outstanding flips; a wedged GPU also releases us. */
	wait_event(dev_priv->pending_flip_queue,
		   atomic_read(&dev_priv->mm.wedged) ||
		   atomic_read(&obj->pending_flip) == 0);

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}
2019
2020static int
2021intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2022		    struct drm_framebuffer *old_fb)
2023{
2024	struct drm_device *dev = crtc->dev;
2025	struct drm_i915_private *dev_priv = dev->dev_private;
2026	struct drm_i915_master_private *master_priv;
2027	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2028	int ret;
2029
2030	/* no fb bound */
2031	if (!crtc->fb) {
2032		DRM_ERROR("No FB bound\n");
2033		return 0;
2034	}
2035
2036	if(intel_crtc->plane > dev_priv->num_pipe) {
2037		DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2038				intel_crtc->plane,
2039				dev_priv->num_pipe);
 
 
2040		return -EINVAL;
2041	}
2042
2043	mutex_lock(&dev->struct_mutex);
2044	ret = intel_pin_and_fence_fb_obj(dev,
2045					 to_intel_framebuffer(crtc->fb)->obj,
2046					 NULL);
2047	if (ret != 0) {
2048		mutex_unlock(&dev->struct_mutex);
2049		DRM_ERROR("pin & fence failed\n");
2050		return ret;
2051	}
2052
2053	if (old_fb)
2054		intel_finish_fb(old_fb);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2055
2056	ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
 
2057	if (ret) {
2058		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2059		mutex_unlock(&dev->struct_mutex);
2060		DRM_ERROR("failed to update base address\n");
2061		return ret;
2062	}
2063
2064	if (old_fb) {
2065		intel_wait_for_vblank(dev, intel_crtc->pipe);
2066		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2067	}
2068
2069	intel_update_fbc(dev);
2070	mutex_unlock(&dev->struct_mutex);
2071
2072	if (!dev->primary->master)
2073		return 0;
2074
2075	master_priv = dev->primary->master->driver_priv;
2076	if (!master_priv->sarea_priv)
2077		return 0;
2078
2079	if (intel_crtc->pipe) {
2080		master_priv->sarea_priv->pipeB_x = x;
2081		master_priv->sarea_priv->pipeB_y = y;
2082	} else {
2083		master_priv->sarea_priv->pipeA_x = x;
2084		master_priv->sarea_priv->pipeA_y = y;
2085	}
2086
2087	return 0;
2088}
2089
/*
 * ironlake_set_pll_edp - select the eDP PLL frequency for a link clock
 * @crtc: CRTC driving eDP
 * @clock: link clock in kHz
 *
 * Picks the 160MHz PLL (with its documented register workaround) for
 * clocks below 200MHz, otherwise the 270MHz PLL, then lets the PLL
 * settle.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500); /* let the PLL lock */
}
2126
/*
 * intel_fdi_normal_train - switch the FDI link to normal (non-training)
 * @crtc: CRTC whose FDI link finished training
 *
 * Takes TX and RX out of the training patterns, enables enhanced
 * framing, waits one idle-pattern time, and enables error correction
 * on IVB.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* NOTE(review): the clear of FDI_LINK_TRAIN_NONE_IVB is
		 * immediately undone by the set below — presumably a
		 * belt-and-braces mask/set idiom; verify intent. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2167
2168static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2169{
2170	struct drm_i915_private *dev_priv = dev->dev_private;
2171	u32 flags = I915_READ(SOUTH_CHICKEN1);
2172
2173	flags |= FDI_PHASE_SYNC_OVR(pipe);
2174	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2175	flags |= FDI_PHASE_SYNC_EN(pipe);
2176	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2177	POSTING_READ(SOUTH_CHICKEN1);
2178}
2179
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Trains the CPU-to-PCH FDI link in two phases (bit lock, then symbol
 * lock), polling FDI_RX_IIR up to five times per phase.  Failures are
 * logged but not propagated.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* Lane count is programmed in bits 21:19 as (lanes - 1). */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	/* Poll for bit lock; writing the status bit back clears it. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
2276
/* Voltage-swing / pre-emphasis combinations tried in order during
 * SNB/IVB FDI link training. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2283
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Two-phase FDI training like the ILK variant, but additionally sweeps
 * the four voltage/pre-emphasis levels in snb_b_fdi_train_param[] and
 * retries each level up to five times before giving up.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* Lane count is programmed in bits 21:19 as (lanes - 1). */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* Phase 1: sweep voltage/emphasis levels until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Write the status bit back to clear it. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 2: sweep voltage/emphasis levels until symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2415
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* program the lane count field (bits 21:19) from fdi_lanes */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* IVB has its own train-pattern encoding; select pattern 1 */
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	/* start with the lowest voltage-swing/emphasis (SNB-B value) */
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* step through the four vswing/emphasis combinations until the
	 * receiver reports bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		/* IIR is sampled twice — presumably to catch a lock bit that
		 * latches between the two reads on A0 parts; TODO confirm */
		if (temp & FDI_RX_BIT_LOCK ||
		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
			/* write the bit back to clear the sticky status */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* same vswing/emphasis walk, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2529
/* Enable the FDI PLLs (PCH RX, then CPU TX) for @crtc's pipe and switch
 * the RX side from the raw clock to PCDclk. Must run before FDI link
 * training can be attempted.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* clear lane-count (21:19) and BPC (18:16) fields, then refill */
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* mirror the pipe's BPC setting into the FDI RX BPC field */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* On Haswell, the PLL configuration for ports and pipes is handled
	 * separately, as part of DDI setup */
	if (!IS_HASWELL(dev)) {
		/* Enable CPU FDI TX PLL, always on for Ironlake */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		if ((temp & FDI_TX_PLL_ENABLE) == 0) {
			I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

			POSTING_READ(reg);
			udelay(100);
		}
	}
}
2574
2575static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2576{
2577	struct drm_i915_private *dev_priv = dev->dev_private;
2578	u32 flags = I915_READ(SOUTH_CHICKEN1);
2579
2580	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2581	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2582	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2583	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2584	POSTING_READ(SOUTH_CHICKEN1);
2585}
2586static void ironlake_fdi_disable(struct drm_crtc *crtc)
2587{
2588	struct drm_device *dev = crtc->dev;
2589	struct drm_i915_private *dev_priv = dev->dev_private;
2590	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2591	int pipe = intel_crtc->pipe;
2592	u32 reg, temp;
2593
2594	/* disable CPU FDI tx and PCH FDI rx */
2595	reg = FDI_TX_CTL(pipe);
2596	temp = I915_READ(reg);
2597	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2598	POSTING_READ(reg);
2599
2600	reg = FDI_RX_CTL(pipe);
2601	temp = I915_READ(reg);
2602	temp &= ~(0x7 << 16);
2603	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2604	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2605
2606	POSTING_READ(reg);
2607	udelay(100);
2608
2609	/* Ironlake workaround, disable clock pointer after downing FDI */
2610	if (HAS_PCH_IBX(dev)) {
2611		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2612		I915_WRITE(FDI_RX_CHICKEN(pipe),
2613			   I915_READ(FDI_RX_CHICKEN(pipe) &
2614				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2615	} else if (HAS_PCH_CPT(dev)) {
2616		cpt_phase_pointer_disable(dev, pipe);
2617	}
2618
2619	/* still set train pattern 1 */
2620	reg = FDI_TX_CTL(pipe);
2621	temp = I915_READ(reg);
2622	temp &= ~FDI_LINK_TRAIN_NONE;
2623	temp |= FDI_LINK_TRAIN_PATTERN_1;
2624	I915_WRITE(reg, temp);
2625
2626	reg = FDI_RX_CTL(pipe);
2627	temp = I915_READ(reg);
2628	if (HAS_PCH_CPT(dev)) {
2629		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2630		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2631	} else {
2632		temp &= ~FDI_LINK_TRAIN_NONE;
2633		temp |= FDI_LINK_TRAIN_PATTERN_1;
2634	}
2635	/* BPC in FDI rx is consistent with that in PIPECONF */
2636	temp &= ~(0x07 << 16);
2637	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2638	I915_WRITE(reg, temp);
2639
2640	POSTING_READ(reg);
2641	udelay(100);
2642}
2643
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2644static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2645{
2646	struct drm_device *dev = crtc->dev;
 
2647
2648	if (crtc->fb == NULL)
2649		return;
2650
2651	mutex_lock(&dev->struct_mutex);
2652	intel_finish_fb(crtc->fb);
2653	mutex_unlock(&dev->struct_mutex);
 
2654}
2655
/* Return true when @crtc drives a PCH-side output (and therefore needs FDI,
 * the transcoder, etc.), false when it drives a CPU-side output such as a
 * non-PCH eDP port.
 */
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/*
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
	 * must be driven by its own crtc; no sharing is possible.
	 */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
		 * CPU handles all others */
		if (IS_HASWELL(dev)) {
			/* It is still unclear how this will work on PPT, so throw up a warning */
			WARN_ON(!HAS_PCH_LPT(dev));

			if (encoder->type == DRM_MODE_ENCODER_DAC) {
				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
				return true;
			} else {
				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
						encoder->type);
				return false;
			}
		}

		switch (encoder->type) {
		case INTEL_OUTPUT_EDP:
			if (!intel_encoder_is_pch_edp(&encoder->base))
				return false;
			/* note: this continue applies to the encoder-list
			 * loop, not the switch — PCH eDP keeps scanning */
			continue;
		}
	}

	/* no CPU-side output found on this crtc: assume PCH */
	return true;
}
2696
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
				intel_sbi_read(dev_priv, SBI_SSCCTL6) |
					SBI_SSCCTL_DISABLE);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (crtc->mode.clock == 20000) {
		/* hard-coded divisor/phase values for exactly 20 MHz */
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the crtc->mode.clock in in KHz. To get the divisors,
		 * it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		/* split the total divisor into integer (divsel) and
		 * phase-increment (phaseinc) parts */
		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			crtc->mode.clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;

	intel_sbi_write(dev_priv,
			SBI_SSCDIVINTPHASE6,
			temp);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv,
			SBI_SSCAUXDIV6,
			temp);


	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv,
			SBI_SSCCTL6,
			temp);

	/* Wait for initialization time */
	udelay(24);

	/* re-gate the pixclk gate now that programming is complete */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
2787
2788/*
2789 * Enable PCH resources required for PCH ports:
2790 *   - PCH PLLs
2791 *   - FDI training & RX/TX
2792 *   - update transcoder timings
2793 *   - DP transcoding bits
2794 *   - transcoder
2795 */
2796static void ironlake_pch_enable(struct drm_crtc *crtc)
2797{
2798	struct drm_device *dev = crtc->dev;
2799	struct drm_i915_private *dev_priv = dev->dev_private;
2800	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2801	int pipe = intel_crtc->pipe;
2802	u32 reg, temp;
2803
2804	assert_transcoder_disabled(dev_priv, pipe);
2805
2806	/* For PCH output, training FDI link */
2807	dev_priv->display.fdi_link_train(crtc);
2808
2809	intel_enable_pch_pll(intel_crtc);
2810
2811	if (HAS_PCH_LPT(dev)) {
2812		DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
2813		lpt_program_iclkip(crtc);
2814	} else if (HAS_PCH_CPT(dev)) {
2815		u32 sel;
2816
 
 
2817		temp = I915_READ(PCH_DPLL_SEL);
2818		switch (pipe) {
2819		default:
2820		case 0:
2821			temp |= TRANSA_DPLL_ENABLE;
2822			sel = TRANSA_DPLLB_SEL;
2823			break;
2824		case 1:
2825			temp |= TRANSB_DPLL_ENABLE;
2826			sel = TRANSB_DPLLB_SEL;
2827			break;
2828		case 2:
2829			temp |= TRANSC_DPLL_ENABLE;
2830			sel = TRANSC_DPLLB_SEL;
2831			break;
2832		}
2833		if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
2834			temp |= sel;
2835		else
2836			temp &= ~sel;
2837		I915_WRITE(PCH_DPLL_SEL, temp);
2838	}
2839
2840	/* set transcoder timing, panel must allow it */
2841	assert_panel_unlocked(dev_priv, pipe);
2842	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2843	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2844	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
2845
2846	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2847	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2848	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
2849	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
2850
2851	if (!IS_HASWELL(dev))
2852		intel_fdi_normal_train(crtc);
2853
2854	/* For PCH DP, enable TRANS_DP_CTL */
2855	if (HAS_PCH_CPT(dev) &&
2856	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2857	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2858		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2859		reg = TRANS_DP_CTL(pipe);
2860		temp = I915_READ(reg);
2861		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2862			  TRANS_DP_SYNC_MASK |
2863			  TRANS_DP_BPC_MASK);
2864		temp |= (TRANS_DP_OUTPUT_ENABLE |
2865			 TRANS_DP_ENH_FRAMING);
2866		temp |= bpc << 9; /* same format but at 11:9 */
2867
2868		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2869			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2870		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2871			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2872
2873		switch (intel_trans_dp_port_sel(crtc)) {
2874		case PCH_DP_B:
2875			temp |= TRANS_DP_PORT_SEL_B;
2876			break;
2877		case PCH_DP_C:
2878			temp |= TRANS_DP_PORT_SEL_C;
2879			break;
2880		case PCH_DP_D:
2881			temp |= TRANS_DP_PORT_SEL_D;
2882			break;
2883		default:
2884			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2885			temp |= TRANS_DP_PORT_SEL_B;
2886			break;
2887		}
2888
2889		I915_WRITE(reg, temp);
2890	}
2891
2892	intel_enable_transcoder(dev_priv, pipe);
2893}
2894
2895static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
2896{
2897	struct intel_pch_pll *pll = intel_crtc->pch_pll;
2898
2899	if (pll == NULL)
2900		return;
2901
2902	if (pll->refcount == 0) {
2903		WARN(1, "bad PCH PLL refcount\n");
2904		return;
2905	}
2906
2907	--pll->refcount;
2908	intel_crtc->pch_pll = NULL;
2909}
2910
2911static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
2912{
2913	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
2914	struct intel_pch_pll *pll;
2915	int i;
2916
2917	pll = intel_crtc->pch_pll;
2918	if (pll) {
2919		DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
2920			      intel_crtc->base.base.id, pll->pll_reg);
2921		goto prepare;
2922	}
2923
2924	if (HAS_PCH_IBX(dev_priv->dev)) {
2925		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
2926		i = intel_crtc->pipe;
2927		pll = &dev_priv->pch_plls[i];
2928
2929		DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
2930			      intel_crtc->base.base.id, pll->pll_reg);
2931
2932		goto found;
2933	}
2934
2935	for (i = 0; i < dev_priv->num_pch_pll; i++) {
2936		pll = &dev_priv->pch_plls[i];
2937
2938		/* Only want to check enabled timings first */
2939		if (pll->refcount == 0)
2940			continue;
2941
2942		if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
2943		    fp == I915_READ(pll->fp0_reg)) {
2944			DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
2945				      intel_crtc->base.base.id,
2946				      pll->pll_reg, pll->refcount, pll->active);
2947
2948			goto found;
2949		}
2950	}
2951
2952	/* Ok no matching timings, maybe there's a free one? */
2953	for (i = 0; i < dev_priv->num_pch_pll; i++) {
2954		pll = &dev_priv->pch_plls[i];
2955		if (pll->refcount == 0) {
2956			DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
2957				      intel_crtc->base.base.id, pll->pll_reg);
2958			goto found;
2959		}
2960	}
2961
2962	return NULL;
2963
2964found:
2965	intel_crtc->pch_pll = pll;
2966	pll->refcount++;
2967	DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
2968prepare: /* separate function? */
2969	DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
2970
2971	/* Wait for the clocks to stabilize before rewriting the regs */
2972	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
2973	POSTING_READ(pll->pll_reg);
2974	udelay(150);
2975
2976	I915_WRITE(pll->fp0_reg, fp);
2977	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
2978	pll->on = false;
2979	return pll;
2980}
2981
2982void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
2983{
2984	struct drm_i915_private *dev_priv = dev->dev_private;
2985	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
2986	u32 temp;
2987
2988	temp = I915_READ(dslreg);
2989	udelay(500);
2990	if (wait_for(I915_READ(dslreg) != temp, 5)) {
2991		/* Without this, mode sets may fail silently on FDI */
2992		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
2993		udelay(250);
2994		I915_WRITE(tc2reg, 0);
2995		if (wait_for(I915_READ(dslreg) != temp, 5))
2996			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
2997	}
2998}
2999
/* Full power-up sequence for an Ironlake-class crtc: FDI/PLL setup, panel
 * fitter, LUT, pipe and plane enable, then PCH resources if needed.
 * No-op when the crtc is already active.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 temp;
	bool is_pch_port;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	/* make sure the LVDS port is powered before anything else */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		temp = I915_READ(PCH_LVDS);
		if ((temp & LVDS_PORT_EN) == 0)
			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
	}

	is_pch_port = intel_crtc_driving_pch(crtc);

	if (is_pch_port)
		ironlake_fdi_pll_enable(crtc);
	else
		ironlake_fdi_disable(crtc);

	/* Enable panel fitting for LVDS */
	if (dev_priv->pch_pf_size &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
	}

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_enable_pipe(dev_priv, pipe, is_pch_port);
	intel_enable_plane(dev_priv, plane, pipe);

	if (is_pch_port)
		ironlake_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_crtc_update_cursor(crtc, true);
}
3059
/* Full power-down sequence for an Ironlake-class crtc, in the reverse
 * order of ironlake_crtc_enable(): plane, pipe, panel fitter, FDI, PCH
 * ports/transcoder, PCH PLL, then the FDI clocks.
 * No-op when the crtc is already inactive.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;

	if (!intel_crtc->active)
		return;

	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_update_cursor(crtc, false);

	intel_disable_plane(dev_priv, plane, pipe);

	/* FBC tracks a single plane; drop it if it was ours */
	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	/* Disable PF */
	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	ironlake_fdi_disable(crtc);

	/* This is a horrible layering violation; we should be doing this in
	 * the connector/encoder ->prepare instead, but we don't always have
	 * enough information there about the config to know whether it will
	 * actually be necessary or just cause undesired flicker.
	 */
	intel_disable_pch_ports(dev_priv, pipe);

	intel_disable_transcoder(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* disable TRANS_DP_CTL */
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
		temp |= TRANS_DP_PORT_SEL_NONE;
		I915_WRITE(reg, temp);

		/* disable DPLL_SEL */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
			break;
		case 1:
			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			break;
		case 2:
			/* C shares PLL A or B */
			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
			break;
		default:
			BUG(); /* wtf */
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* disable PCH DPLL */
	intel_disable_pch_pll(intel_crtc);

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
3156
3157static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3158{
3159	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3160	int pipe = intel_crtc->pipe;
3161	int plane = intel_crtc->plane;
3162
3163	/* XXX: When our outputs are all unaware of DPMS modes other than off
3164	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3165	 */
3166	switch (mode) {
3167	case DRM_MODE_DPMS_ON:
3168	case DRM_MODE_DPMS_STANDBY:
3169	case DRM_MODE_DPMS_SUSPEND:
3170		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3171		ironlake_crtc_enable(crtc);
3172		break;
3173
3174	case DRM_MODE_DPMS_OFF:
3175		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3176		ironlake_crtc_disable(crtc);
3177		break;
3178	}
3179}
3180
/* Per-crtc teardown hook: release the shared PCH PLL reference. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	intel_put_pch_pll(intel_crtc);
}
3186
3187static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3188{
3189	if (!enable && intel_crtc->overlay) {
3190		struct drm_device *dev = intel_crtc->base.dev;
3191		struct drm_i915_private *dev_priv = dev->dev_private;
3192
3193		mutex_lock(&dev->struct_mutex);
3194		dev_priv->mm.interruptible = false;
3195		(void) intel_overlay_switch_off(intel_crtc->overlay);
3196		dev_priv->mm.interruptible = true;
3197		mutex_unlock(&dev->struct_mutex);
3198	}
3199
3200	/* Let userspace switch the overlay on again. In most cases userspace
3201	 * has to recompute where to put it anyway.
3202	 */
3203}
3204
/* Power-up sequence for gen2-gen4 crtcs: PLL, pipe, plane, LUT, FBC,
 * overlay and cursor. No-op when the crtc is already active.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}
3230
/* Power-down sequence for gen2-gen4 crtcs, the reverse of
 * i9xx_crtc_enable(). No-op when the crtc is already inactive.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);

	/* FBC tracks a single plane; drop it if it was ours */
	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
}
3259
3260static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3261{
3262	/* XXX: When our outputs are all unaware of DPMS modes other than off
3263	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3264	 */
3265	switch (mode) {
3266	case DRM_MODE_DPMS_ON:
3267	case DRM_MODE_DPMS_STANDBY:
3268	case DRM_MODE_DPMS_SUSPEND:
3269		i9xx_crtc_enable(crtc);
3270		break;
3271	case DRM_MODE_DPMS_OFF:
3272		i9xx_crtc_disable(crtc);
3273		break;
3274	}
3275}
3276
/* Per-crtc teardown hook for gen2-gen4: nothing to release. */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
3280
3281/**
3282 * Sets the power management mode of the pipe and plane.
3283 */
3284static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3285{
3286	struct drm_device *dev = crtc->dev;
3287	struct drm_i915_private *dev_priv = dev->dev_private;
3288	struct drm_i915_master_private *master_priv;
3289	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3290	int pipe = intel_crtc->pipe;
3291	bool enabled;
3292
3293	if (intel_crtc->dpms_mode == mode)
3294		return;
3295
3296	intel_crtc->dpms_mode = mode;
3297
3298	dev_priv->display.dpms(crtc, mode);
3299
3300	if (!dev->primary->master)
3301		return;
3302
3303	master_priv = dev->primary->master->driver_priv;
3304	if (!master_priv->sarea_priv)
3305		return;
3306
3307	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3308
3309	switch (pipe) {
3310	case 0:
3311		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3312		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3313		break;
3314	case 1:
3315		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3316		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3317		break;
3318	default:
3319		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3320		break;
3321	}
3322}
3323
/* Fully disable @crtc: DPMS off, platform teardown, then unpin its
 * framebuffer object so the memory can be reused.
 */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
	/* platform hook: ironlake_crtc_off / i9xx_crtc_off */
	dev_priv->display.off(crtc);

	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
	}
}
3342
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will changes)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	/* simply power the crtc down before reprogramming it */
	i9xx_crtc_disable(crtc);
}
3355
/* Commit hook: power the gen2-gen4 crtc back up after a mode set. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
3360
/* Prepare hook: power the Ironlake-class crtc down before a mode set. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
3365
/* Commit hook: power the Ironlake-class crtc back up after a mode set. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
3370
/* Generic encoder prepare: turn the encoder off before a mode set. */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
3377
/* Generic encoder commit: turn the encoder back on after a mode set, and
 * on CPT verify the resulting pipe is actually running.
 */
void intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);

	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
3390
/* Destroy hook shared by intel encoders: clean up the drm encoder state
 * and free the containing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
3398
3399static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3400				  struct drm_display_mode *mode,
3401				  struct drm_display_mode *adjusted_mode)
3402{
3403	struct drm_device *dev = crtc->dev;
3404
3405	if (HAS_PCH_SPLIT(dev)) {
3406		/* FDI link clock is fixed at 2.7G */
3407		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3408			return false;
3409	}
3410
3411	/* All interlaced capable intel hw wants timings in frames. Note though
3412	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3413	 * timings, so we need to be careful not to clobber these.*/
3414	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3415		drm_mode_set_crtcinfo(adjusted_mode, 0);
3416
3417	return true;
3418}
3419
/* Core display clock in kHz for ValleyView. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return 400000; /* FIXME */
}
3424
/* Core display clock in kHz for i945: fixed 400 MHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
3429
/* Core display clock in kHz for i915: fixed 333 MHz. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
3434
/* Core display clock in kHz for the remaining i9xx variants: 200 MHz. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
3439
3440static int i915gm_get_display_clock_speed(struct drm_device *dev)
3441{
3442	u16 gcfgc = 0;
3443
3444	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3445
3446	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3447		return 133000;
3448	else {
3449		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3450		case GC_DISPLAY_CLOCK_333_MHZ:
3451			return 333000;
3452		default:
3453		case GC_DISPLAY_CLOCK_190_200_MHZ:
3454			return 190000;
3455		}
3456	}
3457}
3458
/* Core display clock in kHz for i865: fixed 266 MHz. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
3463
/* Core display clock in kHz for i855, decoded from the HPLLCC clock
 * control field.
 *
 * NOTE(review): hpllcc is initialized to 0 and never actually read from
 * hardware, so this always returns the value matching a zero field — the
 * comment below says the high-speed state is assumed; confirm whether a
 * real register read was intended here.
 */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3483
/* i830: display core clock is a fixed 133MHz. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133 * 1000;
}
3488
/*
 * FDI link M/N values: ratios relating pixel bandwidth to link bandwidth,
 * computed by ironlake_compute_m_n() below.
 */
struct fdi_m_n {
	u32        tu;		/* transfer unit size (defaults to 64) */
	u32        gmch_m;	/* data M: bits_per_pixel * pixel_clock */
	u32        gmch_n;	/* data N: link_clock * nlanes * 8 */
	u32        link_m;	/* link M: pixel_clock */
	u32        link_n;	/* link N: link_clock */
};
3496
3497static void
3498fdi_reduce_ratio(u32 *num, u32 *den)
3499{
3500	while (*num > 0xffffff || *den > 0xffffff) {
3501		*num >>= 1;
3502		*den >>= 1;
3503	}
3504}
3505
3506static void
3507ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3508		     int link_clock, struct fdi_m_n *m_n)
3509{
3510	m_n->tu = 64; /* default size */
3511
3512	/* BUG_ON(pixel_clock > INT_MAX / 36); */
3513	m_n->gmch_m = bits_per_pixel * pixel_clock;
3514	m_n->gmch_n = link_clock * nlanes * 8;
3515	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3516
3517	m_n->link_m = pixel_clock;
3518	m_n->link_n = link_clock;
3519	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3520}
3521
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3522static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3523{
3524	if (i915_panel_use_ssc >= 0)
3525		return i915_panel_use_ssc != 0;
3526	return dev_priv->lvds_use_ssc
3527		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
3528}
3529
3530/**
3531 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
3532 * @crtc: CRTC structure
3533 * @mode: requested mode
3534 *
3535 * A pipe may be connected to one or more outputs.  Based on the depth of the
3536 * attached framebuffer, choose a good color depth to use on the pipe.
3537 *
3538 * If possible, match the pipe depth to the fb depth.  In some cases, this
3539 * isn't ideal, because the connected output supports a lesser or restricted
3540 * set of depths.  Resolve that here:
3541 *    LVDS typically supports only 6bpc, so clamp down in that case
3542 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
3543 *    Displays may support a restricted set as well, check EDID and clamp as
3544 *      appropriate.
3545 *    DP may want to dither down to 6bpc to fit larger modes
3546 *
3547 * RETURNS:
3548 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
3549 * true if they don't match).
3550 */
3551static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3552					 unsigned int *pipe_bpp,
3553					 struct drm_display_mode *mode)
3554{
3555	struct drm_device *dev = crtc->dev;
3556	struct drm_i915_private *dev_priv = dev->dev_private;
3557	struct drm_encoder *encoder;
3558	struct drm_connector *connector;
3559	unsigned int display_bpc = UINT_MAX, bpc;
3560
3561	/* Walk the encoders & connectors on this crtc, get min bpc */
3562	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3563		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3564
3565		if (encoder->crtc != crtc)
3566			continue;
3567
3568		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
3569			unsigned int lvds_bpc;
3570
3571			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
3572			    LVDS_A3_POWER_UP)
3573				lvds_bpc = 8;
3574			else
3575				lvds_bpc = 6;
3576
3577			if (lvds_bpc < display_bpc) {
3578				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
3579				display_bpc = lvds_bpc;
3580			}
3581			continue;
3582		}
3583
 
 
 
 
 
 
 
 
 
 
 
3584		/* Not one of the known troublemakers, check the EDID */
3585		list_for_each_entry(connector, &dev->mode_config.connector_list,
3586				    head) {
3587			if (connector->encoder != encoder)
3588				continue;
3589
3590			/* Don't use an invalid EDID bpc value */
3591			if (connector->display_info.bpc &&
3592			    connector->display_info.bpc < display_bpc) {
3593				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
3594				display_bpc = connector->display_info.bpc;
3595			}
3596		}
3597
3598		/*
3599		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
3600		 * through, clamp it down.  (Note: >12bpc will be caught below.)
3601		 */
3602		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
3603			if (display_bpc > 8 && display_bpc < 12) {
3604				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
3605				display_bpc = 12;
3606			} else {
3607				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
3608				display_bpc = 8;
3609			}
3610		}
3611	}
3612
3613	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
3614		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
3615		display_bpc = 6;
3616	}
3617
3618	/*
3619	 * We could just drive the pipe at the highest bpc all the time and
3620	 * enable dithering as needed, but that costs bandwidth.  So choose
3621	 * the minimum value that expresses the full color range of the fb but
3622	 * also stays within the max display bpc discovered above.
3623	 */
3624
3625	switch (crtc->fb->depth) {
3626	case 8:
3627		bpc = 8; /* since we go through a colormap */
3628		break;
3629	case 15:
3630	case 16:
3631		bpc = 6; /* min is 18bpp */
3632		break;
3633	case 24:
3634		bpc = 8;
3635		break;
3636	case 30:
3637		bpc = 10;
3638		break;
3639	case 48:
3640		bpc = 12;
3641		break;
3642	default:
3643		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
3644		bpc = min((unsigned int)8, display_bpc);
3645		break;
3646	}
3647
3648	display_bpc = min(display_bpc, bpc);
3649
3650	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
3651		      bpc, display_bpc);
3652
3653	*pipe_bpp = display_bpc * 3;
3654
3655	return display_bpc != bpc;
3656}
3657
3658static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
3659{
3660	struct drm_device *dev = crtc->dev;
3661	struct drm_i915_private *dev_priv = dev->dev_private;
3662	int refclk;
3663
3664	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3665	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
3666		refclk = dev_priv->lvds_ssc_freq * 1000;
3667		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3668			      refclk / 1000);
3669	} else if (!IS_GEN2(dev)) {
3670		refclk = 96000;
3671	} else {
3672		refclk = 48000;
3673	}
3674
3675	return refclk;
3676}
3677
3678static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
3679				      intel_clock_t *clock)
3680{
3681	/* SDVO TV has fixed PLL values depend on its clock range,
3682	   this mirrors vbios setting. */
3683	if (adjusted_mode->clock >= 100000
3684	    && adjusted_mode->clock < 140500) {
3685		clock->p1 = 2;
3686		clock->p2 = 10;
3687		clock->n = 3;
3688		clock->m1 = 16;
3689		clock->m2 = 8;
3690	} else if (adjusted_mode->clock >= 140500
3691		   && adjusted_mode->clock <= 200000) {
3692		clock->p1 = 1;
3693		clock->p2 = 10;
3694		clock->n = 6;
3695		clock->m1 = 12;
3696		clock->m2 = 8;
3697	}
3698}
3699
3700static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
3701				     intel_clock_t *clock,
3702				     intel_clock_t *reduced_clock)
3703{
3704	struct drm_device *dev = crtc->dev;
3705	struct drm_i915_private *dev_priv = dev->dev_private;
3706	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3707	int pipe = intel_crtc->pipe;
3708	u32 fp, fp2 = 0;
3709
3710	if (IS_PINEVIEW(dev)) {
3711		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
3712		if (reduced_clock)
3713			fp2 = (1 << reduced_clock->n) << 16 |
3714				reduced_clock->m1 << 8 | reduced_clock->m2;
3715	} else {
3716		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
3717		if (reduced_clock)
3718			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
3719				reduced_clock->m2;
3720	}
3721
3722	I915_WRITE(FP0(pipe), fp);
3723
3724	intel_crtc->lowfreq_avail = false;
3725	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3726	    reduced_clock && i915_powersave) {
3727		I915_WRITE(FP1(pipe), fp2);
3728		intel_crtc->lowfreq_avail = true;
3729	} else {
3730		I915_WRITE(FP1(pipe), fp);
3731	}
3732}
3733
3734static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
3735			      struct drm_display_mode *adjusted_mode)
3736{
3737	struct drm_device *dev = crtc->dev;
3738	struct drm_i915_private *dev_priv = dev->dev_private;
3739	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3740	int pipe = intel_crtc->pipe;
3741	u32 temp;
3742
3743	temp = I915_READ(LVDS);
3744	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
3745	if (pipe == 1) {
3746		temp |= LVDS_PIPEB_SELECT;
3747	} else {
3748		temp &= ~LVDS_PIPEB_SELECT;
3749	}
3750	/* set the corresponsding LVDS_BORDER bit */
3751	temp |= dev_priv->lvds_border_bits;
3752	/* Set the B0-B3 data pairs corresponding to whether we're going to
3753	 * set the DPLLs for dual-channel mode or not.
3754	 */
3755	if (clock->p2 == 7)
3756		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
3757	else
3758		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
3759
3760	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
3761	 * appropriately here, but we need to look more thoroughly into how
3762	 * panels behave in the two modes.
3763	 */
3764	/* set the dithering flag on LVDS as needed */
3765	if (INTEL_INFO(dev)->gen >= 4) {
3766		if (dev_priv->lvds_dither)
3767			temp |= LVDS_ENABLE_DITHER;
3768		else
3769			temp &= ~LVDS_ENABLE_DITHER;
3770	}
3771	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
3772	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
3773		temp |= LVDS_HSYNC_POLARITY;
3774	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
3775		temp |= LVDS_VSYNC_POLARITY;
3776	I915_WRITE(LVDS, temp);
3777}
3778
/*
 * Build and program the DPLL control value for a gen3/4 pipe from the
 * computed clock dividers, then bring the PLL up in the documented
 * sequence (write with VCO disabled, enable LVDS pins, enable VCO, wait
 * for lock, rewrite for the pixel multiplier).
 */
static void i9xx_update_pll(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode,
			    intel_clock_t *clock, intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll;
	bool is_sdvo;

	/* SDVO and HDMI share the same DPLL programming requirements. */
	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	/* PLL mode: LVDS vs DAC/serial outputs. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			/* Only these parts take the multiplier in the DPLL
			 * register itself (others use DPLL_MD below). */
			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
				dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* G4x also programs the downclock P1 when available. */
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	/* Encode the P2 post divider. */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC, or default refclk. */
	if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	/* Program the dividers first with the VCO still disabled. */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		intel_update_lvds(crtc, clock, adjusted_mode);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
		intel_dp_set_m_n(crtc, mode, adjusted_mode);

	/* Now enable the VCO. */
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+: the SDVO pixel multiplier lives in DPLL_MD and can
		 * only be written once the PLL is locked. */
		u32 temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}
}
3889
/*
 * Build and program the DPLL control value for a gen2 pipe, following
 * the same bring-up sequence as i9xx_update_pll: program dividers with
 * the VCO disabled, power the LVDS pins, enable the VCO, wait for lock,
 * then rewrite the register.
 */
static void i8xx_update_pll(struct drm_crtc *crtc,
			    struct drm_display_mode *adjusted_mode,
			    intel_clock_t *clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll;

	dpll = DPLL_VGA_MODE_DIS;

	/* Encode the P1/P2 post dividers; LVDS uses a one-hot P1 field. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/* Reference clock selection: TV clock, SSC, or default refclk. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	/* Program the dividers first with the VCO still disabled. */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		intel_update_lvds(crtc, clock, adjusted_mode);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);
}
3949
/*
 * Full mode set for gen2-gen4 (non-PCH) pipes: classify the attached
 * encoders, find PLL dividers for the target clock (plus an optional
 * LVDS downclock), program the DPLL, then configure pipe timings,
 * source size and the display plane, and finally flip the framebuffer
 * base and update watermarks.
 *
 * Returns 0 on success or a negative errno (e.g. -EINVAL when no PLL
 * settings exist for the mode).
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dspcntr, pipeconf, vsyncshift;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;

	/* Classify the encoders driven by this crtc; the output types
	 * determine refclk, PLL flavour and pipeconf bits below. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	/* SDVO TV overrides the computed dividers with fixed VBIOS values. */
	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
				 &reduced_clock : NULL);

	/* Program the DPLL; gen2 has a different register layout. */
	if (IS_GEN2(dev))
		i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
	else
		i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	/* Bind the plane to its pipe. */
	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		/* DP link constraints may force 6bpc + dithering. */
		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* CxSR self-refresh downclocking follows LVDS downclock availability. */
	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	/* Interlace setup (gen2 is progressive-only here). */
	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (!IS_GEN2(dev) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal/2;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		vsyncshift = 0;
	}

	if (!IS_GEN3(dev))
		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);

	/* Program pipe timings; the hardware wants value-minus-one. */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));
	intel_enable_pipe(dev_priv, pipe, false);

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	/* Flip to the new framebuffer base. */
	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
4151
4152/*
4153 * Initialize reference clocks when the driver loads
4154 */
4155void ironlake_init_pch_refclk(struct drm_device *dev)
4156{
4157	struct drm_i915_private *dev_priv = dev->dev_private;
4158	struct drm_mode_config *mode_config = &dev->mode_config;
 
4159	struct intel_encoder *encoder;
 
4160	u32 temp;
4161	bool has_lvds = false;
4162	bool has_cpu_edp = false;
4163	bool has_pch_edp = false;
4164	bool has_panel = false;
4165	bool has_ck505 = false;
4166	bool can_ssc = false;
4167
4168	/* We need to take the global config into account */
4169	list_for_each_entry(encoder, &mode_config->encoder_list,
4170			    base.head) {
4171		switch (encoder->type) {
4172		case INTEL_OUTPUT_LVDS:
4173			has_panel = true;
4174			has_lvds = true;
4175			break;
4176		case INTEL_OUTPUT_EDP:
4177			has_panel = true;
4178			if (intel_encoder_is_pch_edp(&encoder->base))
4179				has_pch_edp = true;
4180			else
4181				has_cpu_edp = true;
4182			break;
 
 
4183		}
4184	}
4185
4186	if (HAS_PCH_IBX(dev)) {
4187		has_ck505 = dev_priv->display_clock_mode;
4188		can_ssc = has_ck505;
4189	} else {
4190		has_ck505 = false;
4191		can_ssc = true;
4192	}
4193
4194	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4195		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
4196		      has_ck505);
4197
4198	/* Ironlake: try to setup display ref clock before DPLL
4199	 * enabling. This is only under driver's control after
4200	 * PCH B stepping, previous chipset stepping should be
4201	 * ignoring this setting.
4202	 */
4203	temp = I915_READ(PCH_DREF_CONTROL);
4204	/* Always enable nonspread source */
4205	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
 
 
 
 
4206
4207	if (has_ck505)
4208		temp |= DREF_NONSPREAD_CK505_ENABLE;
4209	else
4210		temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4211
4212	if (has_panel) {
4213		temp &= ~DREF_SSC_SOURCE_MASK;
4214		temp |= DREF_SSC_SOURCE_ENABLE;
4215
4216		/* SSC must be turned on before enabling the CPU output  */
4217		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4218			DRM_DEBUG_KMS("Using SSC on panel\n");
4219			temp |= DREF_SSC1_ENABLE;
4220		} else
4221			temp &= ~DREF_SSC1_ENABLE;
4222
4223		/* Get SSC going before enabling the outputs */
4224		I915_WRITE(PCH_DREF_CONTROL, temp);
4225		POSTING_READ(PCH_DREF_CONTROL);
4226		udelay(200);
4227
 
 
 
4228		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4229
4230		/* Enable CPU source on CPU attached eDP */
4231		if (has_cpu_edp) {
4232			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4233				DRM_DEBUG_KMS("Using SSC on eDP\n");
4234				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4235			}
4236			else
4237				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4238		} else
4239			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4240
4241		I915_WRITE(PCH_DREF_CONTROL, temp);
4242		POSTING_READ(PCH_DREF_CONTROL);
4243		udelay(200);
4244	} else {
4245		DRM_DEBUG_KMS("Disabling SSC entirely\n");
4246
4247		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4248
4249		/* Turn off CPU output */
4250		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4251
4252		I915_WRITE(PCH_DREF_CONTROL, temp);
4253		POSTING_READ(PCH_DREF_CONTROL);
4254		udelay(200);
4255
4256		/* Turn off the SSC source */
4257		temp &= ~DREF_SSC_SOURCE_MASK;
4258		temp |= DREF_SSC_SOURCE_DISABLE;
4259
4260		/* Turn off SSC1 */
4261		temp &= ~ DREF_SSC1_ENABLE;
4262
4263		I915_WRITE(PCH_DREF_CONTROL, temp);
4264		POSTING_READ(PCH_DREF_CONTROL);
4265		udelay(200);
4266	}
4267}
4268
4269static int ironlake_get_refclk(struct drm_crtc *crtc)
4270{
4271	struct drm_device *dev = crtc->dev;
4272	struct drm_i915_private *dev_priv = dev->dev_private;
4273	struct intel_encoder *encoder;
4274	struct drm_mode_config *mode_config = &dev->mode_config;
4275	struct intel_encoder *edp_encoder = NULL;
4276	int num_connectors = 0;
4277	bool is_lvds = false;
4278
4279	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4280		if (encoder->base.crtc != crtc)
4281			continue;
4282
4283		switch (encoder->type) {
4284		case INTEL_OUTPUT_LVDS:
4285			is_lvds = true;
4286			break;
4287		case INTEL_OUTPUT_EDP:
4288			edp_encoder = encoder;
4289			break;
4290		}
4291		num_connectors++;
4292	}
4293
4294	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4295		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4296			      dev_priv->lvds_ssc_freq);
4297		return dev_priv->lvds_ssc_freq * 1000;
4298	}
4299
4300	return 120000;
4301}
4302
4303static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4304				  struct drm_display_mode *mode,
4305				  struct drm_display_mode *adjusted_mode,
4306				  int x, int y,
4307				  struct drm_framebuffer *old_fb)
4308{
4309	struct drm_device *dev = crtc->dev;
4310	struct drm_i915_private *dev_priv = dev->dev_private;
4311	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4312	int pipe = intel_crtc->pipe;
4313	int plane = intel_crtc->plane;
4314	int refclk, num_connectors = 0;
4315	intel_clock_t clock, reduced_clock;
4316	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4317	bool ok, has_reduced_clock = false, is_sdvo = false;
4318	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
 
4319	struct drm_mode_config *mode_config = &dev->mode_config;
4320	struct intel_encoder *encoder, *edp_encoder = NULL;
4321	const intel_limit_t *limit;
4322	int ret;
4323	struct fdi_m_n m_n = {0};
4324	u32 temp;
 
4325	int target_clock, pixel_multiplier, lane, link_bw, factor;
4326	unsigned int pipe_bpp;
4327	bool dither;
4328	bool is_cpu_edp = false, is_pch_edp = false;
4329
4330	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4331		if (encoder->base.crtc != crtc)
4332			continue;
4333
4334		switch (encoder->type) {
4335		case INTEL_OUTPUT_LVDS:
4336			is_lvds = true;
4337			break;
4338		case INTEL_OUTPUT_SDVO:
4339		case INTEL_OUTPUT_HDMI:
4340			is_sdvo = true;
4341			if (encoder->needs_tv_clock)
4342				is_tv = true;
4343			break;
4344		case INTEL_OUTPUT_TVOUT:
4345			is_tv = true;
4346			break;
4347		case INTEL_OUTPUT_ANALOG:
4348			is_crt = true;
4349			break;
4350		case INTEL_OUTPUT_DISPLAYPORT:
4351			is_dp = true;
4352			break;
4353		case INTEL_OUTPUT_EDP:
4354			is_dp = true;
4355			if (intel_encoder_is_pch_edp(&encoder->base))
4356				is_pch_edp = true;
4357			else
4358				is_cpu_edp = true;
4359			edp_encoder = encoder;
4360			break;
4361		}
4362
4363		num_connectors++;
4364	}
4365
4366	refclk = ironlake_get_refclk(crtc);
 
 
 
 
 
 
 
 
 
4367
4368	/*
4369	 * Returns a set of divisors for the desired target clock with the given
4370	 * refclk, or FALSE.  The returned values represent the clock equation:
4371	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4372	 */
4373	limit = intel_limit(crtc, refclk);
4374	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4375			     &clock);
4376	if (!ok) {
4377		DRM_ERROR("Couldn't find PLL settings for mode!\n");
4378		return -EINVAL;
4379	}
4380
4381	/* Ensure that the cursor is valid for the new mode before changing... */
4382	intel_crtc_update_cursor(crtc, true);
4383
4384	if (is_lvds && dev_priv->lvds_downclock_avail) {
4385		/*
4386		 * Ensure we match the reduced clock's P to the target clock.
4387		 * If the clocks don't match, we can't switch the display clock
4388		 * by using the FP0/FP1. In such case we will disable the LVDS
4389		 * downclock feature.
4390		*/
4391		has_reduced_clock = limit->find_pll(limit, crtc,
4392						    dev_priv->lvds_downclock,
4393						    refclk,
4394						    &clock,
4395						    &reduced_clock);
 
 
 
 
 
 
 
 
 
 
 
4396	}
4397	/* SDVO TV has fixed PLL values depend on its clock range,
4398	   this mirrors vbios setting. */
4399	if (is_sdvo && is_tv) {
4400		if (adjusted_mode->clock >= 100000
4401		    && adjusted_mode->clock < 140500) {
4402			clock.p1 = 2;
4403			clock.p2 = 10;
4404			clock.n = 3;
4405			clock.m1 = 16;
4406			clock.m2 = 8;
4407		} else if (adjusted_mode->clock >= 140500
4408			   && adjusted_mode->clock <= 200000) {
4409			clock.p1 = 1;
4410			clock.p2 = 10;
4411			clock.n = 6;
4412			clock.m1 = 12;
4413			clock.m2 = 8;
4414		}
4415	}
4416
4417	/* FDI link */
4418	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4419	lane = 0;
4420	/* CPU eDP doesn't require FDI link, so just set DP M/N
4421	   according to current link config */
4422	if (is_cpu_edp) {
 
4423		target_clock = mode->clock;
4424		intel_edp_link_config(edp_encoder, &lane, &link_bw);
 
4425	} else {
4426		/* [e]DP over FDI requires target mode clock
4427		   instead of link clock */
4428		if (is_dp)
4429			target_clock = mode->clock;
4430		else
4431			target_clock = adjusted_mode->clock;
4432
4433		/* FDI is a binary signal running at ~2.7GHz, encoding
4434		 * each output octet as 10 bits. The actual frequency
4435		 * is stored as a divider into a 100MHz clock, and the
4436		 * mode pixel clock is stored in units of 1KHz.
4437		 * Hence the bw of each lane in terms of the mode signal
4438		 * is:
4439		 */
4440		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4441	}
4442
4443	/* determine panel color depth */
4444	temp = I915_READ(PIPECONF(pipe));
4445	temp &= ~PIPE_BPC_MASK;
4446	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
4447	switch (pipe_bpp) {
4448	case 18:
4449		temp |= PIPE_6BPC;
4450		break;
4451	case 24:
4452		temp |= PIPE_8BPC;
4453		break;
4454	case 30:
4455		temp |= PIPE_10BPC;
4456		break;
4457	case 36:
4458		temp |= PIPE_12BPC;
4459		break;
4460	default:
4461		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
4462			pipe_bpp);
4463		temp |= PIPE_8BPC;
4464		pipe_bpp = 24;
4465		break;
4466	}
4467
4468	intel_crtc->bpp = pipe_bpp;
4469	I915_WRITE(PIPECONF(pipe), temp);
4470
4471	if (!lane) {
4472		/*
4473		 * Account for spread spectrum to avoid
4474		 * oversubscribing the link. Max center spread
4475		 * is 2.5%; use 5% for safety's sake.
4476		 */
4477		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
4478		lane = bps / (link_bw * 8) + 1;
4479	}
4480
4481	intel_crtc->fdi_lanes = lane;
4482
4483	if (pixel_multiplier > 1)
4484		link_bw *= pixel_multiplier;
4485	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
4486			     &m_n);
4487
 
 
4488	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4489	if (has_reduced_clock)
4490		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4491			reduced_clock.m2;
4492
4493	/* Enable autotuning of the PLL clock (if permissible) */
4494	factor = 21;
4495	if (is_lvds) {
4496		if ((intel_panel_use_ssc(dev_priv) &&
4497		     dev_priv->lvds_ssc_freq == 100) ||
4498		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4499			factor = 25;
4500	} else if (is_sdvo && is_tv)
4501		factor = 20;
4502
4503	if (clock.m < factor * clock.n)
4504		fp |= FP_CB_TUNE;
4505
4506	dpll = 0;
4507
4508	if (is_lvds)
4509		dpll |= DPLLB_MODE_LVDS;
4510	else
4511		dpll |= DPLLB_MODE_DAC_SERIAL;
4512	if (is_sdvo) {
4513		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4514		if (pixel_multiplier > 1) {
4515			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
4516		}
4517		dpll |= DPLL_DVO_HIGH_SPEED;
4518	}
4519	if (is_dp && !is_cpu_edp)
4520		dpll |= DPLL_DVO_HIGH_SPEED;
4521
4522	/* compute bitmask from p1 value */
4523	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4524	/* also FPA1 */
4525	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4526
4527	switch (clock.p2) {
4528	case 5:
4529		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4530		break;
4531	case 7:
4532		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4533		break;
4534	case 10:
4535		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4536		break;
4537	case 14:
4538		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4539		break;
4540	}
4541
4542	if (is_sdvo && is_tv)
4543		dpll |= PLL_REF_INPUT_TVCLKINBC;
4544	else if (is_tv)
4545		/* XXX: just matching BIOS for now */
4546		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
4547		dpll |= 3;
4548	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4549		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4550	else
4551		dpll |= PLL_REF_INPUT_DREFCLK;
4552
4553	/* setup pipeconf */
4554	pipeconf = I915_READ(PIPECONF(pipe));
4555
4556	/* Set up the display plane register */
4557	dspcntr = DISPPLANE_GAMMA_ENABLE;
4558
4559	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
4560	drm_mode_debug_printmodeline(mode);
4561
4562	/* CPU eDP is the only output that doesn't need a PCH PLL of its own on
4563	 * pre-Haswell/LPT generation */
4564	if (HAS_PCH_LPT(dev)) {
4565		DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
4566				pipe);
4567	} else if (!is_cpu_edp) {
4568		struct intel_pch_pll *pll;
4569
4570		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
4571		if (pll == NULL) {
4572			DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
4573					 pipe);
4574			return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
4575		}
4576	} else
4577		intel_put_pch_pll(intel_crtc);
 
 
 
4578
4579	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
4580	 * This is an exception to the general rule that mode_set doesn't turn
4581	 * things on.
4582	 */
4583	if (is_lvds) {
4584		temp = I915_READ(PCH_LVDS);
4585		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4586		if (HAS_PCH_CPT(dev)) {
4587			temp &= ~PORT_TRANS_SEL_MASK;
4588			temp |= PORT_TRANS_SEL_CPT(pipe);
 
 
4589		} else {
4590			if (pipe == 1)
4591				temp |= LVDS_PIPEB_SELECT;
4592			else
4593				temp &= ~LVDS_PIPEB_SELECT;
4594		}
4595
4596		/* set the corresponsding LVDS_BORDER bit */
4597		temp |= dev_priv->lvds_border_bits;
4598		/* Set the B0-B3 data pairs corresponding to whether we're going to
4599		 * set the DPLLs for dual-channel mode or not.
4600		 */
4601		if (clock.p2 == 7)
4602			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4603		else
4604			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4605
4606		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4607		 * appropriately here, but we need to look more thoroughly into how
4608		 * panels behave in the two modes.
4609		 */
4610		temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4611		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4612			temp |= LVDS_HSYNC_POLARITY;
4613		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4614			temp |= LVDS_VSYNC_POLARITY;
 
 
 
 
 
 
 
 
 
 
 
 
4615		I915_WRITE(PCH_LVDS, temp);
4616	}
4617
4618	pipeconf &= ~PIPECONF_DITHER_EN;
4619	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
4620	if ((is_lvds && dev_priv->lvds_dither) || dither) {
4621		pipeconf |= PIPECONF_DITHER_EN;
4622		pipeconf |= PIPECONF_DITHER_TYPE_SP;
4623	}
4624	if (is_dp && !is_cpu_edp) {
4625		intel_dp_set_m_n(crtc, mode, adjusted_mode);
4626	} else {
4627		/* For non-DP output, clear any trans DP clock recovery setting.*/
4628		I915_WRITE(TRANSDATA_M1(pipe), 0);
4629		I915_WRITE(TRANSDATA_N1(pipe), 0);
4630		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
4631		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
4632	}
4633
4634	if (intel_crtc->pch_pll) {
4635		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
 
4636
4637		/* Wait for the clocks to stabilize. */
4638		POSTING_READ(intel_crtc->pch_pll->pll_reg);
4639		udelay(150);
4640
4641		/* The pixel multiplier can only be updated once the
4642		 * DPLL is enabled and the clocks are stable.
4643		 *
4644		 * So write it again.
4645		 */
4646		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
4647	}
4648
4649	intel_crtc->lowfreq_avail = false;
4650	if (intel_crtc->pch_pll) {
4651		if (is_lvds && has_reduced_clock && i915_powersave) {
4652			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
4653			intel_crtc->lowfreq_avail = true;
4654			if (HAS_PIPE_CXSR(dev)) {
4655				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4656				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4657			}
4658		} else {
4659			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
4660			if (HAS_PIPE_CXSR(dev)) {
4661				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4662				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4663			}
4664		}
4665	}
4666
4667	pipeconf &= ~PIPECONF_INTERLACE_MASK;
4668	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4669		pipeconf |= PIPECONF_INTERLACED_ILK;
4670		/* the chip adds 2 halflines automatically */
 
4671		adjusted_mode->crtc_vtotal -= 1;
 
4672		adjusted_mode->crtc_vblank_end -= 1;
4673		I915_WRITE(VSYNCSHIFT(pipe),
4674			   adjusted_mode->crtc_hsync_start
4675			   - adjusted_mode->crtc_htotal/2);
4676	} else {
4677		pipeconf |= PIPECONF_PROGRESSIVE;
4678		I915_WRITE(VSYNCSHIFT(pipe), 0);
4679	}
4680
4681	I915_WRITE(HTOTAL(pipe),
4682		   (adjusted_mode->crtc_hdisplay - 1) |
4683		   ((adjusted_mode->crtc_htotal - 1) << 16));
4684	I915_WRITE(HBLANK(pipe),
4685		   (adjusted_mode->crtc_hblank_start - 1) |
4686		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
4687	I915_WRITE(HSYNC(pipe),
4688		   (adjusted_mode->crtc_hsync_start - 1) |
4689		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
4690
4691	I915_WRITE(VTOTAL(pipe),
4692		   (adjusted_mode->crtc_vdisplay - 1) |
4693		   ((adjusted_mode->crtc_vtotal - 1) << 16));
4694	I915_WRITE(VBLANK(pipe),
4695		   (adjusted_mode->crtc_vblank_start - 1) |
4696		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
4697	I915_WRITE(VSYNC(pipe),
4698		   (adjusted_mode->crtc_vsync_start - 1) |
4699		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
4700
4701	/* pipesrc controls the size that is scaled from, which should
4702	 * always be the user's requested size.
4703	 */
4704	I915_WRITE(PIPESRC(pipe),
4705		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4706
4707	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
4708	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
4709	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
4710	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
4711
4712	if (is_cpu_edp)
 
4713		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 
4714
4715	I915_WRITE(PIPECONF(pipe), pipeconf);
4716	POSTING_READ(PIPECONF(pipe));
4717
4718	intel_wait_for_vblank(dev, pipe);
4719
 
 
 
 
 
 
4720	I915_WRITE(DSPCNTR(plane), dspcntr);
4721	POSTING_READ(DSPCNTR(plane));
4722
4723	ret = intel_pipe_set_base(crtc, x, y, old_fb);
4724
4725	intel_update_watermarks(dev);
4726
4727	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
4728
4729	return ret;
4730}
4731
4732static int intel_crtc_mode_set(struct drm_crtc *crtc,
4733			       struct drm_display_mode *mode,
4734			       struct drm_display_mode *adjusted_mode,
4735			       int x, int y,
4736			       struct drm_framebuffer *old_fb)
4737{
4738	struct drm_device *dev = crtc->dev;
4739	struct drm_i915_private *dev_priv = dev->dev_private;
4740	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4741	int pipe = intel_crtc->pipe;
4742	int ret;
4743
4744	drm_vblank_pre_modeset(dev, pipe);
4745
4746	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
4747					      x, y, old_fb);
 
4748	drm_vblank_post_modeset(dev, pipe);
4749
4750	if (ret)
4751		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4752	else
4753		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
4754
4755	return ret;
4756}
4757
/*
 * Check whether the audio hardware already holds the ELD (EDID-Like Data)
 * that @connector wants programmed, so the caller can skip a rewrite.
 *
 * @reg_eldv/@bits_eldv: register and mask of the "ELD valid" flag
 * @reg_elda/@bits_elda: register and mask of the ELD access address field
 * @reg_edid: ELD data window register
 *
 * Returns true when the hardware copy is already up to date.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No ELD to program: up to date iff hardware has none marked valid */
	if (!eld[0])
		return !i;

	/* We have an ELD but hardware has none valid: must be rewritten */
	if (!i)
		return false;

	/* Reset the ELD access address to the start of the buffer */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] is the ELD payload length in 32-bit words (see the dword
	 * writes in the *_write_eld callers); compare the hardware copy
	 * word by word.  NOTE(review): assumes reads of reg_edid advance
	 * the access address automatically — confirm against the audio
	 * register spec. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
4786
/*
 * Program the connector's ELD (audio capability data) into a G4x-class
 * audio controller, or clear the valid bit when the ELD is empty.
 * Register access order matters: invalidate, write data, then revalidate.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	/* Pick the ELD-valid bit matching this audio device ID */
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	/* Skip the rewrite if the hardware copy already matches */
	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the current ELD and reset the write address to 0 */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* Empty ELD: leave the valid bit cleared and stop here */
	if (!eld[0])
		return;

	/* eld[2] is the ELD length in dwords; clamp to the hw buffer size */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the freshly written ELD as valid */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
4826
/*
 * Program the connector's ELD on PCH (Ironlake+) platforms.  The audio
 * register block differs between IBX and CPT PCHs and is replicated per
 * pipe at 0x100 strides.  As with g4x_write_eld, the sequence is:
 * invalidate, reset address, write data words, revalidate.
 */
static void ironlake_write_eld(struct drm_connector *connector,
				     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;

	/* Select the register block for the PCH generation */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_config = IBX_AUD_CONFIG_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_config = CPT_AUD_CONFIG_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Per-pipe register instances live 0x100 apart */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;
	aud_config += i * 0x100;

	DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));

	/* DIP_Port_Select tells us which port's ELD-valid bit to manage */
	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	/* DisplayPort carries the audio N value in the ELD stream */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else
		I915_WRITE(aud_config, 0);

	/* Nothing to do if the hardware copy already matches */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the current ELD before rewriting it */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	/* Empty ELD: leave the valid bit(s) cleared */
	if (!eld[0])
		return;

	/* Reset the ELD write address to the start of the buffer */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Revalidate the ELD for the selected port(s) */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
4905
4906void intel_write_eld(struct drm_encoder *encoder,
4907		     struct drm_display_mode *mode)
4908{
4909	struct drm_crtc *crtc = encoder->crtc;
4910	struct drm_connector *connector;
4911	struct drm_device *dev = encoder->dev;
4912	struct drm_i915_private *dev_priv = dev->dev_private;
4913
4914	connector = drm_select_eld(encoder, mode);
4915	if (!connector)
4916		return;
4917
4918	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4919			 connector->base.id,
4920			 drm_get_connector_name(connector),
4921			 connector->encoder->base.id,
4922			 drm_get_encoder_name(connector->encoder));
4923
4924	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
4925
4926	if (dev_priv->display.write_eld)
4927		dev_priv->display.write_eld(connector, crtc);
4928}
4929
4930/** Loads the palette/gamma unit for the CRTC with the prepared values */
4931void intel_crtc_load_lut(struct drm_crtc *crtc)
4932{
4933	struct drm_device *dev = crtc->dev;
4934	struct drm_i915_private *dev_priv = dev->dev_private;
4935	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4936	int palreg = PALETTE(intel_crtc->pipe);
4937	int i;
4938
4939	/* The clocks have to be on to load the palette. */
4940	if (!crtc->enabled || !intel_crtc->active)
4941		return;
4942
4943	/* use legacy palette for Ironlake */
4944	if (HAS_PCH_SPLIT(dev))
4945		palreg = LGC_PALETTE(intel_crtc->pipe);
4946
4947	for (i = 0; i < 256; i++) {
4948		I915_WRITE(palreg + 4 * i,
4949			   (intel_crtc->lut_r[i] << 16) |
4950			   (intel_crtc->lut_g[i] << 8) |
4951			   intel_crtc->lut_b[i]);
4952	}
4953}
4954
/*
 * Show or hide the hardware cursor on 845G/865G.  base == 0 means "hide".
 * These chipsets only allow the cursor base address to change while the
 * cursor is disabled, so this function only acts on visibility
 * transitions and writes the base before enabling.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Only visibility transitions need (or may do) any programming */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
4984
4985static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
4986{
4987	struct drm_device *dev = crtc->dev;
4988	struct drm_i915_private *dev_priv = dev->dev_private;
4989	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4990	int pipe = intel_crtc->pipe;
4991	bool visible = base != 0;
4992
4993	if (intel_crtc->cursor_visible != visible) {
4994		uint32_t cntl = I915_READ(CURCNTR(pipe));
4995		if (base) {
4996			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
4997			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
4998			cntl |= pipe << 28; /* Connect to correct pipe */
4999		} else {
5000			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5001			cntl |= CURSOR_MODE_DISABLE;
5002		}
5003		I915_WRITE(CURCNTR(pipe), cntl);
5004
5005		intel_crtc->cursor_visible = visible;
5006	}
5007	/* and commit changes on next vblank */
5008	I915_WRITE(CURBASE(pipe), base);
5009}
5010
5011static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
5012{
5013	struct drm_device *dev = crtc->dev;
5014	struct drm_i915_private *dev_priv = dev->dev_private;
5015	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5016	int pipe = intel_crtc->pipe;
5017	bool visible = base != 0;
5018
5019	if (intel_crtc->cursor_visible != visible) {
5020		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
5021		if (base) {
5022			cntl &= ~CURSOR_MODE;
5023			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5024		} else {
5025			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5026			cntl |= CURSOR_MODE_DISABLE;
5027		}
5028		I915_WRITE(CURCNTR_IVB(pipe), cntl);
5029
5030		intel_crtc->cursor_visible = visible;
5031	}
5032	/* and commit changes on next vblank */
5033	I915_WRITE(CURBASE_IVB(pipe), base);
5034}
5035
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Reprogram the cursor position (and, via the per-platform helper, its
 * visibility) from intel_crtc->cursor_x/y.  @on == false forces the
 * cursor off regardless of position.  A base of 0 encodes "invisible"
 * throughout.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* Hide the cursor when disabled, when there is no fb, or when it
	 * lies entirely beyond the framebuffer edges. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		/* NOTE(review): '>' (not '>=') keeps a cursor starting
		 * exactly at the right/bottom edge "visible" — confirm
		 * this is intended. */
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	/* Negative coordinates are encoded as sign bit + magnitude */
	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	/* Skip the register writes when the cursor stays invisible */
	visible = base != 0;
	if (!visible && !intel_crtc->cursor_visible)
		return;

	/* Position first, then let the per-platform helper flip
	 * visibility/base (latched on next vblank). */
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}
}
5094
5095static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5096				 struct drm_file *file,
5097				 uint32_t handle,
5098				 uint32_t width, uint32_t height)
5099{
5100	struct drm_device *dev = crtc->dev;
5101	struct drm_i915_private *dev_priv = dev->dev_private;
5102	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5103	struct drm_i915_gem_object *obj;
5104	uint32_t addr;
5105	int ret;
5106
5107	DRM_DEBUG_KMS("\n");
5108
5109	/* if we want to turn off the cursor ignore width and height */
5110	if (!handle) {
5111		DRM_DEBUG_KMS("cursor off\n");
5112		addr = 0;
5113		obj = NULL;
5114		mutex_lock(&dev->struct_mutex);
5115		goto finish;
5116	}
5117
5118	/* Currently we only support 64x64 cursors */
5119	if (width != 64 || height != 64) {
5120		DRM_ERROR("we currently only support 64x64 cursors\n");
5121		return -EINVAL;
5122	}
5123
5124	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
5125	if (&obj->base == NULL)
5126		return -ENOENT;
5127
5128	if (obj->base.size < width * height * 4) {
5129		DRM_ERROR("buffer is to small\n");
5130		ret = -ENOMEM;
5131		goto fail;
5132	}
5133
5134	/* we only need to pin inside GTT if cursor is non-phy */
5135	mutex_lock(&dev->struct_mutex);
5136	if (!dev_priv->info->cursor_needs_physical) {
5137		if (obj->tiling_mode) {
5138			DRM_ERROR("cursor cannot be tiled\n");
5139			ret = -EINVAL;
5140			goto fail_locked;
5141		}
5142
5143		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
5144		if (ret) {
5145			DRM_ERROR("failed to move cursor bo into the GTT\n");
5146			goto fail_locked;
5147		}
5148
5149		ret = i915_gem_object_put_fence(obj);
5150		if (ret) {
5151			DRM_ERROR("failed to release fence for cursor");
5152			goto fail_unpin;
5153		}
5154
5155		addr = obj->gtt_offset;
5156	} else {
5157		int align = IS_I830(dev) ? 16 * 1024 : 256;
5158		ret = i915_gem_attach_phys_object(dev, obj,
5159						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
5160						  align);
5161		if (ret) {
5162			DRM_ERROR("failed to attach phys object\n");
5163			goto fail_locked;
5164		}
5165		addr = obj->phys_obj->handle->busaddr;
5166	}
5167
5168	if (IS_GEN2(dev))
5169		I915_WRITE(CURSIZE, (height << 12) | width);
5170
5171 finish:
5172	if (intel_crtc->cursor_bo) {
5173		if (dev_priv->info->cursor_needs_physical) {
5174			if (intel_crtc->cursor_bo != obj)
5175				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
5176		} else
5177			i915_gem_object_unpin(intel_crtc->cursor_bo);
5178		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
5179	}
5180
5181	mutex_unlock(&dev->struct_mutex);
5182
5183	intel_crtc->cursor_addr = addr;
5184	intel_crtc->cursor_bo = obj;
5185	intel_crtc->cursor_width = width;
5186	intel_crtc->cursor_height = height;
5187
5188	intel_crtc_update_cursor(crtc, true);
5189
5190	return 0;
5191fail_unpin:
5192	i915_gem_object_unpin(obj);
5193fail_locked:
5194	mutex_unlock(&dev->struct_mutex);
5195fail:
5196	drm_gem_object_unreference_unlocked(&obj->base);
5197	return ret;
5198}
5199
5200static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
5201{
5202	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5203
5204	intel_crtc->cursor_x = x;
5205	intel_crtc->cursor_y = y;
5206
5207	intel_crtc_update_cursor(crtc, true);
5208
5209	return 0;
5210}
5211
5212/** Sets the color ramps on behalf of RandR */
5213void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5214				 u16 blue, int regno)
5215{
5216	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5217
5218	intel_crtc->lut_r[regno] = red >> 8;
5219	intel_crtc->lut_g[regno] = green >> 8;
5220	intel_crtc->lut_b[regno] = blue >> 8;
5221}
5222
5223void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
5224			     u16 *blue, int regno)
5225{
5226	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5227
5228	*red = intel_crtc->lut_r[regno] << 8;
5229	*green = intel_crtc->lut_g[regno] << 8;
5230	*blue = intel_crtc->lut_b[regno] << 8;
5231}
5232
5233static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
5234				 u16 *blue, uint32_t start, uint32_t size)
5235{
5236	int end = (start + size > 256) ? 256 : start + size, i;
5237	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5238
5239	for (i = start; i < end; i++) {
5240		intel_crtc->lut_r[i] = red[i] >> 8;
5241		intel_crtc->lut_g[i] = green[i] >> 8;
5242		intel_crtc->lut_b[i] = blue[i] >> 8;
5243	}
5244
5245	intel_crtc_load_lut(crtc);
5246}
5247
5248/**
5249 * Get a pipe with a simple mode set on it for doing load-based monitor
5250 * detection.
5251 *
5252 * It will be up to the load-detect code to adjust the pipe as appropriate for
5253 * its requirements.  The pipe will be connected to no other encoders.
5254 *
5255 * Currently this code will only succeed if there is a pipe with no encoders
5256 * configured for it.  In the future, it could choose to temporarily disable
5257 * some outputs to free up a pipe for its use.
5258 *
5259 * \return crtc, or NULL if no pipes are available.
5260 */
5261
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode used by intel_get_load_detect_pipe() when the caller
 * supplies no explicit mode: 31.5 MHz dot clock, sync-negative. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
5267
5268static struct drm_framebuffer *
5269intel_framebuffer_create(struct drm_device *dev,
5270			 struct drm_mode_fb_cmd2 *mode_cmd,
5271			 struct drm_i915_gem_object *obj)
5272{
5273	struct intel_framebuffer *intel_fb;
5274	int ret;
5275
5276	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
5277	if (!intel_fb) {
5278		drm_gem_object_unreference_unlocked(&obj->base);
5279		return ERR_PTR(-ENOMEM);
5280	}
5281
5282	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
5283	if (ret) {
5284		drm_gem_object_unreference_unlocked(&obj->base);
5285		kfree(intel_fb);
5286		return ERR_PTR(ret);
5287	}
5288
5289	return &intel_fb->base;
5290}
5291
5292static u32
5293intel_framebuffer_pitch_for_width(int width, int bpp)
5294{
5295	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
5296	return ALIGN(pitch, 64);
5297}
5298
5299static u32
5300intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5301{
5302	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5303	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
5304}
5305
5306static struct drm_framebuffer *
5307intel_framebuffer_create_for_mode(struct drm_device *dev,
5308				  struct drm_display_mode *mode,
5309				  int depth, int bpp)
5310{
5311	struct drm_i915_gem_object *obj;
5312	struct drm_mode_fb_cmd2 mode_cmd;
5313
5314	obj = i915_gem_alloc_object(dev,
5315				    intel_framebuffer_size_for_mode(mode, bpp));
5316	if (obj == NULL)
5317		return ERR_PTR(-ENOMEM);
5318
5319	mode_cmd.width = mode->hdisplay;
5320	mode_cmd.height = mode->vdisplay;
5321	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
5322								bpp);
5323	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
5324
5325	return intel_framebuffer_create(dev, &mode_cmd, obj);
5326}
5327
5328static struct drm_framebuffer *
5329mode_fits_in_fbdev(struct drm_device *dev,
5330		   struct drm_display_mode *mode)
5331{
5332	struct drm_i915_private *dev_priv = dev->dev_private;
5333	struct drm_i915_gem_object *obj;
5334	struct drm_framebuffer *fb;
5335
5336	if (dev_priv->fbdev == NULL)
5337		return NULL;
5338
5339	obj = dev_priv->fbdev->ifb.obj;
5340	if (obj == NULL)
5341		return NULL;
5342
5343	fb = &dev_priv->fbdev->ifb.base;
5344	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
5345							       fb->bits_per_pixel))
5346		return NULL;
5347
5348	if (obj->base.size < mode->vdisplay * fb->pitches[0])
5349		return NULL;
5350
5351	return fb;
5352}
5353
/*
 * Acquire a running pipe for load-based monitor detection (see the block
 * comment above load_detect_mode).  Saves enough state in @old for
 * intel_release_load_detect_pipe() to restore things afterwards.
 * Returns true when a pipe is available and running.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		/* Reusing an existing crtc: nothing temporary to tear down */
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* The encoder must be able to drive this crtc (bitmask) */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Temporarily wire the connector/encoder to the borrowed crtc */
	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		/* Remember the temporary fb so release can destroy it */
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
5469
/* Undo the effects of intel_get_load_detect_pipe(): if a temporary
 * pipe/framebuffer was borrowed for load detection, detach and destroy it;
 * otherwise restore the crtc/encoder to their previous dpms state.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		/* The pipe was only lit up for load detection: drop the
		 * connector binding and let the helper turn everything
		 * unused back off, then free any temporary framebuffer. */
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
5500
/* Returns the clock of the currently programmed mode of the given pipe,
 * reconstructed from the DPLL divider registers (in kHz), or 0 if the DPLL
 * mode bits are unrecognised.
 */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* FP0 holds the dividers for the normal rate, FP1 the ones used
	 * when the reduced (downclocked) rate is selected. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N as a one-hot bitfield, hence ffs(). */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot in the DPLL register. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: LVDS is only ever driven by pipe B. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
5587
5588/** Returns the currently programmed mode of the given pipe. */
5589struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
5590					     struct drm_crtc *crtc)
5591{
5592	struct drm_i915_private *dev_priv = dev->dev_private;
5593	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5594	int pipe = intel_crtc->pipe;
5595	struct drm_display_mode *mode;
5596	int htot = I915_READ(HTOTAL(pipe));
5597	int hsync = I915_READ(HSYNC(pipe));
5598	int vtot = I915_READ(VTOTAL(pipe));
5599	int vsync = I915_READ(VSYNC(pipe));
5600
5601	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
5602	if (!mode)
5603		return NULL;
5604
5605	mode->clock = intel_crtc_clock_get(dev, crtc);
5606	mode->hdisplay = (htot & 0xffff) + 1;
5607	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
5608	mode->hsync_start = (hsync & 0xffff) + 1;
5609	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
5610	mode->vdisplay = (vtot & 0xffff) + 1;
5611	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
5612	mode->vsync_start = (vsync & 0xffff) + 1;
5613	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
5614
5615	drm_mode_set_name(mode);
 
5616
5617	return mode;
5618}
5619
5620#define GPU_IDLE_TIMEOUT 500 /* ms */
5621
5622/* When this timer fires, we've been idle for awhile */
5623static void intel_gpu_idle_timer(unsigned long arg)
5624{
5625	struct drm_device *dev = (struct drm_device *)arg;
5626	drm_i915_private_t *dev_priv = dev->dev_private;
5627
5628	if (!list_empty(&dev_priv->mm.active_list)) {
5629		/* Still processing requests, so just re-arm the timer. */
5630		mod_timer(&dev_priv->idle_timer, jiffies +
5631			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5632		return;
5633	}
5634
5635	dev_priv->busy = false;
5636	queue_work(dev_priv->wq, &dev_priv->idle_work);
5637}
5638
5639#define CRTC_IDLE_TIMEOUT 1000 /* ms */
5640
5641static void intel_crtc_idle_timer(unsigned long arg)
5642{
5643	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
5644	struct drm_crtc *crtc = &intel_crtc->base;
5645	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
5646	struct intel_framebuffer *intel_fb;
5647
5648	intel_fb = to_intel_framebuffer(crtc->fb);
5649	if (intel_fb && intel_fb->obj->active) {
5650		/* The framebuffer is still being accessed by the GPU. */
5651		mod_timer(&intel_crtc->idle_timer, jiffies +
5652			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5653		return;
5654	}
5655
5656	intel_crtc->busy = false;
5657	queue_work(dev_priv->wq, &dev_priv->idle_work);
5658}
5659
/* Switch the LVDS pixel clock back to the full (non-downclocked) rate when
 * the display becomes busy, then arm the idle timer so the downclock can be
 * rescheduled once activity stops.  No-op on PCH-split hardware or when no
 * downclocked mode is available.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* DPLL writes are ignored while the panel power sequencer
		 * has the registers locked. */
		assert_panel_unlocked(dev_priv, pipe);

		/* Clearing FPA1 selects the FP0 (full-rate) dividers. */
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to confirm the switch actually took effect. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}

	/* Schedule downclock */
	mod_timer(&intel_crtc->idle_timer, jiffies +
		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
5694
/* Switch the LVDS pixel clock down to the reduced (FP1) rate once the CRTC
 * has gone idle.  Only runs from the idle timer path; no-op on PCH-split
 * hardware, when downclocking is unavailable, or when no low-frequency mode
 * was programmed for this CRTC.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* DPLL writes are ignored while the panel power sequencer
		 * has the registers locked. */
		assert_panel_unlocked(dev_priv, pipe);

		/* Setting FPA1 selects the FP1 (reduced-rate) dividers. */
		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		/* Read back to confirm the switch actually took effect. */
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}
5730
5731/**
5732 * intel_idle_update - adjust clocks for idleness
5733 * @work: work struct
5734 *
5735 * Either the GPU or display (or both) went idle.  Check the busy status
5736 * here and adjust the CRTC and GPU clocks as necessary.
5737 */
5738static void intel_idle_update(struct work_struct *work)
5739{
5740	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
5741						    idle_work);
5742	struct drm_device *dev = dev_priv->dev;
5743	struct drm_crtc *crtc;
5744	struct intel_crtc *intel_crtc;
5745
5746	if (!i915_powersave)
5747		return;
5748
5749	mutex_lock(&dev->struct_mutex);
5750
5751	i915_update_gfx_val(dev_priv);
5752
5753	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5754		/* Skip inactive CRTCs */
5755		if (!crtc->fb)
5756			continue;
5757
5758		intel_crtc = to_intel_crtc(crtc);
5759		if (!intel_crtc->busy)
5760			intel_decrease_pllclock(crtc);
5761	}
5762
5763
5764	mutex_unlock(&dev->struct_mutex);
5765}
5766
5767/**
5768 * intel_mark_busy - mark the GPU and possibly the display busy
5769 * @dev: drm device
5770 * @obj: object we're operating on
5771 *
5772 * Callers can use this function to indicate that the GPU is busy processing
5773 * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
5774 * buffer), we'll also mark the display as busy, so we know to increase its
5775 * clock frequency.
5776 */
5777void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
5778{
5779	drm_i915_private_t *dev_priv = dev->dev_private;
5780	struct drm_crtc *crtc = NULL;
5781	struct intel_framebuffer *intel_fb;
5782	struct intel_crtc *intel_crtc;
5783
5784	if (!drm_core_check_feature(dev, DRIVER_MODESET))
5785		return;
5786
5787	if (!dev_priv->busy) {
5788		intel_sanitize_pm(dev);
5789		dev_priv->busy = true;
5790	} else
5791		mod_timer(&dev_priv->idle_timer, jiffies +
5792			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5793
5794	if (obj == NULL)
5795		return;
5796
5797	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5798		if (!crtc->fb)
5799			continue;
5800
5801		intel_crtc = to_intel_crtc(crtc);
5802		intel_fb = to_intel_framebuffer(crtc->fb);
5803		if (intel_fb->obj == obj) {
5804			if (!intel_crtc->busy) {
5805				/* Non-busy -> busy, upclock */
5806				intel_increase_pllclock(crtc);
5807				intel_crtc->busy = true;
5808			} else {
5809				/* Busy -> busy, put off timer */
5810				mod_timer(&intel_crtc->idle_timer, jiffies +
5811					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5812			}
5813		}
5814	}
5815}
5816
5817static void intel_crtc_destroy(struct drm_crtc *crtc)
5818{
5819	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5820	struct drm_device *dev = crtc->dev;
5821	struct intel_unpin_work *work;
5822	unsigned long flags;
5823
5824	spin_lock_irqsave(&dev->event_lock, flags);
5825	work = intel_crtc->unpin_work;
5826	intel_crtc->unpin_work = NULL;
5827	spin_unlock_irqrestore(&dev->event_lock, flags);
5828
5829	if (work) {
5830		cancel_work_sync(&work->work);
5831		kfree(work);
5832	}
5833
5834	drm_crtc_cleanup(crtc);
5835
5836	kfree(intel_crtc);
5837}
5838
5839static void intel_unpin_work_fn(struct work_struct *__work)
5840{
5841	struct intel_unpin_work *work =
5842		container_of(__work, struct intel_unpin_work, work);
5843
5844	mutex_lock(&work->dev->struct_mutex);
5845	intel_unpin_fb_obj(work->old_fb_obj);
5846	drm_gem_object_unreference(&work->pending_flip_obj->base);
5847	drm_gem_object_unreference(&work->old_fb_obj->base);
5848
5849	intel_update_fbc(work->dev);
5850	mutex_unlock(&work->dev->struct_mutex);
5851	kfree(work);
5852}
5853
/* Complete the pending page flip on @crtc: deliver the userspace vblank
 * event with a timestamp corrected for flips that complete ahead of the
 * vblank interrupt, drop the vblank reference, clear the plane's
 * pending_flip bit, and hand the unpin work to the workqueue.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;
	struct drm_pending_vblank_event *e;
	struct timeval tnow, tvbl;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	do_gettimeofday(&tnow);

	/* unpin_work is protected by the event spin lock. */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	if (work == NULL || !work->pending) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	intel_crtc->unpin_work = NULL;

	if (work->event) {
		e = work->event;
		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);

		/* Called before vblank count and timestamps have
		 * been updated for the vblank interval of flip
		 * completion? Need to increment vblank count and
		 * add one videorefresh duration to returned timestamp
		 * to account for this. We assume this happened if we
		 * get called over 0.9 frame durations after the last
		 * timestamped vblank.
		 *
		 * This calculation can not be used with vrefresh rates
		 * below 5Hz (10Hz to be on the safe side) without
		 * promoting to 64 integers.
		 */
		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
		    9 * crtc->framedur_ns) {
			e->event.sequence++;
			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
					     crtc->framedur_ns);
		}

		e->event.tv_sec = tvbl.tv_sec;
		e->event.tv_usec = tvbl.tv_usec;

		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	obj = work->old_fb_obj;

	/* Unblock renderers waiting for the old buffer to leave the
	 * display plane. */
	atomic_clear_mask(1 << intel_crtc->plane,
			  &obj->pending_flip.counter);
	if (atomic_read(&obj->pending_flip) == 0)
		wake_up(&dev_priv->pending_flip_queue);

	/* The unpin itself needs struct_mutex, so defer it to a worker. */
	schedule_work(&work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
5926
5927void intel_finish_page_flip(struct drm_device *dev, int pipe)
5928{
5929	drm_i915_private_t *dev_priv = dev->dev_private;
5930	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
5931
5932	do_intel_finish_page_flip(dev, crtc);
5933}
5934
5935void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
5936{
5937	drm_i915_private_t *dev_priv = dev->dev_private;
5938	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
5939
5940	do_intel_finish_page_flip(dev, crtc);
5941}
5942
5943void intel_prepare_page_flip(struct drm_device *dev, int plane)
5944{
5945	drm_i915_private_t *dev_priv = dev->dev_private;
5946	struct intel_crtc *intel_crtc =
5947		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
5948	unsigned long flags;
5949
5950	spin_lock_irqsave(&dev->event_lock, flags);
5951	if (intel_crtc->unpin_work) {
5952		if ((++intel_crtc->unpin_work->pending) > 1)
5953			DRM_ERROR("Prepared flip multiple times\n");
5954	} else {
5955		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
5956	}
5957	spin_unlock_irqrestore(&dev->event_lock, flags);
5958}
5959
/* Queue a page flip on gen2 via MI_DISPLAY_FLIP on the render ring.
 * Pins and fences @obj first; on any failure the object is unpinned and a
 * negative errno is returned.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, obj->gtt_offset + offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
6005
/* Queue a page flip on gen3; same flow as gen2 but using the gen3
 * MI_DISPLAY_FLIP_I915 opcode.  Pins and fences @obj first; on any failure
 * the object is unpinned and a negative errno is returned.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Serialize against any flip still in flight on this plane. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, obj->gtt_offset + offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
6049
/* Queue a page flip on gen4/5.  Pins and fences @obj first; on any failure
 * the object is unpinned and a negative errno is returned.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
6093
/* Queue a page flip on gen6; the tiling mode is carried in the pitch dword
 * rather than the base-address dword.  Pins and fences @obj first; on any
 * failure the object is unpinned and a negative errno is returned.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, obj->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
6135
6136/*
6137 * On gen7 we currently use the blit ring because (in early silicon at least)
6138 * the render ring doesn't give us interrpts for page flip completion, which
6139 * means clients will hang after the first flip is queued.  Fortunately the
6140 * blit ring generates interrupts properly, so use it instead.
6141 */
6142static int intel_gen7_queue_flip(struct drm_device *dev,
6143				 struct drm_crtc *crtc,
6144				 struct drm_framebuffer *fb,
6145				 struct drm_i915_gem_object *obj)
6146{
6147	struct drm_i915_private *dev_priv = dev->dev_private;
6148	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6149	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
6150	uint32_t plane_bit = 0;
6151	int ret;
6152
6153	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6154	if (ret)
6155		goto err;
6156
6157	switch(intel_crtc->plane) {
6158	case PLANE_A:
6159		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
6160		break;
6161	case PLANE_B:
6162		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
6163		break;
6164	case PLANE_C:
6165		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
6166		break;
6167	default:
6168		WARN_ONCE(1, "unknown plane in flip command\n");
6169		ret = -ENODEV;
6170		goto err;
6171	}
6172
6173	ret = intel_ring_begin(ring, 4);
6174	if (ret)
6175		goto err_unpin;
6176
6177	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
6178	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
6179	intel_ring_emit(ring, (obj->gtt_offset));
6180	intel_ring_emit(ring, (MI_NOOP));
6181	intel_ring_advance(ring);
6182	return 0;
6183
6184err_unpin:
6185	intel_unpin_fb_obj(obj);
6186err:
6187	return ret;
6188}
6189
/* Fallback .queue_flip for platforms with no flip implementation: always
 * fails with -ENODEV so userspace falls back to full modesets/blits.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj)
{
	return -ENODEV;
}
6197
6198static int intel_crtc_page_flip(struct drm_crtc *crtc,
6199				struct drm_framebuffer *fb,
6200				struct drm_pending_vblank_event *event)
6201{
6202	struct drm_device *dev = crtc->dev;
6203	struct drm_i915_private *dev_priv = dev->dev_private;
6204	struct intel_framebuffer *intel_fb;
6205	struct drm_i915_gem_object *obj;
6206	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6207	struct intel_unpin_work *work;
6208	unsigned long flags;
6209	int ret;
6210
6211	work = kzalloc(sizeof *work, GFP_KERNEL);
6212	if (work == NULL)
6213		return -ENOMEM;
6214
6215	work->event = event;
6216	work->dev = crtc->dev;
6217	intel_fb = to_intel_framebuffer(crtc->fb);
6218	work->old_fb_obj = intel_fb->obj;
6219	INIT_WORK(&work->work, intel_unpin_work_fn);
6220
6221	ret = drm_vblank_get(dev, intel_crtc->pipe);
6222	if (ret)
6223		goto free_work;
6224
6225	/* We borrow the event spin lock for protecting unpin_work */
6226	spin_lock_irqsave(&dev->event_lock, flags);
6227	if (intel_crtc->unpin_work) {
6228		spin_unlock_irqrestore(&dev->event_lock, flags);
6229		kfree(work);
6230		drm_vblank_put(dev, intel_crtc->pipe);
6231
6232		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6233		return -EBUSY;
6234	}
6235	intel_crtc->unpin_work = work;
6236	spin_unlock_irqrestore(&dev->event_lock, flags);
6237
6238	intel_fb = to_intel_framebuffer(fb);
6239	obj = intel_fb->obj;
6240
6241	mutex_lock(&dev->struct_mutex);
6242
6243	/* Reference the objects for the scheduled work. */
6244	drm_gem_object_reference(&work->old_fb_obj->base);
6245	drm_gem_object_reference(&obj->base);
6246
6247	crtc->fb = fb;
6248
 
 
 
 
6249	work->pending_flip_obj = obj;
6250
6251	work->enable_stall_check = true;
6252
6253	/* Block clients from rendering to the new back buffer until
6254	 * the flip occurs and the object is no longer visible.
6255	 */
6256	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6257
6258	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6259	if (ret)
6260		goto cleanup_pending;
6261
6262	intel_disable_fbc(dev);
6263	intel_mark_busy(dev, obj);
6264	mutex_unlock(&dev->struct_mutex);
6265
6266	trace_i915_flip_request(intel_crtc->plane, obj);
6267
6268	return 0;
6269
6270cleanup_pending:
6271	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 
6272	drm_gem_object_unreference(&work->old_fb_obj->base);
6273	drm_gem_object_unreference(&obj->base);
6274	mutex_unlock(&dev->struct_mutex);
6275
6276	spin_lock_irqsave(&dev->event_lock, flags);
6277	intel_crtc->unpin_work = NULL;
6278	spin_unlock_irqrestore(&dev->event_lock, flags);
6279
6280	drm_vblank_put(dev, intel_crtc->pipe);
6281free_work:
6282	kfree(work);
6283
6284	return ret;
6285}
6286
/* Fix up any modesetting state left behind by the BIOS or bootloader on
 * @pipe/@plane: clear debug frame-start delays on all pipes and, on
 * non-PCH hardware, disable a plane that was left scanning out of the
 * other pipe so later teardown shuts things down in a safe order.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;
	int i;

	/* Clear any frame start delays used for debugging left by the BIOS */
	for_each_pipe(i) {
		reg = PIPECONF(i);
		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	/* Plane already routed to the pipe we expect: nothing to fix. */
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
6329
6330static void intel_crtc_reset(struct drm_crtc *crtc)
6331{
6332	struct drm_device *dev = crtc->dev;
6333	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6334
6335	/* Reset flags back to the 'unknown' status so that they
6336	 * will be correctly set on the initial modeset.
6337	 */
6338	intel_crtc->dpms_mode = -1;
6339
6340	/* We need to fix up any BIOS configuration that conflicts with
6341	 * our expectations.
6342	 */
6343	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6344}
6345
/* CRTC helper vtable.  Deliberately non-const: .prepare and .commit are
 * filled in per-platform by intel_crtc_init(). */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
6355
/* Core DRM CRTC vtable registered for every intel_crtc. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
6365
/* Record the register offsets (DPLL control plus both divider registers)
 * for each shared PCH PLL this device has; skipped entirely when
 * num_pch_pll is zero.
 */
static void intel_pch_pll_init(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if (dev_priv->num_pch_pll == 0) {
		DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
		return;
	}

	for (i = 0; i < dev_priv->num_pch_pll; i++) {
		dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
		dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
		dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
	}
}
6382
/* Allocate and register the CRTC for @pipe: set up a linear gamma LUT,
 * the pipe<->plane mappings, the platform-specific helper callbacks and
 * the downclock idle timer.  Silently returns on allocation failure.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* Extra space after the crtc holds the connector pointer array. */
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity (linear) gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	/* Fill in the platform-specific prepare/commit helpers in the
	 * shared (file-scope) helper vtable. */
	if (HAS_PCH_SPLIT(dev)) {
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
		    (unsigned long)intel_crtc);
}
6434
6435int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
6436				struct drm_file *file)
6437{
 
6438	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
6439	struct drm_mode_object *drmmode_obj;
6440	struct intel_crtc *crtc;
6441
6442	if (!drm_core_check_feature(dev, DRIVER_MODESET))
6443		return -ENODEV;
 
 
6444
6445	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
6446			DRM_MODE_OBJECT_CRTC);
6447
6448	if (!drmmode_obj) {
6449		DRM_ERROR("no such CRTC id\n");
6450		return -EINVAL;
6451	}
6452
6453	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
6454	pipe_from_crtc_id->pipe = crtc->pipe;
6455
6456	return 0;
6457}
6458
6459static int intel_encoder_clones(struct drm_device *dev, int type_mask)
6460{
6461	struct intel_encoder *encoder;
6462	int index_mask = 0;
6463	int entry = 0;
6464
6465	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6466		if (type_mask & encoder->clone_mask)
6467			index_mask |= (1 << entry);
6468		entry++;
6469	}
6470
6471	return index_mask;
6472}
6473
6474static bool has_edp_a(struct drm_device *dev)
6475{
6476	struct drm_i915_private *dev_priv = dev->dev_private;
6477
6478	if (!IS_MOBILE(dev))
6479		return false;
6480
6481	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6482		return false;
6483
6484	if (IS_GEN5(dev) &&
6485	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6486		return false;
6487
6488	return true;
6489}
6490
/*
 * Probe the platform's strap/detect registers and register an encoder
 * for every output found, then fill in the possible_crtcs /
 * possible_clones masks and shut down anything left unused.
 *
 * Probe order matters: eDP ports are registered before external DP on
 * the same port, and SDVO is tried before falling back to HDMI/DP on
 * the multiplexed ports.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds;

	has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		/* Port D may carry eDP; register eDP ports first. */
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (IS_HASWELL(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		/* Port D was already claimed above if it is eDP. */
		if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/* NOTE: reading SDVOB (not SDVOC) here is intentional per
		 * the comment above — pre-G4X SDVOC shares SDVOB's detect
		 * bit. */
		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC, false);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	/* Now that all encoders exist, compute the crtc/clone masks. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	if (HAS_PCH_SPLIT(dev))
		ironlake_init_pch_refclk(dev);
}
6618
6619static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
6620{
6621	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
6622
6623	drm_framebuffer_cleanup(fb);
6624	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
6625
6626	kfree(intel_fb);
6627}
6628
6629static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
6630						struct drm_file *file,
6631						unsigned int *handle)
6632{
6633	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
6634	struct drm_i915_gem_object *obj = intel_fb->obj;
6635
6636	return drm_gem_handle_create(file, &obj->base, handle);
6637}
6638
/* Framebuffer vtable shared by all intel framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
6643
6644int intel_framebuffer_init(struct drm_device *dev,
6645			   struct intel_framebuffer *intel_fb,
6646			   struct drm_mode_fb_cmd2 *mode_cmd,
6647			   struct drm_i915_gem_object *obj)
6648{
6649	int ret;
6650
6651	if (obj->tiling_mode == I915_TILING_Y)
6652		return -EINVAL;
6653
6654	if (mode_cmd->pitches[0] & 63)
6655		return -EINVAL;
6656
6657	switch (mode_cmd->pixel_format) {
6658	case DRM_FORMAT_RGB332:
6659	case DRM_FORMAT_RGB565:
6660	case DRM_FORMAT_XRGB8888:
6661	case DRM_FORMAT_XBGR8888:
6662	case DRM_FORMAT_ARGB8888:
6663	case DRM_FORMAT_XRGB2101010:
6664	case DRM_FORMAT_ARGB2101010:
6665		/* RGB formats are common across chipsets */
6666		break;
6667	case DRM_FORMAT_YUYV:
6668	case DRM_FORMAT_UYVY:
6669	case DRM_FORMAT_YVYU:
6670	case DRM_FORMAT_VYUY:
6671		break;
6672	default:
6673		DRM_DEBUG_KMS("unsupported pixel format %u\n",
6674				mode_cmd->pixel_format);
6675		return -EINVAL;
6676	}
6677
6678	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
6679	if (ret) {
6680		DRM_ERROR("framebuffer init failed %d\n", ret);
6681		return ret;
6682	}
6683
6684	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
6685	intel_fb->obj = obj;
6686	return 0;
6687}
6688
6689static struct drm_framebuffer *
6690intel_user_framebuffer_create(struct drm_device *dev,
6691			      struct drm_file *filp,
6692			      struct drm_mode_fb_cmd2 *mode_cmd)
6693{
6694	struct drm_i915_gem_object *obj;
6695
6696	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
6697						mode_cmd->handles[0]));
6698	if (&obj->base == NULL)
6699		return ERR_PTR(-ENOENT);
6700
6701	return intel_framebuffer_create(dev, mode_cmd, obj);
6702}
6703
/* Driver-wide mode-config hooks handed to the DRM core. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};
6708
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.dpms = ironlake_crtc_dpms;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else {
		dev_priv->display.dpms = i9xx_crtc_dpms;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_plane = i9xx_update_plane;
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* FDI link training and audio ELD writeback are generation
	 * specific on PCH platforms. */
	if (HAS_PCH_SPLIT(dev)) {
		if (IS_GEN5(dev)) {
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_HASWELL(dev)) {
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.force_wake_get = vlv_force_wake_get;
		dev_priv->display.force_wake_put = vlv_force_wake_put;
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	/* Page-flip command emission differs per generation. */
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}
6801
6802/*
6803 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
6804 * resume, or other times.  This quirk makes sure that's the case for
6805 * affected systems.
6806 */
6807static void quirk_pipea_force(struct drm_device *dev)
6808{
6809	struct drm_i915_private *dev_priv = dev->dev_private;
6810
6811	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
6812	DRM_INFO("applying pipe a force quirk\n");
6813}
6814
6815/*
6816 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
6817 */
6818static void quirk_ssc_force_disable(struct drm_device *dev)
6819{
6820	struct drm_i915_private *dev_priv = dev->dev_private;
6821	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
6822	DRM_INFO("applying lvds SSC disable quirk\n");
6823}
6824
6825/*
6826 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
6827 * brightness value
6828 */
6829static void quirk_invert_brightness(struct drm_device *dev)
6830{
6831	struct drm_i915_private *dev_priv = dev->dev_private;
6832	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
6833	DRM_INFO("applying inverted panel brightness quirk\n");
6834}
6835
/* One quirk-table entry: PCI match keys plus the hook to run. */
struct intel_quirk {
	int device;		/* PCI device id to match */
	int subsystem_vendor;	/* subsystem vendor id, or PCI_ANY_ID */
	int subsystem_device;	/* subsystem device id, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied on match */
};
6842
/* Per-machine quirk table, matched against the PCI ids at init. */
static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */
	/* NOTE(review): the X40 comment above has no table entry — looks
	 * like a missing or intentionally dropped quirk; confirm the
	 * device/subsystem ids before adding one. */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
};
6872
6873static void intel_init_quirks(struct drm_device *dev)
6874{
6875	struct pci_dev *d = dev->pdev;
6876	int i;
6877
6878	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
6879		struct intel_quirk *q = &intel_quirks[i];
6880
6881		if (d->device == q->device &&
6882		    (d->subsystem_vendor == q->subsystem_vendor ||
6883		     q->subsystem_vendor == PCI_ANY_ID) &&
6884		    (d->subsystem_device == q->subsystem_device ||
6885		     q->subsystem_device == PCI_ANY_ID))
6886			q->hook(dev);
6887	}
6888}
6889
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* The VGA control register lives at a different offset on PCH
	 * split platforms. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	/* Take legacy VGA I/O space, set the screen-off bit (bit 5) in
	 * sequencer register SR01 via the index/data pair, then release
	 * the I/O space again. */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);	/* let the sequencer settle before the plane write */

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the write */
}
6912
/*
 * Bring up the power-management related display hardware: clock
 * gating first, then DRPS/RC6 on Ironlake-M, or RPS on gen6/gen7
 * (excluding Valleyview).
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
	}

	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	}
}
6930
/*
 * Main modesetting entry point: set up the DRM mode config, apply
 * quirks, install the per-chip display vtable, create CRTCs/planes
 * and encoders, and start the idle tracking machinery.  Ordering is
 * significant — quirks and the display vtable must be in place before
 * CRTCs and outputs are created.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	intel_prepare_ddi(dev);

	intel_init_display(dev);

	/* Maximum framebuffer dimensions grow with the generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

	/* One CRTC and one sprite plane per pipe; plane init failure is
	 * non-fatal. */
	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
		ret = intel_plane_init(dev, i);
		if (ret)
			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
	}

	intel_pch_pll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
		    (unsigned long)dev);
}
6986
/* GEM-time modeset init: power/clock setup, then the overlay engine. */
void intel_modeset_gem_init(struct drm_device *dev)
{
	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);
}
6993
/*
 * Tear down modesetting in the reverse order of init.  The sequence
 * is deliberate: stop polling, undo power tweaks under struct_mutex,
 * uninstall the irq before mode objects go away, flush pending work,
 * kill per-crtc timers, and only then free the mode config.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	drm_kms_helper_poll_fini(dev);
	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();


	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
		gen6_disable_rps(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_rc6(dev);

	if (IS_VALLEYVIEW(dev))
		vlv_init_dpio(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Disable the irq before mode object teardown, for the irq might
	 * enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	cancel_work_sync(&dev_priv->rps_work);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* Shut off idle work before the crtcs get freed. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		del_timer_sync(&intel_crtc->idle_timer);
	}
	del_timer_sync(&dev_priv->idle_timer);
	cancel_work_sync(&dev_priv->idle_work);

	drm_mode_config_cleanup(dev);
}
7049
7050/*
7051 * Return which encoder is currently attached for connector.
7052 */
7053struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
7054{
7055	return &intel_attached_encoder(connector)->base;
7056}
7057
/* Record the encoder on the connector and link them in the DRM core. */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}
7065
7066/*
7067 * set vga decode state - true == enable VGA decode
7068 */
7069int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
7070{
7071	struct drm_i915_private *dev_priv = dev->dev_private;
7072	u16 gmch_ctrl;
7073
7074	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
7075	if (state)
7076		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
7077	else
7078		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
7079	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
7080	return 0;
7081}
7082
7083#ifdef CONFIG_DEBUG_FS
7084#include <linux/seq_file.h>
7085
/* Snapshot of display register state captured on GPU error for later
 * dumping via debugfs.  Sized for two pipes/planes/cursors. */
struct intel_display_error_state {
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): never filled by the capture
				 * function in this file — verify intent */
	} cursor[2];

	struct intel_pipe_error_state {
		u32 conf;
		u32 source;

		/* pipe timing registers */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;		/* gen4+ only */
		u32 tile_offset;	/* gen4+ only */
	} plane[2];
};
7116
/*
 * Capture cursor/plane/pipe register state for error dumping.  Called
 * from error-handling context, hence GFP_ATOMIC; returns NULL if the
 * allocation fails.
 *
 * NOTE(review): only the first two pipes are captured (fixed loop
 * bound of 2) — platforms with more pipes would be truncated; confirm
 * against dev_priv->num_pipe.  cursor[i].size is left uninitialized.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int i;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	for (i = 0; i < 2; i++) {
		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		error->plane[i].size = I915_READ(DSPSIZE(i));
		error->plane[i].pos = I915_READ(DSPPOS(i));
		error->plane[i].addr = I915_READ(DSPADDR(i));
		/* Surface/tile-offset registers only exist on gen4+. */
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].conf = I915_READ(PIPECONF(i));
		error->pipe[i].source = I915_READ(PIPESRC(i));
		error->pipe[i].htotal = I915_READ(HTOTAL(i));
		error->pipe[i].hblank = I915_READ(HBLANK(i));
		error->pipe[i].hsync = I915_READ(HSYNC(i));
		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
		error->pipe[i].vblank = I915_READ(VBLANK(i));
		error->pipe[i].vsync = I915_READ(VSYNC(i));
	}

	return error;
}
7155
/*
 * Pretty-print a previously captured display error state to a debugfs
 * seq_file.  Mirrors the two-pipe layout of the capture function;
 * gen4+-only fields are printed conditionally.
 */
void
intel_display_print_error_state(struct seq_file *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	for (i = 0; i < 2; i++) {
		seq_printf(m, "Pipe [%d]:\n", i);
		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

		seq_printf(m, "Plane [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		seq_printf(m, "Cursor [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}
}
7191#endif