Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.13.7.
    1/*
    2 * Copyright © 2006-2007 Intel Corporation
    3 *
    4 * Permission is hereby granted, free of charge, to any person obtaining a
    5 * copy of this software and associated documentation files (the "Software"),
    6 * to deal in the Software without restriction, including without limitation
    7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
    8 * and/or sell copies of the Software, and to permit persons to whom the
    9 * Software is furnished to do so, subject to the following conditions:
   10 *
   11 * The above copyright notice and this permission notice (including the next
   12 * paragraph) shall be included in all copies or substantial portions of the
   13 * Software.
   14 *
   15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
   18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   21 * DEALINGS IN THE SOFTWARE.
   22 *
   23 * Authors:
   24 *	Eric Anholt <eric@anholt.net>
   25 */
   26
   27#include <linux/dmi.h>
   28#include <linux/module.h>
   29#include <linux/input.h>
   30#include <linux/i2c.h>
   31#include <linux/kernel.h>
   32#include <linux/slab.h>
   33#include <linux/vgaarb.h>
   34#include <drm/drm_edid.h>
   35#include <drm/drmP.h>
   36#include "intel_drv.h"
   37#include "intel_frontbuffer.h"
   38#include <drm/i915_drm.h>
   39#include "i915_drv.h"
   40#include "i915_gem_clflush.h"
   41#include "intel_dsi.h"
   42#include "i915_trace.h"
   43#include <drm/drm_atomic.h>
   44#include <drm/drm_atomic_helper.h>
   45#include <drm/drm_dp_helper.h>
   46#include <drm/drm_crtc_helper.h>
   47#include <drm/drm_plane_helper.h>
   48#include <drm/drm_rect.h>
   49#include <linux/dma_remapping.h>
   50#include <linux/reservation.h>
   51
   52/* Primary plane formats for gen <= 3 */
   53static const uint32_t i8xx_primary_formats[] = {
   54	DRM_FORMAT_C8,
   55	DRM_FORMAT_RGB565,
   56	DRM_FORMAT_XRGB1555,
   57	DRM_FORMAT_XRGB8888,
   58};
   59
   60/* Primary plane formats for gen >= 4 */
   61static const uint32_t i965_primary_formats[] = {
   62	DRM_FORMAT_C8,
   63	DRM_FORMAT_RGB565,
   64	DRM_FORMAT_XRGB8888,
   65	DRM_FORMAT_XBGR8888,
   66	DRM_FORMAT_XRGB2101010,
   67	DRM_FORMAT_XBGR2101010,
   68};
   69
   70static const uint64_t i9xx_format_modifiers[] = {
   71	I915_FORMAT_MOD_X_TILED,
   72	DRM_FORMAT_MOD_LINEAR,
   73	DRM_FORMAT_MOD_INVALID
   74};
   75
   76static const uint32_t skl_primary_formats[] = {
   77	DRM_FORMAT_C8,
   78	DRM_FORMAT_RGB565,
   79	DRM_FORMAT_XRGB8888,
   80	DRM_FORMAT_XBGR8888,
   81	DRM_FORMAT_ARGB8888,
   82	DRM_FORMAT_ABGR8888,
   83	DRM_FORMAT_XRGB2101010,
   84	DRM_FORMAT_XBGR2101010,
   85	DRM_FORMAT_YUYV,
   86	DRM_FORMAT_YVYU,
   87	DRM_FORMAT_UYVY,
   88	DRM_FORMAT_VYUY,
   89};
   90
   91static const uint64_t skl_format_modifiers_noccs[] = {
   92	I915_FORMAT_MOD_Yf_TILED,
   93	I915_FORMAT_MOD_Y_TILED,
   94	I915_FORMAT_MOD_X_TILED,
   95	DRM_FORMAT_MOD_LINEAR,
   96	DRM_FORMAT_MOD_INVALID
   97};
   98
   99static const uint64_t skl_format_modifiers_ccs[] = {
  100	I915_FORMAT_MOD_Yf_TILED_CCS,
  101	I915_FORMAT_MOD_Y_TILED_CCS,
  102	I915_FORMAT_MOD_Yf_TILED,
  103	I915_FORMAT_MOD_Y_TILED,
  104	I915_FORMAT_MOD_X_TILED,
  105	DRM_FORMAT_MOD_LINEAR,
  106	DRM_FORMAT_MOD_INVALID
  107};
  108
  109/* Cursor formats */
  110static const uint32_t intel_cursor_formats[] = {
  111	DRM_FORMAT_ARGB8888,
  112};
  113
  114static const uint64_t cursor_format_modifiers[] = {
  115	DRM_FORMAT_MOD_LINEAR,
  116	DRM_FORMAT_MOD_INVALID
  117};
  118
  119static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  120				struct intel_crtc_state *pipe_config);
  121static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  122				   struct intel_crtc_state *pipe_config);
  123
  124static int intel_framebuffer_init(struct intel_framebuffer *ifb,
  125				  struct drm_i915_gem_object *obj,
  126				  struct drm_mode_fb_cmd2 *mode_cmd);
  127static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
  128static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
  129static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
  130static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  131					 struct intel_link_m_n *m_n,
  132					 struct intel_link_m_n *m2_n2);
  133static void ironlake_set_pipeconf(struct drm_crtc *crtc);
  134static void haswell_set_pipeconf(struct drm_crtc *crtc);
  135static void haswell_set_pipemisc(struct drm_crtc *crtc);
  136static void vlv_prepare_pll(struct intel_crtc *crtc,
  137			    const struct intel_crtc_state *pipe_config);
  138static void chv_prepare_pll(struct intel_crtc *crtc,
  139			    const struct intel_crtc_state *pipe_config);
  140static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
  141static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
  142static void intel_crtc_init_scalers(struct intel_crtc *crtc,
  143				    struct intel_crtc_state *crtc_state);
  144static void skylake_pfit_enable(struct intel_crtc *crtc);
  145static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
  146static void ironlake_pfit_enable(struct intel_crtc *crtc);
  147static void intel_modeset_setup_hw_state(struct drm_device *dev,
  148					 struct drm_modeset_acquire_ctx *ctx);
  149static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
  150
  151struct intel_limit {
  152	struct {
  153		int min, max;
  154	} dot, vco, n, m, m1, m2, p, p1;
  155
  156	struct {
  157		int dot_limit;
  158		int p2_slow, p2_fast;
  159	} p2;
  160};
  161
  162/* returns HPLL frequency in kHz */
  163int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
  164{
  165	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
  166
  167	/* Obtain SKU information */
  168	mutex_lock(&dev_priv->sb_lock);
  169	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
  170		CCK_FUSE_HPLL_FREQ_MASK;
  171	mutex_unlock(&dev_priv->sb_lock);
  172
  173	return vco_freq[hpll_freq] * 1000;
  174}
  175
  176int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
  177		      const char *name, u32 reg, int ref_freq)
  178{
  179	u32 val;
  180	int divider;
  181
  182	mutex_lock(&dev_priv->sb_lock);
  183	val = vlv_cck_read(dev_priv, reg);
  184	mutex_unlock(&dev_priv->sb_lock);
  185
  186	divider = val & CCK_FREQUENCY_VALUES;
  187
  188	WARN((val & CCK_FREQUENCY_STATUS) !=
  189	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
  190	     "%s change in progress\n", name);
  191
  192	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
  193}
  194
  195int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
  196			   const char *name, u32 reg)
  197{
  198	if (dev_priv->hpll_freq == 0)
  199		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
  200
  201	return vlv_get_cck_clock(dev_priv, name, reg,
  202				 dev_priv->hpll_freq);
  203}
  204
  205static void intel_update_czclk(struct drm_i915_private *dev_priv)
  206{
  207	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
  208		return;
  209
  210	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
  211						      CCK_CZ_CLOCK_CONTROL);
  212
  213	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
  214}
  215
  216static inline u32 /* units of 100MHz */
  217intel_fdi_link_freq(struct drm_i915_private *dev_priv,
  218		    const struct intel_crtc_state *pipe_config)
  219{
  220	if (HAS_DDI(dev_priv))
  221		return pipe_config->port_clock; /* SPLL */
  222	else
  223		return dev_priv->fdi_pll_freq;
  224}
  225
  226static const struct intel_limit intel_limits_i8xx_dac = {
  227	.dot = { .min = 25000, .max = 350000 },
  228	.vco = { .min = 908000, .max = 1512000 },
  229	.n = { .min = 2, .max = 16 },
  230	.m = { .min = 96, .max = 140 },
  231	.m1 = { .min = 18, .max = 26 },
  232	.m2 = { .min = 6, .max = 16 },
  233	.p = { .min = 4, .max = 128 },
  234	.p1 = { .min = 2, .max = 33 },
  235	.p2 = { .dot_limit = 165000,
  236		.p2_slow = 4, .p2_fast = 2 },
  237};
  238
  239static const struct intel_limit intel_limits_i8xx_dvo = {
  240	.dot = { .min = 25000, .max = 350000 },
  241	.vco = { .min = 908000, .max = 1512000 },
  242	.n = { .min = 2, .max = 16 },
  243	.m = { .min = 96, .max = 140 },
  244	.m1 = { .min = 18, .max = 26 },
  245	.m2 = { .min = 6, .max = 16 },
  246	.p = { .min = 4, .max = 128 },
  247	.p1 = { .min = 2, .max = 33 },
  248	.p2 = { .dot_limit = 165000,
  249		.p2_slow = 4, .p2_fast = 4 },
  250};
  251
  252static const struct intel_limit intel_limits_i8xx_lvds = {
  253	.dot = { .min = 25000, .max = 350000 },
  254	.vco = { .min = 908000, .max = 1512000 },
  255	.n = { .min = 2, .max = 16 },
  256	.m = { .min = 96, .max = 140 },
  257	.m1 = { .min = 18, .max = 26 },
  258	.m2 = { .min = 6, .max = 16 },
  259	.p = { .min = 4, .max = 128 },
  260	.p1 = { .min = 1, .max = 6 },
  261	.p2 = { .dot_limit = 165000,
  262		.p2_slow = 14, .p2_fast = 7 },
  263};
  264
  265static const struct intel_limit intel_limits_i9xx_sdvo = {
  266	.dot = { .min = 20000, .max = 400000 },
  267	.vco = { .min = 1400000, .max = 2800000 },
  268	.n = { .min = 1, .max = 6 },
  269	.m = { .min = 70, .max = 120 },
  270	.m1 = { .min = 8, .max = 18 },
  271	.m2 = { .min = 3, .max = 7 },
  272	.p = { .min = 5, .max = 80 },
  273	.p1 = { .min = 1, .max = 8 },
  274	.p2 = { .dot_limit = 200000,
  275		.p2_slow = 10, .p2_fast = 5 },
  276};
  277
  278static const struct intel_limit intel_limits_i9xx_lvds = {
  279	.dot = { .min = 20000, .max = 400000 },
  280	.vco = { .min = 1400000, .max = 2800000 },
  281	.n = { .min = 1, .max = 6 },
  282	.m = { .min = 70, .max = 120 },
  283	.m1 = { .min = 8, .max = 18 },
  284	.m2 = { .min = 3, .max = 7 },
  285	.p = { .min = 7, .max = 98 },
  286	.p1 = { .min = 1, .max = 8 },
  287	.p2 = { .dot_limit = 112000,
  288		.p2_slow = 14, .p2_fast = 7 },
  289};
  290
  291
  292static const struct intel_limit intel_limits_g4x_sdvo = {
  293	.dot = { .min = 25000, .max = 270000 },
  294	.vco = { .min = 1750000, .max = 3500000},
  295	.n = { .min = 1, .max = 4 },
  296	.m = { .min = 104, .max = 138 },
  297	.m1 = { .min = 17, .max = 23 },
  298	.m2 = { .min = 5, .max = 11 },
  299	.p = { .min = 10, .max = 30 },
  300	.p1 = { .min = 1, .max = 3},
  301	.p2 = { .dot_limit = 270000,
  302		.p2_slow = 10,
  303		.p2_fast = 10
  304	},
  305};
  306
  307static const struct intel_limit intel_limits_g4x_hdmi = {
  308	.dot = { .min = 22000, .max = 400000 },
  309	.vco = { .min = 1750000, .max = 3500000},
  310	.n = { .min = 1, .max = 4 },
  311	.m = { .min = 104, .max = 138 },
  312	.m1 = { .min = 16, .max = 23 },
  313	.m2 = { .min = 5, .max = 11 },
  314	.p = { .min = 5, .max = 80 },
  315	.p1 = { .min = 1, .max = 8},
  316	.p2 = { .dot_limit = 165000,
  317		.p2_slow = 10, .p2_fast = 5 },
  318};
  319
  320static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
  321	.dot = { .min = 20000, .max = 115000 },
  322	.vco = { .min = 1750000, .max = 3500000 },
  323	.n = { .min = 1, .max = 3 },
  324	.m = { .min = 104, .max = 138 },
  325	.m1 = { .min = 17, .max = 23 },
  326	.m2 = { .min = 5, .max = 11 },
  327	.p = { .min = 28, .max = 112 },
  328	.p1 = { .min = 2, .max = 8 },
  329	.p2 = { .dot_limit = 0,
  330		.p2_slow = 14, .p2_fast = 14
  331	},
  332};
  333
  334static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
  335	.dot = { .min = 80000, .max = 224000 },
  336	.vco = { .min = 1750000, .max = 3500000 },
  337	.n = { .min = 1, .max = 3 },
  338	.m = { .min = 104, .max = 138 },
  339	.m1 = { .min = 17, .max = 23 },
  340	.m2 = { .min = 5, .max = 11 },
  341	.p = { .min = 14, .max = 42 },
  342	.p1 = { .min = 2, .max = 6 },
  343	.p2 = { .dot_limit = 0,
  344		.p2_slow = 7, .p2_fast = 7
  345	},
  346};
  347
  348static const struct intel_limit intel_limits_pineview_sdvo = {
  349	.dot = { .min = 20000, .max = 400000},
  350	.vco = { .min = 1700000, .max = 3500000 },
  351	/* Pineview's Ncounter is a ring counter */
  352	.n = { .min = 3, .max = 6 },
  353	.m = { .min = 2, .max = 256 },
  354	/* Pineview only has one combined m divider, which we treat as m2. */
  355	.m1 = { .min = 0, .max = 0 },
  356	.m2 = { .min = 0, .max = 254 },
  357	.p = { .min = 5, .max = 80 },
  358	.p1 = { .min = 1, .max = 8 },
  359	.p2 = { .dot_limit = 200000,
  360		.p2_slow = 10, .p2_fast = 5 },
  361};
  362
  363static const struct intel_limit intel_limits_pineview_lvds = {
  364	.dot = { .min = 20000, .max = 400000 },
  365	.vco = { .min = 1700000, .max = 3500000 },
  366	.n = { .min = 3, .max = 6 },
  367	.m = { .min = 2, .max = 256 },
  368	.m1 = { .min = 0, .max = 0 },
  369	.m2 = { .min = 0, .max = 254 },
  370	.p = { .min = 7, .max = 112 },
  371	.p1 = { .min = 1, .max = 8 },
  372	.p2 = { .dot_limit = 112000,
  373		.p2_slow = 14, .p2_fast = 14 },
  374};
  375
  376/* Ironlake / Sandybridge
  377 *
  378 * We calculate clock using (register_value + 2) for N/M1/M2, so here
  379 * the range value for them is (actual_value - 2).
  380 */
  381static const struct intel_limit intel_limits_ironlake_dac = {
  382	.dot = { .min = 25000, .max = 350000 },
  383	.vco = { .min = 1760000, .max = 3510000 },
  384	.n = { .min = 1, .max = 5 },
  385	.m = { .min = 79, .max = 127 },
  386	.m1 = { .min = 12, .max = 22 },
  387	.m2 = { .min = 5, .max = 9 },
  388	.p = { .min = 5, .max = 80 },
  389	.p1 = { .min = 1, .max = 8 },
  390	.p2 = { .dot_limit = 225000,
  391		.p2_slow = 10, .p2_fast = 5 },
  392};
  393
  394static const struct intel_limit intel_limits_ironlake_single_lvds = {
  395	.dot = { .min = 25000, .max = 350000 },
  396	.vco = { .min = 1760000, .max = 3510000 },
  397	.n = { .min = 1, .max = 3 },
  398	.m = { .min = 79, .max = 118 },
  399	.m1 = { .min = 12, .max = 22 },
  400	.m2 = { .min = 5, .max = 9 },
  401	.p = { .min = 28, .max = 112 },
  402	.p1 = { .min = 2, .max = 8 },
  403	.p2 = { .dot_limit = 225000,
  404		.p2_slow = 14, .p2_fast = 14 },
  405};
  406
  407static const struct intel_limit intel_limits_ironlake_dual_lvds = {
  408	.dot = { .min = 25000, .max = 350000 },
  409	.vco = { .min = 1760000, .max = 3510000 },
  410	.n = { .min = 1, .max = 3 },
  411	.m = { .min = 79, .max = 127 },
  412	.m1 = { .min = 12, .max = 22 },
  413	.m2 = { .min = 5, .max = 9 },
  414	.p = { .min = 14, .max = 56 },
  415	.p1 = { .min = 2, .max = 8 },
  416	.p2 = { .dot_limit = 225000,
  417		.p2_slow = 7, .p2_fast = 7 },
  418};
  419
  420/* LVDS 100mhz refclk limits. */
  421static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
  422	.dot = { .min = 25000, .max = 350000 },
  423	.vco = { .min = 1760000, .max = 3510000 },
  424	.n = { .min = 1, .max = 2 },
  425	.m = { .min = 79, .max = 126 },
  426	.m1 = { .min = 12, .max = 22 },
  427	.m2 = { .min = 5, .max = 9 },
  428	.p = { .min = 28, .max = 112 },
  429	.p1 = { .min = 2, .max = 8 },
  430	.p2 = { .dot_limit = 225000,
  431		.p2_slow = 14, .p2_fast = 14 },
  432};
  433
  434static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
  435	.dot = { .min = 25000, .max = 350000 },
  436	.vco = { .min = 1760000, .max = 3510000 },
  437	.n = { .min = 1, .max = 3 },
  438	.m = { .min = 79, .max = 126 },
  439	.m1 = { .min = 12, .max = 22 },
  440	.m2 = { .min = 5, .max = 9 },
  441	.p = { .min = 14, .max = 42 },
  442	.p1 = { .min = 2, .max = 6 },
  443	.p2 = { .dot_limit = 225000,
  444		.p2_slow = 7, .p2_fast = 7 },
  445};
  446
  447static const struct intel_limit intel_limits_vlv = {
  448	 /*
  449	  * These are the data rate limits (measured in fast clocks)
  450	  * since those are the strictest limits we have. The fast
  451	  * clock and actual rate limits are more relaxed, so checking
  452	  * them would make no difference.
  453	  */
  454	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
  455	.vco = { .min = 4000000, .max = 6000000 },
  456	.n = { .min = 1, .max = 7 },
  457	.m1 = { .min = 2, .max = 3 },
  458	.m2 = { .min = 11, .max = 156 },
  459	.p1 = { .min = 2, .max = 3 },
  460	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
  461};
  462
  463static const struct intel_limit intel_limits_chv = {
  464	/*
  465	 * These are the data rate limits (measured in fast clocks)
  466	 * since those are the strictest limits we have.  The fast
  467	 * clock and actual rate limits are more relaxed, so checking
  468	 * them would make no difference.
  469	 */
  470	.dot = { .min = 25000 * 5, .max = 540000 * 5},
  471	.vco = { .min = 4800000, .max = 6480000 },
  472	.n = { .min = 1, .max = 1 },
  473	.m1 = { .min = 2, .max = 2 },
  474	.m2 = { .min = 24 << 22, .max = 175 << 22 },
  475	.p1 = { .min = 2, .max = 4 },
  476	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
  477};
  478
  479static const struct intel_limit intel_limits_bxt = {
  480	/* FIXME: find real dot limits */
  481	.dot = { .min = 0, .max = INT_MAX },
  482	.vco = { .min = 4800000, .max = 6700000 },
  483	.n = { .min = 1, .max = 1 },
  484	.m1 = { .min = 2, .max = 2 },
  485	/* FIXME: find real m2 limits */
  486	.m2 = { .min = 2 << 22, .max = 255 << 22 },
  487	.p1 = { .min = 2, .max = 4 },
  488	.p2 = { .p2_slow = 1, .p2_fast = 20 },
  489};
  490
  491static bool
  492needs_modeset(const struct drm_crtc_state *state)
  493{
  494	return drm_atomic_crtc_needs_modeset(state);
  495}
  496
  497/*
  498 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
  499 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
  500 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
  501 * The helpers' return value is the rate of the clock that is fed to the
  502 * display engine's pipe which can be the above fast dot clock rate or a
  503 * divided-down version of it.
  504 */
  505/* m1 is reserved as 0 in Pineview, n is a ring counter */
  506static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
  507{
  508	clock->m = clock->m2 + 2;
  509	clock->p = clock->p1 * clock->p2;
  510	if (WARN_ON(clock->n == 0 || clock->p == 0))
  511		return 0;
  512	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
  513	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  514
  515	return clock->dot;
  516}
  517
  518static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
  519{
  520	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
  521}
  522
  523static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
  524{
  525	clock->m = i9xx_dpll_compute_m(clock);
  526	clock->p = clock->p1 * clock->p2;
  527	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
  528		return 0;
  529	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
  530	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  531
  532	return clock->dot;
  533}
  534
  535static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
  536{
  537	clock->m = clock->m1 * clock->m2;
  538	clock->p = clock->p1 * clock->p2;
  539	if (WARN_ON(clock->n == 0 || clock->p == 0))
  540		return 0;
  541	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
  542	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  543
  544	return clock->dot / 5;
  545}
  546
  547int chv_calc_dpll_params(int refclk, struct dpll *clock)
  548{
  549	clock->m = clock->m1 * clock->m2;
  550	clock->p = clock->p1 * clock->p2;
  551	if (WARN_ON(clock->n == 0 || clock->p == 0))
  552		return 0;
  553	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
  554			clock->n << 22);
  555	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  556
  557	return clock->dot / 5;
  558}
  559
  560#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
  561
  562/*
  563 * Returns whether the given set of divisors are valid for a given refclk with
  564 * the given connectors.
  565 */
  566static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
  567			       const struct intel_limit *limit,
  568			       const struct dpll *clock)
  569{
  570	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
  571		INTELPllInvalid("n out of range\n");
  572	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
  573		INTELPllInvalid("p1 out of range\n");
  574	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
  575		INTELPllInvalid("m2 out of range\n");
  576	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
  577		INTELPllInvalid("m1 out of range\n");
  578
  579	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
  580	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
  581		if (clock->m1 <= clock->m2)
  582			INTELPllInvalid("m1 <= m2\n");
  583
  584	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
  585	    !IS_GEN9_LP(dev_priv)) {
  586		if (clock->p < limit->p.min || limit->p.max < clock->p)
  587			INTELPllInvalid("p out of range\n");
  588		if (clock->m < limit->m.min || limit->m.max < clock->m)
  589			INTELPllInvalid("m out of range\n");
  590	}
  591
  592	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
  593		INTELPllInvalid("vco out of range\n");
  594	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
  595	 * connector, etc., rather than just a single range.
  596	 */
  597	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
  598		INTELPllInvalid("dot out of range\n");
  599
  600	return true;
  601}
  602
  603static int
  604i9xx_select_p2_div(const struct intel_limit *limit,
  605		   const struct intel_crtc_state *crtc_state,
  606		   int target)
  607{
  608	struct drm_device *dev = crtc_state->base.crtc->dev;
  609
  610	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  611		/*
  612		 * For LVDS just rely on its current settings for dual-channel.
  613		 * We haven't figured out how to reliably set up different
  614		 * single/dual channel state, if we even can.
  615		 */
  616		if (intel_is_dual_link_lvds(dev))
  617			return limit->p2.p2_fast;
  618		else
  619			return limit->p2.p2_slow;
  620	} else {
  621		if (target < limit->p2.dot_limit)
  622			return limit->p2.p2_slow;
  623		else
  624			return limit->p2.p2_fast;
  625	}
  626}
  627
  628/*
  629 * Returns a set of divisors for the desired target clock with the given
  630 * refclk, or FALSE.  The returned values represent the clock equation:
  631 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  632 *
  633 * Target and reference clocks are specified in kHz.
  634 *
  635 * If match_clock is provided, then best_clock P divider must match the P
  636 * divider from @match_clock used for LVDS downclocking.
  637 */
  638static bool
  639i9xx_find_best_dpll(const struct intel_limit *limit,
  640		    struct intel_crtc_state *crtc_state,
  641		    int target, int refclk, struct dpll *match_clock,
  642		    struct dpll *best_clock)
  643{
  644	struct drm_device *dev = crtc_state->base.crtc->dev;
  645	struct dpll clock;
  646	int err = target;
  647
  648	memset(best_clock, 0, sizeof(*best_clock));
  649
  650	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
  651
  652	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
  653	     clock.m1++) {
  654		for (clock.m2 = limit->m2.min;
  655		     clock.m2 <= limit->m2.max; clock.m2++) {
  656			if (clock.m2 >= clock.m1)
  657				break;
  658			for (clock.n = limit->n.min;
  659			     clock.n <= limit->n.max; clock.n++) {
  660				for (clock.p1 = limit->p1.min;
  661					clock.p1 <= limit->p1.max; clock.p1++) {
  662					int this_err;
  663
  664					i9xx_calc_dpll_params(refclk, &clock);
  665					if (!intel_PLL_is_valid(to_i915(dev),
  666								limit,
  667								&clock))
  668						continue;
  669					if (match_clock &&
  670					    clock.p != match_clock->p)
  671						continue;
  672
  673					this_err = abs(clock.dot - target);
  674					if (this_err < err) {
  675						*best_clock = clock;
  676						err = this_err;
  677					}
  678				}
  679			}
  680		}
  681	}
  682
  683	return (err != target);
  684}
  685
  686/*
  687 * Returns a set of divisors for the desired target clock with the given
  688 * refclk, or FALSE.  The returned values represent the clock equation:
  689 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  690 *
  691 * Target and reference clocks are specified in kHz.
  692 *
  693 * If match_clock is provided, then best_clock P divider must match the P
  694 * divider from @match_clock used for LVDS downclocking.
  695 */
  696static bool
  697pnv_find_best_dpll(const struct intel_limit *limit,
  698		   struct intel_crtc_state *crtc_state,
  699		   int target, int refclk, struct dpll *match_clock,
  700		   struct dpll *best_clock)
  701{
  702	struct drm_device *dev = crtc_state->base.crtc->dev;
  703	struct dpll clock;
  704	int err = target;
  705
  706	memset(best_clock, 0, sizeof(*best_clock));
  707
  708	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
  709
  710	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
  711	     clock.m1++) {
  712		for (clock.m2 = limit->m2.min;
  713		     clock.m2 <= limit->m2.max; clock.m2++) {
  714			for (clock.n = limit->n.min;
  715			     clock.n <= limit->n.max; clock.n++) {
  716				for (clock.p1 = limit->p1.min;
  717					clock.p1 <= limit->p1.max; clock.p1++) {
  718					int this_err;
  719
  720					pnv_calc_dpll_params(refclk, &clock);
  721					if (!intel_PLL_is_valid(to_i915(dev),
  722								limit,
  723								&clock))
  724						continue;
  725					if (match_clock &&
  726					    clock.p != match_clock->p)
  727						continue;
  728
  729					this_err = abs(clock.dot - target);
  730					if (this_err < err) {
  731						*best_clock = clock;
  732						err = this_err;
  733					}
  734				}
  735			}
  736		}
  737	}
  738
  739	return (err != target);
  740}
  741
  742/*
  743 * Returns a set of divisors for the desired target clock with the given
  744 * refclk, or FALSE.  The returned values represent the clock equation:
  745 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  746 *
  747 * Target and reference clocks are specified in kHz.
  748 *
  749 * If match_clock is provided, then best_clock P divider must match the P
  750 * divider from @match_clock used for LVDS downclocking.
  751 */
  752static bool
  753g4x_find_best_dpll(const struct intel_limit *limit,
  754		   struct intel_crtc_state *crtc_state,
  755		   int target, int refclk, struct dpll *match_clock,
  756		   struct dpll *best_clock)
  757{
  758	struct drm_device *dev = crtc_state->base.crtc->dev;
  759	struct dpll clock;
  760	int max_n;
  761	bool found = false;
  762	/* approximately equals target * 0.00585 */
  763	int err_most = (target >> 8) + (target >> 9);
  764
  765	memset(best_clock, 0, sizeof(*best_clock));
  766
  767	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
  768
  769	max_n = limit->n.max;
  770	/* based on hardware requirement, prefer smaller n to precision */
  771	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
  772		/* based on hardware requirement, prefere larger m1,m2 */
  773		for (clock.m1 = limit->m1.max;
  774		     clock.m1 >= limit->m1.min; clock.m1--) {
  775			for (clock.m2 = limit->m2.max;
  776			     clock.m2 >= limit->m2.min; clock.m2--) {
  777				for (clock.p1 = limit->p1.max;
  778				     clock.p1 >= limit->p1.min; clock.p1--) {
  779					int this_err;
  780
  781					i9xx_calc_dpll_params(refclk, &clock);
  782					if (!intel_PLL_is_valid(to_i915(dev),
  783								limit,
  784								&clock))
  785						continue;
  786
  787					this_err = abs(clock.dot - target);
  788					if (this_err < err_most) {
  789						*best_clock = clock;
  790						err_most = this_err;
  791						max_n = clock.n;
  792						found = true;
  793					}
  794				}
  795			}
  796		}
  797	}
  798	return found;
  799}
  800
  801/*
  802 * Check if the calculated PLL configuration is more optimal compared to the
  803 * best configuration and error found so far. Return the calculated error.
  804 */
  805static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
  806			       const struct dpll *calculated_clock,
  807			       const struct dpll *best_clock,
  808			       unsigned int best_error_ppm,
  809			       unsigned int *error_ppm)
  810{
  811	/*
  812	 * For CHV ignore the error and consider only the P value.
  813	 * Prefer a bigger P value based on HW requirements.
  814	 */
  815	if (IS_CHERRYVIEW(to_i915(dev))) {
  816		*error_ppm = 0;
  817
  818		return calculated_clock->p > best_clock->p;
  819	}
  820
  821	if (WARN_ON_ONCE(!target_freq))
  822		return false;
  823
  824	*error_ppm = div_u64(1000000ULL *
  825				abs(target_freq - calculated_clock->dot),
  826			     target_freq);
  827	/*
  828	 * Prefer a better P value over a better (smaller) error if the error
  829	 * is small. Ensure this preference for future configurations too by
  830	 * setting the error to 0.
  831	 */
  832	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
  833		*error_ppm = 0;
  834
  835		return true;
  836	}
  837
  838	return *error_ppm + 10 < best_error_ppm;
  839}
  840
  841/*
  842 * Returns a set of divisors for the desired target clock with the given
  843 * refclk, or FALSE.  The returned values represent the clock equation:
  844 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  845 */
  846static bool
  847vlv_find_best_dpll(const struct intel_limit *limit,
  848		   struct intel_crtc_state *crtc_state,
  849		   int target, int refclk, struct dpll *match_clock,
  850		   struct dpll *best_clock)
  851{
  852	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  853	struct drm_device *dev = crtc->base.dev;
  854	struct dpll clock;
  855	unsigned int bestppm = 1000000;
  856	/* min update 19.2 MHz */
  857	int max_n = min(limit->n.max, refclk / 19200);
  858	bool found = false;
  859
  860	target *= 5; /* fast clock */
  861
  862	memset(best_clock, 0, sizeof(*best_clock));
  863
  864	/* based on hardware requirement, prefer smaller n to precision */
  865	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
  866		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
  867			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
  868			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
  869				clock.p = clock.p1 * clock.p2;
  870				/* based on hardware requirement, prefer bigger m1,m2 values */
  871				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
  872					unsigned int ppm;
  873
  874					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
  875								     refclk * clock.m1);
  876
  877					vlv_calc_dpll_params(refclk, &clock);
  878
  879					if (!intel_PLL_is_valid(to_i915(dev),
  880								limit,
  881								&clock))
  882						continue;
  883
  884					if (!vlv_PLL_is_optimal(dev, target,
  885								&clock,
  886								best_clock,
  887								bestppm, &ppm))
  888						continue;
  889
  890					*best_clock = clock;
  891					bestppm = ppm;
  892					found = true;
  893				}
  894			}
  895		}
  896	}
  897
  898	return found;
  899}
  900
  901/*
  902 * Returns a set of divisors for the desired target clock with the given
  903 * refclk, or FALSE.  The returned values represent the clock equation:
  904 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  905 */
  906static bool
  907chv_find_best_dpll(const struct intel_limit *limit,
  908		   struct intel_crtc_state *crtc_state,
  909		   int target, int refclk, struct dpll *match_clock,
  910		   struct dpll *best_clock)
  911{
  912	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  913	struct drm_device *dev = crtc->base.dev;
  914	unsigned int best_error_ppm;
  915	struct dpll clock;
  916	uint64_t m2;
  917	int found = false;
  918
  919	memset(best_clock, 0, sizeof(*best_clock));
  920	best_error_ppm = 1000000;
  921
  922	/*
  923	 * Based on hardware doc, the n always set to 1, and m1 always
  924	 * set to 2.  If requires to support 200Mhz refclk, we need to
  925	 * revisit this because n may not 1 anymore.
  926	 */
  927	clock.n = 1, clock.m1 = 2;
  928	target *= 5;	/* fast clock */
  929
  930	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
  931		for (clock.p2 = limit->p2.p2_fast;
  932				clock.p2 >= limit->p2.p2_slow;
  933				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
  934			unsigned int error_ppm;
  935
  936			clock.p = clock.p1 * clock.p2;
  937
  938			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
  939					clock.n) << 22, refclk * clock.m1);
  940
  941			if (m2 > INT_MAX/clock.m1)
  942				continue;
  943
  944			clock.m2 = m2;
  945
  946			chv_calc_dpll_params(refclk, &clock);
  947
  948			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
  949				continue;
  950
  951			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
  952						best_error_ppm, &error_ppm))
  953				continue;
  954
  955			*best_clock = clock;
  956			best_error_ppm = error_ppm;
  957			found = true;
  958		}
  959	}
  960
  961	return found;
  962}
  963
  964bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
  965			struct dpll *best_clock)
  966{
  967	int refclk = 100000;
  968	const struct intel_limit *limit = &intel_limits_bxt;
  969
  970	return chv_find_best_dpll(limit, crtc_state,
  971				  target_clock, refclk, NULL, best_clock);
  972}
  973
  974bool intel_crtc_active(struct intel_crtc *crtc)
  975{
  976	/* Be paranoid as we can arrive here with only partial
  977	 * state retrieved from the hardware during setup.
  978	 *
  979	 * We can ditch the adjusted_mode.crtc_clock check as soon
  980	 * as Haswell has gained clock readout/fastboot support.
  981	 *
  982	 * We can ditch the crtc->primary->fb check as soon as we can
  983	 * properly reconstruct framebuffers.
  984	 *
  985	 * FIXME: The intel_crtc->active here should be switched to
  986	 * crtc->state->active once we have proper CRTC states wired up
  987	 * for atomic.
  988	 */
  989	return crtc->active && crtc->base.primary->state->fb &&
  990		crtc->config->base.adjusted_mode.crtc_clock;
  991}
  992
  993enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
  994					     enum pipe pipe)
  995{
  996	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  997
  998	return crtc->config->cpu_transcoder;
  999}
 1000
 1001static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
 1002				    enum pipe pipe)
 1003{
 1004	i915_reg_t reg = PIPEDSL(pipe);
 1005	u32 line1, line2;
 1006	u32 line_mask;
 1007
 1008	if (IS_GEN2(dev_priv))
 1009		line_mask = DSL_LINEMASK_GEN2;
 1010	else
 1011		line_mask = DSL_LINEMASK_GEN3;
 1012
 1013	line1 = I915_READ(reg) & line_mask;
 1014	msleep(5);
 1015	line2 = I915_READ(reg) & line_mask;
 1016
 1017	return line1 != line2;
 1018}
 1019
/*
 * Poll (up to 100 ms) until the pipe's scanline counter is moving
 * (@state == true) or has stopped (@state == false); log an error on
 * timeout but carry on regardless.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
 1030
/* Wait until the pipe's scanline counter has stopped moving. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
 1035
/* Wait until the pipe's scanline counter has started moving. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
 1040
/*
 * Wait for the pipe to fully shut down after its enable bit was cleared.
 *
 * On gen4+ the pipe reports its state via the I965_PIPECONF_ACTIVE bit,
 * which we poll (up to 100 ms). Older hardware has no such status bit, so
 * we instead wait for the scanline counter to stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
 1060
 1061/* Only for pre-ILK configs */
 1062void assert_pll(struct drm_i915_private *dev_priv,
 1063		enum pipe pipe, bool state)
 1064{
 1065	u32 val;
 1066	bool cur_state;
 1067
 1068	val = I915_READ(DPLL(pipe));
 1069	cur_state = !!(val & DPLL_VCO_ENABLE);
 1070	I915_STATE_WARN(cur_state != state,
 1071	     "PLL state assertion failure (expected %s, current %s)\n",
 1072			onoff(state), onoff(cur_state));
 1073}
 1074
 1075/* XXX: the dsi pll is shared between MIPI DSI ports */
 1076void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
 1077{
 1078	u32 val;
 1079	bool cur_state;
 1080
 1081	mutex_lock(&dev_priv->sb_lock);
 1082	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
 1083	mutex_unlock(&dev_priv->sb_lock);
 1084
 1085	cur_state = val & DSI_PLL_VCO_EN;
 1086	I915_STATE_WARN(cur_state != state,
 1087	     "DSI PLL state assertion failure (expected %s, current %s)\n",
 1088			onoff(state), onoff(cur_state));
 1089}
 1090
 1091static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 1092			  enum pipe pipe, bool state)
 1093{
 1094	bool cur_state;
 1095	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 1096								      pipe);
 1097
 1098	if (HAS_DDI(dev_priv)) {
 1099		/* DDI does not have a specific FDI_TX register */
 1100		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 1101		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
 1102	} else {
 1103		u32 val = I915_READ(FDI_TX_CTL(pipe));
 1104		cur_state = !!(val & FDI_TX_ENABLE);
 1105	}
 1106	I915_STATE_WARN(cur_state != state,
 1107	     "FDI TX state assertion failure (expected %s, current %s)\n",
 1108			onoff(state), onoff(cur_state));
 1109}
 1110#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
 1111#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
 1112
 1113static void assert_fdi_rx(struct drm_i915_private *dev_priv,
 1114			  enum pipe pipe, bool state)
 1115{
 1116	u32 val;
 1117	bool cur_state;
 1118
 1119	val = I915_READ(FDI_RX_CTL(pipe));
 1120	cur_state = !!(val & FDI_RX_ENABLE);
 1121	I915_STATE_WARN(cur_state != state,
 1122	     "FDI RX state assertion failure (expected %s, current %s)\n",
 1123			onoff(state), onoff(cur_state));
 1124}
 1125#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
 1126#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
 1127
 1128static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
 1129				      enum pipe pipe)
 1130{
 1131	u32 val;
 1132
 1133	/* ILK FDI PLL is always enabled */
 1134	if (IS_GEN5(dev_priv))
 1135		return;
 1136
 1137	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
 1138	if (HAS_DDI(dev_priv))
 1139		return;
 1140
 1141	val = I915_READ(FDI_TX_CTL(pipe));
 1142	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
 1143}
 1144
 1145void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 1146		       enum pipe pipe, bool state)
 1147{
 1148	u32 val;
 1149	bool cur_state;
 1150
 1151	val = I915_READ(FDI_RX_CTL(pipe));
 1152	cur_state = !!(val & FDI_RX_PLL_ENABLE);
 1153	I915_STATE_WARN(cur_state != state,
 1154	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
 1155			onoff(state), onoff(cur_state));
 1156}
 1157
/*
 * Warn if the panel power sequencer registers that protect @pipe are
 * write-locked (or the panel is powered on without the regs unlocked).
 *
 * The panel pipe is determined per platform: from the PCH port-select and
 * LVDS registers on PCH-split parts, directly from @pipe on VLV/CHV, and
 * from the LVDS register otherwise. Not applicable on DDI platforms.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Only the LVDS port-select case is decoded here. */
		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL(0);
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	/* Regs count as unlocked when the panel is off or the unlock
	 * magic is present in the PP control register. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
 1197
/*
 * Warn if the pipe enable state disagrees with the expected @state.
 *
 * The PIPECONF read is only valid while the transcoder's power domain is
 * up; if the domain is off, the pipe is reported as disabled without
 * touching the register.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		/* Drop the reference taken by get_if_enabled() above. */
		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
 1224
 1225static void assert_plane(struct intel_plane *plane, bool state)
 1226{
 1227	bool cur_state = plane->get_hw_state(plane);
 1228
 1229	I915_STATE_WARN(cur_state != state,
 1230			"%s assertion failure (expected %s, current %s)\n",
 1231			plane->base.name, onoff(state), onoff(cur_state));
 1232}
 1233
 1234#define assert_plane_enabled(p) assert_plane(p, true)
 1235#define assert_plane_disabled(p) assert_plane(p, false)
 1236
/* Warn if any plane attached to @crtc is still enabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
 1245
/*
 * Warn if vblank processing is not disabled on @crtc.
 *
 * drm_crtc_vblank_get() returns 0 on success; succeeding here trips the
 * warning, and we must then drop the reference we just acquired.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
 1251
 1252void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
 1253				    enum pipe pipe)
 1254{
 1255	u32 val;
 1256	bool enabled;
 1257
 1258	val = I915_READ(PCH_TRANSCONF(pipe));
 1259	enabled = !!(val & TRANS_ENABLE);
 1260	I915_STATE_WARN(enabled,
 1261	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
 1262	     pipe_name(pipe));
 1263}
 1264
 1265static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
 1266			    enum pipe pipe, u32 port_sel, u32 val)
 1267{
 1268	if ((val & DP_PORT_EN) == 0)
 1269		return false;
 1270
 1271	if (HAS_PCH_CPT(dev_priv)) {
 1272		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
 1273		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
 1274			return false;
 1275	} else if (IS_CHERRYVIEW(dev_priv)) {
 1276		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
 1277			return false;
 1278	} else {
 1279		if ((val & DP_PIPE_MASK) != (pipe << 30))
 1280			return false;
 1281	}
 1282	return true;
 1283}
 1284
 1285static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
 1286			      enum pipe pipe, u32 val)
 1287{
 1288	if ((val & SDVO_ENABLE) == 0)
 1289		return false;
 1290
 1291	if (HAS_PCH_CPT(dev_priv)) {
 1292		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
 1293			return false;
 1294	} else if (IS_CHERRYVIEW(dev_priv)) {
 1295		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
 1296			return false;
 1297	} else {
 1298		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
 1299			return false;
 1300	}
 1301	return true;
 1302}
 1303
 1304static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
 1305			      enum pipe pipe, u32 val)
 1306{
 1307	if ((val & LVDS_PORT_EN) == 0)
 1308		return false;
 1309
 1310	if (HAS_PCH_CPT(dev_priv)) {
 1311		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
 1312			return false;
 1313	} else {
 1314		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
 1315			return false;
 1316	}
 1317	return true;
 1318}
 1319
 1320static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
 1321			      enum pipe pipe, u32 val)
 1322{
 1323	if ((val & ADPA_DAC_ENABLE) == 0)
 1324		return false;
 1325	if (HAS_PCH_CPT(dev_priv)) {
 1326		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
 1327			return false;
 1328	} else {
 1329		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
 1330			return false;
 1331	}
 1332	return true;
 1333}
 1334
/*
 * Warn if the PCH DP port at @reg is enabled on @pipe's transcoder, and
 * separately warn if a disabled IBX DP port was left pointing at
 * transcoder B (which confuses later port enabling on IBX).
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
 1348
/*
 * Warn if the PCH HDMI port at @reg is enabled on @pipe's transcoder, and
 * separately warn if a disabled IBX HDMI port was left pointing at
 * transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
 1361
/*
 * Warn if any PCH-attached output (DP B/C/D, VGA, LVDS, HDMI B/C/D) is
 * still enabled on @pipe's transcoder. Used before disabling the PCH
 * transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
 1385
/*
 * Write the precomputed DPLL value for @pipe and wait for the PLL to
 * lock (1 ms timeout), logging an error if it never does.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	/* Let the clocks stabilize before polling for lock. */
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
 1403
/*
 * Enable the VLV DPLL for @crtc per @pipe_config: the VCO is only spun
 * up when the precomputed state asks for it, but DPLL_MD is always
 * programmed.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
 1421
 1422
/*
 * Enable the CHV DPLL for @pipe: first enable the 10-bit clock to the
 * display controller via sideband, then enable the PLL and wait (1 ms)
 * for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	/* DPIO accesses require the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
 1454
/*
 * Enable the CHV DPLL for @crtc per @pipe_config. For pipes B/C the
 * DPLL_MD value must be routed through DPLLBMD using chicken bits (see
 * WaPixelRepeatModeFixForC0 below); pipe A can program its MD register
 * directly.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* Remember what we wrote; the register can't be read back
		 * meaningfully through this path. */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
 1491
 1492static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
 1493{
 1494	struct intel_crtc *crtc;
 1495	int count = 0;
 1496
 1497	for_each_intel_crtc(&dev_priv->drm, crtc) {
 1498		count += crtc->base.state->active &&
 1499			intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
 1500	}
 1501
 1502	return count;
 1503}
 1504
/*
 * Enable the gen2-gen4 DPLL for @crtc per @crtc_state, following the
 * hardware's required write sequence (VGA-mode clear first, then the real
 * value, then repeated rewrites for the pixel multiplier / warmup).
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* gen4+ has a separate register for the pixel multiplier. */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
 1564
/*
 * Disable the gen2-gen4 DPLL for @crtc, first tearing down the DVO 2x
 * clock on both PLLs if this was the last DVO pipe. On 830 the PLLs must
 * stay running, so only the DVO clock is touched there.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
 1590
/*
 * Disable the VLV DPLL for @pipe, leaving the reference clock (and, for
 * pipes other than A, the CRI clock needed by the DP/eDP PHY) running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
 1606
/*
 * Disable the CHV DPLL for @pipe: turn off the VCO (keeping reference and
 * CRI clocks as needed), then gate the 10-bit clock to the display
 * controller via sideband.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* DPIO accesses require the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
 1632
/*
 * Wait (up to 1 s) for the PHY to report @dport ready with the given
 * @expected_mask. Which register and lane-mask to poll depends on the
 * port; port C's status field sits 4 bits above port B's in DPLL(0),
 * hence the shift.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
 1665
 1666static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 1667					   enum pipe pipe)
 1668{
 1669	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
 1670								pipe);
 1671	i915_reg_t reg;
 1672	uint32_t val, pipeconf_val;
 1673
 1674	/* Make sure PCH DPLL is enabled */
 1675	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
 1676
 1677	/* FDI must be feeding us bits for PCH ports */
 1678	assert_fdi_tx_enabled(dev_priv, pipe);
 1679	assert_fdi_rx_enabled(dev_priv, pipe);
 1680
 1681	if (HAS_PCH_CPT(dev_priv)) {
 1682		/* Workaround: Set the timing override bit before enabling the
 1683		 * pch transcoder. */
 1684		reg = TRANS_CHICKEN2(pipe);
 1685		val = I915_READ(reg);
 1686		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
 1687		I915_WRITE(reg, val);
 1688	}
 1689
 1690	reg = PCH_TRANSCONF(pipe);
 1691	val = I915_READ(reg);
 1692	pipeconf_val = I915_READ(PIPECONF(pipe));
 1693
 1694	if (HAS_PCH_IBX(dev_priv)) {
 1695		/*
 1696		 * Make the BPC in transcoder be consistent with
 1697		 * that in pipeconf reg. For HDMI we must use 8bpc
 1698		 * here for both 8bpc and 12bpc.
 1699		 */
 1700		val &= ~PIPECONF_BPC_MASK;
 1701		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
 1702			val |= PIPECONF_8BPC;
 1703		else
 1704			val |= pipeconf_val & PIPECONF_BPC_MASK;
 1705	}
 1706
 1707	val &= ~TRANS_INTERLACE_MASK;
 1708	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
 1709		if (HAS_PCH_IBX(dev_priv) &&
 1710		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
 1711			val |= TRANS_LEGACY_INTERLACED_ILK;
 1712		else
 1713			val |= TRANS_INTERLACED;
 1714	else
 1715		val |= TRANS_PROGRESSIVE;
 1716
 1717	I915_WRITE(reg, val | TRANS_ENABLE);
 1718	if (intel_wait_for_register(dev_priv,
 1719				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
 1720				    100))
 1721		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
 1722}
 1723
/*
 * Enable the single LPT PCH transcoder (always fed from pipe A's FDI RX),
 * copying the interlace setting from @cpu_transcoder's PIPECONF, and wait
 * (up to 100 ms) for the enabled state.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
 1755
/*
 * Disable the PCH transcoder for @pipe and wait (up to 50 ms) for it to
 * report off; on CPT also clear the timing-override chicken bit that was
 * set when enabling.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
 1787
/*
 * Disable the single LPT PCH transcoder, wait (up to 50 ms) for it to
 * report off, and clear the timing-override chicken bit set at enable.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
 1806
 1807enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
 1808{
 1809	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 1810
 1811	if (HAS_PCH_LPT(dev_priv))
 1812		return PIPE_A;
 1813	else
 1814		return crtc->pipe;
 1815}
 1816
/*
 * Enable the pipe for @new_crtc_state after sanity-checking that the
 * clock sources feeding it (DSI/DPLL on GMCH, FDI PLLs for PCH encoders)
 * are up and that all planes are still off. No-op (with a warning except
 * on 830) if the pipe is already enabled.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (dev_priv->drm.max_vblank_count == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
 1872
/*
 * Disable the pipe for @old_crtc_state (planes must already be off), then
 * wait for the pipe to actually stop. On 830 the enable bit is left set
 * because both pipes must stay running there; only double-wide is
 * cleared in that case.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
 1910
 1911static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
 1912{
 1913	return IS_GEN2(dev_priv) ? 2048 : 4096;
 1914}
 1915
/*
 * Return the width of one tile row in bytes for @plane of @fb, as
 * determined by the fb modifier (and cpp for Yf tiling). Linear is
 * treated as "one pixel wide" tiles (returns cpp).
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		/* gen2 X tiles are 128 bytes wide, later gens 512 bytes */
		if (IS_GEN2(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		/* plane 1 is the CCS aux surface: always 128 byte wide tiles */
		if (plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* plane 1 is the CCS aux surface: always 128 byte wide tiles */
		if (plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
 1963
 1964static unsigned int
 1965intel_tile_height(const struct drm_framebuffer *fb, int plane)
 1966{
 1967	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
 1968		return 1;
 1969	else
 1970		return intel_tile_size(to_i915(fb->dev)) /
 1971			intel_tile_width_bytes(fb, plane);
 1972}
 1973
 1974/* Return the tile dimensions in pixel units */
 1975static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
 1976			    unsigned int *tile_width,
 1977			    unsigned int *tile_height)
 1978{
 1979	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
 1980	unsigned int cpp = fb->format->cpp[plane];
 1981
 1982	*tile_width = tile_width_bytes / cpp;
 1983	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
 1984}
 1985
 1986unsigned int
 1987intel_fb_align_height(const struct drm_framebuffer *fb,
 1988		      int plane, unsigned int height)
 1989{
 1990	unsigned int tile_height = intel_tile_height(fb, plane);
 1991
 1992	return ALIGN(height, tile_height);
 1993}
 1994
 1995unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
 1996{
 1997	unsigned int size = 0;
 1998	int i;
 1999
 2000	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
 2001		size += rot_info->plane[i].width * rot_info->plane[i].height;
 2002
 2003	return size;
 2004}
 2005
 2006static void
 2007intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
 2008			const struct drm_framebuffer *fb,
 2009			unsigned int rotation)
 2010{
 2011	view->type = I915_GGTT_VIEW_NORMAL;
 2012	if (drm_rotation_90_or_270(rotation)) {
 2013		view->type = I915_GGTT_VIEW_ROTATED;
 2014		view->rotated = to_intel_framebuffer(fb)->rot_info;
 2015	}
 2016}
 2017
 2018static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
 2019{
 2020	if (IS_I830(dev_priv))
 2021		return 16 * 1024;
 2022	else if (IS_I85X(dev_priv))
 2023		return 256;
 2024	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
 2025		return 32;
 2026	else
 2027		return 4 * 1024;
 2028}
 2029
 2030static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
 2031{
 2032	if (INTEL_GEN(dev_priv) >= 9)
 2033		return 256 * 1024;
 2034	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
 2035		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 2036		return 128 * 1024;
 2037	else if (INTEL_GEN(dev_priv) >= 4)
 2038		return 4 * 1024;
 2039	else
 2040		return 0;
 2041}
 2042
/*
 * Required GGTT alignment for @plane of @fb based on the fb modifier.
 * Returns 0 when no particular alignment is needed.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		/* All Y-style tiling wants 1MiB alignment */
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
 2069
 2070static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
 2071{
 2072	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
 2073	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 2074
 2075	return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
 2076}
 2077
/*
 * Pin the backing object of @fb into the GGTT for scanout (with the view
 * matching @rotation) and, when @uses_fence allows it, attach a fence.
 * On success returns the pinned vma with an extra reference taken and may
 * set PLANE_HAS_FENCE in @out_flags; on failure returns an ERR_PTR.
 * Caller must hold struct_mutex (asserted below).
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, &view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Pre-gen4 needs the fence: fencing failure is fatal. */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Keep a reference for the caller; dropped in intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv);
	return vma;
}
 2171
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if @flags says one
 * was installed), unpin from the display plane, and drop the vma reference.
 * Caller must hold struct_mutex (asserted below).
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
 2181
 2182static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
 2183			  unsigned int rotation)
 2184{
 2185	if (drm_rotation_90_or_270(rotation))
 2186		return to_intel_framebuffer(fb)->rotated[plane].pitch;
 2187	else
 2188		return fb->pitches[plane];
 2189}
 2190
 2191/*
 2192 * Convert the x/y offsets into a linear offset.
 2193 * Only valid with 0/180 degree rotation, which is fine since linear
 2194 * offset is only used with linear buffers on pre-hsw and tiled buffers
 2195 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
 2196 */
 2197u32 intel_fb_xy_to_linear(int x, int y,
 2198			  const struct intel_plane_state *state,
 2199			  int plane)
 2200{
 2201	const struct drm_framebuffer *fb = state->base.fb;
 2202	unsigned int cpp = fb->format->cpp[plane];
 2203	unsigned int pitch = fb->pitches[plane];
 2204
 2205	return y * pitch + x * cpp;
 2206}
 2207
 2208/*
 2209 * Add the x/y offsets derived from fb->offsets[] to the user
 2210 * specified plane src x/y offsets. The resulting x/y offsets
 2211 * specify the start of scanout from the beginning of the gtt mapping.
 2212 */
 2213void intel_add_fb_offsets(int *x, int *y,
 2214			  const struct intel_plane_state *state,
 2215			  int plane)
 2216
 2217{
 2218	const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
 2219	unsigned int rotation = state->base.rotation;
 2220
 2221	if (drm_rotation_90_or_270(rotation)) {
 2222		*x += intel_fb->rotated[plane].x;
 2223		*y += intel_fb->rotated[plane].y;
 2224	} else {
 2225		*x += intel_fb->normal[plane].x;
 2226		*y += intel_fb->normal[plane].y;
 2227	}
 2228}
 2229
 2230static u32 __intel_adjust_tile_offset(int *x, int *y,
 2231				      unsigned int tile_width,
 2232				      unsigned int tile_height,
 2233				      unsigned int tile_size,
 2234				      unsigned int pitch_tiles,
 2235				      u32 old_offset,
 2236				      u32 new_offset)
 2237{
 2238	unsigned int pitch_pixels = pitch_tiles * tile_width;
 2239	unsigned int tiles;
 2240
 2241	WARN_ON(old_offset & (tile_size - 1));
 2242	WARN_ON(new_offset & (tile_size - 1));
 2243	WARN_ON(new_offset > old_offset);
 2244
 2245	tiles = (old_offset - new_offset) / tile_size;
 2246
 2247	*y += tiles / pitch_tiles * tile_height;
 2248	*x += tiles % pitch_tiles * tile_width;
 2249
 2250	/* minimize x in case it got needlessly big */
 2251	*y += *x / pitch_pixels * tile_height;
 2252	*x %= pitch_pixels;
 2253
 2254	return new_offset;
 2255}
 2256
/*
 * Rebase the surface offset from @old_offset to @new_offset, folding the
 * difference into *x/*y. Handles both tiled (per-tile math) and linear
 * (per-byte math) layouts; returns @new_offset.
 */
static u32 _intel_adjust_tile_offset(int *x, int *y,
				     const struct drm_framebuffer *fb, int plane,
				     unsigned int rotation,
				     u32 old_offset, u32 new_offset)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[plane];
	unsigned int pitch = intel_fb_pitch(fb, plane, rotation);

	WARN_ON(new_offset > old_offset);

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile rows, dims swap */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
					   tile_size, pitch_tiles,
					   old_offset, new_offset);
	} else {
		/* linear: work in bytes, then split back into row/column */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
 2294
 2295/*
 2296 * Adjust the tile offset by moving the difference into
 2297 * the x/y offsets.
 2298 */
 2299static u32 intel_adjust_tile_offset(int *x, int *y,
 2300				    const struct intel_plane_state *state, int plane,
 2301				    u32 old_offset, u32 new_offset)
 2302{
 2303	return _intel_adjust_tile_offset(x, y, state->base.fb, plane,
 2304					 state->base.rotation,
 2305					 old_offset, new_offset);
 2306}
 2307
 2308/*
 2309 * Computes the linear offset to the base tile and adjusts
 2310 * x, y. bytes per pixel is assumed to be a power-of-two.
 2311 *
 2312 * In the 90/270 rotated case, x and y are assumed
 2313 * to be already rotated to match the rotated GTT view, and
 2314 * pitch is the tile_height aligned framebuffer height.
 2315 *
 2316 * This function is used when computing the derived information
 2317 * under intel_framebuffer, so using any of that information
 2318 * here is not allowed. Anything under drm_framebuffer can be
 2319 * used. This is why the user has to pass in the pitch since it
 2320 * is specified in the rotated orientation.
 2321 */
static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
				      int *x, int *y,
				      const struct drm_framebuffer *fb, int plane,
				      unsigned int pitch,
				      unsigned int rotation,
				      u32 alignment)
{
	uint64_t fb_modifier = fb->modifier;
	unsigned int cpp = fb->format->cpp[plane];
	u32 offset, offset_aligned;

	/* Turn the alignment into a mask (alignment is a power of two). */
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile rows, dims swap */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles plus an intra-tile remainder. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Push the sub-alignment residue back into x/y. */
		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
					   tile_size, pitch_tiles,
					   offset, offset_aligned);
	} else {
		/* linear: byte offset, with the residue split back into x/y */
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
 2372
 2373u32 intel_compute_tile_offset(int *x, int *y,
 2374			      const struct intel_plane_state *state,
 2375			      int plane)
 2376{
 2377	struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
 2378	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
 2379	const struct drm_framebuffer *fb = state->base.fb;
 2380	unsigned int rotation = state->base.rotation;
 2381	int pitch = intel_fb_pitch(fb, plane, rotation);
 2382	u32 alignment;
 2383
 2384	if (intel_plane->id == PLANE_CURSOR)
 2385		alignment = intel_cursor_alignment(dev_priv);
 2386	else
 2387		alignment = intel_surf_alignment(fb, plane);
 2388
 2389	return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
 2390					  rotation, alignment);
 2391}
 2392
 2393/* Convert the fb->offset[] into x/y offsets */
 2394static int intel_fb_offset_to_xy(int *x, int *y,
 2395				 const struct drm_framebuffer *fb, int plane)
 2396{
 2397	struct drm_i915_private *dev_priv = to_i915(fb->dev);
 2398
 2399	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
 2400	    fb->offsets[plane] % intel_tile_size(dev_priv))
 2401		return -EINVAL;
 2402
 2403	*x = 0;
 2404	*y = 0;
 2405
 2406	_intel_adjust_tile_offset(x, y,
 2407				  fb, plane, DRM_MODE_ROTATE_0,
 2408				  fb->offsets[plane], 0);
 2409
 2410	return 0;
 2411}
 2412
 2413static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
 2414{
 2415	switch (fb_modifier) {
 2416	case I915_FORMAT_MOD_X_TILED:
 2417		return I915_TILING_X;
 2418	case I915_FORMAT_MOD_Y_TILED:
 2419	case I915_FORMAT_MOD_Y_TILED_CCS:
 2420		return I915_TILING_Y;
 2421	default:
 2422		return I915_TILING_NONE;
 2423	}
 2424}
 2425
 2426/*
 2427 * From the Sky Lake PRM:
 2428 * "The Color Control Surface (CCS) contains the compression status of
 2429 *  the cache-line pairs. The compression state of the cache-line pair
 2430 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 2431 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 2432 *  cache-line-pairs. CCS is always Y tiled."
 2433 *
 2434 * Since cache line pairs refers to horizontally adjacent cache lines,
 2435 * each cache line in the CCS corresponds to an area of 32x16 cache
 2436 * lines on the main surface. Since each pixel is 4 bytes, this gives
 2437 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 2438 * main surface.
 2439 */
/* Plane 0 is the 4 cpp main surface, plane 1 the 1 cpp CCS (8x16 subsampling). */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
 2446
 2447static const struct drm_format_info *
 2448lookup_format_info(const struct drm_format_info formats[],
 2449		   int num_formats, u32 format)
 2450{
 2451	int i;
 2452
 2453	for (i = 0; i < num_formats; i++) {
 2454		if (formats[i].format == format)
 2455			return &formats[i];
 2456	}
 2457
 2458	return NULL;
 2459}
 2460
 2461static const struct drm_format_info *
 2462intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
 2463{
 2464	switch (cmd->modifier[0]) {
 2465	case I915_FORMAT_MOD_Y_TILED_CCS:
 2466	case I915_FORMAT_MOD_Yf_TILED_CCS:
 2467		return lookup_format_info(ccs_formats,
 2468					  ARRAY_SIZE(ccs_formats),
 2469					  cmd->pixel_format);
 2470	default:
 2471		return NULL;
 2472	}
 2473}
 2474
/*
 * Fill in the derived layout information for @fb: per-plane normal-view
 * x/y offsets, the rotated-view geometry (rot_info/rotated[]), and a
 * sanity check that the layout fits within the backing object.
 * Returns 0 on success or -EINVAL for inconsistent layouts.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Turn fb->offsets[i] into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			/* CCS tile dims scaled up to main-surface pixels */
			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Tile-aligned base of this plane, in tiles. */
		offset = _intel_compute_tile_offset(dev_priv, &x, &y,
						    fb, i, fb->pitches[i],
						    DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			/* Geometry of this plane in the rotated GTT view. */
			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			__intel_adjust_tile_offset(&x, &y,
						   tile_width, tile_height,
						   tile_size, pitch_tiles,
						   gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* linear: tiles needed up to the last byte of the plane */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (max_size * tile_size > intel_fb->obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
			      max_size * tile_size, intel_fb->obj->base.size);
		return -EINVAL;
	}

	return 0;
}
 2638
/*
 * Translate a DISPPLANE pixel format field to a DRM fourcc. Note the
 * default label sits on the BGRX888 case, so unknown hardware values
 * fall back to DRM_FORMAT_XRGB8888.
 */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}
 2659
/*
 * Translate a skylake PLANE_CTL format field to a DRM fourcc, taking the
 * RGB/BGR ordering and alpha bits into account. The default label sits on
 * the XRGB_8888 case, so unknown values fall back to an 8888 variant.
 */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}
 2685
/*
 * Try to wrap the firmware-programmed scanout memory (described by
 * @plane_config) in a stolen-memory GEM object and initialize the
 * framebuffer around it. Returns true on success, false when the config
 * is unusable (empty, too big for stolen, or allocation/init failed).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	/* Propagate the firmware's tiling choice onto the object. */
	if (plane_config->tiling == I915_TILING_X)
		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	i915_gem_object_put(obj);
	return false;
}
 2742
 2743static void
 2744intel_set_plane_visible(struct intel_crtc_state *crtc_state,
 2745			struct intel_plane_state *plane_state,
 2746			bool visible)
 2747{
 2748	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
 2749
 2750	plane_state->base.visible = visible;
 2751
 2752	/* FIXME pre-g4x don't work like this */
 2753	if (visible) {
 2754		crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
 2755		crtc_state->active_planes |= BIT(plane->id);
 2756	} else {
 2757		crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
 2758		crtc_state->active_planes &= ~BIT(plane->id);
 2759	}
 2760
 2761	DRM_DEBUG_KMS("%s active planes 0x%x\n",
 2762		      crtc_state->base.crtc->name,
 2763		      crtc_state->active_planes);
 2764}
 2765
/*
 * Disable @plane on @crtc outside of the atomic commit machinery, updating
 * the current states directly. Used when we must shut a plane off without
 * going through a full atomic commit.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	intel_set_plane_visible(crtc_state, plane_state, false);

	/* The primary plane needs extra teardown before it goes dark. */
	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	trace_intel_disable_plane(&plane->base, crtc);
	plane->disable_plane(plane, crtc);
}
 2782
/*
 * Take over the firmware-configured (BIOS) framebuffer for @intel_crtc:
 * either wrap its memory in our own object, or share an fb already claimed
 * by another CRTC at the same GGTT address. If neither works, disable the
 * primary plane so we don't scan out from an unknown buffer.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT base address => same firmware framebuffer. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = c->primary->fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   primary->state->rotation,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	/* Populate the plane state as a full-screen scanout of the fb. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	obj = intel_fb_obj(fb);
	/* Keep the firmware's swizzle setup if it handed us a tiled fb. */
	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_get(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;

	intel_set_plane_visible(to_intel_crtc_state(crtc_state),
				to_intel_plane_state(plane_state),
				true);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
 2889
 2890static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
 2891			       unsigned int rotation)
 2892{
 2893	int cpp = fb->format->cpp[plane];
 2894
 2895	switch (fb->modifier) {
 2896	case DRM_FORMAT_MOD_LINEAR:
 2897	case I915_FORMAT_MOD_X_TILED:
 2898		switch (cpp) {
 2899		case 8:
 2900			return 4096;
 2901		case 4:
 2902		case 2:
 2903		case 1:
 2904			return 8192;
 2905		default:
 2906			MISSING_CASE(cpp);
 2907			break;
 2908		}
 2909		break;
 2910	case I915_FORMAT_MOD_Y_TILED_CCS:
 2911	case I915_FORMAT_MOD_Yf_TILED_CCS:
 2912		/* FIXME AUX plane? */
 2913	case I915_FORMAT_MOD_Y_TILED:
 2914	case I915_FORMAT_MOD_Yf_TILED:
 2915		switch (cpp) {
 2916		case 8:
 2917			return 2048;
 2918		case 4:
 2919			return 4096;
 2920		case 2:
 2921		case 1:
 2922			return 8192;
 2923		default:
 2924			MISSING_CASE(cpp);
 2925			break;
 2926		}
 2927		break;
 2928	default:
 2929		MISSING_CASE(fb->modifier);
 2930	}
 2931
 2932	return 2048;
 2933}
 2934
/*
 * Try to bring the CCS AUX surface's x/y into agreement with the main
 * surface's x/y by stepping the AUX offset backwards one alignment unit
 * at a time (the AUX surface has no independent x/y offsets in hw, see
 * the caller's comment in skl_check_main_surface()).
 * Returns true and commits the matching offset/x/y to plane_state->aux
 * on success, false if no matching position could be found.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->aux.x;
	int aux_y = plane_state->aux.y;
	u32 aux_offset = plane_state->aux.offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	/*
	 * Step the AUX offset down while it is still >= the main offset
	 * and the AUX y hasn't passed the main y.
	 */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		/* Exact x/y match found. */
		if (aux_x == main_x && aux_y == main_y)
			break;

		/* Can't step below offset 0. */
		if (aux_offset == 0)
			break;

		/* Adjust in subsampled (hsub x vsub) coordinates ... */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1,
						      aux_offset, aux_offset - alignment);
		/* ... then restore the intra-block remainder. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	/* Commit the matching AUX position. */
	plane_state->aux.offset = aux_offset;
	plane_state->aux.x = aux_x;
	plane_state->aux.y = aux_y;

	return true;
}
 2972
/*
 * Validate and finalize the main (Y/RGB) surface offset and x/y for a
 * SKL+ plane: enforce size limits and the GLK/CNL screen-edge
 * workaround, keep the AUX offset reachable (it is relative to the
 * main offset and must be non-negative), respect the X-tiling stride
 * restriction and the CCS coordinate-matching requirement.
 * Writes the result to plane_state->main and returns 0, or a negative
 * error code on failure.
 */
static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
				  struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int dst_x = plane_state->base.dst.x1;
	int pipe_src_w = crtc_state->pipe_src_w;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->aux.offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	/*
	 * Display WA #1175: cnl,glk
	 * Planes other than the cursor may cause FIFO underflow and display
	 * corruption if starting less than 4 pixels from the right edge of
	 * the screen.
	 * Besides the above WA fix the similar problem, where planes other
	 * than the cursor ending less than 4 pixels from the left edge of the
	 * screen may cause FIFO underflow and display corruption.
	 */
	if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
	    (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
		DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
			      dst_x + w < 4 ? "end" : "start",
			      dst_x + w < 4 ? dst_x + w : dst_x,
			      4, pipe_src_w - 4);
		return -ERANGE;
	}

	/* Convert to fb-relative coordinates and compute the aligned offset. */
	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
						  offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Step the offset back one alignment unit at a time until x fits. */
		while ((x + w) * cpp > fb->pitches[0]) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
							  offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
	    fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
							  offset, offset - alignment);
		}

		if (x != plane_state->aux.x || y != plane_state->aux.y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	/* Commit the final main surface offset and coordinates. */
	plane_state->main.offset = offset;
	plane_state->main.x = x;
	plane_state->main.y = y;

	return 0;
}
 3073
 3074static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 3075{
 3076	const struct drm_framebuffer *fb = plane_state->base.fb;
 3077	unsigned int rotation = plane_state->base.rotation;
 3078	int max_width = skl_max_plane_width(fb, 1, rotation);
 3079	int max_height = 4096;
 3080	int x = plane_state->base.src.x1 >> 17;
 3081	int y = plane_state->base.src.y1 >> 17;
 3082	int w = drm_rect_width(&plane_state->base.src) >> 17;
 3083	int h = drm_rect_height(&plane_state->base.src) >> 17;
 3084	u32 offset;
 3085
 3086	intel_add_fb_offsets(&x, &y, plane_state, 1);
 3087	offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
 3088
 3089	/* FIXME not quite sure how/if these apply to the chroma plane */
 3090	if (w > max_width || h > max_height) {
 3091		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
 3092			      w, h, max_width, max_height);
 3093		return -EINVAL;
 3094	}
 3095
 3096	plane_state->aux.offset = offset;
 3097	plane_state->aux.x = x;
 3098	plane_state->aux.y = y;
 3099
 3100	return 0;
 3101}
 3102
 3103static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
 3104{
 3105	const struct drm_framebuffer *fb = plane_state->base.fb;
 3106	int src_x = plane_state->base.src.x1 >> 16;
 3107	int src_y = plane_state->base.src.y1 >> 16;
 3108	int hsub = fb->format->hsub;
 3109	int vsub = fb->format->vsub;
 3110	int x = src_x / hsub;
 3111	int y = src_y / vsub;
 3112	u32 offset;
 3113
 3114	if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
 3115		DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
 3116			      plane_state->base.rotation);
 3117		return -EINVAL;
 3118	}
 3119
 3120	intel_add_fb_offsets(&x, &y, plane_state, 1);
 3121	offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
 3122
 3123	plane_state->aux.offset = offset;
 3124	plane_state->aux.x = x * hsub + src_x % hsub;
 3125	plane_state->aux.y = y * vsub + src_y % vsub;
 3126
 3127	return 0;
 3128}
 3129
 3130int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
 3131			    struct intel_plane_state *plane_state)
 3132{
 3133	const struct drm_framebuffer *fb = plane_state->base.fb;
 3134	unsigned int rotation = plane_state->base.rotation;
 3135	int ret;
 3136
 3137	if (rotation & DRM_MODE_REFLECT_X &&
 3138	    fb->modifier == DRM_FORMAT_MOD_LINEAR) {
 3139		DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
 3140		return -EINVAL;
 3141	}
 3142
 3143	if (!plane_state->base.visible)
 3144		return 0;
 3145
 3146	/* Rotate src coordinates to match rotated GTT view */
 3147	if (drm_rotation_90_or_270(rotation))
 3148		drm_rect_rotate(&plane_state->base.src,
 3149				fb->width << 16, fb->height << 16,
 3150				DRM_MODE_ROTATE_270);
 3151
 3152	/*
 3153	 * Handle the AUX surface first since
 3154	 * the main surface setup depends on it.
 3155	 */
 3156	if (fb->format->format == DRM_FORMAT_NV12) {
 3157		ret = skl_check_nv12_aux_surface(plane_state);
 3158		if (ret)
 3159			return ret;
 3160	} else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 3161		   fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
 3162		ret = skl_check_ccs_aux_surface(plane_state);
 3163		if (ret)
 3164			return ret;
 3165	} else {
 3166		plane_state->aux.offset = ~0xfff;
 3167		plane_state->aux.x = 0;
 3168		plane_state->aux.y = 0;
 3169	}
 3170
 3171	ret = skl_check_main_surface(crtc_state, plane_state);
 3172	if (ret)
 3173		return ret;
 3174
 3175	return 0;
 3176}
 3177
/*
 * Compute the DSPCNTR register value for a pre-SKL primary plane:
 * enable + gamma bits, per-platform extras, pixel format, tiling and
 * rotation/mirroring. Returns 0 for an unsupported pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

	/* Disable trickle feed on these platforms. */
	if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
	    IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Pre-gen5 selects the pipe in the plane control register. */
	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	/* Translate the fb pixel format into DISPPLANE format bits. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Unknown formats are flagged and yield 0 (no enable bit). */
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
 3239
/*
 * Compute the final surface offset and x/y coordinates for a pre-SKL
 * primary plane and store them in plane_state->main. Always returns 0.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	u32 offset;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/*
	 * Pre-gen4 keeps offset 0 and scans out via DSPADDR with a linear
	 * offset instead (see i9xx_update_plane()).
	 */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_compute_tile_offset(&src_x, &src_y,
						   plane_state, 0);
	else
		offset = 0;

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->base.rotation;
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/*
		 * For 180 degree rotation / X mirroring, point src_x/y at
		 * the last pixel of the affected axes.
		 */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->main.offset = offset;
	plane_state->main.x = src_x;
	plane_state->main.y = src_y;

	return 0;
}
 3276
/*
 * Program a pre-SKL primary plane from precomputed state: plane
 * size/position (gen2/3 and CHV pipe B only), DSPCNTR, stride and the
 * surface address/offset registers. All register writes are done with
 * the _FW accessors under the uncore lock.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(i9xx_plane);
	int x = plane_state->main.x;
	int y = plane_state->main.y;
	unsigned long irqflags;
	u32 dspaddr_offset;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/*
	 * Gen4+ use the tile-aware offset computed in
	 * i9xx_check_plane_surface(); older gens take the linear offset.
	 */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->main.offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own size/position regs. */
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), fb->pitches[0]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
	} else {
		/* Pre-gen4: single DSPADDR register, no separate offsets. */
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	}
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 3340
/*
 * Disable a pre-SKL primary plane by clearing its control register and
 * its surface/address register, under the uncore lock.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
	/* Zero the surface address as well: gen4+ use DSPSURF, older DSPADDR. */
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
	POSTING_READ_FW(DSPCNTR(i9xx_plane));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 3359
 3360static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
 3361{
 3362	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 3363	enum intel_display_power_domain power_domain;
 3364	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
 3365	enum pipe pipe = plane->pipe;
 3366	bool ret;
 3367
 3368	/*
 3369	 * Not 100% correct for planes that can move between pipes,
 3370	 * but that's only the case for gen2-4 which don't have any
 3371	 * display power wells.
 3372	 */
 3373	power_domain = POWER_DOMAIN_PIPE(pipe);
 3374	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 3375		return false;
 3376
 3377	ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;
 3378
 3379	intel_display_power_put(dev_priv, power_domain);
 3380
 3381	return ret;
 3382}
 3383
 3384static u32
 3385intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
 3386{
 3387	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
 3388		return 64;
 3389	else
 3390		return intel_tile_width_bytes(fb, plane);
 3391}
 3392
 3393static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 3394{
 3395	struct drm_device *dev = intel_crtc->base.dev;
 3396	struct drm_i915_private *dev_priv = to_i915(dev);
 3397
 3398	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
 3399	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
 3400	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
 3401}
 3402
 3403/*
 3404 * This function detaches (aka. unbinds) unused scalers in hardware
 3405 */
 3406static void skl_detach_scalers(struct intel_crtc *intel_crtc)
 3407{
 3408	struct intel_crtc_scaler_state *scaler_state;
 3409	int i;
 3410
 3411	scaler_state = &intel_crtc->config->scaler_state;
 3412
 3413	/* loop through and disable scalers that aren't in use */
 3414	for (i = 0; i < intel_crtc->num_scalers; i++) {
 3415		if (!scaler_state->scalers[i].in_use)
 3416			skl_detach_scaler(intel_crtc, i);
 3417	}
 3418}
 3419
 3420u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
 3421		     unsigned int rotation)
 3422{
 3423	u32 stride;
 3424
 3425	if (plane >= fb->format->num_planes)
 3426		return 0;
 3427
 3428	stride = intel_fb_pitch(fb, plane, rotation);
 3429
 3430	/*
 3431	 * The stride is either expressed as a multiple of 64 bytes chunks for
 3432	 * linear buffers or in number of tiles for tiled buffers.
 3433	 */
 3434	if (drm_rotation_90_or_270(rotation))
 3435		stride /= intel_tile_height(fb, plane);
 3436	else
 3437		stride /= intel_fb_stride_alignment(fb, plane);
 3438
 3439	return stride;
 3440}
 3441
 3442static u32 skl_plane_ctl_format(uint32_t pixel_format)
 3443{
 3444	switch (pixel_format) {
 3445	case DRM_FORMAT_C8:
 3446		return PLANE_CTL_FORMAT_INDEXED;
 3447	case DRM_FORMAT_RGB565:
 3448		return PLANE_CTL_FORMAT_RGB_565;
 3449	case DRM_FORMAT_XBGR8888:
 3450	case DRM_FORMAT_ABGR8888:
 3451		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
 3452	case DRM_FORMAT_XRGB8888:
 3453	case DRM_FORMAT_ARGB8888:
 3454		return PLANE_CTL_FORMAT_XRGB_8888;
 3455	case DRM_FORMAT_XRGB2101010:
 3456		return PLANE_CTL_FORMAT_XRGB_2101010;
 3457	case DRM_FORMAT_XBGR2101010:
 3458		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
 3459	case DRM_FORMAT_YUYV:
 3460		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
 3461	case DRM_FORMAT_YVYU:
 3462		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
 3463	case DRM_FORMAT_UYVY:
 3464		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
 3465	case DRM_FORMAT_VYUY:
 3466		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
 3467	default:
 3468		MISSING_CASE(pixel_format);
 3469	}
 3470
 3471	return 0;
 3472}
 3473
 3474/*
 3475 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
 3476 * to be already pre-multiplied. We need to add a knob (or a different
 3477 * DRM_FORMAT) for user-space to configure that.
 3478 */
 3479static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
 3480{
 3481	switch (pixel_format) {
 3482	case DRM_FORMAT_ABGR8888:
 3483	case DRM_FORMAT_ARGB8888:
 3484		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
 3485	default:
 3486		return PLANE_CTL_ALPHA_DISABLE;
 3487	}
 3488}
 3489
 3490static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
 3491{
 3492	switch (pixel_format) {
 3493	case DRM_FORMAT_ABGR8888:
 3494	case DRM_FORMAT_ARGB8888:
 3495		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
 3496	default:
 3497		return PLANE_COLOR_ALPHA_DISABLE;
 3498	}
 3499}
 3500
 3501static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
 3502{
 3503	switch (fb_modifier) {
 3504	case DRM_FORMAT_MOD_LINEAR:
 3505		break;
 3506	case I915_FORMAT_MOD_X_TILED:
 3507		return PLANE_CTL_TILED_X;
 3508	case I915_FORMAT_MOD_Y_TILED:
 3509		return PLANE_CTL_TILED_Y;
 3510	case I915_FORMAT_MOD_Y_TILED_CCS:
 3511		return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
 3512	case I915_FORMAT_MOD_Yf_TILED:
 3513		return PLANE_CTL_TILED_YF;
 3514	case I915_FORMAT_MOD_Yf_TILED_CCS:
 3515		return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
 3516	default:
 3517		MISSING_CASE(fb_modifier);
 3518	}
 3519
 3520	return 0;
 3521}
 3522
 3523static u32 skl_plane_ctl_rotate(unsigned int rotate)
 3524{
 3525	switch (rotate) {
 3526	case DRM_MODE_ROTATE_0:
 3527		break;
 3528	/*
 3529	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
 3530	 * while i915 HW rotation is clockwise, thats why this swapping.
 3531	 */
 3532	case DRM_MODE_ROTATE_90:
 3533		return PLANE_CTL_ROTATE_270;
 3534	case DRM_MODE_ROTATE_180:
 3535		return PLANE_CTL_ROTATE_180;
 3536	case DRM_MODE_ROTATE_270:
 3537		return PLANE_CTL_ROTATE_90;
 3538	default:
 3539		MISSING_CASE(rotate);
 3540	}
 3541
 3542	return 0;
 3543}
 3544
 3545static u32 cnl_plane_ctl_flip(unsigned int reflect)
 3546{
 3547	switch (reflect) {
 3548	case 0:
 3549		break;
 3550	case DRM_MODE_REFLECT_X:
 3551		return PLANE_CTL_FLIP_HORIZONTAL;
 3552	case DRM_MODE_REFLECT_Y:
 3553	default:
 3554		MISSING_CASE(reflect);
 3555	}
 3556
 3557	return 0;
 3558}
 3559
/*
 * Compute the PLANE_CTL register value for a SKL+ universal plane:
 * format, tiling, rotation/flip and colorkey bits. On pre-GLK/CNL
 * hardware the alpha/gamma/CSC/YUV-range bits are programmed here as
 * well (GLK and CNL+ carry them in PLANE_COLOR_CTL instead, see
 * glk_plane_color_ctl()).
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* Pre-GLK/pre-CNL: color management bits live in PLANE_CTL. */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* CNL+ additionally supports horizontal flipping. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
 3601
/*
 * Compute the PLANE_COLOR_CTL register value for GLK/CNL+: pipe
 * gamma/CSC enables, alpha mode and, for YUV framebuffers, the
 * YUV->RGB CSC mode and range correction.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
	plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);

	if (intel_format_is_yuv(fb->format->format)) {
		/* Pick the YUV->RGB CSC matching the color encoding. */
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		/* Full-range content needs no limited->full expansion. */
		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	}

	return plane_color_ctl;
}
 3625
/*
 * Commit a previously duplicated atomic state after display hw state
 * has been read back, forcing a full recalculation on every crtc.
 * Returns the commit result; -EDEADLK is unexpected here and triggers
 * a WARN.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	/* Nothing to commit if no state was duplicated. */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
 3664
 3665static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
 3666{
 3667	return intel_has_gpu_reset(dev_priv) &&
 3668		INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
 3669}
 3670
/*
 * Prepare the display for a GPU reset: when the reset clobbers the
 * display (or the force_reset_modeset_test modparam is set), take all
 * modeset locks, duplicate the current atomic state and disable all
 * crtcs. The duplicated state is stashed in modeset_restore_state for
 * intel_finish_reset() to commit afterwards.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;


	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	/* A pending fb pin can keep the modeset stuck; wedge to unblock it. */
	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry (with backoff) until every modeset lock is held. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_finish_reset(). */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
 3727
/*
 * Counterpart of intel_prepare_reset(): commit the display state that
 * was stashed before the GPU reset and drop the modeset locks. When
 * the reset really clobbered the display, fully re-initialize display
 * hw (interrupts, clock gating, hotplug) before restoring.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* intel_prepare_reset() bailed before stashing any state. */
	if (!state)
		goto unlock;

	dev_priv->modeset_restore_state = NULL;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Release the locks taken in intel_prepare_reset(). */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
 3783
/*
 * Fastset helper: update PIPESRC and the panel fitter/scaler state to
 * match the new crtc state without going through a full modeset.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(crtc);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Enable, or disable a previously-enabled, ILK-style pfit. */
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
 3819
/*
 * Switch the FDI link from a training pattern to normal pixel transport:
 * select "train none"/normal on both the CPU (TX) and PCH (RX) sides and
 * enable enhanced framing.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses a different train-none encoding than ILK/SNB */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT has its own train pattern field on the RX side */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
 3860
/*
 * The FDI link training function for ILK/Ibexpeak.
 *
 * Runs training pattern 1 (bit lock) then pattern 2 (symbol lock),
 * polling FDI_RX_IIR for the corresponding lock bits.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock; writing the bit back acks/clears it */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock; writing the bit back acks/clears it */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
 3954
/*
 * Voltage-swing / pre-emphasis levels tried in order during SNB-B and
 * IVB FDI link training (see gen6_fdi_link_train/ivb_manual_fdi_link_train).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
 3961
/*
 * The FDI link training function for SNB/Cougarpoint.
 *
 * For each training pattern, steps through the vswing/pre-emphasis table
 * (snb_b_fdi_train_param), retrying each setting up to five times before
 * moving to the next one.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* pattern 1: try each vswing/emphasis level until bit lock is seen */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write the bit back to ack/clear it */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* pattern 2: same search, this time waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
 4094
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Tries each vswing/pre-emphasis setting twice; for every attempt the
 * link is first fully disabled, then re-enabled with pattern 1 (bit lock)
 * and, on success, switched to pattern 2 (symbol lock).
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each table entry is tried on two consecutive iterations */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* re-read once in case the lock arrived just now */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

	/* NOTE: also reached by falling out of the loop with all levels failed */
train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
 4214
/*
 * Enable the PCH FDI RX PLL and the CPU FDI TX PLL for this pipe, then
 * switch the RX side from the raw clock to PCDclk.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC setting from PIPECONF into FDI RX */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
 4251
/*
 * Disable the FDI PLLs for this pipe: undo the PCDclk switch, then turn
 * off the CPU FDI TX PLL and the PCH FDI RX PLL (reverse of
 * ironlake_fdi_pll_enable).
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
 4281
/*
 * Disable the FDI link for this pipe: turn off CPU TX and PCH RX, then
 * leave both sides parked in training pattern 1 so a later re-enable
 * starts from a known state.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* keep the FDI RX BPC field in sync with PIPECONF */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
 4334
 4335bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
 4336{
 4337	struct drm_crtc *crtc;
 4338	bool cleanup_done;
 4339
 4340	drm_for_each_crtc(crtc, &dev_priv->drm) {
 4341		struct drm_crtc_commit *commit;
 4342		spin_lock(&crtc->commit_lock);
 4343		commit = list_first_entry_or_null(&crtc->commit_list,
 4344						  struct drm_crtc_commit, commit_entry);
 4345		cleanup_done = commit ?
 4346			try_wait_for_completion(&commit->cleanup_done) : true;
 4347		spin_unlock(&crtc->commit_lock);
 4348
 4349		if (cleanup_done)
 4350			continue;
 4351
 4352		drm_crtc_wait_one_vblank(crtc);
 4353
 4354		return true;
 4355	}
 4356
 4357	return false;
 4358}
 4359
/*
 * Disable the iCLKIP clock: gate the pixel clock and stop the SSC
 * modulator through the sideband (SBI) interface.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* sideband accesses are serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
 4374
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* start from a disabled modulator before reprogramming */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		/* split the divisor into integer part and phase increment */
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* ungate the pixel clock again, gated by lpt_disable_iclkip above */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
 4452
/*
 * Read back the current iCLKIP configuration via the sideband interface
 * and convert it to a clock value (inverse of the math in
 * lpt_program_iclkip). Returns 0 if the pixel clock is gated or the SSC
 * modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* reassemble the divisor from its integer and phase-increment parts */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
 4489
/*
 * Copy the CPU transcoder's timing registers (H/V total, blank, sync,
 * vsyncshift) to the corresponding PCH transcoder registers.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
 4513
/*
 * Set or clear the FDI B/C lane-bifurcation bit in SOUTH_CHICKEN1.
 * No-op if the bit already has the requested value.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* the bit must only be flipped while FDI B/C RX are both disabled */
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
 4534
 4535static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
 4536{
 4537	struct drm_device *dev = intel_crtc->base.dev;
 4538
 4539	switch (intel_crtc->pipe) {
 4540	case PIPE_A:
 4541		break;
 4542	case PIPE_B:
 4543		if (intel_crtc->config->fdi_lanes > 2)
 4544			cpt_set_fdi_bc_bifurcation(dev, false);
 4545		else
 4546			cpt_set_fdi_bc_bifurcation(dev, true);
 4547
 4548		break;
 4549	case PIPE_C:
 4550		cpt_set_fdi_bc_bifurcation(dev, true);
 4551
 4552		break;
 4553	default:
 4554		BUG();
 4555	}
 4556}
 4557
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* pick the first DP or eDP encoder attached to this crtc */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return encoder->port;
	}

	/*
	 * No DP/eDP encoder found: -1 deliberately matches no PORT_*
	 * value, which makes the caller's switch hit its BUG() default.
	 */
	return -1;
}
 4573
 4574/*
 4575 * Enable PCH resources required for PCH ports:
 4576 *   - PCH PLLs
 4577 *   - FDI training & RX/TX
 4578 *   - update transcoder timings
 4579 *   - DP transcoding bits
 4580 *   - transcoder
 4581 */
 4582static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
 4583{
 4584	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 4585	struct drm_device *dev = crtc->base.dev;
 4586	struct drm_i915_private *dev_priv = to_i915(dev);
 4587	int pipe = crtc->pipe;
 4588	u32 temp;
 4589
 4590	assert_pch_transcoder_disabled(dev_priv, pipe);
 4591
 4592	if (IS_IVYBRIDGE(dev_priv))
 4593		ivybridge_update_fdi_bc_bifurcation(crtc);
 4594
 4595	/* Write the TU size bits before fdi link training, so that error
 4596	 * detection works. */
 4597	I915_WRITE(FDI_RX_TUSIZE1(pipe),
 4598		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 4599
 4600	/* For PCH output, training FDI link */
 4601	dev_priv->display.fdi_link_train(crtc, crtc_state);
 4602
 4603	/* We need to program the right clock selection before writing the pixel
 4604	 * mutliplier into the DPLL. */
 4605	if (HAS_PCH_CPT(dev_priv)) {
 4606		u32 sel;
 4607
 4608		temp = I915_READ(PCH_DPLL_SEL);
 4609		temp |= TRANS_DPLL_ENABLE(pipe);
 4610		sel = TRANS_DPLLB_SEL(pipe);
 4611		if (crtc_state->shared_dpll ==
 4612		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
 4613			temp |= sel;
 4614		else
 4615			temp &= ~sel;
 4616		I915_WRITE(PCH_DPLL_SEL, temp);
 4617	}
 4618
 4619	/* XXX: pch pll's can be enabled any time before we enable the PCH
 4620	 * transcoder, and we actually should do this to not upset any PCH
 4621	 * transcoder that already use the clock when we share it.
 4622	 *
 4623	 * Note that enable_shared_dpll tries to do the right thing, but
 4624	 * get_shared_dpll unconditionally resets the pll - we need that to have
 4625	 * the right LVDS enable sequence. */
 4626	intel_enable_shared_dpll(crtc);
 4627
 4628	/* set transcoder timing, panel must allow it */
 4629	assert_panel_unlocked(dev_priv, pipe);
 4630	ironlake_pch_transcoder_set_timings(crtc, pipe);
 4631
 4632	intel_fdi_normal_train(crtc);
 4633
 4634	/* For PCH DP, enable TRANS_DP_CTL */
 4635	if (HAS_PCH_CPT(dev_priv) &&
 4636	    intel_crtc_has_dp_encoder(crtc_state)) {
 4637		const struct drm_display_mode *adjusted_mode =
 4638			&crtc_state->base.adjusted_mode;
 4639		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
 4640		i915_reg_t reg = TRANS_DP_CTL(pipe);
 4641		temp = I915_READ(reg);
 4642		temp &= ~(TRANS_DP_PORT_SEL_MASK |
 4643			  TRANS_DP_SYNC_MASK |
 4644			  TRANS_DP_BPC_MASK);
 4645		temp |= TRANS_DP_OUTPUT_ENABLE;
 4646		temp |= bpc << 9; /* same format but at 11:9 */
 4647
 4648		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 4649			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
 4650		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 4651			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
 4652
 4653		switch (intel_trans_dp_port_sel(crtc)) {
 4654		case PORT_B:
 4655			temp |= TRANS_DP_PORT_SEL_B;
 4656			break;
 4657		case PORT_C:
 4658			temp |= TRANS_DP_PORT_SEL_C;
 4659			break;
 4660		case PORT_D:
 4661			temp |= TRANS_DP_PORT_SEL_D;
 4662			break;
 4663		default:
 4664			BUG();
 4665		}
 4666
 4667		I915_WRITE(reg, temp);
 4668	}
 4669
 4670	ironlake_enable_pch_transcoder(dev_priv, pipe);
 4671}
 4672
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the
 * transcoder timings and enable the (single, pipe A) PCH transcoder.
 */
static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* LPT has only one PCH transcoder, hard-wired to pipe A */
	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
 4688
 4689static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 4690{
 4691	struct drm_i915_private *dev_priv = to_i915(dev);
 4692	i915_reg_t dslreg = PIPEDSL(pipe);
 4693	u32 temp;
 4694
 4695	temp = I915_READ(dslreg);
 4696	udelay(500);
 4697	if (wait_for(I915_READ(dslreg) != temp, 5)) {
 4698		if (wait_for(I915_READ(dslreg) != temp, 5))
 4699			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
 4700	}
 4701}
 4702
/*
 * Stage a scaler request (or release) for one scaler user in
 * @crtc_state->scaler_state. Only the software bookkeeping is updated
 * here; the actual scaler registers are programmed later during
 * plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL if the requested scaling cannot be
 * supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int need_scaling;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	need_scaling = src_w != dst_w || src_h != dst_h;

	/* YCbCr 4:2:0 output always needs the pipe scaler */
	if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
		need_scaling = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
	    need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
 4784
 4785/**
 4786 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 4787 *
 4788 * @state: crtc's scaler state
 4789 *
 4790 * Return
 4791 *     0 - scaler_usage updated successfully
 4792 *    error - requested scaling cannot be supported or other error condition
 4793 */
 4794int skl_update_scaler_crtc(struct intel_crtc_state *state)
 4795{
 4796	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
 4797
 4798	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
 4799		&state->scaler_state.scaler_id,
 4800		state->pipe_src_w, state->pipe_src_h,
 4801		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 4802}
 4803
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state owning the staged scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* Release any scaler when the plane has no fb or is invisible. */
	bool force_detach = !fb || !plane_state->base.visible;

	/* Plane src coordinates are 16.16 fixed point, hence the >> 16. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->base.src) >> 16,
				drm_rect_height(&plane_state->base.src) >> 16,
				drm_rect_width(&plane_state->base.dst),
				drm_rect_height(&plane_state->base.dst));

	/* Done unless a scaler was actually assigned to this plane. */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		/* Anything else cannot be fed through the plane scaler. */
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
 4866
 4867static void skylake_scaler_disable(struct intel_crtc *crtc)
 4868{
 4869	int i;
 4870
 4871	for (i = 0; i < crtc->num_scalers; i++)
 4872		skl_detach_scaler(crtc, i);
 4873}
 4874
 4875static void skylake_pfit_enable(struct intel_crtc *crtc)
 4876{
 4877	struct drm_device *dev = crtc->base.dev;
 4878	struct drm_i915_private *dev_priv = to_i915(dev);
 4879	int pipe = crtc->pipe;
 4880	struct intel_crtc_scaler_state *scaler_state =
 4881		&crtc->config->scaler_state;
 4882
 4883	if (crtc->config->pch_pfit.enabled) {
 4884		int id;
 4885
 4886		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
 4887			return;
 4888
 4889		id = scaler_state->scaler_id;
 4890		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
 4891			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
 4892		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
 4893		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
 4894	}
 4895}
 4896
 4897static void ironlake_pfit_enable(struct intel_crtc *crtc)
 4898{
 4899	struct drm_device *dev = crtc->base.dev;
 4900	struct drm_i915_private *dev_priv = to_i915(dev);
 4901	int pipe = crtc->pipe;
 4902
 4903	if (crtc->config->pch_pfit.enabled) {
 4904		/* Force use of hard-coded filter coefficients
 4905		 * as some pre-programmed values are broken,
 4906		 * e.g. x201.
 4907		 */
 4908		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
 4909			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
 4910						 PF_PIPE_SEL_IVB(pipe));
 4911		else
 4912			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
 4913		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
 4914		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
 4915	}
 4916}
 4917
/*
 * Enable IPS for the given crtc state. No-op unless ips_enabled is set.
 * On BDW the toggle goes through the pcode mailbox (under pcu_lock); on
 * HSW it is done via the IPS_CTL MMIO register, whose enable bit only
 * latches at the next vblank.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		mutex_unlock(&dev_priv->pcu_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
 4957
/*
 * Disable IPS for the given crtc state (no-op unless it was enabled),
 * then wait a vblank so the planes can be safely disabled afterwards.
 * BDW disables via the pcode mailbox, HSW via the IPS_CTL register.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->pcu_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
 4984
 4985static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
 4986{
 4987	if (intel_crtc->overlay) {
 4988		struct drm_device *dev = intel_crtc->base.dev;
 4989
 4990		mutex_lock(&dev->struct_mutex);
 4991		(void) intel_overlay_switch_off(intel_crtc->overlay);
 4992		mutex_unlock(&dev->struct_mutex);
 4993	}
 4994
 4995	/* Let userspace switch the overlay on again. In most cases userspace
 4996	 * has to recompute where to put it anyway.
 4997	 */
 4998}
 4999
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 * @new_crtc_state: the enabling state
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 *
 * NOTE(review): the FBC/IPS updates mentioned above now happen in
 * intel_post_plane_update(); this function only deals with underrun
 * reporting. The kernel-doc looks stale — confirm and trim.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc,
			  const struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
 5034
/* FIXME get rid of this and use pre_plane_update */
/*
 * Non-atomic counterpart of intel_pre_plane_update(): quiesce underrun
 * reporting, IPS and self-refresh before the primary plane is disabled.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	hsw_disable_ips(to_intel_crtc_state(crtc->state));

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);
}
 5066
 5067static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
 5068				       const struct intel_crtc_state *new_crtc_state)
 5069{
 5070	if (!old_crtc_state->ips_enabled)
 5071		return false;
 5072
 5073	if (needs_modeset(&new_crtc_state->base))
 5074		return true;
 5075
 5076	return !new_crtc_state->ips_enabled;
 5077}
 5078
 5079static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
 5080				       const struct intel_crtc_state *new_crtc_state)
 5081{
 5082	if (!new_crtc_state->ips_enabled)
 5083		return false;
 5084
 5085	if (needs_modeset(&new_crtc_state->base))
 5086		return true;
 5087
 5088	/*
 5089	 * We can't read out IPS on broadwell, assume the worst and
 5090	 * forcibly enable IPS on the first fastset.
 5091	 */
 5092	if (new_crtc_state->update_pipe &&
 5093	    old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
 5094		return true;
 5095
 5096	return !old_crtc_state->ips_enabled;
 5097}
 5098
/*
 * Post-commit plane work for one crtc: frontbuffer flip notification,
 * post-vblank watermark programming, IPS enable, FBC update and the
 * gen2 underrun/primary-plane bookkeeping.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	/* The new (committed) crtc state for this crtc. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	/* Optimal ('post') watermarks can only go in after the vblank. */
	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	/* Primary-plane-dependent work only if the primary was in the state. */
	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state),
							 to_intel_plane(primary));
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_post_update(crtc);

		/* Primary went from hidden to visible (or full modeset). */
		if (primary_state->base.visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->base.visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}
}
 5133
/*
 * Pre-commit plane work for one crtc: IPS disable, FBC pre-update,
 * gen2 underrun suppression, self-refresh disable, the IVB sprite
 * scaling workaround and 'intermediate' watermark programming.
 * The statement order here mirrors hardware requirements — do not
 * reorder without consulting the comments below.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
		hsw_disable_ips(old_crtc_state);

	/* Primary-plane-dependent work only if the primary is in the state. */
	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			intel_atomic_get_new_plane_state(old_intel_state,
							 to_intel_plane(primary));
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_pre_update(crtc, pipe_config, primary_state);
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So disable underrun reporting before all the planes get disabled.
		 */
		if (IS_GEN2(dev_priv) && old_primary_state->base.visible &&
		    (modeset || !primary_state->base.visible))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks.  For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
 5218
/*
 * Disable the overlay plus every plane in @plane_mask on this crtc,
 * then flip-notify the frontbuffer tracker for the whole pipe.
 */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
 5238
 5239static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
 5240					  struct intel_crtc_state *crtc_state,
 5241					  struct drm_atomic_state *old_state)
 5242{
 5243	struct drm_connector_state *conn_state;
 5244	struct drm_connector *conn;
 5245	int i;
 5246
 5247	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
 5248		struct intel_encoder *encoder =
 5249			to_intel_encoder(conn_state->best_encoder);
 5250
 5251		if (conn_state->crtc != crtc)
 5252			continue;
 5253
 5254		if (encoder->pre_pll_enable)
 5255			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
 5256	}
 5257}
 5258
 5259static void intel_encoders_pre_enable(struct drm_crtc *crtc,
 5260				      struct intel_crtc_state *crtc_state,
 5261				      struct drm_atomic_state *old_state)
 5262{
 5263	struct drm_connector_state *conn_state;
 5264	struct drm_connector *conn;
 5265	int i;
 5266
 5267	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
 5268		struct intel_encoder *encoder =
 5269			to_intel_encoder(conn_state->best_encoder);
 5270
 5271		if (conn_state->crtc != crtc)
 5272			continue;
 5273
 5274		if (encoder->pre_enable)
 5275			encoder->pre_enable(encoder, crtc_state, conn_state);
 5276	}
 5277}
 5278
 5279static void intel_encoders_enable(struct drm_crtc *crtc,
 5280				  struct intel_crtc_state *crtc_state,
 5281				  struct drm_atomic_state *old_state)
 5282{
 5283	struct drm_connector_state *conn_state;
 5284	struct drm_connector *conn;
 5285	int i;
 5286
 5287	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
 5288		struct intel_encoder *encoder =
 5289			to_intel_encoder(conn_state->best_encoder);
 5290
 5291		if (conn_state->crtc != crtc)
 5292			continue;
 5293
 5294		encoder->enable(encoder, crtc_state, conn_state);
 5295		intel_opregion_notify_encoder(encoder, true);
 5296	}
 5297}
 5298
 5299static void intel_encoders_disable(struct drm_crtc *crtc,
 5300				   struct intel_crtc_state *old_crtc_state,
 5301				   struct drm_atomic_state *old_state)
 5302{
 5303	struct drm_connector_state *old_conn_state;
 5304	struct drm_connector *conn;
 5305	int i;
 5306
 5307	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
 5308		struct intel_encoder *encoder =
 5309			to_intel_encoder(old_conn_state->best_encoder);
 5310
 5311		if (old_conn_state->crtc != crtc)
 5312			continue;
 5313
 5314		intel_opregion_notify_encoder(encoder, false);
 5315		encoder->disable(encoder, old_crtc_state, old_conn_state);
 5316	}
 5317}
 5318
 5319static void intel_encoders_post_disable(struct drm_crtc *crtc,
 5320					struct intel_crtc_state *old_crtc_state,
 5321					struct drm_atomic_state *old_state)
 5322{
 5323	struct drm_connector_state *old_conn_state;
 5324	struct drm_connector *conn;
 5325	int i;
 5326
 5327	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
 5328		struct intel_encoder *encoder =
 5329			to_intel_encoder(old_conn_state->best_encoder);
 5330
 5331		if (old_conn_state->crtc != crtc)
 5332			continue;
 5333
 5334		if (encoder->post_disable)
 5335			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
 5336	}
 5337}
 5338
 5339static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
 5340					    struct intel_crtc_state *old_crtc_state,
 5341					    struct drm_atomic_state *old_state)
 5342{
 5343	struct drm_connector_state *old_conn_state;
 5344	struct drm_connector *conn;
 5345	int i;
 5346
 5347	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
 5348		struct intel_encoder *encoder =
 5349			to_intel_encoder(old_conn_state->best_encoder);
 5350
 5351		if (old_conn_state->crtc != crtc)
 5352			continue;
 5353
 5354		if (encoder->post_pll_disable)
 5355			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
 5356	}
 5357}
 5358
/*
 * Full modeset enable sequence for an ILK-style (pch split) crtc:
 * timings, pipeconf, encoders, FDI PLL, pfit, LUT, pipe, PCH.
 * The ordering below follows hardware requirements — do not reorder.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	/* FDI link M/N values for the CPU transcoder feeding the PCH. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev_priv, pipe);
	/* Re-arm the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
 5447
 5448/* IPS only exists on ULT machines and is tied to pipe A. */
 5449static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
 5450{
 5451	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
 5452}
 5453
 5454static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
 5455					    enum pipe pipe, bool apply)
 5456{
 5457	u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
 5458	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
 5459
 5460	if (apply)
 5461		val |= mask;
 5462	else
 5463		val &= ~mask;
 5464
 5465	I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
 5466}
 5467
 5468static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
 5469{
 5470	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 5471	enum pipe pipe = crtc->pipe;
 5472	uint32_t val;
 5473
 5474	val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
 5475
 5476	/* Program B credit equally to all pipes */
 5477	val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
 5478
 5479	I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
 5480}
 5481
/*
 * Full modeset enable sequence for a HSW+ DDI crtc: PLL, timings,
 * pipeconf/pipemisc, CSC, encoders, pipe clock, pfit, LUT, transcoder,
 * watermarks, pipe, PCH (LPT), MST payload, vblank, workarounds.
 * The ordering below follows hardware requirements — do not reorder.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;

	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	/* DSI transcoders program their own timings. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	/* FDI link M/N values for the CPU transcoder feeding the PCH. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_pipe_clock(pipe_config);

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 intel_crtc->config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(pipe_config);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	/* WA #1180 undo: needs a vblank between pfit enable and re-gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
 5590
 5591static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 5592{
 5593	struct drm_device *dev = crtc->base.dev;
 5594	struct drm_i915_private *dev_priv = to_i915(dev);
 5595	int pipe = crtc->pipe;
 5596
 5597	/* To avoid upsetting the power well on haswell only disable the pfit if
 5598	 * it's in use. The hw state code will make sure we get this right. */
 5599	if (force || crtc->config->pch_pfit.enabled) {
 5600		I915_WRITE(PF_CTL(pipe), 0);
 5601		I915_WRITE(PF_WIN_POS(pipe), 0);
 5602		I915_WRITE(PF_WIN_SZ(pipe), 0);
 5603	}
 5604}
 5605
/*
 * Full modeset disable sequence for an ILK-style (pch split) crtc:
 * encoders, vblank, pipe, pfit, FDI, PCH transcoder, FDI PLL.
 * The ordering below follows hardware requirements — do not reorder.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
	}

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
 5666
/*
 * Full modeset disable sequence for a HSW+ DDI crtc: encoders, vblank,
 * pipe, MST payload, transcoder, scaler/pfit, pipe clock, post-disable.
 * The ordering below follows hardware requirements — do not reorder.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	/* Gen9+ uses the pipe scaler for fitting, older gens the pfit. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_pipe_clock(intel_crtc->config);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
}
 5700
/*
 * Enable the GMCH panel fitter using the precomputed ratios and control
 * value from the crtc state. No-op when the state doesn't use the pfit.
 * Must be called while the pipe is still disabled (asserted below).
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
 5724
 5725enum intel_display_power_domain intel_port_to_power_domain(enum port port)
 5726{
 5727	switch (port) {
 5728	case PORT_A:
 5729		return POWER_DOMAIN_PORT_DDI_A_LANES;
 5730	case PORT_B:
 5731		return POWER_DOMAIN_PORT_DDI_B_LANES;
 5732	case PORT_C:
 5733		return POWER_DOMAIN_PORT_DDI_C_LANES;
 5734	case PORT_D:
 5735		return POWER_DOMAIN_PORT_DDI_D_LANES;
 5736	case PORT_E:
 5737		return POWER_DOMAIN_PORT_DDI_E_LANES;
 5738	case PORT_F:
 5739		return POWER_DOMAIN_PORT_DDI_F_LANES;
 5740	default:
 5741		MISSING_CASE(port);
 5742		return POWER_DOMAIN_PORT_OTHER;
 5743	}
 5744}
 5745
/*
 * Compute the mask of power domains required by @crtc_state: the pipe
 * and transcoder domains, the panel fitter domain when the PCH pfit is
 * enabled (or force_thru requires it), each attached encoder's domain,
 * the audio domain on DDI platforms with audio, and the PLL domain when
 * a shared DPLL is used. Returns 0 for an inactive crtc.
 */
static u64 get_crtc_power_domains(struct drm_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Every encoder driven by this crtc contributes its own domain. */
	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_PLLS);

	return mask;
}
 5780
 5781static u64
 5782modeset_get_crtc_power_domains(struct drm_crtc *crtc,
 5783			       struct intel_crtc_state *crtc_state)
 5784{
 5785	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 5786	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 5787	enum intel_display_power_domain domain;
 5788	u64 domains, new_domains, old_domains;
 5789
 5790	old_domains = intel_crtc->enabled_power_domains;
 5791	intel_crtc->enabled_power_domains = new_domains =
 5792		get_crtc_power_domains(crtc, crtc_state);
 5793
 5794	domains = new_domains & ~old_domains;
 5795
 5796	for_each_power_domain(domain, domains)
 5797		intel_display_power_get(dev_priv, domain);
 5798
 5799	return old_domains & ~new_domains;
 5800}
 5801
 5802static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
 5803				      u64 domains)
 5804{
 5805	enum intel_display_power_domain domain;
 5806
 5807	for_each_power_domain(domain, domains)
 5808		intel_display_power_put(dev_priv, domain);
 5809}
 5810
 5811static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 5812				   struct drm_atomic_state *old_state)
 5813{
 5814	struct intel_atomic_state *old_intel_state =
 5815		to_intel_atomic_state(old_state);
 5816	struct drm_crtc *crtc = pipe_config->base.crtc;
 5817	struct drm_device *dev = crtc->dev;
 5818	struct drm_i915_private *dev_priv = to_i915(dev);
 5819	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 5820	int pipe = intel_crtc->pipe;
 5821
 5822	if (WARN_ON(intel_crtc->active))
 5823		return;
 5824
 5825	if (intel_crtc_has_dp_encoder(intel_crtc->config))
 5826		intel_dp_set_m_n(intel_crtc, M1_N1);
 5827
 5828	intel_set_pipe_timings(intel_crtc);
 5829	intel_set_pipe_src_size(intel_crtc);
 5830
 5831	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
 5832		struct drm_i915_private *dev_priv = to_i915(dev);
 5833
 5834		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
 5835		I915_WRITE(CHV_CANVAS(pipe), 0);
 5836	}
 5837
 5838	i9xx_set_pipeconf(intel_crtc);
 5839
 5840	intel_crtc->active = true;
 5841
 5842	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 5843
 5844	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 5845
 5846	if (IS_CHERRYVIEW(dev_priv)) {
 5847		chv_prepare_pll(intel_crtc, intel_crtc->config);
 5848		chv_enable_pll(intel_crtc, intel_crtc->config);
 5849	} else {
 5850		vlv_prepare_pll(intel_crtc, intel_crtc->config);
 5851		vlv_enable_pll(intel_crtc, intel_crtc->config);
 5852	}
 5853
 5854	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 5855
 5856	i9xx_pfit_enable(intel_crtc);
 5857
 5858	intel_color_load_luts(&pipe_config->base);
 5859
 5860	dev_priv->display.initial_watermarks(old_intel_state,
 5861					     pipe_config);
 5862	intel_enable_pipe(pipe_config);
 5863
 5864	assert_vblank_disabled(crtc);
 5865	drm_crtc_vblank_on(crtc);
 5866
 5867	intel_encoders_enable(crtc, pipe_config, old_state);
 5868}
 5869
 5870static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 5871{
 5872	struct drm_device *dev = crtc->base.dev;
 5873	struct drm_i915_private *dev_priv = to_i915(dev);
 5874
 5875	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
 5876	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
 5877}
 5878
/*
 * i9xx (pre-ILK, non-VLV/CHV) crtc enable sequence: PLL dividers, M/N,
 * timings/size, pipeconf, underrun reporting (gen3+), encoder
 * pre-enable hooks, PLL, pfit, LUTs, watermarks, pipe, then encoders.
 * The ordering follows the hardware enable requirements; do not reorder.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* Gen2 has no CPU FIFO underrun reporting to enable. */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	/* Older platforms may lack an initial_watermarks() hook. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     intel_crtc->config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
 5928
 5929static void i9xx_pfit_disable(struct intel_crtc *crtc)
 5930{
 5931	struct drm_device *dev = crtc->base.dev;
 5932	struct drm_i915_private *dev_priv = to_i915(dev);
 5933
 5934	if (!crtc->config->gmch_pfit.control)
 5935		return;
 5936
 5937	assert_pipe_disabled(dev_priv, crtc->pipe);
 5938
 5939	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
 5940			 I915_READ(PFIT_CONTROL));
 5941	I915_WRITE(PFIT_CONTROL, 0);
 5942}
 5943
/*
 * i9xx/VLV/CHV crtc disable sequence: wait out double-buffered plane
 * updates on gen2, run encoder disable hooks, stop vblanks, disable
 * the pipe, pfit, run post-disable hooks, shut down the PLL (platform
 * specific, skipped for DSI), then post-PLL hooks, underrun reporting
 * and watermarks.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* DSI keeps its PLL handling in the encoder hooks. */
	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
 5992
/*
 * Force a crtc off outside the normal atomic commit path (used to
 * sanitize inconsistent hardware state). Disables all visible planes,
 * builds a minimal atomic state so the platform crtc_disable() hook can
 * run, then scrubs the software state: crtc/encoder links, FBC,
 * watermarks, shared DPLL, power domain references and the bookkeeping
 * in dev_priv.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Planes first: the pipe must not scan out while being torn down. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Detach the mode and clear all links to this crtc. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Drop every power domain reference this crtc held. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
 6062
 6063/*
 6064 * turn all crtc's off, but do not adjust state
 6065 * This has to be paired with a call to intel_modeset_setup_hw_state.
 6066 */
 6067int intel_display_suspend(struct drm_device *dev)
 6068{
 6069	struct drm_i915_private *dev_priv = to_i915(dev);
 6070	struct drm_atomic_state *state;
 6071	int ret;
 6072
 6073	state = drm_atomic_helper_suspend(dev);
 6074	ret = PTR_ERR_OR_ZERO(state);
 6075	if (ret)
 6076		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
 6077	else
 6078		dev_priv->modeset_restore_state = state;
 6079	return ret;
 6080}
 6081
 6082void intel_encoder_destroy(struct drm_encoder *encoder)
 6083{
 6084	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 6085
 6086	drm_encoder_cleanup(encoder);
 6087	kfree(intel_encoder);
 6088}
 6089
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* Connector is enabled in hardware: it must have a crtc and
		 * a matching encoder (MST encoders are exempt from the
		 * encoder cross-checks). */
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
		      "connector is active, but attached crtc isn't\n");

		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Connector is off in hardware: the tracked state must not
		 * claim an active crtc or a dangling best_encoder. */
		I915_STATE_WARN(crtc_state && crtc_state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
 6128
 6129int intel_connector_init(struct intel_connector *connector)
 6130{
 6131	struct intel_digital_connector_state *conn_state;
 6132
 6133	/*
 6134	 * Allocate enough memory to hold intel_digital_connector_state,
 6135	 * This might be a few bytes too many, but for connectors that don't
 6136	 * need it we'll free the state and allocate a smaller one on the first
 6137	 * succesful commit anyway.
 6138	 */
 6139	conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
 6140	if (!conn_state)
 6141		return -ENOMEM;
 6142
 6143	__drm_atomic_helper_connector_reset(&connector->base,
 6144					    &conn_state->base);
 6145
 6146	return 0;
 6147}
 6148
 6149struct intel_connector *intel_connector_alloc(void)
 6150{
 6151	struct intel_connector *connector;
 6152
 6153	connector = kzalloc(sizeof *connector, GFP_KERNEL);
 6154	if (!connector)
 6155		return NULL;
 6156
 6157	if (intel_connector_init(connector) < 0) {
 6158		kfree(connector);
 6159		return NULL;
 6160	}
 6161
 6162	return connector;
 6163}
 6164
 6165/*
 6166 * Free the bits allocated by intel_connector_alloc.
 6167 * This should only be used after intel_connector_alloc has returned
 6168 * successfully, and before drm_connector_init returns successfully.
 6169 * Otherwise the destroy callbacks for the connector and the state should
 6170 * take care of proper cleanup/free
 6171 */
 6172void intel_connector_free(struct intel_connector *connector)
 6173{
 6174	kfree(to_intel_digital_connector_state(connector->base.state));
 6175	kfree(connector);
 6176}
 6177
 6178/* Simple connector->get_hw_state implementation for encoders that support only
 6179 * one connector and no cloning and hence the encoder state determines the state
 6180 * of the connector. */
 6181bool intel_connector_get_hw_state(struct intel_connector *connector)
 6182{
 6183	enum pipe pipe = 0;
 6184	struct intel_encoder *encoder = connector->encoder;
 6185
 6186	return encoder->get_hw_state(encoder, &pipe);
 6187}
 6188
 6189static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
 6190{
 6191	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
 6192		return crtc_state->fdi_lanes;
 6193
 6194	return 0;
 6195}
 6196
/*
 * Validate the FDI lane count for @pipe against the platform limits and
 * against the lanes consumed by sibling pipes (lanes are shared between
 * pipes B and C on 3-pipe IVB). Returns 0 when the config fits,
 * -EINVAL when it can't work, or a PTR_ERR from fetching the sibling
 * crtc state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW cap FDI at 2 lanes; no cross-pipe sharing checks needed. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B may use >2 lanes only while pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is capped at 2 lanes and shares them with pipe B. */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
 6268
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config. When the
 * lane check fails with -EINVAL and the pipe bpp can still be lowered
 * (above 6 bpc), reduce bpp by one bpc step and retry; in that case
 * RETRY (1) is returned so the caller can recompute dependent state.
 * Returns 0 on success or a negative error code otherwise.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		/* Drop one bpc (3 bits of pipe bpp) and try again. */
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
 6314
/*
 * Whether @crtc_state is in principle capable of IPS (Intermediate
 * Pixel Storage): requires an IPS-capable crtc, the enable_ips module
 * parameter, bpp <= 24, and on BDW a pixel rate within 95% of the
 * maximum cdclk.
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!i915_modparams.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
 6343
/*
 * Decide whether IPS should actually be enabled for @crtc_state: it
 * must be IPS-capable, not force-disabled, have at least one
 * non-cursor plane enabled, and on BDW fit within 95% of the logical
 * cdclk of this atomic state (a tighter bound than the capability
 * check, which uses the max cdclk).
 */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return false;

	if (crtc_state->ips_force_disable)
		return false;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return false;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
		return false;

	return true;
}
 6368
 6369static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
 6370{
 6371	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 6372
 6373	/* GDG double wide on either pipe, otherwise pipe A only */
 6374	return INTEL_GEN(dev_priv) < 4 &&
 6375		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
 6376}
 6377
 6378static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 6379{
 6380	uint32_t pixel_rate;
 6381
 6382	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
 6383
 6384	/*
 6385	 * We only use IF-ID interlacing. If we ever use
 6386	 * PF-ID we'll need to adjust the pixel_rate here.
 6387	 */
 6388
 6389	if (pipe_config->pch_pfit.enabled) {
 6390		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
 6391		uint32_t pfit_size = pipe_config->pch_pfit.size;
 6392
 6393		pipe_w = pipe_config->pipe_src_w;
 6394		pipe_h = pipe_config->pipe_src_h;
 6395
 6396		pfit_w = (pfit_size >> 16) & 0xFFFF;
 6397		pfit_h = pfit_size & 0xFFFF;
 6398		if (pipe_w < pfit_w)
 6399			pipe_w = pfit_w;
 6400		if (pipe_h < pfit_h)
 6401			pipe_h = pfit_h;
 6402
 6403		if (WARN_ON(!pfit_w || !pfit_h))
 6404			return pixel_rate;
 6405
 6406		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
 6407				     pfit_w * pfit_h);
 6408	}
 6409
 6410	return pixel_rate;
 6411}
 6412
 6413static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
 6414{
 6415	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 6416
 6417	if (HAS_GMCH_DISPLAY(dev_priv))
 6418		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
 6419		crtc_state->pixel_rate =
 6420			crtc_state->base.adjusted_mode.crtc_clock;
 6421	else
 6422		crtc_state->pixel_rate =
 6423			ilk_pipe_pixel_rate(crtc_state);
 6424}
 6425
/*
 * Cross-platform crtc config validation and derived-state computation:
 * dotclock limits (with gen2/3 double wide handling), YCBCR420/CTM
 * exclusivity, odd-width restrictions, the Cantiga+ hsync workaround,
 * pixel rate and (for PCH encoders) FDI configuration. Returns 0 on
 * success or a negative error code (possibly RETRY via the FDI path).
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4: single wide pipes are limited to 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
 6498
 6499static void
 6500intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 6501{
 6502	while (*num > DATA_LINK_M_N_MASK ||
 6503	       *den > DATA_LINK_M_N_MASK) {
 6504		*num >>= 1;
 6505		*den >>= 1;
 6506	}
 6507}
 6508
 6509static void compute_m_n(unsigned int m, unsigned int n,
 6510			uint32_t *ret_m, uint32_t *ret_n,
 6511			bool reduce_m_n)
 6512{
 6513	/*
 6514	 * Reduce M/N as much as possible without loss in precision. Several DP
 6515	 * dongles in particular seem to be fussy about too large *link* M/N
 6516	 * values. The passed in values are more likely to have the least
 6517	 * significant bits zero than M after rounding below, so do this first.
 6518	 */
 6519	if (reduce_m_n) {
 6520		while ((m & 1) == 0 && (n & 1) == 0) {
 6521			m >>= 1;
 6522			n >>= 1;
 6523		}
 6524	}
 6525
 6526	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
 6527	*ret_m = div_u64((uint64_t) m * *ret_n, n);
 6528	intel_reduce_m_n_ratio(ret_m, ret_n);
 6529}
 6530
 6531void
 6532intel_link_compute_m_n(int bits_per_pixel, int nlanes,
 6533		       int pixel_clock, int link_clock,
 6534		       struct intel_link_m_n *m_n,
 6535		       bool reduce_m_n)
 6536{
 6537	m_n->tu = 64;
 6538
 6539	compute_m_n(bits_per_pixel * pixel_clock,
 6540		    link_clock * nlanes * 8,
 6541		    &m_n->gmch_m, &m_n->gmch_n,
 6542		    reduce_m_n);
 6543
 6544	compute_m_n(pixel_clock, link_clock,
 6545		    &m_n->link_m, &m_n->link_n,
 6546		    reduce_m_n);
 6547}
 6548
 6549static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 6550{
 6551	if (i915_modparams.panel_use_ssc >= 0)
 6552		return i915_modparams.panel_use_ssc != 0;
 6553	return dev_priv->vbt.lvds_use_ssc
 6554		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 6555}
 6556
 6557static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
 6558{
 6559	return (1 << dpll->n) << 16 | dpll->m2;
 6560}
 6561
 6562static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
 6563{
 6564	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
 6565}
 6566
 6567static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
 6568				     struct intel_crtc_state *crtc_state,
 6569				     struct dpll *reduced_clock)
 6570{
 6571	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 6572	u32 fp, fp2 = 0;
 6573
 6574	if (IS_PINEVIEW(dev_priv)) {
 6575		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
 6576		if (reduced_clock)
 6577			fp2 = pnv_dpll_compute_fp(reduced_clock);
 6578	} else {
 6579		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
 6580		if (reduced_clock)
 6581			fp2 = i9xx_dpll_compute_fp(reduced_clock);
 6582	}
 6583
 6584	crtc_state->dpll_hw_state.fp0 = fp;
 6585
 6586	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 6587	    reduced_clock) {
 6588		crtc_state->dpll_hw_state.fp1 = fp2;
 6589	} else {
 6590		crtc_state->dpll_hw_state.fp1 = fp;
 6591	}
 6592}
 6593
/*
 * Recalibrate the PLL B opamp via the DPIO sideband. The register
 * offsets and magic values below follow the VLV DPIO programming
 * sequence; they are not documented further here.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte again after the calibration write above. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
 6622
/*
 * Program the PCH transcoder data/link M1/N1 registers for @crtc's
 * pipe, folding the TU size into the data M register.
 */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
 6635
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * transcoder-indexed M1/N1 registers are used (plus M2/N2 for DRRS on
 * platforms that have them); on older G4X-class hardware the
 * pipe-indexed registers are used instead.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
 6668
 6669void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
 6670{
 6671	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
 6672
 6673	if (m_n == M1_N1) {
 6674		dp_m_n = &crtc->config->dp_m_n;
 6675		dp_m2_n2 = &crtc->config->dp_m2_n2;
 6676	} else if (m_n == M2_N2) {
 6677
 6678		/*
 6679		 * M2_N2 registers are not supported. Hence m2_n2 divider value
 6680		 * needs to be programmed into M1_N1.
 6681		 */
 6682		dp_m_n = &crtc->config->dp_m2_n2;
 6683	} else {
 6684		DRM_ERROR("Unsupported divider value\n");
 6685		return;
 6686	}
 6687
 6688	if (crtc->config->has_pch_encoder)
 6689		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
 6690	else
 6691		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
 6692}
 6693
 6694static void vlv_compute_dpll(struct intel_crtc *crtc,
 6695			     struct intel_crtc_state *pipe_config)
 6696{
 6697	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
 6698		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 6699	if (crtc->pipe != PIPE_A)
 6700		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 6701
 6702	/* DPLL not used with DSI, but still need the rest set up */
 6703	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
 6704		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
 6705			DPLL_EXT_BUFFER_ENABLE_VLV;
 6706
 6707	pipe_config->dpll_hw_state.dpll_md =
 6708		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 6709}
 6710
 6711static void chv_compute_dpll(struct intel_crtc *crtc,
 6712			     struct intel_crtc_state *pipe_config)
 6713{
 6714	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
 6715		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 6716	if (crtc->pipe != PIPE_A)
 6717		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 6718
 6719	/* DPLL not used with DSI, but still need the rest set up */
 6720	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
 6721		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
 6722
 6723	pipe_config->dpll_hw_state.dpll_md =
 6724		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 6725}
 6726
 6727static void vlv_prepare_pll(struct intel_crtc *crtc,
 6728			    const struct intel_crtc_state *pipe_config)
 6729{
 6730	struct drm_device *dev = crtc->base.dev;
 6731	struct drm_i915_private *dev_priv = to_i915(dev);
 6732	enum pipe pipe = crtc->pipe;
 6733	u32 mdiv;
 6734	u32 bestn, bestm1, bestm2, bestp1, bestp2;
 6735	u32 coreclk, reg_val;
 6736
 6737	/* Enable Refclk */
 6738	I915_WRITE(DPLL(pipe),
 6739		   pipe_config->dpll_hw_state.dpll &
 6740		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
 6741
 6742	/* No need to actually set up the DPLL with DSI */
 6743	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
 6744		return;
 6745
 6746	mutex_lock(&dev_priv->sb_lock);
 6747
 6748	bestn = pipe_config->dpll.n;
 6749	bestm1 = pipe_config->dpll.m1;
 6750	bestm2 = pipe_config->dpll.m2;
 6751	bestp1 = pipe_config->dpll.p1;
 6752	bestp2 = pipe_config->dpll.p2;
 6753
 6754	/* See eDP HDMI DPIO driver vbios notes doc */
 6755
 6756	/* PLL B needs special handling */
 6757	if (pipe == PIPE_B)
 6758		vlv_pllb_recal_opamp(dev_priv, pipe);
 6759
 6760	/* Set up Tx target for periodic Rcomp update */
 6761	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
 6762
 6763	/* Disable target IRef on PLL */
 6764	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
 6765	reg_val &= 0x00ffffff;
 6766	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
 6767
 6768	/* Disable fast lock */
 6769	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
 6770
 6771	/* Set idtafcrecal before PLL is enabled */
 6772	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
 6773	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
 6774	mdiv |= ((bestn << DPIO_N_SHIFT));
 6775	mdiv |= (1 << DPIO_K_SHIFT);
 6776
 6777	/*
 6778	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
 6779	 * but we don't support that).
 6780	 * Note: don't use the DAC post divider as it seems unstable.
 6781	 */
 6782	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
 6783	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 6784
 6785	mdiv |= DPIO_ENABLE_CALIBRATION;
 6786	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 6787
 6788	/* Set HBR and RBR LPF coefficients */
 6789	if (pipe_config->port_clock == 162000 ||
 6790	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
 6791	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
 6792		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
 6793				 0x009f0003);
 6794	else
 6795		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
 6796				 0x00d0000f);
 6797
 6798	if (intel_crtc_has_dp_encoder(pipe_config)) {
 6799		/* Use SSC source */
 6800		if (pipe == PIPE_A)
 6801			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 6802					 0x0df40000);
 6803		else
 6804			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 6805					 0x0df70000);
 6806	} else { /* HDMI or VGA */
 6807		/* Use bend source */
 6808		if (pipe == PIPE_A)
 6809			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 6810					 0x0df70000);
 6811		else
 6812			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 6813					 0x0df40000);
 6814	}
 6815
 6816	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
 6817	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
 6818	if (intel_crtc_has_dp_encoder(crtc->config))
 6819		coreclk |= 0x01000000;
 6820	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 6821
 6822	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
 6823	mutex_unlock(&dev_priv->sb_lock);
 6824}
 6825
/*
 * Program the CHV PLL dividers, loop filter and lock-detect settings
 * through DPIO according to @pipe_config, before the PLL is enabled.
 * Only the refclk/SSC enable is performed when the DPLL itself will
 * not be used (the DSI case).
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 is stored as a fixed-point value: low 22 bits are the
	 * fraction, the remainder the integer part */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold; coarse selection is
	 * only used when there is no fractional part */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter: coefficients and tri-buffer cal count depend on
	 * the VCO band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
 6930
/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 *
 * Returns: 0 on success, -ENOMEM if the temporary crtc state cannot
 * be allocated.
 */
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_crtc_state *pipe_config;

	/* Temporary, minimal state just for the compute/prepare/enable
	 * calls below; freed again before returning. */
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return -ENOMEM;

	pipe_config->base.crtc = &crtc->base;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}
 6969
 6970/**
 6971 * vlv_force_pll_off - forcibly disable just the PLL
 6972 * @dev_priv: i915 private structure
 6973 * @pipe: pipe PLL to disable
 6974 *
 6975 * Disable the PLL for @pipe. To be used in cases where we need
 6976 * the PLL enabled even when @pipe is not going to be enabled.
 6977 */
 6978void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
 6979{
 6980	if (IS_CHERRYVIEW(dev_priv))
 6981		chv_disable_pll(dev_priv, pipe);
 6982	else
 6983		vlv_disable_pll(dev_priv, pipe);
 6984}
 6985
 6986static void i9xx_compute_dpll(struct intel_crtc *crtc,
 6987			      struct intel_crtc_state *crtc_state,
 6988			      struct dpll *reduced_clock)
 6989{
 6990	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 6991	u32 dpll;
 6992	struct dpll *clock = &crtc_state->dpll;
 6993
 6994	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
 6995
 6996	dpll = DPLL_VGA_MODE_DIS;
 6997
 6998	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
 6999		dpll |= DPLLB_MODE_LVDS;
 7000	else
 7001		dpll |= DPLLB_MODE_DAC_SERIAL;
 7002
 7003	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
 7004	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
 7005		dpll |= (crtc_state->pixel_multiplier - 1)
 7006			<< SDVO_MULTIPLIER_SHIFT_HIRES;
 7007	}
 7008
 7009	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
 7010	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
 7011		dpll |= DPLL_SDVO_HIGH_SPEED;
 7012
 7013	if (intel_crtc_has_dp_encoder(crtc_state))
 7014		dpll |= DPLL_SDVO_HIGH_SPEED;
 7015
 7016	/* compute bitmask from p1 value */
 7017	if (IS_PINEVIEW(dev_priv))
 7018		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
 7019	else {
 7020		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 7021		if (IS_G4X(dev_priv) && reduced_clock)
 7022			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 7023	}
 7024	switch (clock->p2) {
 7025	case 5:
 7026		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
 7027		break;
 7028	case 7:
 7029		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
 7030		break;
 7031	case 10:
 7032		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
 7033		break;
 7034	case 14:
 7035		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
 7036		break;
 7037	}
 7038	if (INTEL_GEN(dev_priv) >= 4)
 7039		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 7040
 7041	if (crtc_state->sdvo_tv_clock)
 7042		dpll |= PLL_REF_INPUT_TVCLKINBC;
 7043	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 7044		 intel_panel_use_ssc(dev_priv))
 7045		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 7046	else
 7047		dpll |= PLL_REF_INPUT_DREFCLK;
 7048
 7049	dpll |= DPLL_VCO_ENABLE;
 7050	crtc_state->dpll_hw_state.dpll = dpll;
 7051
 7052	if (INTEL_GEN(dev_priv) >= 4) {
 7053		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
 7054			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
 7055		crtc_state->dpll_hw_state.dpll_md = dpll_md;
 7056	}
 7057}
 7058
 7059static void i8xx_compute_dpll(struct intel_crtc *crtc,
 7060			      struct intel_crtc_state *crtc_state,
 7061			      struct dpll *reduced_clock)
 7062{
 7063	struct drm_device *dev = crtc->base.dev;
 7064	struct drm_i915_private *dev_priv = to_i915(dev);
 7065	u32 dpll;
 7066	struct dpll *clock = &crtc_state->dpll;
 7067
 7068	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
 7069
 7070	dpll = DPLL_VGA_MODE_DIS;
 7071
 7072	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 7073		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 7074	} else {
 7075		if (clock->p1 == 2)
 7076			dpll |= PLL_P1_DIVIDE_BY_TWO;
 7077		else
 7078			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 7079		if (clock->p2 == 4)
 7080			dpll |= PLL_P2_DIVIDE_BY_4;
 7081	}
 7082
 7083	if (!IS_I830(dev_priv) &&
 7084	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
 7085		dpll |= DPLL_DVO_2X_MODE;
 7086
 7087	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 7088	    intel_panel_use_ssc(dev_priv))
 7089		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 7090	else
 7091		dpll |= PLL_REF_INPUT_DREFCLK;
 7092
 7093	dpll |= DPLL_VCO_ENABLE;
 7094	crtc_state->dpll_hw_state.dpll = dpll;
 7095}
 7096
/*
 * Program the transcoder H/V timing registers from the crtc's adjusted
 * mode, including the interlace adjustments to vtotal/vblank_end and
 * the VSYNCSHIFT value.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a fixed half-line shift; otherwise derive it
		 * from hsync start relative to mid-line, wrapping negative
		 * values back into the line */
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* All timing registers pack (start - 1) in the low 16 bits and
	 * (end - 1) in the high 16 bits */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
 7157
 7158static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
 7159{
 7160	struct drm_device *dev = intel_crtc->base.dev;
 7161	struct drm_i915_private *dev_priv = to_i915(dev);
 7162	enum pipe pipe = intel_crtc->pipe;
 7163
 7164	/* pipesrc controls the size that is scaled from, which should
 7165	 * always be the user's requested size.
 7166	 */
 7167	I915_WRITE(PIPESRC(pipe),
 7168		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
 7169		   (intel_crtc->config->pipe_src_h - 1));
 7170}
 7171
 7172static void intel_get_pipe_timings(struct intel_crtc *crtc,
 7173				   struct intel_crtc_state *pipe_config)
 7174{
 7175	struct drm_device *dev = crtc->base.dev;
 7176	struct drm_i915_private *dev_priv = to_i915(dev);
 7177	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 7178	uint32_t tmp;
 7179
 7180	tmp = I915_READ(HTOTAL(cpu_transcoder));
 7181	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
 7182	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
 7183	tmp = I915_READ(HBLANK(cpu_transcoder));
 7184	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
 7185	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
 7186	tmp = I915_READ(HSYNC(cpu_transcoder));
 7187	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
 7188	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
 7189
 7190	tmp = I915_READ(VTOTAL(cpu_transcoder));
 7191	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
 7192	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
 7193	tmp = I915_READ(VBLANK(cpu_transcoder));
 7194	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
 7195	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
 7196	tmp = I915_READ(VSYNC(cpu_transcoder));
 7197	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
 7198	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
 7199
 7200	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
 7201		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
 7202		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
 7203		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
 7204	}
 7205}
 7206
 7207static void intel_get_pipe_src_size(struct intel_crtc *crtc,
 7208				    struct intel_crtc_state *pipe_config)
 7209{
 7210	struct drm_device *dev = crtc->base.dev;
 7211	struct drm_i915_private *dev_priv = to_i915(dev);
 7212	u32 tmp;
 7213
 7214	tmp = I915_READ(PIPESRC(crtc->pipe));
 7215	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
 7216	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
 7217
 7218	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
 7219	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
 7220}
 7221
 7222void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 7223				 struct intel_crtc_state *pipe_config)
 7224{
 7225	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
 7226	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
 7227	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
 7228	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
 7229
 7230	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
 7231	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
 7232	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
 7233	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
 7234
 7235	mode->flags = pipe_config->base.adjusted_mode.flags;
 7236	mode->type = DRM_MODE_TYPE_DRIVER;
 7237
 7238	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
 7239
 7240	mode->hsync = drm_mode_hsync(mode);
 7241	mode->vrefresh = drm_mode_vrefresh(mode);
 7242	drm_mode_set_name(mode);
 7243}
 7244
/*
 * Build and write the PIPECONF value for @intel_crtc from its config:
 * double-wide mode, bpc/dither controls (g4x+), interlace mode and, on
 * VLV/CHV, the limited color range select.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	/* Interlace: pre-gen4 and SDVO need the field indication variant */
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
 7299
 7300static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
 7301				   struct intel_crtc_state *crtc_state)
 7302{
 7303	struct drm_device *dev = crtc->base.dev;
 7304	struct drm_i915_private *dev_priv = to_i915(dev);
 7305	const struct intel_limit *limit;
 7306	int refclk = 48000;
 7307
 7308	memset(&crtc_state->dpll_hw_state, 0,
 7309	       sizeof(crtc_state->dpll_hw_state));
 7310
 7311	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 7312		if (intel_panel_use_ssc(dev_priv)) {
 7313			refclk = dev_priv->vbt.lvds_ssc_freq;
 7314			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
 7315		}
 7316
 7317		limit = &intel_limits_i8xx_lvds;
 7318	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
 7319		limit = &intel_limits_i8xx_dvo;
 7320	} else {
 7321		limit = &intel_limits_i8xx_dac;
 7322	}
 7323
 7324	if (!crtc_state->clock_set &&
 7325	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 7326				 refclk, NULL, &crtc_state->dpll)) {
 7327		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 7328		return -EINVAL;
 7329	}
 7330
 7331	i8xx_compute_dpll(crtc, crtc_state, NULL);
 7332
 7333	return 0;
 7334}
 7335
 7336static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
 7337				  struct intel_crtc_state *crtc_state)
 7338{
 7339	struct drm_device *dev = crtc->base.dev;
 7340	struct drm_i915_private *dev_priv = to_i915(dev);
 7341	const struct intel_limit *limit;
 7342	int refclk = 96000;
 7343
 7344	memset(&crtc_state->dpll_hw_state, 0,
 7345	       sizeof(crtc_state->dpll_hw_state));
 7346
 7347	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 7348		if (intel_panel_use_ssc(dev_priv)) {
 7349			refclk = dev_priv->vbt.lvds_ssc_freq;
 7350			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
 7351		}
 7352
 7353		if (intel_is_dual_link_lvds(dev))
 7354			limit = &intel_limits_g4x_dual_channel_lvds;
 7355		else
 7356			limit = &intel_limits_g4x_single_channel_lvds;
 7357	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
 7358		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
 7359		limit = &intel_limits_g4x_hdmi;
 7360	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
 7361		limit = &intel_limits_g4x_sdvo;
 7362	} else {
 7363		/* The option is for other outputs */
 7364		limit = &intel_limits_i9xx_sdvo;
 7365	}
 7366
 7367	if (!crtc_state->clock_set &&
 7368	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 7369				refclk, NULL, &crtc_state->dpll)) {
 7370		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 7371		return -EINVAL;
 7372	}
 7373
 7374	i9xx_compute_dpll(crtc, crtc_state, NULL);
 7375
 7376	return 0;
 7377}
 7378
 7379static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
 7380				  struct intel_crtc_state *crtc_state)
 7381{
 7382	struct drm_device *dev = crtc->base.dev;
 7383	struct drm_i915_private *dev_priv = to_i915(dev);
 7384	const struct intel_limit *limit;
 7385	int refclk = 96000;
 7386
 7387	memset(&crtc_state->dpll_hw_state, 0,
 7388	       sizeof(crtc_state->dpll_hw_state));
 7389
 7390	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 7391		if (intel_panel_use_ssc(dev_priv)) {
 7392			refclk = dev_priv->vbt.lvds_ssc_freq;
 7393			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
 7394		}
 7395
 7396		limit = &intel_limits_pineview_lvds;
 7397	} else {
 7398		limit = &intel_limits_pineview_sdvo;
 7399	}
 7400
 7401	if (!crtc_state->clock_set &&
 7402	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 7403				refclk, NULL, &crtc_state->dpll)) {
 7404		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 7405		return -EINVAL;
 7406	}
 7407
 7408	i9xx_compute_dpll(crtc, crtc_state, NULL);
 7409
 7410	return 0;
 7411}
 7412
 7413static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 7414				   struct intel_crtc_state *crtc_state)
 7415{
 7416	struct drm_device *dev = crtc->base.dev;
 7417	struct drm_i915_private *dev_priv = to_i915(dev);
 7418	const struct intel_limit *limit;
 7419	int refclk = 96000;
 7420
 7421	memset(&crtc_state->dpll_hw_state, 0,
 7422	       sizeof(crtc_state->dpll_hw_state));
 7423
 7424	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 7425		if (intel_panel_use_ssc(dev_priv)) {
 7426			refclk = dev_priv->vbt.lvds_ssc_freq;
 7427			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
 7428		}
 7429
 7430		limit = &intel_limits_i9xx_lvds;
 7431	} else {
 7432		limit = &intel_limits_i9xx_sdvo;
 7433	}
 7434
 7435	if (!crtc_state->clock_set &&
 7436	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 7437				 refclk, NULL, &crtc_state->dpll)) {
 7438		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 7439		return -EINVAL;
 7440	}
 7441
 7442	i9xx_compute_dpll(crtc, crtc_state, NULL);
 7443
 7444	return 0;
 7445}
 7446
 7447static int chv_crtc_compute_clock(struct intel_crtc *crtc,
 7448				  struct intel_crtc_state *crtc_state)
 7449{
 7450	int refclk = 100000;
 7451	const struct intel_limit *limit = &intel_limits_chv;
 7452
 7453	memset(&crtc_state->dpll_hw_state, 0,
 7454	       sizeof(crtc_state->dpll_hw_state));
 7455
 7456	if (!crtc_state->clock_set &&
 7457	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 7458				refclk, NULL, &crtc_state->dpll)) {
 7459		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 7460		return -EINVAL;
 7461	}
 7462
 7463	chv_compute_dpll(crtc, crtc_state);
 7464
 7465	return 0;
 7466}
 7467
 7468static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
 7469				  struct intel_crtc_state *crtc_state)
 7470{
 7471	int refclk = 100000;
 7472	const struct intel_limit *limit = &intel_limits_vlv;
 7473
 7474	memset(&crtc_state->dpll_hw_state, 0,
 7475	       sizeof(crtc_state->dpll_hw_state));
 7476
 7477	if (!crtc_state->clock_set &&
 7478	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 7479				refclk, NULL, &crtc_state->dpll)) {
 7480		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 7481		return -EINVAL;
 7482	}
 7483
 7484	vlv_compute_dpll(crtc, crtc_state);
 7485
 7486	return 0;
 7487}
 7488
 7489static void i9xx_get_pfit_config(struct intel_crtc *crtc,
 7490				 struct intel_crtc_state *pipe_config)
 7491{
 7492	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 7493	uint32_t tmp;
 7494
 7495	if (INTEL_GEN(dev_priv) <= 3 &&
 7496	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
 7497		return;
 7498
 7499	tmp = I915_READ(PFIT_CONTROL);
 7500	if (!(tmp & PFIT_ENABLE))
 7501		return;
 7502
 7503	/* Check whether the pfit is attached to our pipe. */
 7504	if (INTEL_GEN(dev_priv) < 4) {
 7505		if (crtc->pipe != PIPE_B)
 7506			return;
 7507	} else {
 7508		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
 7509			return;
 7510	}
 7511
 7512	pipe_config->gmch_pfit.control = tmp;
 7513	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
 7514}
 7515
/*
 * Read back the VLV DPLL dividers over DPIO sideband and compute the
 * resulting port clock for state readout.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000; /* 100 MHz reference, in kHz units */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack all divider fields from the single PLL_DW3 dword. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
 7542
/*
 * Read out the active primary plane configuration (base address, pixel
 * format, size, stride, tiling) left behind by the BIOS/firmware, so the
 * boot framebuffer can be taken over. On success an intel_framebuffer is
 * allocated and handed to the caller via plane_config->fb; the caller
 * owns it from then on.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe = crtc->pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane isn't enabled. */
	if (!plane->get_hw_state(plane))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* X tiling is the only tiling readable from DSPCNTR, gen4+ only. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* The surface base/offset registers differ per generation. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		/* Pre-gen4 only has a single linear address register. */
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* The fb is assumed to cover the whole pipe source area. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
 7616
/*
 * Read back the CHV DPLL dividers over DPIO sideband and compute the
 * resulting port clock for state readout.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* 100 MHz reference, in kHz units */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * m2 is a fixed-point value: integer part from PLL_DW0, with 22
	 * fractional bits from PLL_DW2 when fractional divide is enabled.
	 */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
 7650
/*
 * Read out the full pipe configuration for gmch (pre-ILK/VLV/CHV)
 * platforms into @pipe_config. Returns true if the pipe is enabled and
 * the state was read out, false otherwise (pipe disabled or its power
 * domain off). Takes a power domain reference for the duration of the
 * readout and releases it on every exit path via the out label.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On these platforms pipe and transcoder always map 1:1. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only these platforms have a readable BPC field in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier readout location depends on the generation. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
 7764
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL): select
 * CK505 vs. the internal nonspread source, and enable/disable the SSC
 * source and the CPU (eDP) clock outputs based on which encoders are
 * present. The final register value is computed up front; if it differs
 * from the current one, sources are switched one step at a time with
 * 200 us settle delays between the writes.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* CK505 (and thus SSC availability) is a VBT option on IBX only. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC alive for any DPLL that still references it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The step-by-step writes must have converged on the precomputed state. */
	BUG_ON(val != final);
}
 7931
/*
 * Pulse the IOSFSB reset line to the FDI mPHY: assert the reset control
 * bit, wait (up to 100 us) for the status bit to reflect it, then
 * de-assert and wait for the status bit to clear again.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
 7952
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the SBI sideband. The magic
 * offsets and values come straight from the WaMPhyProgramming:hsw
 * workaround; each setting is applied to a mirrored 0x20xx/0x21xx
 * register pair (presumably one per mPHY channel — not documented here).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
 8027
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	uint32_t reg, tmp;

	/* Sanitize impossible parameter combinations rather than obeying them. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* SBI sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Un-gate the SSC while keeping the path in the alternate state. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable register differs between LP and non-LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
 8072
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* The buffer-enable register differs between LP and non-LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Put the path in the alternate state before gating the SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
 8098
/* Map a bend amount in steps (-50..+50, multiples of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values (low 16 bits) for each supported clock bend,
 * indexed via BEND_IDX. Consumed by lpt_bend_clkout_dp() below.
 */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
 8124
 8125/*
 8126 * Bend CLKOUT_DP
 8127 * steps -50 to 50 inclusive, in steps of 5
 8128 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 8129 * change in clock period = -(steps / 10) * 5.787 ps
 8130 */
 8131static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
 8132{
 8133	uint32_t tmp;
 8134	int idx = BEND_IDX(steps);
 8135
 8136	if (WARN_ON(steps % 5 != 0))
 8137		return;
 8138
 8139	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
 8140		return;
 8141
 8142	mutex_lock(&dev_priv->sb_lock);
 8143
 8144	if (steps % 10 != 0)
 8145		tmp = 0xAAAAAAAB;
 8146	else
 8147		tmp = 0x00000000;
 8148	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
 8149
 8150	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
 8151	tmp &= 0xffff0000;
 8152	tmp |= sscdivintphase[idx];
 8153	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
 8154
 8155	mutex_unlock(&dev_priv->sb_lock);
 8156}
 8157
 8158#undef BEND_IDX
 8159
 8160static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
 8161{
 8162	struct intel_encoder *encoder;
 8163	bool has_vga = false;
 8164
 8165	for_each_intel_encoder(&dev_priv->drm, encoder) {
 8166		switch (encoder->type) {
 8167		case INTEL_OUTPUT_ANALOG:
 8168			has_vga = true;
 8169			break;
 8170		default:
 8171			break;
 8172		}
 8173	}
 8174
 8175	if (has_vga) {
 8176		lpt_bend_clkout_dp(dev_priv, 0);
 8177		lpt_enable_clkout_dp(dev_priv, true, true);
 8178	} else {
 8179		lpt_disable_clkout_dp(dev_priv);
 8180	}
 8181}
 8182
 8183/*
 8184 * Initialize reference clocks when the driver loads
 8185 */
 8186void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
 8187{
 8188	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
 8189		ironlake_init_pch_refclk(dev_priv);
 8190	else if (HAS_PCH_LPT(dev_priv))
 8191		lpt_init_pch_refclk(dev_priv);
 8192}
 8193
 8194static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 8195{
 8196	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 8197	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 8198	int pipe = intel_crtc->pipe;
 8199	uint32_t val;
 8200
 8201	val = 0;
 8202
 8203	switch (intel_crtc->config->pipe_bpp) {
 8204	case 18:
 8205		val |= PIPECONF_6BPC;
 8206		break;
 8207	case 24:
 8208		val |= PIPECONF_8BPC;
 8209		break;
 8210	case 30:
 8211		val |= PIPECONF_10BPC;
 8212		break;
 8213	case 36:
 8214		val |= PIPECONF_12BPC;
 8215		break;
 8216	default:
 8217		/* Case prevented by intel_choose_pipe_bpp_dither. */
 8218		BUG();
 8219	}
 8220
 8221	if (intel_crtc->config->dither)
 8222		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 8223
 8224	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 8225		val |= PIPECONF_INTERLACED_ILK;
 8226	else
 8227		val |= PIPECONF_PROGRESSIVE;
 8228
 8229	if (intel_crtc->config->limited_color_range)
 8230		val |= PIPECONF_COLOR_RANGE_SELECT;
 8231
 8232	I915_WRITE(PIPECONF(pipe), val);
 8233	POSTING_READ(PIPECONF(pipe));
 8234}
 8235
 8236static void haswell_set_pipeconf(struct drm_crtc *crtc)
 8237{
 8238	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 8239	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 8240	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 8241	u32 val = 0;
 8242
 8243	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
 8244		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 8245
 8246	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 8247		val |= PIPECONF_INTERLACED_ILK;
 8248	else
 8249		val |= PIPECONF_PROGRESSIVE;
 8250
 8251	I915_WRITE(PIPECONF(cpu_transcoder), val);
 8252	POSTING_READ(PIPECONF(cpu_transcoder));
 8253}
 8254
 8255static void haswell_set_pipemisc(struct drm_crtc *crtc)
 8256{
 8257	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 8258	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 8259	struct intel_crtc_state *config = intel_crtc->config;
 8260
 8261	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
 8262		u32 val = 0;
 8263
 8264		switch (intel_crtc->config->pipe_bpp) {
 8265		case 18:
 8266			val |= PIPEMISC_DITHER_6_BPC;
 8267			break;
 8268		case 24:
 8269			val |= PIPEMISC_DITHER_8_BPC;
 8270			break;
 8271		case 30:
 8272			val |= PIPEMISC_DITHER_10_BPC;
 8273			break;
 8274		case 36:
 8275			val |= PIPEMISC_DITHER_12_BPC;
 8276			break;
 8277		default:
 8278			/* Case prevented by pipe_config_set_bpp. */
 8279			BUG();
 8280		}
 8281
 8282		if (intel_crtc->config->dither)
 8283			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
 8284
 8285		if (config->ycbcr420) {
 8286			val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
 8287				PIPEMISC_YUV420_ENABLE |
 8288				PIPEMISC_YUV420_MODE_FULL_BLEND;
 8289		}
 8290
 8291		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
 8292	}
 8293}
 8294
 8295int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
 8296{
 8297	/*
 8298	 * Account for spread spectrum to avoid
 8299	 * oversubscribing the link. Max center spread
 8300	 * is 2.5%; use 5% for safety's sake.
 8301	 */
 8302	u32 bps = target_clock * bpp * 21 / 20;
 8303	return DIV_ROUND_UP(bps, link_bw * 8);
 8304}
 8305
 8306static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
 8307{
 8308	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
 8309}
 8310
/*
 * Assemble the Ironlake DPLL, FP0 and FP1 register values from the
 * divisors already computed in @crtc_state->dpll, storing the result in
 * crtc_state->dpll_hw_state. @reduced_clock optionally provides a second
 * divisor set for FP1 (downclocked mode); when NULL, FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it''s a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* LVDS panels may use the spread spectrum reference clock. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
 8412
 8413static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 8414				       struct intel_crtc_state *crtc_state)
 8415{
 8416	struct drm_device *dev = crtc->base.dev;
 8417	struct drm_i915_private *dev_priv = to_i915(dev);
 8418	const struct intel_limit *limit;
 8419	int refclk = 120000;
 8420
 8421	memset(&crtc_state->dpll_hw_state, 0,
 8422	       sizeof(crtc_state->dpll_hw_state));
 8423
 8424	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
 8425	if (!crtc_state->has_pch_encoder)
 8426		return 0;
 8427
 8428	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 8429		if (intel_panel_use_ssc(dev_priv)) {
 8430			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
 8431				      dev_priv->vbt.lvds_ssc_freq);
 8432			refclk = dev_priv->vbt.lvds_ssc_freq;
 8433		}
 8434
 8435		if (intel_is_dual_link_lvds(dev)) {
 8436			if (refclk == 100000)
 8437				limit = &intel_limits_ironlake_dual_lvds_100m;
 8438			else
 8439				limit = &intel_limits_ironlake_dual_lvds;
 8440		} else {
 8441			if (refclk == 100000)
 8442				limit = &intel_limits_ironlake_single_lvds_100m;
 8443			else
 8444				limit = &intel_limits_ironlake_single_lvds;
 8445		}
 8446	} else {
 8447		limit = &intel_limits_ironlake_dac;
 8448	}
 8449
 8450	if (!crtc_state->clock_set &&
 8451	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 8452				refclk, NULL, &crtc_state->dpll)) {
 8453		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 8454		return -EINVAL;
 8455	}
 8456
 8457	ironlake_compute_dpll(crtc, crtc_state, NULL);
 8458
 8459	if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
 8460		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
 8461				 pipe_name(crtc->pipe));
 8462		return -EINVAL;
 8463	}
 8464
 8465	return 0;
 8466}
 8467
/* Read out the PCH transcoder link M/N values for this crtc's pipe. */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	/* DATA_M1 packs both the data M value and the TU size. */
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
 8483
/*
 * Read out the CPU transcoder link M/N (and, when requested and
 * available, the second M2/N2 set used by DRRS) for @transcoder.
 * @m2_n2 may be NULL when the caller doesn't care about the second set.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		/* DATA_M1 packs both the data M value and the TU size. */
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-gen5 (g4x) uses per-pipe registers instead. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
 8524
 8525void intel_dp_get_m_n(struct intel_crtc *crtc,
 8526		      struct intel_crtc_state *pipe_config)
 8527{
 8528	if (pipe_config->has_pch_encoder)
 8529		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
 8530	else
 8531		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
 8532					     &pipe_config->dp_m_n,
 8533					     &pipe_config->dp_m2_n2);
 8534}
 8535
/* Read out the FDI link M/N values for @crtc from its CPU transcoder. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
 8542
 8543static void skylake_get_pfit_config(struct intel_crtc *crtc,
 8544				    struct intel_crtc_state *pipe_config)
 8545{
 8546	struct drm_device *dev = crtc->base.dev;
 8547	struct drm_i915_private *dev_priv = to_i915(dev);
 8548	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
 8549	uint32_t ps_ctrl = 0;
 8550	int id = -1;
 8551	int i;
 8552
 8553	/* find scaler attached to this pipe */
 8554	for (i = 0; i < crtc->num_scalers; i++) {
 8555		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
 8556		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
 8557			id = i;
 8558			pipe_config->pch_pfit.enabled = true;
 8559			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
 8560			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
 8561			break;
 8562		}
 8563	}
 8564
 8565	scaler_state->scaler_id = id;
 8566	if (id >= 0) {
 8567		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
 8568	} else {
 8569		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
 8570	}
 8571}
 8572
/*
 * Read out the current primary plane programming for @crtc into
 * @plane_config, allocating an intel_framebuffer that describes it.
 * NOTE(review): presumably used to inherit the firmware/BIOS-programmed
 * framebuffer at driver load — confirm with the caller.
 *
 * On any failure (alloc or unknown tiling) the function returns with
 * plane_config->fb left unset.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = crtc->pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to read out if the plane is disabled. */
	if (!plane->get_hw_state(plane))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* ICL moved/resized the pixel format field in PLANE_CTL. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* GLK/CNL+ keep the alpha mode in PLANE_COLOR_CTL instead. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hardware tiling bits into a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Surface base address is 4KiB aligned; mask off the low bits. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores width/height minus one. */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Hardware stride is in tile units; convert back to bytes. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
 8673
/*
 * Read out the panel fitter state for @crtc's pipe into
 * @pipe_config->pch_pfit.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev_priv)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
 8697
 8698static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 8699				     struct intel_crtc_state *pipe_config)
 8700{
 8701	struct drm_device *dev = crtc->base.dev;
 8702	struct drm_i915_private *dev_priv = to_i915(dev);
 8703	enum intel_display_power_domain power_domain;
 8704	uint32_t tmp;
 8705	bool ret;
 8706
 8707	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
 8708	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 8709		return false;
 8710
 8711	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 8712	pipe_config->shared_dpll = NULL;
 8713
 8714	ret = false;
 8715	tmp = I915_READ(PIPECONF(crtc->pipe));
 8716	if (!(tmp & PIPECONF_ENABLE))
 8717		goto out;
 8718
 8719	switch (tmp & PIPECONF_BPC_MASK) {
 8720	case PIPECONF_6BPC:
 8721		pipe_config->pipe_bpp = 18;
 8722		break;
 8723	case PIPECONF_8BPC:
 8724		pipe_config->pipe_bpp = 24;
 8725		break;
 8726	case PIPECONF_10BPC:
 8727		pipe_config->pipe_bpp = 30;
 8728		break;
 8729	case PIPECONF_12BPC:
 8730		pipe_config->pipe_bpp = 36;
 8731		break;
 8732	default:
 8733		break;
 8734	}
 8735
 8736	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
 8737		pipe_config->limited_color_range = true;
 8738
 8739	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
 8740		struct intel_shared_dpll *pll;
 8741		enum intel_dpll_id pll_id;
 8742
 8743		pipe_config->has_pch_encoder = true;
 8744
 8745		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
 8746		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
 8747					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
 8748
 8749		ironlake_get_fdi_m_n_config(crtc, pipe_config);
 8750
 8751		if (HAS_PCH_IBX(dev_priv)) {
 8752			/*
 8753			 * The pipe->pch transcoder and pch transcoder->pll
 8754			 * mapping is fixed.
 8755			 */
 8756			pll_id = (enum intel_dpll_id) crtc->pipe;
 8757		} else {
 8758			tmp = I915_READ(PCH_DPLL_SEL);
 8759			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
 8760				pll_id = DPLL_ID_PCH_PLL_B;
 8761			else
 8762				pll_id= DPLL_ID_PCH_PLL_A;
 8763		}
 8764
 8765		pipe_config->shared_dpll =
 8766			intel_get_shared_dpll_by_id(dev_priv, pll_id);
 8767		pll = pipe_config->shared_dpll;
 8768
 8769		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
 8770						 &pipe_config->dpll_hw_state));
 8771
 8772		tmp = pipe_config->dpll_hw_state.dpll;
 8773		pipe_config->pixel_multiplier =
 8774			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
 8775			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
 8776
 8777		ironlake_pch_clock_get(crtc, pipe_config);
 8778	} else {
 8779		pipe_config->pixel_multiplier = 1;
 8780	}
 8781
 8782	intel_get_pipe_timings(crtc, pipe_config);
 8783	intel_get_pipe_src_size(crtc, pipe_config);
 8784
 8785	ironlake_get_pfit_config(crtc, pipe_config);
 8786
 8787	ret = true;
 8788
 8789out:
 8790	intel_display_power_put(dev_priv, power_domain);
 8791
 8792	return ret;
 8793}
 8794
/*
 * Sanity-check that nothing which depends on LCPLL is still running:
 * all CRTCs inactive, the display power well off, SPLL/WRPLLs disabled,
 * panel power and backlights off, the utility pin and PCH GTC disabled,
 * and interrupts already disabled.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
 8829
 8830static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 8831{
 8832	if (IS_HASWELL(dev_priv))
 8833		return I915_READ(D_COMP_HSW);
 8834	else
 8835		return I915_READ(D_COMP_BDW);
 8836}
 8837
 8838static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 8839{
 8840	if (IS_HASWELL(dev_priv)) {
 8841		mutex_lock(&dev_priv->pcu_lock);
 8842		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
 8843					    val))
 8844			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
 8845		mutex_unlock(&dev_priv->pcu_lock);
 8846	} else {
 8847		I915_WRITE(D_COMP_BDW, val);
 8848		POSTING_READ(D_COMP_BDW);
 8849	}
 8850}
 8851
 8852/*
 8853 * This function implements pieces of two sequences from BSpec:
 8854 * - Sequence for display software to disable LCPLL
 8855 * - Sequence for display software to allow package C8+
 8856 * The steps implemented here are just the steps that actually touch the LCPLL
 8857 * register. Callers should take care of disabling all the display engine
 8858 * functions, doing the mode unset, fixing interrupts, etc.
 8859 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move the CD clock over to FCLK before stopping LCPLL. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait for the PLL lock bit to clear. */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the D_COMP compensation and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
 8903
 8904/*
 8905 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 8906 * source.
 8907 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if LCPLL is already locked, enabled and on LCPLL. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP compensation before re-enabling the PLL. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		/* Switch the CD clock back from FCLK to LCPLL. */
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
 8959
 8960/*
 8961 * Package states C8 and deeper are really deep PC states that can only be
 8962 * reached when all the devices on the system allow it, so even if the graphics
 8963 * device allows PC8+, it doesn't mean the system will actually get to these
 8964 * states. Our driver only allows PC8+ when going into runtime PM.
 8965 *
 8966 * The requirements for PC8+ are that all the outputs are disabled, the power
 8967 * well is disabled and most interrupts are disabled, and these are also
 8968 * requirements for runtime PM. When these conditions are met, we manually do
 8969 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 8971 * hang the machine.
 8972 *
 8973 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 8974 * the state of some registers, so when we come back from PC8+ we need to
 8975 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 8976 * need to take care of the registers kept by RC6. Notice that this happens even
 8977 * if we don't put the device in PCI D3 state (which is what currently happens
 8978 * because of the runtime PM support).
 8979 *
 8980 * For more, read "Display Sequences for Package C8" on the hardware
 8981 * documentation.
 8982 */
 8983void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 8984{
 8985	uint32_t val;
 8986
 8987	DRM_DEBUG_KMS("Enabling package C8+\n");
 8988
 8989	if (HAS_PCH_LPT_LP(dev_priv)) {
 8990		val = I915_READ(SOUTH_DSPCLK_GATE_D);
 8991		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
 8992		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
 8993	}
 8994
 8995	lpt_disable_clkout_dp(dev_priv);
 8996	hsw_disable_lcpll(dev_priv, true, true);
 8997}
 8998
 8999void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 9000{
 9001	uint32_t val;
 9002
 9003	DRM_DEBUG_KMS("Disabling package C8+\n");
 9004
 9005	hsw_restore_lcpll(dev_priv);
 9006	lpt_init_pch_refclk(dev_priv);
 9007
 9008	if (HAS_PCH_LPT_LP(dev_priv)) {
 9009		val = I915_READ(SOUTH_DSPCLK_GATE_D);
 9010		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
 9011		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
 9012	}
 9013}
 9014
 9015static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 9016				      struct intel_crtc_state *crtc_state)
 9017{
 9018	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
 9019		struct intel_encoder *encoder =
 9020			intel_ddi_get_crtc_new_encoder(crtc_state);
 9021
 9022		if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
 9023			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
 9024					 pipe_name(crtc->pipe));
 9025			return -EINVAL;
 9026		}
 9027	}
 9028
 9029	return 0;
 9030}
 9031
 9032static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
 9033				   enum port port,
 9034				   struct intel_crtc_state *pipe_config)
 9035{
 9036	enum intel_dpll_id id;
 9037	u32 temp;
 9038
 9039	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
 9040	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
 9041
 9042	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
 9043		return;
 9044
 9045	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 9046}
 9047
 9048static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
 9049				enum port port,
 9050				struct intel_crtc_state *pipe_config)
 9051{
 9052	enum intel_dpll_id id;
 9053
 9054	switch (port) {
 9055	case PORT_A:
 9056		id = DPLL_ID_SKL_DPLL0;
 9057		break;
 9058	case PORT_B:
 9059		id = DPLL_ID_SKL_DPLL1;
 9060		break;
 9061	case PORT_C:
 9062		id = DPLL_ID_SKL_DPLL2;
 9063		break;
 9064	default:
 9065		DRM_ERROR("Incorrect port type\n");
 9066		return;
 9067	}
 9068
 9069	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 9070}
 9071
 9072static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
 9073				enum port port,
 9074				struct intel_crtc_state *pipe_config)
 9075{
 9076	enum intel_dpll_id id;
 9077	u32 temp;
 9078
 9079	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
 9080	id = temp >> (port * 3 + 1);
 9081
 9082	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
 9083		return;
 9084
 9085	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 9086}
 9087
/*
 * Translate the PORT_CLK_SEL selection for @port into a DPLL id and
 * record it in @pipe_config->shared_dpll. PORT_CLK_SEL_NONE leaves
 * shared_dpll untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
 9123
/*
 * Determine which CPU transcoder drives @crtc (handling the movable eDP
 * transcoder) and whether it is enabled. Any power domain reference
 * taken is recorded in @power_domain_mask for the caller to release.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
 9174
/*
 * Check whether one of the BXT DSI transcoders is driving @crtc's pipe.
 * Returns true (and sets pipe_config->cpu_transcoder) if so. Any power
 * domain references taken are recorded in @power_domain_mask for the
 * caller to release.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports wired to a different pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
 9222
/*
 * Read out which DDI port and DPLL are driving @crtc's transcoder, plus
 * the FDI configuration in the HSW DDI-E/PCH case.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
 9266
/*
 * Read out the current hardware state of @crtc on HSW+ into @pipe_config.
 * Returns true if the pipe is active. Power domain references are
 * collected in power_domain_mask during the readout and all dropped
 * again before returning.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* A BXT DSI transcoder and a regular transcoder can't both be active. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
		bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;

		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
			bool blend_mode_420 = tmp &
					      PIPEMISC_YUV420_MODE_FULL_BLEND;

			/* The three 4:2:0 bits should agree with each other. */
			pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
			if (pipe_config->ycbcr420 != clrspace_yuv ||
			    pipe_config->ycbcr420 != blend_mode_420)
				DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
		} else if (clrspace_yuv) {
			DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
		}
	}

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		/* PIPE_MULT stores the pixel multiplier minus one. */
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
 9358
 9359static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
 9360{
 9361	struct drm_i915_private *dev_priv =
 9362		to_i915(plane_state->base.plane->dev);
 9363	const struct drm_framebuffer *fb = plane_state->base.fb;
 9364	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 9365	u32 base;
 9366
 9367	if (INTEL_INFO(dev_priv)->cursor_needs_physical)
 9368		base = obj->phys_handle->busaddr;
 9369	else
 9370		base = intel_plane_ggtt_offset(plane_state);
 9371
 9372	base += plane_state->main.offset;
 9373
 9374	/* ILK+ do this automagically */
 9375	if (HAS_GMCH_DISPLAY(dev_priv) &&
 9376	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
 9377		base += (plane_state->base.crtc_h *
 9378			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
 9379
 9380	return base;
 9381}
 9382
 9383static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
 9384{
 9385	int x = plane_state->base.crtc_x;
 9386	int y = plane_state->base.crtc_y;
 9387	u32 pos = 0;
 9388
 9389	if (x < 0) {
 9390		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
 9391		x = -x;
 9392	}
 9393	pos |= x << CURSOR_X_SHIFT;
 9394
 9395	if (y < 0) {
 9396		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
 9397		y = -y;
 9398	}
 9399	pos |= y << CURSOR_Y_SHIFT;
 9400
 9401	return pos;
 9402}
 9403
 9404static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
 9405{
 9406	const struct drm_mode_config *config =
 9407		&plane_state->base.plane->dev->mode_config;
 9408	int width = plane_state->base.crtc_w;
 9409	int height = plane_state->base.crtc_h;
 9410
 9411	return width > 0 && width <= config->cursor_width &&
 9412		height > 0 && height <= config->cursor_height;
 9413}
 9414
/*
 * Platform-independent cursor plane checks: clip against the CRTC,
 * require a linear (untiled) framebuffer, and reject any panning since
 * the computed surface offset must land exactly on pixel (0, 0). Stores
 * the offset in plane_state->main.offset. Returns 0 or a negative errno.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* No fb means the cursor is being disabled; nothing more to check. */
	if (!fb)
		return 0;

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/* src coordinates are 16.16 fixed point. */
	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->main.offset = offset;

	return 0;
}
 9454
 9455static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
 9456			   const struct intel_plane_state *plane_state)
 9457{
 9458	const struct drm_framebuffer *fb = plane_state->base.fb;
 9459
 9460	return CURSOR_ENABLE |
 9461		CURSOR_GAMMA_ENABLE |
 9462		CURSOR_FORMAT_ARGB |
 9463		CURSOR_STRIDE(fb->pitches[0]);
 9464}
 9465
 9466static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
 9467{
 9468	int width = plane_state->base.crtc_w;
 9469
 9470	/*
 9471	 * 845g/865g are only limited by the width of their cursors,
 9472	 * the height is arbitrary up to the precision of the register.
 9473	 */
 9474	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
 9475}
 9476
/*
 * 845g/865g-specific cursor plane checks on top of the generic ones:
 * the platform's size and stride restrictions. Computes plane_state->ctl
 * on success. Returns 0 or a negative errno.
 */
static int i845_check_cursor(struct intel_plane *plane,
			     struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* Only these four power-of-two strides are accepted here. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
 9516
/*
 * Program the 845g/865g cursor registers from the given plane state.
 *
 * A NULL or invisible plane_state leaves all register values zero,
 * which disables the cursor. plane->cursor.* caches the last programmed
 * values so position-only moves take the cheap path.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		/* CURSIZE packs height in bits 12+ and width in the low bits */
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; a CURPOS write suffices. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 9561
/* Disable the 845g/865g cursor by programming it with empty state. */
static void i845_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i845_update_cursor(plane, NULL, NULL);
}
 9567
 9568static bool i845_cursor_get_hw_state(struct intel_plane *plane)
 9569{
 9570	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 9571	enum intel_display_power_domain power_domain;
 9572	bool ret;
 9573
 9574	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
 9575	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 9576		return false;
 9577
 9578	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
 9579
 9580	intel_display_power_put(dev_priv, power_domain);
 9581
 9582	return ret;
 9583}
 9584
/*
 * Compute the cursor control register value for i9xx+ cursors.
 *
 * Returns 0 (cursor disabled) if the width is not one of the supported
 * 64/128/256 modes; the size should already have been validated by
 * i9xx_cursor_size_ok() so that case is a driver bug (MISSING_CASE).
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	u32 cntl;

	cntl = MCURSOR_GAMMA_ENABLE;

	if (HAS_DDI(dev_priv))
		cntl |= CURSOR_PIPE_CSC_ENABLE;

	/* pipe select lives in the cursor control reg on older platforms */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);

	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= CURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= CURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= CURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
		cntl |= CURSOR_ROTATE_180;

	return cntl;
}
 9621
 9622static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
 9623{
 9624	struct drm_i915_private *dev_priv =
 9625		to_i915(plane_state->base.plane->dev);
 9626	int width = plane_state->base.crtc_w;
 9627	int height = plane_state->base.crtc_h;
 9628
 9629	if (!intel_cursor_size_ok(plane_state))
 9630		return false;
 9631
 9632	/* Cursor width is limited to a few power-of-two sizes */
 9633	switch (width) {
 9634	case 256:
 9635	case 128:
 9636	case 64:
 9637		break;
 9638	default:
 9639		return false;
 9640	}
 9641
 9642	/*
 9643	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
 9644	 * height from 8 lines up to the cursor width, when the
 9645	 * cursor is not rotated. Everything else requires square
 9646	 * cursors.
 9647	 */
 9648	if (HAS_CUR_FBC(dev_priv) &&
 9649	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
 9650		if (height < 8 || height > width)
 9651			return false;
 9652	} else {
 9653		if (height != width)
 9654			return false;
 9655	}
 9656
 9657	return true;
 9658}
 9659
/*
 * Validate a cursor plane state for i9xx+ hardware and precompute the
 * control register value into plane_state->ctl.
 */
static int i9xx_check_cursor(struct intel_plane *plane,
			     struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* fb pitch must exactly match the cursor width (packed fb) */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
 9711
/*
 * Program the i9xx+ cursor registers from the given plane state.
 *
 * A NULL or invisible plane_state leaves all register values zero,
 * disabling the cursor. plane->cursor.* caches the last programmed
 * values so moves without other changes take the short path.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl;

		/* non-square cursor: program the reduced height via CUR_FBC_CTL */
		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always start the full update
	 * with a CURCNTR write.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additonally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * CURCNTR and CUR_FBC_CTL are always
	 * armed by the CURBASE write only.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	POSTING_READ_FW(CURBASE(pipe));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 9772
/* Disable the i9xx+ cursor by programming it with empty state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i9xx_update_cursor(plane, NULL, NULL);
}
 9778
 9779static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
 9780{
 9781	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 9782	enum intel_display_power_domain power_domain;
 9783	enum pipe pipe = plane->pipe;
 9784	bool ret;
 9785
 9786	/*
 9787	 * Not 100% correct for planes that can move between pipes,
 9788	 * but that's only the case for gen2-3 which don't have any
 9789	 * display power wells.
 9790	 */
 9791	power_domain = POWER_DOMAIN_PIPE(pipe);
 9792	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 9793		return false;
 9794
 9795	ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
 9796
 9797	intel_display_power_put(dev_priv, power_domain);
 9798
 9799	return ret;
 9800}
 9801
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
 9807
 9808struct drm_framebuffer *
 9809intel_framebuffer_create(struct drm_i915_gem_object *obj,
 9810			 struct drm_mode_fb_cmd2 *mode_cmd)
 9811{
 9812	struct intel_framebuffer *intel_fb;
 9813	int ret;
 9814
 9815	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 9816	if (!intel_fb)
 9817		return ERR_PTR(-ENOMEM);
 9818
 9819	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
 9820	if (ret)
 9821		goto err;
 9822
 9823	return &intel_fb->base;
 9824
 9825err:
 9826	kfree(intel_fb);
 9827	return ERR_PTR(ret);
 9828}
 9829
/*
 * Add all planes on @crtc to the atomic @state and detach them
 * (NULL crtc and NULL fb), so a subsequent commit turns them off.
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		/* only touch planes currently assigned to this crtc */
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
 9854
/*
 * intel_get_load_detect_pipe - light up a pipe for load-based connector probing
 * @connector: connector to probe
 * @mode: mode to program, or NULL for the built-in 640x480 default
 * @old: storage for the restore state consumed later by
 *       intel_release_load_detect_pipe()
 * @ctx: modeset acquire context held by the caller
 *
 * Picks a CRTC (preferring one already bound to the connector, otherwise
 * the first unused one the encoder can drive), commits a minimal modeset
 * with all planes disabled, and stashes a duplicate of the previous state
 * in @old->restore_state for later restoration.
 *
 * Return: true on success, false on failure, or -EDEADLK when the caller
 * must back off and retry the locking (note the int return type carrying
 * both bool-like and errno-like values).
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc state so it can be restored. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK is propagated so the caller can back off and retry */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
10010
/*
 * intel_release_load_detect_pipe - undo a load-detect modeset
 * @connector: connector that was probed
 * @old: state saved by intel_get_load_detect_pipe()
 * @ctx: modeset acquire context held by the caller
 *
 * Commits the duplicated pre-probe state saved in @old->restore_state
 * and drops the reference on it. A NULL restore_state (probe never
 * succeeded) makes this a no-op.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
10033
10034static int i9xx_pll_refclk(struct drm_device *dev,
10035			   const struct intel_crtc_state *pipe_config)
10036{
10037	struct drm_i915_private *dev_priv = to_i915(dev);
10038	u32 dpll = pipe_config->dpll_hw_state.dpll;
10039
10040	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10041		return dev_priv->vbt.lvds_ssc_freq;
10042	else if (HAS_PCH_SPLIT(dev_priv))
10043		return 120000;
10044	else if (!IS_GEN2(dev_priv))
10045		return 96000;
10046	else
10047		return 48000;
10048}
10049
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP0/FP1 divisor register the DPLL is actually using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M/N divisors; Pineview uses different field layouts. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev_priv)) {
		/* P1 is stored as a one-hot bitfield; ffs() recovers the value. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: i830 has no LVDS register, so treat it as non-LVDS. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10139
10140int intel_dotclock_calculate(int link_freq,
10141			     const struct intel_link_m_n *m_n)
10142{
10143	/*
10144	 * The calculation for the data clock is:
10145	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10146	 * But we want to avoid losing precison if possible, so:
10147	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10148	 *
10149	 * and the link clock is simpler:
10150	 * link_clock = (m * link_clock) / n
10151	 */
10152
10153	if (!m_n->link_n)
10154		return 0;
10155
10156	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
10157}
10158
/*
 * Read out the PCH-side clock state: port_clock from the DPLL, and a
 * dotclock estimate derived from the FDI M/N configuration.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
10176
10177/* Returns the currently programmed mode of the given encoder. */
10178struct drm_display_mode *
10179intel_encoder_current_mode(struct intel_encoder *encoder)
10180{
10181	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10182	struct intel_crtc_state *crtc_state;
10183	struct drm_display_mode *mode;
10184	struct intel_crtc *crtc;
10185	enum pipe pipe;
10186
10187	if (!encoder->get_hw_state(encoder, &pipe))
10188		return NULL;
10189
10190	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10191
10192	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10193	if (!mode)
10194		return NULL;
10195
10196	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10197	if (!crtc_state) {
10198		kfree(mode);
10199		return NULL;
10200	}
10201
10202	crtc_state->base.crtc = &crtc->base;
10203
10204	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10205		kfree(crtc_state);
10206		kfree(mode);
10207		return NULL;
10208	}
10209
10210	encoder->get_config(encoder, crtc_state);
10211
10212	intel_mode_from_pipe_config(mode, crtc_state);
10213
10214	kfree(crtc_state);
10215
10216	return mode;
10217}
10218
/* drm_crtc_funcs .destroy hook: tear down and free the intel_crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
10226
10227/**
10228 * intel_wm_need_update - Check whether watermarks need updating
10229 * @plane: drm plane
10230 * @state: new plane state
10231 *
10232 * Check current plane state versus the new one to determine whether
10233 * watermarks need to be recalculated.
10234 *
10235 * Returns true or false.
10236 */
10237static bool intel_wm_need_update(struct drm_plane *plane,
10238				 struct drm_plane_state *state)
10239{
10240	struct intel_plane_state *new = to_intel_plane_state(state);
10241	struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10242
10243	/* Update watermarks on tiling or size changes. */
10244	if (new->base.visible != cur->base.visible)
10245		return true;
10246
10247	if (!cur->base.fb || !new->base.fb)
10248		return false;
10249
10250	if (cur->base.fb->modifier != new->base.fb->modifier ||
10251	    cur->base.rotation != new->base.rotation ||
10252	    drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10253	    drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10254	    drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10255	    drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10256		return true;
10257
10258	return false;
10259}
10260
10261static bool needs_scaling(const struct intel_plane_state *state)
10262{
10263	int src_w = drm_rect_width(&state->base.src) >> 16;
10264	int src_h = drm_rect_height(&state->base.src) >> 16;
10265	int dst_w = drm_rect_width(&state->base.dst);
10266	int dst_h = drm_rect_height(&state->base.dst);
10267
10268	return (src_w != dst_w || src_h != dst_h);
10269}
10270
/*
 * intel_plane_atomic_calc_changes - derive per-pipe update flags for a plane
 * @old_crtc_state: previous crtc state
 * @crtc_state: new crtc state (flags are accumulated into it)
 * @old_plane_state: previous plane state
 * @plane_state: new plane state
 *
 * Works out what a plane state change implies for the pipe: watermark
 * pre/post updates, cxsr disabling around plane on/off, frontbuffer bits,
 * and the IVB sprite-scaling LP watermark workaround. Also runs the SKL+
 * plane scaler check. Returns 0 on success or a negative error code.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct drm_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->base.active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	/* Plane stays invisible: nothing to update. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	/* A modeset counts as both turning off and back on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(&plane->base, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
10375
10376static bool encoders_cloneable(const struct intel_encoder *a,
10377			       const struct intel_encoder *b)
10378{
10379	/* masks could be asymmetric, so check both ways */
10380	return a == b || (a->cloneable & (1 << b->type) &&
10381			  b->cloneable & (1 << a->type));
10382}
10383
/*
 * Check that @encoder can be cloned with every other encoder that the
 * atomic @state routes to @crtc. Returns false on the first encoder
 * pair that cannot be cloned together.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		/* only consider connectors being routed to this crtc */
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}
10405
/*
 * drm_crtc_helper_funcs .atomic_check hook.
 *
 * Computes clocks, color management flags, watermarks (target and
 * intermediate), SKL+ scalers, and HSW+ IPS state for the new crtc
 * state. Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* skip_intermediate_wm: reuse the optimal watermarks instead */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
10490
/*
 * CRTC helper vtable: hooks invoked by the DRM atomic helpers while
 * checking and committing crtc state.
 */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
10496
10497static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
10498{
10499	struct intel_connector *connector;
10500	struct drm_connector_list_iter conn_iter;
10501
10502	drm_connector_list_iter_begin(dev, &conn_iter);
10503	for_each_intel_connector_iter(connector, &conn_iter) {
10504		if (connector->base.state->crtc)
10505			drm_connector_unreference(&connector->base);
10506
10507		if (connector->base.encoder) {
10508			connector->base.state->best_encoder =
10509				connector->base.encoder;
10510			connector->base.state->crtc =
10511				connector->base.encoder->crtc;
10512
10513			drm_connector_reference(&connector->base);
10514		} else {
10515			connector->base.state->best_encoder = NULL;
10516			connector->base.state->crtc = NULL;
10517		}
10518	}
10519	drm_connector_list_iter_end(&conn_iter);
10520}
10521
10522static void
10523connected_sink_compute_bpp(struct intel_connector *connector,
10524			   struct intel_crtc_state *pipe_config)
10525{
10526	const struct drm_display_info *info = &connector->base.display_info;
10527	int bpp = pipe_config->pipe_bpp;
10528
10529	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
10530		      connector->base.base.id,
10531		      connector->base.name);
10532
10533	/* Don't use an invalid EDID bpc value */
10534	if (info->bpc != 0 && info->bpc * 3 < bpp) {
10535		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
10536			      bpp, info->bpc * 3);
10537		pipe_config->pipe_bpp = info->bpc * 3;
10538	}
10539
10540	/* Clamp bpp to 8 on screens without EDID 1.4 */
10541	if (info->bpc == 0 && bpp > 24) {
10542		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
10543			      bpp);
10544		pipe_config->pipe_bpp = 24;
10545	}
10546}
10547
10548static int
10549compute_baseline_pipe_bpp(struct intel_crtc *crtc,
10550			  struct intel_crtc_state *pipe_config)
10551{
10552	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10553	struct drm_atomic_state *state;
10554	struct drm_connector *connector;
10555	struct drm_connector_state *connector_state;
10556	int bpp, i;
10557
10558	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
10559	    IS_CHERRYVIEW(dev_priv)))
10560		bpp = 10*3;
10561	else if (INTEL_GEN(dev_priv) >= 5)
10562		bpp = 12*3;
10563	else
10564		bpp = 8*3;
10565
10566
10567	pipe_config->pipe_bpp = bpp;
10568
10569	state = pipe_config->base.state;
10570
10571	/* Clamp display bpp to EDID value */
10572	for_each_new_connector_in_state(state, connector, connector_state, i) {
10573		if (connector_state->crtc != &crtc->base)
10574			continue;
10575
10576		connected_sink_compute_bpp(to_intel_connector(connector),
10577					   pipe_config);
10578	}
10579
10580	return bpp;
10581}
10582
/* Dump the hardware (crtc_*) timing fields of @mode to the debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
			"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
10593
10594static inline void
10595intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
10596		      unsigned int lane_count, struct intel_link_m_n *m_n)
10597{
10598	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
10599		      id, lane_count,
10600		      m_n->gmch_m, m_n->gmch_n,
10601		      m_n->link_m, m_n->link_n, m_n->tu);
10602}
10603
/*
 * Human-readable names for the INTEL_OUTPUT_* enum values. OUTPUT_TYPE()
 * expands to a designated initializer, so each string lands at the index
 * of its enum value regardless of listing order.
 */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
10622
10623static void snprintf_output_types(char *buf, size_t len,
10624				  unsigned int output_types)
10625{
10626	char *str = buf;
10627	int i;
10628
10629	str[0] = '\0';
10630
10631	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
10632		int r;
10633
10634		if ((output_types & BIT(i)) == 0)
10635			continue;
10636
10637		r = snprintf(str, len, "%s%s",
10638			     str != buf ? "," : "", output_type_str[i]);
10639		if (r >= len)
10640			break;
10641		str += r;
10642		len -= r;
10643
10644		output_types &= ~BIT(i);
10645	}
10646
10647	WARN_ON_ONCE(output_types != 0);
10648}
10649
/*
 * Dump the software state of @crtc — the full pipe config plus every
 * plane on its pipe — to the kernel debug log, prefixed with @context.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	/* FDI M/N values are only meaningful with a PCH encoder. */
	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (pipe_config->ycbcr420)
		DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");

	/* DP link M/N (and the second set used for DRRS, if enabled). */
	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
		              pipe_config->scaler_state.scaler_id);

	/* Panel fitter state lives in different registers on GMCH vs PCH. */
	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
		              enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* Walk all planes, skipping those belonging to other pipes. */
	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
10756
/*
 * Verify that no digital port is claimed by more than one encoder in
 * the new state, and that MST and SST/HDMI usage is not mixed on the
 * same port. Returns false when a conflict is found.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the state tracked in @state, else the current one. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* fall through: DDI ports are accounted like DP/HDMI/eDP */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
10819
/*
 * Zero the i915-specific extension of @crtc_state while preserving the
 * drm core state (crtc_state->base) and a handful of fields that must
 * survive a full pipe config recomputation.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru, ips_force_disable;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Stash the fields that must survive the memset below. */
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	ips_force_disable = crtc_state->ips_force_disable;
	/* On these platforms the watermark state is preserved as well. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	/* base must be the first member for &base + 1 to address the rest. */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	/* Restore the stashed fields. */
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	crtc_state->ips_force_disable = ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}
10859
/*
 * Compute a complete pipe configuration for @crtc from the connectors
 * and encoders bound to it in the atomic state. Returns 0 on success
 * or a negative error code when an encoder or the crtc rejects the
 * configuration. May loop back once via encoder_retry when the crtc
 * fixup returns RETRY (bandwidth constrained).
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;	/* allows exactly one RETRY round trip */

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	/* First pass: reject invalid cloning and collect output types. */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* RETRY means the crtc asked for one more pass with relaxed state. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
10990
/*
 * Compare two clocks (in kHz) with a small tolerance: equal clocks
 * always match, a zero clock only matches another zero, and otherwise
 * the pair matches while the difference stays below ~5% of their sum.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, diff;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	diff = abs(clock1 - clock2);

	return (diff + sum) * 100 / sum < 105;
}
11008
11009static bool
11010intel_compare_m_n(unsigned int m, unsigned int n,
11011		  unsigned int m2, unsigned int n2,
11012		  bool exact)
11013{
11014	if (m == m2 && n == n2)
11015		return true;
11016
11017	if (exact || !m || !n || !m2 || !n2)
11018		return false;
11019
11020	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11021
11022	if (n > n2) {
11023		while (n > n2) {
11024			m2 <<= 1;
11025			n2 <<= 1;
11026		}
11027	} else if (n < n2) {
11028		while (n < n2) {
11029			m <<= 1;
11030			n <<= 1;
11031		}
11032	}
11033
11034	if (n != n2)
11035		return false;
11036
11037	return intel_fuzzy_clock_check(m, m2);
11038}
11039
11040static bool
11041intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11042		       struct intel_link_m_n *m2_n2,
11043		       bool adjust)
11044{
11045	if (m_n->tu == m2_n2->tu &&
11046	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11047			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11048	    intel_compare_m_n(m_n->link_m, m_n->link_n,
11049			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
11050		if (adjust)
11051			*m2_n2 = *m_n;
11052
11053		return true;
11054	}
11055
11056	return false;
11057}
11058
11059static void __printf(3, 4)
11060pipe_config_err(bool adjust, const char *name, const char *format, ...)
11061{
11062	struct va_format vaf;
11063	va_list args;
11064
11065	va_start(args, format);
11066	vaf.fmt = format;
11067	vaf.va = &args;
11068
11069	if (adjust)
11070		drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
11071	else
11072		drm_err("mismatch in %s %pV", name, &vaf);
11073
11074	va_end(args);
11075}
11076
/*
 * Compare two crtc states field by field. With @adjust set, mismatches
 * are only logged at debug level and some fields (link M/N values) are
 * fixed up in @pipe_config; otherwise mismatches are reported as
 * errors. Returns true when the states match.
 */
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;
	/* Laxer checks apply when fixing up state inherited from the BIOS. */
	bool fixup_inherited = adjust &&
		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);

#define PIPE_CONF_CHECK_X(name)	\
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_I(name)	\
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_BOOL(name)	\
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %s, found %s)\n", \
			  yesno(current_config->name), \
			  yesno(pipe_config->name)); \
		ret = false; \
	}

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_err(adjust, __stringify(name), \
			  "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
			  yesno(current_config->name), \
			  yesno(pipe_config->name)); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_P(name)	\
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %p, found %p)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_M_N(name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "or tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  current_config->alt_name.tu, \
			  current_config->alt_name.gmch_m, \
			  current_config->alt_name.gmch_n, \
			  current_config->alt_name.link_m, \
			  current_config->alt_name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(%x) (expected %i, found %i)\n", \
			  (mask), \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* Horizontal and vertical hardware timings. */
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
	PIPE_CONF_CHECK_BOOL(ycbcr420);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	/* Shared DPLL selection and per-platform PLL register state. */
	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return ret;
}
11341
11342static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11343					   const struct intel_crtc_state *pipe_config)
11344{
11345	if (pipe_config->has_pch_encoder) {
11346		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11347							    &pipe_config->fdi_m_n);
11348		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11349
11350		/*
11351		 * FDI already provided one idea for the dotclock.
11352		 * Yell if the encoder disagrees.
11353		 */
11354		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11355		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11356		     fdi_dotclock, dotclock);
11357	}
11358}
11359
11360static void verify_wm_state(struct drm_crtc *crtc,
11361			    struct drm_crtc_state *new_state)
11362{
11363	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11364	struct skl_ddb_allocation hw_ddb, *sw_ddb;
11365	struct skl_pipe_wm hw_wm, *sw_wm;
11366	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
11367	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
11368	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11369	const enum pipe pipe = intel_crtc->pipe;
11370	int plane, level, max_level = ilk_wm_max_level(dev_priv);
11371
11372	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
11373		return;
11374
11375	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
11376	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
11377
11378	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
11379	sw_ddb = &dev_priv->wm.skl_hw.ddb;
11380
11381	/* planes */
11382	for_each_universal_plane(dev_priv, pipe, plane) {
11383		hw_plane_wm = &hw_wm.planes[plane];
11384		sw_plane_wm = &sw_wm->planes[plane];
11385
11386		/* Watermarks */
11387		for (level = 0; level <= max_level; level++) {
11388			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
11389						&sw_plane_wm->wm[level]))
11390				continue;
11391
11392			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11393				  pipe_name(pipe), plane + 1, level,
11394				  sw_plane_wm->wm[level].plane_en,
11395				  sw_plane_wm->wm[level].plane_res_b,
11396				  sw_plane_wm->wm[level].plane_res_l,
11397				  hw_plane_wm->wm[level].plane_en,
11398				  hw_plane_wm->wm[level].plane_res_b,
11399				  hw_plane_wm->wm[level].plane_res_l);
11400		}
11401
11402		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
11403					 &sw_plane_wm->trans_wm)) {
11404			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11405				  pipe_name(pipe), plane + 1,
11406				  sw_plane_wm->trans_wm.plane_en,
11407				  sw_plane_wm->trans_wm.plane_res_b,
11408				  sw_plane_wm->trans_wm.plane_res_l,
11409				  hw_plane_wm->trans_wm.plane_en,
11410				  hw_plane_wm->trans_wm.plane_res_b,
11411				  hw_plane_wm->trans_wm.plane_res_l);
11412		}
11413
11414		/* DDB */
11415		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
11416		sw_ddb_entry = &sw_ddb->plane[pipe][plane];
11417
11418		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
11419			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
11420				  pipe_name(pipe), plane + 1,
11421				  sw_ddb_entry->start, sw_ddb_entry->end,
11422				  hw_ddb_entry->start, hw_ddb_entry->end);
11423		}
11424	}
11425
11426	/*
11427	 * cursor
11428	 * If the cursor plane isn't active, we may not have updated it's ddb
11429	 * allocation. In that case since the ddb allocation will be updated
11430	 * once the plane becomes visible, we can skip this check
11431	 */
11432	if (1) {
11433		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
11434		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
11435
11436		/* Watermarks */
11437		for (level = 0; level <= max_level; level++) {
11438			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
11439						&sw_plane_wm->wm[level]))
11440				continue;
11441
11442			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11443				  pipe_name(pipe), level,
11444				  sw_plane_wm->wm[level].plane_en,
11445				  sw_plane_wm->wm[level].plane_res_b,
11446				  sw_plane_wm->wm[level].plane_res_l,
11447				  hw_plane_wm->wm[level].plane_en,
11448				  hw_plane_wm->wm[level].plane_res_b,
11449				  hw_plane_wm->wm[level].plane_res_l);
11450		}
11451
11452		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
11453					 &sw_plane_wm->trans_wm)) {
11454			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11455				  pipe_name(pipe),
11456				  sw_plane_wm->trans_wm.plane_en,
11457				  sw_plane_wm->trans_wm.plane_res_b,
11458				  sw_plane_wm->trans_wm.plane_res_l,
11459				  hw_plane_wm->trans_wm.plane_en,
11460				  hw_plane_wm->trans_wm.plane_res_b,
11461				  hw_plane_wm->trans_wm.plane_res_l);
11462		}
11463
11464		/* DDB */
11465		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
11466		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
11467
11468		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
11469			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
11470				  pipe_name(pipe),
11471				  sw_ddb_entry->start, sw_ddb_entry->end,
11472				  hw_ddb_entry->start, hw_ddb_entry->end);
11473		}
11474	}
11475}
11476
11477static void
11478verify_connector_state(struct drm_device *dev,
11479		       struct drm_atomic_state *state,
11480		       struct drm_crtc *crtc)
11481{
11482	struct drm_connector *connector;
11483	struct drm_connector_state *new_conn_state;
11484	int i;
11485
11486	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
11487		struct drm_encoder *encoder = connector->encoder;
11488		struct drm_crtc_state *crtc_state = NULL;
11489
11490		if (new_conn_state->crtc != crtc)
11491			continue;
11492
11493		if (crtc)
11494			crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
11495
11496		intel_connector_verify_state(crtc_state, new_conn_state);
11497
11498		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
11499		     "connector's atomic encoder doesn't match legacy encoder\n");
11500	}
11501}
11502
/*
 * Cross-check each encoder's software state (best_encoder links in the
 * connector states) against its hardware state via get_hw_state().
 */
static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(dev, encoder) {
		/*
		 * found: encoder referenced by any old or new connector state
		 * enabled: some connector still uses it after this commit
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this commit are not checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* A detached encoder must also be off in hardware. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
11551
/*
 * Read the full pipe config back from the hardware and compare it against
 * the software state just committed for @crtc.
 *
 * NOTE: @old_crtc_state is destroyed here and its memory recycled as
 * scratch space for the hw readout; the caller must not use it afterwards.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* Recycle the old state's memory to hold the hardware readout. */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let each active encoder add its config to the readout. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* A disabled crtc has nothing further to compare. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
11621
11622static void
11623intel_verify_planes(struct intel_atomic_state *state)
11624{
11625	struct intel_plane *plane;
11626	const struct intel_plane_state *plane_state;
11627	int i;
11628
11629	for_each_new_intel_plane_in_state(state, plane,
11630					  plane_state, i)
11631		assert_plane(plane, plane_state->base.visible);
11632}
11633
/*
 * Compare a shared DPLL's software tracking (on/active_mask/crtc_mask and
 * the cached hw state) against the actual hardware state.
 *
 * @crtc may be NULL: then only the global bookkeeping is checked, which is
 * how unused DPLLs are verified (see verify_disabled_dpll_state()).
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->name);

	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs may legitimately be on without any sw users. */
	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* No crtc context: only sanity check the global masks. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* The cached hw state must match what is actually programmed. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
11688
11689static void
11690verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
11691			 struct drm_crtc_state *old_crtc_state,
11692			 struct drm_crtc_state *new_crtc_state)
11693{
11694	struct drm_i915_private *dev_priv = to_i915(dev);
11695	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
11696	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
11697
11698	if (new_state->shared_dpll)
11699		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
11700
11701	if (old_state->shared_dpll &&
11702	    old_state->shared_dpll != new_state->shared_dpll) {
11703		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
11704		struct intel_shared_dpll *pll = old_state->shared_dpll;
11705
11706		I915_STATE_WARN(pll->active_mask & crtc_mask,
11707				"pll active mismatch (didn't expect pipe %c in active mask)\n",
11708				pipe_name(drm_crtc_index(crtc)));
11709		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
11710				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
11711				pipe_name(drm_crtc_index(crtc)));
11712	}
11713}
11714
11715static void
11716intel_modeset_verify_crtc(struct drm_crtc *crtc,
11717			  struct drm_atomic_state *state,
11718			  struct drm_crtc_state *old_state,
11719			  struct drm_crtc_state *new_state)
11720{
11721	if (!needs_modeset(new_state) &&
11722	    !to_intel_crtc_state(new_state)->update_pipe)
11723		return;
11724
11725	verify_wm_state(crtc, new_state);
11726	verify_connector_state(crtc->dev, state, crtc);
11727	verify_crtc_state(crtc, old_state, new_state);
11728	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
11729}
11730
11731static void
11732verify_disabled_dpll_state(struct drm_device *dev)
11733{
11734	struct drm_i915_private *dev_priv = to_i915(dev);
11735	int i;
11736
11737	for (i = 0; i < dev_priv->num_shared_dpll; i++)
11738		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
11739}
11740
/*
 * Verify the parts of the modeset state that are not tied to a specific
 * crtc: encoders, connectors without a crtc, and unused DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	/* NULL crtc: check the connectors that aren't bound to any crtc. */
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}
11749
11750static void update_scanline_offset(struct intel_crtc *crtc)
11751{
11752	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11753
11754	/*
11755	 * The scanline counter increments at the leading edge of hsync.
11756	 *
11757	 * On most platforms it starts counting from vtotal-1 on the
11758	 * first active line. That means the scanline counter value is
11759	 * always one less than what we would expect. Ie. just after
11760	 * start of vblank, which also occurs at start of hsync (on the
11761	 * last active line), the scanline counter will read vblank_start-1.
11762	 *
11763	 * On gen2 the scanline counter starts counting from 1 instead
11764	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
11765	 * to keep the value positive), instead of adding one.
11766	 *
11767	 * On HSW+ the behaviour of the scanline counter depends on the output
11768	 * type. For DP ports it behaves like most other platforms, but on HDMI
11769	 * there's an extra 1 line difference. So we need to add two instead of
11770	 * one to the value.
11771	 *
11772	 * On VLV/CHV DSI the scanline counter would appear to increment
11773	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
11774	 * that means we can't tell whether we're in vblank or not while
11775	 * we're on that particular line. We must still set scanline_offset
11776	 * to 1 so that the vblank timestamps come out correct when we query
11777	 * the scanline counter from within the vblank interrupt handler.
11778	 * However if queried just before the start of vblank we'll get an
11779	 * answer that's slightly in the future.
11780	 */
11781	if (IS_GEN2(dev_priv)) {
11782		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
11783		int vtotal;
11784
11785		vtotal = adjusted_mode->crtc_vtotal;
11786		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
11787			vtotal /= 2;
11788
11789		crtc->scanline_offset = vtotal - 1;
11790	} else if (HAS_DDI(dev_priv) &&
11791		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
11792		crtc->scanline_offset = 2;
11793	} else
11794		crtc->scanline_offset = 1;
11795}
11796
11797static void intel_modeset_clear_plls(struct drm_atomic_state *state)
11798{
11799	struct drm_device *dev = state->dev;
11800	struct drm_i915_private *dev_priv = to_i915(dev);
11801	struct drm_crtc *crtc;
11802	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11803	int i;
11804
11805	if (!dev_priv->display.crtc_compute_clock)
11806		return;
11807
11808	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11809		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11810		struct intel_shared_dpll *old_dpll =
11811			to_intel_crtc_state(old_crtc_state)->shared_dpll;
11812
11813		if (!needs_modeset(new_crtc_state))
11814			continue;
11815
11816		to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
11817
11818		if (!old_dpll)
11819			continue;
11820
11821		intel_release_shared_dpll(old_dpll, intel_crtc, state);
11822	}
11823}
11824
11825/*
11826 * This implements the workaround described in the "notes" section of the mode
11827 * set sequence documentation. When going from no pipes or single pipe to
11828 * multiple pipes, and planes are enabled after the pipe, we need to wait at
11829 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
11830 */
11831static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
11832{
11833	struct drm_crtc_state *crtc_state;
11834	struct intel_crtc *intel_crtc;
11835	struct drm_crtc *crtc;
11836	struct intel_crtc_state *first_crtc_state = NULL;
11837	struct intel_crtc_state *other_crtc_state = NULL;
11838	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
11839	int i;
11840
11841	/* look at all crtc's that are going to be enabled in during modeset */
11842	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
11843		intel_crtc = to_intel_crtc(crtc);
11844
11845		if (!crtc_state->active || !needs_modeset(crtc_state))
11846			continue;
11847
11848		if (first_crtc_state) {
11849			other_crtc_state = to_intel_crtc_state(crtc_state);
11850			break;
11851		} else {
11852			first_crtc_state = to_intel_crtc_state(crtc_state);
11853			first_pipe = intel_crtc->pipe;
11854		}
11855	}
11856
11857	/* No workaround needed? */
11858	if (!first_crtc_state)
11859		return 0;
11860
11861	/* w/a possibly needed, check how many crtc's are already enabled. */
11862	for_each_intel_crtc(state->dev, intel_crtc) {
11863		struct intel_crtc_state *pipe_config;
11864
11865		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
11866		if (IS_ERR(pipe_config))
11867			return PTR_ERR(pipe_config);
11868
11869		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
11870
11871		if (!pipe_config->base.active ||
11872		    needs_modeset(&pipe_config->base))
11873			continue;
11874
11875		/* 2 or more enabled crtcs means no need for w/a */
11876		if (enabled_pipe != INVALID_PIPE)
11877			return 0;
11878
11879		enabled_pipe = intel_crtc->pipe;
11880	}
11881
11882	if (enabled_pipe != INVALID_PIPE)
11883		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
11884	else if (other_crtc_state)
11885		other_crtc_state->hsw_workaround_pipe = first_pipe;
11886
11887	return 0;
11888}
11889
11890static int intel_lock_all_pipes(struct drm_atomic_state *state)
11891{
11892	struct drm_crtc *crtc;
11893
11894	/* Add all pipes to the state */
11895	for_each_crtc(state->dev, crtc) {
11896		struct drm_crtc_state *crtc_state;
11897
11898		crtc_state = drm_atomic_get_crtc_state(state, crtc);
11899		if (IS_ERR(crtc_state))
11900			return PTR_ERR(crtc_state);
11901	}
11902
11903	return 0;
11904}
11905
11906static int intel_modeset_all_pipes(struct drm_atomic_state *state)
11907{
11908	struct drm_crtc *crtc;
11909
11910	/*
11911	 * Add all pipes to the state, and force
11912	 * a modeset on all the active ones.
11913	 */
11914	for_each_crtc(state->dev, crtc) {
11915		struct drm_crtc_state *crtc_state;
11916		int ret;
11917
11918		crtc_state = drm_atomic_get_crtc_state(state, crtc);
11919		if (IS_ERR(crtc_state))
11920			return PTR_ERR(crtc_state);
11921
11922		if (!crtc_state->active || needs_modeset(crtc_state))
11923			continue;
11924
11925		crtc_state->mode_changed = true;
11926
11927		ret = drm_atomic_add_affected_connectors(state, crtc);
11928		if (ret)
11929			return ret;
11930
11931		ret = drm_atomic_add_affected_planes(state, crtc);
11932		if (ret)
11933			return ret;
11934	}
11935
11936	return 0;
11937}
11938
/*
 * Extra validation needed when at least one crtc undergoes a full modeset:
 * digital port conflicts, active-crtc bookkeeping, and cdclk recomputation
 * (which may force additional pipes into the state).
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Start from the current device state and apply this commit's deltas. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
					      &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	} else {
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
12016
12017/*
12018 * Handle calculation of various watermark data at the end of the atomic check
12019 * phase.  The code here should be run after the per-crtc and per-plane 'check'
12020 * handlers to ensure that all derived state has been updated.
12021 */
12022static int calc_watermark_data(struct drm_atomic_state *state)
12023{
12024	struct drm_device *dev = state->dev;
12025	struct drm_i915_private *dev_priv = to_i915(dev);
12026
12027	/* Is there platform-specific watermark information to calculate? */
12028	if (dev_priv->display.compute_global_watermarks)
12029		return dev_priv->display.compute_global_watermarks(state);
12030
12031	return 0;
12032}
12033
12034/**
12035 * intel_atomic_check - validate state object
12036 * @dev: drm device
12037 * @state: state to validate
12038 */
12039static int intel_atomic_check(struct drm_device *dev,
12040			      struct drm_atomic_state *state)
12041{
12042	struct drm_i915_private *dev_priv = to_i915(dev);
12043	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12044	struct drm_crtc *crtc;
12045	struct drm_crtc_state *old_crtc_state, *crtc_state;
12046	int ret, i;
12047	bool any_ms = false;
12048
12049	/* Catch I915_MODE_FLAG_INHERITED */
12050	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
12051				      crtc_state, i) {
12052		if (crtc_state->mode.private_flags !=
12053		    old_crtc_state->mode.private_flags)
12054			crtc_state->mode_changed = true;
12055	}
12056
12057	ret = drm_atomic_helper_check_modeset(dev, state);
12058	if (ret)
12059		return ret;
12060
12061	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
12062		struct intel_crtc_state *pipe_config =
12063			to_intel_crtc_state(crtc_state);
12064
12065		if (!needs_modeset(crtc_state))
12066			continue;
12067
12068		if (!crtc_state->enable) {
12069			any_ms = true;
12070			continue;
12071		}
12072
12073		ret = intel_modeset_pipe_config(crtc, pipe_config);
12074		if (ret) {
12075			intel_dump_pipe_config(to_intel_crtc(crtc),
12076					       pipe_config, "[failed]");
12077			return ret;
12078		}
12079
12080		if (i915_modparams.fastboot &&
12081		    intel_pipe_config_compare(dev_priv,
12082					to_intel_crtc_state(old_crtc_state),
12083					pipe_config, true)) {
12084			crtc_state->mode_changed = false;
12085			pipe_config->update_pipe = true;
12086		}
12087
12088		if (needs_modeset(crtc_state))
12089			any_ms = true;
12090
12091		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
12092				       needs_modeset(crtc_state) ?
12093				       "[modeset]" : "[fastset]");
12094	}
12095
12096	if (any_ms) {
12097		ret = intel_modeset_checks(state);
12098
12099		if (ret)
12100			return ret;
12101	} else {
12102		intel_state->cdclk.logical = dev_priv->cdclk.logical;
12103	}
12104
12105	ret = drm_atomic_helper_check_planes(dev, state);
12106	if (ret)
12107		return ret;
12108
12109	intel_fbc_choose_crtc(dev_priv, intel_state);
12110	return calc_watermark_data(state);
12111}
12112
/*
 * Prepare all planes in @state for commit (framebuffer pinning/fencing);
 * fully delegated to the atomic helper.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
12118
12119u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12120{
12121	struct drm_device *dev = crtc->base.dev;
12122
12123	if (!dev->max_vblank_count)
12124		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
12125
12126	return dev->driver->get_vblank_counter(dev, crtc->pipe);
12127}
12128
/*
 * Commit one crtc's new state to the hardware: enable the pipe on a full
 * modeset (or run the pre-plane update on a fastset), refresh FBC, then
 * commit the planes.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);

	if (modeset) {
		/* The scanline offset depends on the new mode; set it first. */
		update_scanline_offset(intel_crtc);
		dev_priv->display.crtc_enable(pipe_config, state);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);
	}

	/* Re-evaluate FBC only when the primary plane is part of the update. */
	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
		intel_fbc_enable(
		    intel_crtc, pipe_config,
		    to_intel_plane_state(crtc->primary->state));
	}

	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
}
12156
12157static void intel_update_crtcs(struct drm_atomic_state *state)
12158{
12159	struct drm_crtc *crtc;
12160	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12161	int i;
12162
12163	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12164		if (!new_crtc_state->active)
12165			continue;
12166
12167		intel_update_crtc(crtc, state, old_crtc_state,
12168				  new_crtc_state);
12169	}
12170}
12171
/*
 * Like intel_update_crtcs(), but orders the per-crtc updates so that the
 * pipes' DDB allocations never overlap at any intermediate point, adding
 * vblank waits where a pipe's new allocation must settle first.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;
	bool progress;
	enum pipe pipe;
	int i;

	/* Current DDB entry per crtc; starts as the pre-commit allocation. */
	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(new_crtc_state);
			pipe = intel_crtc->pipe;

			/* Skip crtcs already updated or not active. */
			if (updated & cmask || !cstate->base.active)
				continue;

			/* Defer while the new DDB overlaps another pipe's. */
			if (skl_ddb_allocation_overlaps(dev_priv,
							entries,
							&cstate->wm.skl.ddb,
							i))
				continue;

			updated |= cmask;
			entries[i] = &cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);
}
12243
12244static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12245{
12246	struct intel_atomic_state *state, *next;
12247	struct llist_node *freed;
12248
12249	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12250	llist_for_each_entry_safe(state, next, freed, freed)
12251		drm_atomic_state_put(&state->base);
12252}
12253
12254static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12255{
12256	struct drm_i915_private *dev_priv =
12257		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12258
12259	intel_atomic_helper_free_state(dev_priv);
12260}
12261
/*
 * Wait for the commit's i915_sw_fence to signal, while also waking up when
 * a GPU reset that needs a modeset (I915_RESET_MODESET) is flagged, so the
 * commit doesn't block the reset path.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue on both wait queues before re-checking conditions. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
12285
/*
 * Second half of an atomic commit: program the hardware for @state.
 *
 * Runs either synchronously from intel_atomic_commit() or from the
 * commit_work worker for nonblocking commits. By the time we get here
 * the new state has already been swapped in; we first wait for the
 * commit_ready fence (fb pins and render fences) and for any commits
 * we depend on.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	u64 put_domains[I915_MAX_PIPES] = {};
	int i;

	intel_atomic_commit_fence_wait(intel_state);

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset)
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* Disable phase: shut down every crtc that needs a full modeset. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		/*
		 * Grab the power domains the new state needs; released per
		 * crtc further down once the update is complete.
		 */
		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(new_crtc_state));
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       to_intel_crtc_state(new_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!new_crtc_state->active) {
				/*
				 * Make sure we don't call initial_watermarks
				 * for ILK-style watermark updates.
				 *
				 * No clue what this is supposed to achieve.
				 */
				if (INTEL_GEN(dev_priv) >= 9)
					dev_priv->display.initial_watermarks(intel_state,
									     to_intel_crtc_state(new_crtc_state));
			}
		}
	}

	/* FIXME: Eventually get rid of our intel_crtc->config pointer */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more then one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      intel_cstate);
	}

	/* Post-update cleanup and release of the power domains grabbed above. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (intel_state->modeset)
		intel_verify_planes(intel_state);

	/* Counterpart of the SAGV disable above, once fewer pipes are active. */
	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
	}

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	/* Drops the reference taken for the commit work itself. */
	drm_atomic_state_put(state);

	intel_atomic_helper_free_state(dev_priv);
}
12447
12448static void intel_atomic_commit_work(struct work_struct *work)
12449{
12450	struct drm_atomic_state *state =
12451		container_of(work, struct drm_atomic_state, commit_work);
12452
12453	intel_atomic_commit_tail(state);
12454}
12455
12456static int __i915_sw_fence_call
12457intel_atomic_commit_ready(struct i915_sw_fence *fence,
12458			  enum i915_sw_fence_notify notify)
12459{
12460	struct intel_atomic_state *state =
12461		container_of(fence, struct intel_atomic_state, commit_ready);
12462
12463	switch (notify) {
12464	case FENCE_COMPLETE:
12465		/* we do blocking waits in the worker, nothing to do here */
12466		break;
12467	case FENCE_FREE:
12468		{
12469			struct intel_atomic_helper *helper =
12470				&to_i915(state->base.dev)->atomic_helper;
12471
12472			if (llist_add(&state->freed, &helper->free_list))
12473				schedule_work(&helper->free_work);
12474			break;
12475		}
12476	}
12477
12478	return NOTIFY_DONE;
12479}
12480
/*
 * Update frontbuffer tracking for every plane in the commit: move each
 * plane's frontbuffer bit from the old fb's GEM object to the new fb's
 * object. (Planes being enabled/disabled have no old/new fb;
 * presumably intel_fb_obj()/i915_gem_track_fb() accept NULL — confirm.)
 */
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	int i;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
				  intel_fb_obj(new_plane_state->fb),
				  to_intel_plane(plane)->frontbuffer_bit);
}
12492
12493/**
12494 * intel_atomic_commit - commit validated state object
12495 * @dev: DRM device
12496 * @state: the top-level driver state object
12497 * @nonblock: nonblocking commit
12498 *
12499 * This function commits a top-level state object that has been validated
12500 * with drm_atomic_helper_check().
12501 *
12502 * RETURNS
12503 * Zero for success or -errno.
12504 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/*
	 * NOTE(review): this reference appears to be balanced by the
	 * drm_atomic_state_put() in the free-state path once the
	 * commit_ready fence is freed — confirm against
	 * intel_atomic_helper_free_state().
	 */
	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(dev, state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		/* Commit the fence so its FENCE_FREE cleanup can run. */
		i915_sw_fence_commit(&intel_state->commit_ready);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(state, true);

	if (ret) {
		i915_sw_fence_commit(&intel_state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, state);
		return ret;
	}
	/*
	 * State has been swapped in above; from here on the commit must be
	 * carried through. Latch the software-tracked copies of the new
	 * global state.
	 */
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (intel_state->modeset) {
		memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
		       sizeof(intel_state->min_cdclk));
		memcpy(dev_priv->min_voltage_level,
		       intel_state->min_voltage_level,
		       sizeof(intel_state->min_voltage_level));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->cdclk.logical = intel_state->cdclk.logical;
		dev_priv->cdclk.actual = intel_state->cdclk.actual;
	}

	/* Second reference: held by the commit work, put in commit_tail. */
	drm_atomic_state_get(state);
	INIT_WORK(&state->commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&intel_state->commit_ready);
	/*
	 * Three ways to run the tail: on the ordered modeset workqueue
	 * (nonblocking modesets), on the unbound workqueue (other
	 * nonblocking commits), or synchronously — after flushing any
	 * pending modesets so we don't overtake them.
	 */
	if (nonblock && intel_state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->commit_work);
	} else if (nonblock) {
		queue_work(system_unbound_wq, &state->commit_work);
	} else {
		if (intel_state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
12593
/*
 * CRTC vtable. Legacy entry points (set_config, page_flip, gamma_set)
 * are routed through the DRM atomic helpers; state duplication/
 * destruction and the CRC-source hook are i915-specific.
 */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
};
12603
/*
 * One-shot RPS-boost bookkeeping, armed on a crtc's vblank waitqueue by
 * add_rps_boost_after_vblank() and torn down by do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the vblank waitqueue */

	struct drm_crtc *crtc;		/* crtc we hold a vblank ref on */
	struct i915_request *request;	/* request we hold a ref on */
};
12610
/*
 * Vblank waitqueue callback armed by add_rps_boost_after_vblank():
 * boost the GPU frequency for the flip's request unless it is already
 * executing, then drop the request and vblank references taken when
 * arming and free the bookkeeping. One-shot: removes itself from the
 * waitqueue before freeing.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		gen6_rps_boost(rq, NULL);
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
12632
12633static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
12634				       struct dma_fence *fence)
12635{
12636	struct wait_rps_boost *wait;
12637
12638	if (!dma_fence_is_i915(fence))
12639		return;
12640
12641	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
12642		return;
12643
12644	if (drm_crtc_vblank_get(crtc))
12645		return;
12646
12647	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
12648	if (!wait) {
12649		drm_crtc_vblank_put(crtc);
12650		return;
12651	}
12652
12653	wait->request = to_request(dma_fence_get(fence));
12654	wait->crtc = crtc;
12655
12656	wait->wait.func = do_rps_boost;
12657	wait->wait.flags = 0;
12658
12659	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
12660}
12661
/*
 * Pin @plane_state's framebuffer for scanout.
 *
 * Cursor planes on platforms that need physical cursor addresses get a
 * contiguous physical allocation via i915_gem_object_attach_phys();
 * everything else gets a pinned (and possibly fenced) GGTT vma stored
 * in plane_state->vma.
 *
 * Returns 0 on success, negative errno on failure. Callers in this
 * file hold struct_mutex around this (see intel_prepare_plane_fb() and
 * intel_legacy_cursor_update()).
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);

		return i915_gem_object_attach_phys(obj, align);
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 plane_state->base.rotation,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	plane_state->vma = vma;

	return 0;
}
12688
12689static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
12690{
12691	struct i915_vma *vma;
12692
12693	vma = fetch_and_zero(&old_plane_state->vma);
12694	if (vma)
12695		intel_unpin_fb_vma(vma, old_plane_state->flags);
12696}
12697
12698/**
12699 * intel_prepare_plane_fb - Prepare fb for usage on plane
12700 * @plane: drm plane to prepare for
12701 * @new_state: the plane state being prepared
12702 *
12703 * Prepares a framebuffer for usage on a display plane.  Generally this
12704 * involves pinning the underlying object and updating the frontbuffer tracking
12705 * bits.  Some older platforms need special physical address handling for
12706 * cursor planes.
12707 *
12708 * Must be called with struct_mutex held.
12709 *
12710 * Returns 0 on success, negative error code on failure.
12711 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state,
							   plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			/* Gate the commit on the OLD fb's rendering too. */
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	/* Plane is being disabled (no new fb): nothing to pin. */
	if (!obj)
		return 0;

	/* Pin the backing pages before taking struct_mutex. */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret) {
		i915_gem_object_unpin_pages(obj);
		return ret;
	}

	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));

	/*
	 * NOTE(review): bumps the object's render priority so display
	 * work is presumably serviced ahead of other clients — confirm
	 * I915_PRIORITY_DISPLAY semantics. Return value intentionally
	 * ignored.
	 */
	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	if (!new_state->fence) { /* implicit fencing */
		struct dma_fence *fence;

		/* Gate the commit on the new fb's outstanding rendering. */
		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		fence = reservation_object_get_excl_rcu(obj->resv);
		if (fence) {
			add_rps_boost_after_vblank(new_state->crtc, fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
	}

	return 0;
}
12802
12803/**
12804 * intel_cleanup_plane_fb - Cleans up an fb after plane use
12805 * @plane: drm plane to clean up for
12806 * @old_state: the state from the previous modeset
12807 *
12808 * Cleans up a framebuffer that has just been removed from a plane.
12809 *
12810 * Must be called with struct_mutex held.
12811 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	/* Unpinning is serialized by struct_mutex, same as the pin side. */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_plane_unpin_fb(to_intel_plane_state(old_state));
	mutex_unlock(&dev_priv->drm.struct_mutex);
}
12823
/*
 * Compute the maximum plane scaling factor usable on @intel_crtc with
 * @crtc_state (apparently 16.16 fixed point, where 1 << 16 == 1.0 —
 * see the "close to 3" comment below). Returns
 * DRM_PLANE_HELPER_NO_SCALING when scaling is unavailable or the
 * clocks look bogus.
 */
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv;
	int max_scale;
	int crtc_clock, max_dotclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	dev_priv = to_i915(intel_crtc->base.dev);

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;

	/* GLK/CNL+ can drive a dotclock up to 2x cdclk. */
	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
		max_dotclk *= 2;

	if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 *    close to 3 but not 3, -1 is for that purpose
	 *            or
	 *    cdclk/crtc_clock
	 */
	max_scale = min((1 << 16) * 3 - 1,
			(1 << 8) * ((max_dotclk << 8) / crtc_clock));

	return max_scale;
}
12856
/*
 * Atomic check for primary planes: establish position/scaling limits,
 * validate the requested plane state against them, then precompute the
 * plane control register values used during the commit phase.
 */
static int
intel_check_primary_plane(struct intel_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_crtc *crtc = state->base.crtc;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9) {
		/* use scaler when colorkey is not required */
		if (!state->ckey.flags) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		/* Gen9+ primary planes may be positioned within the pipe. */
		can_position = true;
	}

	ret = drm_atomic_helper_check_plane_state(&state->base,
						  &crtc_state->base,
						  min_scale, max_scale,
						  can_position, true);
	if (ret)
		return ret;

	/* Plane disabled: nothing further to validate or precompute. */
	if (!state->base.fb)
		return 0;

	if (INTEL_GEN(dev_priv) >= 9) {
		ret = skl_check_plane_surface(crtc_state, state);
		if (ret)
			return ret;

		state->ctl = skl_plane_ctl(crtc_state, state);
	} else {
		ret = i9xx_check_plane_surface(state);
		if (ret)
			return ret;

		state->ctl = i9xx_plane_ctl(crtc_state, state);
	}

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		state->color_ctl = glk_plane_color_ctl(crtc_state, state);

	return 0;
}
12907
/*
 * Per-crtc hook called before the plane updates of a commit: program
 * color management for fastsets, start vblank evasion, and apply
 * non-modeset pipe config updates and watermarks inside the evasion
 * window.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/* For non-modeset updates, apply CSC/LUT changes up front. */
	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(&intel_cstate->base);
		intel_color_load_luts(&intel_cstate->base);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	/* Full modesets handle pipe programming elsewhere. */
	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
12945
/*
 * Per-crtc hook called after the plane updates of a commit: end vblank
 * evasion and, when a fastset takes us out of the BIOS-inherited mode,
 * re-enable FIFO underrun reporting that was left off for that state.
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);

	intel_pipe_update_end(new_crtc_state);

	if (new_crtc_state->update_pipe &&
	    !needs_modeset(&new_crtc_state->base) &&
	    old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED) {
		/* NOTE(review): gen2 presumably lacks CPU underrun reporting. */
		if (!IS_GEN2(dev_priv))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, intel_crtc->pipe, true);

		if (new_crtc_state->has_pch_encoder) {
			enum pipe pch_transcoder =
				intel_crtc_pch_transcoder(intel_crtc);

			intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
		}
	}
}
12972
12973/**
12974 * intel_plane_destroy - destroy a plane
12975 * @plane: plane to destroy
12976 *
12977 * Common destruction function for all types of planes (primary, cursor,
12978 * sprite).
12979 */
12980void intel_plane_destroy(struct drm_plane *plane)
12981{
12982	drm_plane_cleanup(plane);
12983	kfree(to_intel_plane(plane));
12984}
12985
12986static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
12987{
12988	switch (format) {
12989	case DRM_FORMAT_C8:
12990	case DRM_FORMAT_RGB565:
12991	case DRM_FORMAT_XRGB1555:
12992	case DRM_FORMAT_XRGB8888:
12993		return modifier == DRM_FORMAT_MOD_LINEAR ||
12994			modifier == I915_FORMAT_MOD_X_TILED;
12995	default:
12996		return false;
12997	}
12998}
12999
13000static bool i965_mod_supported(uint32_t format, uint64_t modifier)
13001{
13002	switch (format) {
13003	case DRM_FORMAT_C8:
13004	case DRM_FORMAT_RGB565:
13005	case DRM_FORMAT_XRGB8888:
13006	case DRM_FORMAT_XBGR8888:
13007	case DRM_FORMAT_XRGB2101010:
13008	case DRM_FORMAT_XBGR2101010:
13009		return modifier == DRM_FORMAT_MOD_LINEAR ||
13010			modifier == I915_FORMAT_MOD_X_TILED;
13011	default:
13012		return false;
13013	}
13014}
13015
/*
 * skl+ primary plane format/modifier compatibility.
 *
 * The switch deliberately falls through, so each group accepts its own
 * modifiers plus everything the later groups accept:
 *   - 8888 formats:   CCS variants + Yf + Y/X/linear
 *   - 565/10bpc/YUV:  Yf + Y/X/linear
 *   - C8:             Y/X/linear only
 */
static bool skl_mod_supported(uint32_t format, uint64_t modifier)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
		    modifier == I915_FORMAT_MOD_Y_TILED_CCS)
			return true;
		/* fall through */
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		if (modifier == I915_FORMAT_MOD_Yf_TILED)
			return true;
		/* fall through */
	case DRM_FORMAT_C8:
		if (modifier == DRM_FORMAT_MOD_LINEAR ||
		    modifier == I915_FORMAT_MOD_X_TILED ||
		    modifier == I915_FORMAT_MOD_Y_TILED)
			return true;
		/* fall through */
	default:
		return false;
	}
}
13047
13048static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
13049						     uint32_t format,
13050						     uint64_t modifier)
13051{
13052	struct drm_i915_private *dev_priv = to_i915(plane->dev);
13053
13054	if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
13055		return false;
13056
13057	if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
13058	    modifier != DRM_FORMAT_MOD_LINEAR)
13059		return false;
13060
13061	if (INTEL_GEN(dev_priv) >= 9)
13062		return skl_mod_supported(format, modifier);
13063	else if (INTEL_GEN(dev_priv) >= 4)
13064		return i965_mod_supported(format, modifier);
13065	else
13066		return i8xx_mod_supported(format, modifier);
13067}
13068
13069static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
13070						    uint32_t format,
13071						    uint64_t modifier)
13072{
13073	if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
13074		return false;
13075
13076	return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
13077}
13078
13079static struct drm_plane_funcs intel_plane_funcs = {
13080	.update_plane = drm_atomic_helper_update_plane,
13081	.disable_plane = drm_atomic_helper_disable_plane,
13082	.destroy = intel_plane_destroy,
13083	.atomic_get_property = intel_plane_atomic_get_property,
13084	.atomic_set_property = intel_plane_atomic_set_property,
13085	.atomic_duplicate_state = intel_plane_duplicate_state,
13086	.atomic_destroy_state = intel_plane_destroy_state,
13087	.format_mod_supported = intel_primary_plane_format_mod_supported,
13088};
13089
/*
 * Legacy cursor ioctl fastpath: update the cursor plane immediately,
 * without a full atomic commit or vblank wait, when only the fb or the
 * position changed on an active, stable crtc. Anything that could
 * affect watermarks or pipe configuration bails to the atomic
 * slowpath at the bottom.
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   uint32_t src_x, uint32_t src_y,
			   uint32_t src_w, uint32_t src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct drm_crtc_state *crtc_state = crtc->state;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->active || needs_modeset(crtc_state) ||
	    to_intel_crtc_state(crtc_state)->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
						  to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
						  to_intel_plane_state(plane->state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	/* Pinning/unpinning must happen under struct_mutex. */
	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	old_fb = old_plane_state->fb;

	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	/* Program the hardware directly; no vblank wait. */
	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(intel_plane,
					  to_intel_crtc_state(crtc->state),
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
	}

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/* On failure the new state was never installed; on success the old one is dead. */
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
13202
/*
 * Cursor plane vtable: like the primary plane's, but .update_plane
 * goes through the legacy cursor fastpath and the modifier check only
 * accepts linear ARGB8888.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_plane_format_mod_supported,
};
13213
13214static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13215			       enum i9xx_plane_id i9xx_plane)
13216{
13217	if (!HAS_FBC(dev_priv))
13218		return false;
13219
13220	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13221		return i9xx_plane == PLANE_A; /* tied to pipe A */
13222	else if (IS_IVYBRIDGE(dev_priv))
13223		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13224			i9xx_plane == PLANE_C;
13225	else if (INTEL_GEN(dev_priv) >= 4)
13226		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13227	else
13228		return i9xx_plane == PLANE_A;
13229}
13230
13231static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
13232			      enum pipe pipe, enum plane_id plane_id)
13233{
13234	if (!HAS_FBC(dev_priv))
13235		return false;
13236
13237	return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
13238}
13239
/*
 * Create and initialize the primary plane for the given pipe.
 *
 * Selects the plane instance, format/modifier lists, plane hooks and
 * rotation/color properties appropriate for the platform generation.
 * Returns the new plane, or an ERR_PTR() on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	const uint64_t *modifiers;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_GEN(dev_priv) >= 9) {
		/* SKL+ primary planes can use the shared pipe scalers. */
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		primary->i9xx_plane = (enum i9xx_plane_id) pipe;
	primary->id = PLANE_PRIMARY;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);

	if (INTEL_GEN(dev_priv) >= 9)
		primary->has_fbc = skl_plane_has_fbc(dev_priv,
						     primary->pipe,
						     primary->id);
	else
		primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
						      primary->i9xx_plane);

	if (primary->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		/* Advertise this plane as a potential FBC framebuffer. */
		fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
	}

	primary->check_plane = intel_check_primary_plane;

	/* Pick per-generation format/modifier lists and plane hooks. */
	if (INTEL_GEN(dev_priv) >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
			modifiers = skl_format_modifiers_ccs;
		else
			modifiers = skl_format_modifiers_noccs;

		primary->update_plane = skl_update_plane;
		primary->disable_plane = skl_disable_plane;
		primary->get_hw_state = skl_plane_get_hw_state;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;
	}

	/* The plane name scheme differs per generation. */
	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(primary->i9xx_plane));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 10) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_plane_create_color_properties(&primary->base,
						  BIT(DRM_COLOR_YCBCR_BT601) |
						  BIT(DRM_COLOR_YCBCR_BT709),
						  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
						  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
						  DRM_COLOR_YCBCR_BT709,
						  DRM_COLOR_YCBCR_LIMITED_RANGE);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	/* kfree(NULL) is a no-op, so partial failures are fine here. */
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}
13398
/*
 * Create and initialize the cursor plane for the given pipe.
 *
 * i845/i865 use a dedicated cursor implementation; everything else
 * shares the i9xx cursor hooks. Returns the new plane or an ERR_PTR().
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* ~0 marks the cached register values as unknown. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	/* kfree(NULL) is a no-op, so partial failures are fine here. */
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}
13475
13476static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13477				    struct intel_crtc_state *crtc_state)
13478{
13479	struct intel_crtc_scaler_state *scaler_state =
13480		&crtc_state->scaler_state;
13481	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13482	int i;
13483
13484	crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
13485	if (!crtc->num_scalers)
13486		return;
13487
13488	for (i = 0; i < crtc->num_scalers; i++) {
13489		struct intel_scaler *scaler = &scaler_state->scalers[i];
13490
13491		scaler->in_use = 0;
13492		scaler->mode = PS_SCALER_MODE_DYN;
13493	}
13494
13495	scaler_state->scaler_id = -1;
13496}
13497
/*
 * Allocate and register the crtc for a pipe along with its primary,
 * sprite and cursor planes.
 *
 * Returns 0 on success or a negative errno. On failure only the crtc
 * and its state are freed here; any planes already registered are
 * cleaned up later by drm_mode_config_cleanup().
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each legacy plane slot must map to at most one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] != NULL);
	dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] = intel_crtc;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	/* Much of the driver assumes crtc index == pipe. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
13579
13580enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
13581{
13582	struct drm_device *dev = connector->base.dev;
13583
13584	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
13585
13586	if (!connector->base.state->crtc)
13587		return INVALID_PIPE;
13588
13589	return to_intel_crtc(connector->base.state->crtc)->pipe;
13590}
13591
13592int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
13593				      struct drm_file *file)
13594{
13595	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
13596	struct drm_crtc *drmmode_crtc;
13597	struct intel_crtc *crtc;
13598
13599	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
13600	if (!drmmode_crtc)
13601		return -ENOENT;
13602
13603	crtc = to_intel_crtc(drmmode_crtc);
13604	pipe_from_crtc_id->pipe = crtc->pipe;
13605
13606	return 0;
13607}
13608
13609static int intel_encoder_clones(struct intel_encoder *encoder)
13610{
13611	struct drm_device *dev = encoder->base.dev;
13612	struct intel_encoder *source_encoder;
13613	int index_mask = 0;
13614	int entry = 0;
13615
13616	for_each_intel_encoder(dev, source_encoder) {
13617		if (encoders_cloneable(encoder, source_encoder))
13618			index_mask |= (1 << entry);
13619
13620		entry++;
13621	}
13622
13623	return index_mask;
13624}
13625
13626static bool has_edp_a(struct drm_i915_private *dev_priv)
13627{
13628	if (!IS_MOBILE(dev_priv))
13629		return false;
13630
13631	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
13632		return false;
13633
13634	if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
13635		return false;
13636
13637	return true;
13638}
13639
13640static bool intel_crt_present(struct drm_i915_private *dev_priv)
13641{
13642	if (INTEL_GEN(dev_priv) >= 9)
13643		return false;
13644
13645	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
13646		return false;
13647
13648	if (IS_CHERRYVIEW(dev_priv))
13649		return false;
13650
13651	if (HAS_PCH_LPT_H(dev_priv) &&
13652	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
13653		return false;
13654
13655	/* DDI E can't be used if DDI A requires 4 lanes */
13656	if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
13657		return false;
13658
13659	if (!dev_priv->vbt.int_crt_support)
13660		return false;
13661
13662	return true;
13663}
13664
13665void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
13666{
13667	int pps_num;
13668	int pps_idx;
13669
13670	if (HAS_DDI(dev_priv))
13671		return;
13672	/*
13673	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
13674	 * everywhere where registers can be write protected.
13675	 */
13676	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13677		pps_num = 2;
13678	else
13679		pps_num = 1;
13680
13681	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
13682		u32 val = I915_READ(PP_CONTROL(pps_idx));
13683
13684		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
13685		I915_WRITE(PP_CONTROL(pps_idx), val);
13686	}
13687}
13688
13689static void intel_pps_init(struct drm_i915_private *dev_priv)
13690{
13691	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
13692		dev_priv->pps_mmio_base = PCH_PPS_BASE;
13693	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13694		dev_priv->pps_mmio_base = VLV_PPS_BASE;
13695	else
13696		dev_priv->pps_mmio_base = PPS_BASE;
13697
13698	intel_pps_unlock_regs_wa(dev_priv);
13699}
13700
/*
 * Probe and register all display outputs for the platform: LVDS/eDP,
 * CRT, DDI, DP, HDMI, SDVO, DSI, TV and DVO, selected per generation
 * from strap registers and the VBT. The probe order is significant
 * (see the LVDS/eDP comment below).
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registeration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev_priv);

	if (intel_crt_present(dev_priv))
		intel_crt_init(dev_priv);

	if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		intel_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;
		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev_priv);
	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);
	} else if (IS_GEN2(dev_priv))
		intel_dvo_init(dev_priv);

	if (SUPPORTS_TV(dev_priv))
		intel_tv_init(dev_priv);

	intel_psr_init(dev_priv);

	/* Fill in the crtc/clone masks now that all encoders exist. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
13887
/*
 * Final cleanup of a user-created framebuffer: drop the framebuffer
 * reference accounting on the backing GEM object, drop the object
 * reference taken at creation, and free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);

	/* framebuffer_references is modified under the object lock. */
	i915_gem_object_lock(intel_fb->obj);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	i915_gem_object_unlock(intel_fb->obj);

	i915_gem_object_put(intel_fb->obj);

	kfree(intel_fb);
}
13902
13903static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
13904						struct drm_file *file,
13905						unsigned int *handle)
13906{
13907	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
13908	struct drm_i915_gem_object *obj = intel_fb->obj;
13909
13910	if (obj->userptr.mm) {
13911		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
13912		return -EINVAL;
13913	}
13914
13915	return drm_gem_handle_create(file, &obj->base, handle);
13916}
13917
13918static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
13919					struct drm_file *file,
13920					unsigned flags, unsigned color,
13921					struct drm_clip_rect *clips,
13922					unsigned num_clips)
13923{
13924	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13925
13926	i915_gem_object_flush_if_display(obj);
13927	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13928
13929	return 0;
13930}
13931
/* drm_framebuffer vfuncs for userspace-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
13937
13938static
13939u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
13940			 uint64_t fb_modifier, uint32_t pixel_format)
13941{
13942	u32 gen = INTEL_GEN(dev_priv);
13943
13944	if (gen >= 9) {
13945		int cpp = drm_format_plane_cpp(pixel_format, 0);
13946
13947		/* "The stride in bytes must not exceed the of the size of 8K
13948		 *  pixels and 32K bytes."
13949		 */
13950		return min(8192 * cpp, 32768);
13951	} else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
13952		return 32*1024;
13953	} else if (gen >= 4) {
13954		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
13955			return 16*1024;
13956		else
13957			return 32*1024;
13958	} else if (gen >= 3) {
13959		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
13960			return 8*1024;
13961		else
13962			return 16*1024;
13963	} else {
13964		/* XXX DSPC is limited to 4k tiled */
13965		return 8*1024;
13966	}
13967}
13968
/*
 * Validate a user-supplied framebuffer description against the
 * backing object and platform limits (modifier, tiling, pitch,
 * pixel format, per-plane stride alignment), then register the fb.
 *
 * Returns 0 on success or a negative errno; on failure the
 * framebuffer_references bump taken at the top is rolled back.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	struct drm_format_name_buf format_name;
	u32 pitch_limit;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	/* Snapshot the object's tiling state under its lock. */
	i915_gem_object_lock(obj);
	obj->framebuffer_references++;
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XBGR8888:
		case DRM_FORMAT_ABGR8888:
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		default:
			DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
			goto err;
		}
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
				      mode_cmd->modifier[0]);
			goto err;
		}
		/* fall through */
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], pitch_limit);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_GEN(dev_priv) > 3) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_GEN(dev_priv) < 4) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	default:
		DRM_DEBUG_KMS("unsupported pixel format: %s\n",
			      drm_get_format_name(mode_cmd->pixel_format, &format_name));
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All planes must share the first plane's handle. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);

		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
		    (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
			stride_alignment *= 4;

		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}
	}

	intel_fb->obj = obj;

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Undo the framebuffer_references bump taken above. */
	i915_gem_object_lock(obj);
	obj->framebuffer_references--;
	i915_gem_object_unlock(obj);
	return ret;
}
14178
14179static struct drm_framebuffer *
14180intel_user_framebuffer_create(struct drm_device *dev,
14181			      struct drm_file *filp,
14182			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
14183{
14184	struct drm_framebuffer *fb;
14185	struct drm_i915_gem_object *obj;
14186	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14187
14188	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14189	if (!obj)
14190		return ERR_PTR(-ENOENT);
14191
14192	fb = intel_framebuffer_create(obj, &mode_cmd);
14193	if (IS_ERR(fb))
14194		i915_gem_object_put(obj);
14195
14196	return fb;
14197}
14198
14199static void intel_atomic_state_free(struct drm_atomic_state *state)
14200{
14201	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14202
14203	drm_atomic_state_default_release(state);
14204
14205	i915_sw_fence_fini(&intel_state->commit_ready);
14206
14207	kfree(state);
14208}
14209
14210static enum drm_mode_status
14211intel_mode_valid(struct drm_device *dev,
14212		 const struct drm_display_mode *mode)
14213{
14214	if (mode->vscan > 1)
14215		return MODE_NO_VSCAN;
14216
14217	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
14218		return MODE_NO_DBLESCAN;
14219
14220	if (mode->flags & DRM_MODE_FLAG_HSKEW)
14221		return MODE_H_ILLEGAL;
14222
14223	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14224			   DRM_MODE_FLAG_NCSYNC |
14225			   DRM_MODE_FLAG_PCSYNC))
14226		return MODE_HSYNC;
14227
14228	if (mode->flags & (DRM_MODE_FLAG_BCAST |
14229			   DRM_MODE_FLAG_PIXMUX |
14230			   DRM_MODE_FLAG_CLKDIV2))
14231		return MODE_BAD;
14232
14233	return MODE_OK;
14234}
14235
/* Mode config entry points the DRM core calls into this driver. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14247
14248/**
14249 * intel_init_display_hooks - initialize the display modesetting hooks
14250 * @dev_priv: device private
14251 */
14252void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14253{
14254	intel_init_cdclk_hooks(dev_priv);
14255
14256	if (INTEL_GEN(dev_priv) >= 9) {
14257		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14258		dev_priv->display.get_initial_plane_config =
14259			skylake_get_initial_plane_config;
14260		dev_priv->display.crtc_compute_clock =
14261			haswell_crtc_compute_clock;
14262		dev_priv->display.crtc_enable = haswell_crtc_enable;
14263		dev_priv->display.crtc_disable = haswell_crtc_disable;
14264	} else if (HAS_DDI(dev_priv)) {
14265		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14266		dev_priv->display.get_initial_plane_config =
14267			i9xx_get_initial_plane_config;
14268		dev_priv->display.crtc_compute_clock =
14269			haswell_crtc_compute_clock;
14270		dev_priv->display.crtc_enable = haswell_crtc_enable;
14271		dev_priv->display.crtc_disable = haswell_crtc_disable;
14272	} else if (HAS_PCH_SPLIT(dev_priv)) {
14273		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14274		dev_priv->display.get_initial_plane_config =
14275			i9xx_get_initial_plane_config;
14276		dev_priv->display.crtc_compute_clock =
14277			ironlake_crtc_compute_clock;
14278		dev_priv->display.crtc_enable = ironlake_crtc_enable;
14279		dev_priv->display.crtc_disable = ironlake_crtc_disable;
14280	} else if (IS_CHERRYVIEW(dev_priv)) {
14281		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14282		dev_priv->display.get_initial_plane_config =
14283			i9xx_get_initial_plane_config;
14284		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14285		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14286		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14287	} else if (IS_VALLEYVIEW(dev_priv)) {
14288		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14289		dev_priv->display.get_initial_plane_config =
14290			i9xx_get_initial_plane_config;
14291		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
14292		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14293		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14294	} else if (IS_G4X(dev_priv)) {
14295		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14296		dev_priv->display.get_initial_plane_config =
14297			i9xx_get_initial_plane_config;
14298		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
14299		dev_priv->display.crtc_enable = i9xx_crtc_enable;
14300		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14301	} else if (IS_PINEVIEW(dev_priv)) {
14302		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14303		dev_priv->display.get_initial_plane_config =
14304			i9xx_get_initial_plane_config;
14305		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
14306		dev_priv->display.crtc_enable = i9xx_crtc_enable;
14307		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14308	} else if (!IS_GEN2(dev_priv)) {
14309		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14310		dev_priv->display.get_initial_plane_config =
14311			i9xx_get_initial_plane_config;
14312		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14313		dev_priv->display.crtc_enable = i9xx_crtc_enable;
14314		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14315	} else {
14316		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14317		dev_priv->display.get_initial_plane_config =
14318			i9xx_get_initial_plane_config;
14319		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
14320		dev_priv->display.crtc_enable = i9xx_crtc_enable;
14321		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14322	}
14323
14324	if (IS_GEN5(dev_priv)) {
14325		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14326	} else if (IS_GEN6(dev_priv)) {
14327		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14328	} else if (IS_IVYBRIDGE(dev_priv)) {
14329		/* FIXME: detect B0+ stepping and use auto training */
14330		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
14331	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
14332		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14333	}
14334
14335	if (INTEL_GEN(dev_priv) >= 9)
14336		dev_priv->display.update_crtcs = skl_update_crtcs;
14337	else
14338		dev_priv->display.update_crtcs = intel_update_crtcs;
14339}
14340
14341/*
14342 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14343 */
14344static void quirk_ssc_force_disable(struct drm_device *dev)
14345{
14346	struct drm_i915_private *dev_priv = to_i915(dev);
14347	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14348	DRM_INFO("applying lvds SSC disable quirk\n");
14349}
14350
14351/*
14352 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14353 * brightness value
14354 */
14355static void quirk_invert_brightness(struct drm_device *dev)
14356{
14357	struct drm_i915_private *dev_priv = to_i915(dev);
14358	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14359	DRM_INFO("applying inverted panel brightness quirk\n");
14360}
14361
14362/* Some VBT's incorrectly indicate no backlight is present */
14363static void quirk_backlight_present(struct drm_device *dev)
14364{
14365	struct drm_i915_private *dev_priv = to_i915(dev);
14366	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14367	DRM_INFO("applying backlight present quirk\n");
14368}
14369
14370/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
14371 * which is 300 ms greater than eDP spec T12 min.
14372 */
14373static void quirk_increase_t12_delay(struct drm_device *dev)
14374{
14375	struct drm_i915_private *dev_priv = to_i915(dev);
14376
14377	dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
14378	DRM_INFO("Applying T12 delay quirk\n");
14379}
14380
/*
 * PCI-ID keyed quirk entry; hook is applied when the device and
 * subsystem IDs match (PCI_ANY_ID wildcards the subsystem fields).
 */
struct intel_quirk {
	int device;				/* PCI device ID */
	int subsystem_vendor;			/* subsystem vendor ID or PCI_ANY_ID */
	int subsystem_device;			/* subsystem device ID or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* quirk to apply on match */
};
14387
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);		/* quirk to apply on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated DMI match table */
};
14393
14394static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
14395{
14396	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
14397	return 1;
14398}
14399
/* DMI-matched quirks, consulted by intel_init_quirks(). */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
14415
/*
 * PCI-ID matched quirks, consulted by intel_init_quirks().
 * Entry format: { device, subsystem_vendor, subsystem_device, hook }.
 */
static struct intel_quirk intel_quirks[] = {
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },

	/* Toshiba Satellite P50-C-18C */
	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
};
14468
14469static void intel_init_quirks(struct drm_device *dev)
14470{
14471	struct pci_dev *d = dev->pdev;
14472	int i;
14473
14474	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14475		struct intel_quirk *q = &intel_quirks[i];
14476
14477		if (d->device == q->device &&
14478		    (d->subsystem_vendor == q->subsystem_vendor ||
14479		     q->subsystem_vendor == PCI_ANY_ID) &&
14480		    (d->subsystem_device == q->subsystem_device ||
14481		     q->subsystem_device == PCI_ANY_ID))
14482			q->hook(dev);
14483	}
14484	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
14485		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
14486			intel_dmi_quirks[i].hook(dev);
14487	}
14488}
14489
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	/*
	 * Put the VGA controller to sleep via legacy IO: set SR01 bit 5
	 * (screen off) through the sequencer index/data port pair. VGA
	 * arbiter ownership is held only around the IO accesses.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	/* Now disable the VGA display plane itself. */
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
14508
/*
 * Read the current cdclk state from hardware and seed the software
 * bookkeeping (logical and actual cdclk) with it.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
14517
14518/*
14519 * Calculate what we think the watermarks should be for the state we've read
14520 * out of the hardware and then immediately program those watermarks so that
14521 * we ensure the hardware settings match our internal state.
14522 *
14523 * We can calculate what we think WM's should be by creating a duplicate of the
14524 * current state (which was constructed during hardware readout) and running it
14525 * through the atomic check code to calculate new watermark values in the
14526 * state object.
14527 */
14528static void sanitize_watermarks(struct drm_device *dev)
14529{
14530	struct drm_i915_private *dev_priv = to_i915(dev);
14531	struct drm_atomic_state *state;
14532	struct intel_atomic_state *intel_state;
14533	struct drm_crtc *crtc;
14534	struct drm_crtc_state *cstate;
14535	struct drm_modeset_acquire_ctx ctx;
14536	int ret;
14537	int i;
14538
14539	/* Only supported on platforms that use atomic watermark design */
14540	if (!dev_priv->display.optimize_watermarks)
14541		return;
14542
14543	/*
14544	 * We need to hold connection_mutex before calling duplicate_state so
14545	 * that the connector loop is protected.
14546	 */
14547	drm_modeset_acquire_init(&ctx, 0);
14548retry:
14549	ret = drm_modeset_lock_all_ctx(dev, &ctx);
14550	if (ret == -EDEADLK) {
14551		drm_modeset_backoff(&ctx);
14552		goto retry;
14553	} else if (WARN_ON(ret)) {
14554		goto fail;
14555	}
14556
14557	state = drm_atomic_helper_duplicate_state(dev, &ctx);
14558	if (WARN_ON(IS_ERR(state)))
14559		goto fail;
14560
14561	intel_state = to_intel_atomic_state(state);
14562
14563	/*
14564	 * Hardware readout is the only time we don't want to calculate
14565	 * intermediate watermarks (since we don't trust the current
14566	 * watermarks).
14567	 */
14568	if (!HAS_GMCH_DISPLAY(dev_priv))
14569		intel_state->skip_intermediate_wm = true;
14570
14571	ret = intel_atomic_check(dev, state);
14572	if (ret) {
14573		/*
14574		 * If we fail here, it means that the hardware appears to be
14575		 * programmed in a way that shouldn't be possible, given our
14576		 * understanding of watermark requirements.  This might mean a
14577		 * mistake in the hardware readout code or a mistake in the
14578		 * watermark calculations for a given platform.  Raise a WARN
14579		 * so that this is noticeable.
14580		 *
14581		 * If this actually happens, we'll have to just leave the
14582		 * BIOS-programmed watermarks untouched and hope for the best.
14583		 */
14584		WARN(true, "Could not determine valid watermarks for inherited state\n");
14585		goto put_state;
14586	}
14587
14588	/* Write calculated watermark values back */
14589	for_each_new_crtc_in_state(state, crtc, cstate, i) {
14590		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
14591
14592		cs->wm.need_postvbl_update = true;
14593		dev_priv->display.optimize_watermarks(intel_state, cs);
14594
14595		to_intel_crtc_state(crtc->state)->wm = cs->wm;
14596	}
14597
14598put_state:
14599	drm_atomic_state_put(state);
14600fail:
14601	drm_modeset_drop_locks(&ctx);
14602	drm_modeset_acquire_fini(&ctx);
14603}
14604
14605static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
14606{
14607	if (IS_GEN5(dev_priv)) {
14608		u32 fdi_pll_clk =
14609			I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
14610
14611		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
14612	} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
14613		dev_priv->fdi_pll_freq = 270000;
14614	} else {
14615		return;
14616	}
14617
14618	DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
14619}
14620
/*
 * One-time modeset initialisation: set up the DRM mode config, apply
 * quirks, create crtcs and outputs, read out the BIOS-programmed
 * hardware state and reconstruct the boot framebuffer from it.
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;

	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	init_llist_head(&dev_priv->atomic_helper.free_list);
	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(dev);

	intel_init_pm(dev_priv);

	/* Display-less SKUs: nothing further to set up. */
	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* Maximum framebuffer dimensions per generation. */
	if (IS_GEN2(dev_priv)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev_priv)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor size limits, also per generation. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev_priv)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->gmadr.start;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		int ret;

		ret = intel_crtc_init(dev_priv, pipe);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(dev_priv);

	intel_update_czclk(dev_priv);
	intel_modeset_init_hw(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev_priv);

	/* Just disable it once at startup */
	i915_disable_vga(dev_priv);
	intel_setup_outputs(dev_priv);

	/* Take over (and sanitize) whatever state the BIOS left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		sanitize_watermarks(dev);

	return 0;
}
14759
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 mode. Used by
 * the i830 "pipes must be enabled" quirk handling. The programming
 * order (VGA mode first, DPLL rewrite after clock stabilisation,
 * triple rewrite) follows the workarounds noted inline.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check the divider choice against the 48 MHz refclk. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve the DVO 2x bit the BIOS may have set. */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Fixed 640x480 timings (htotal 800, vtotal 525). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
14829
/*
 * Counterpart to i830_enable_pipe(): shut the force-enabled pipe back
 * down. All planes and cursors are expected to be off already (the
 * WARNs below check that), then the pipe and its DPLL are disabled.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* Nothing should still be scanning out of this pipe. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Wait for the pipe to actually stop before killing its clock. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
14851
14852static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
14853				   struct intel_plane *plane)
14854{
14855	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14856	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
14857	u32 val = I915_READ(DSPCNTR(i9xx_plane));
14858
14859	return (val & DISPLAY_PLANE_ENABLE) == 0 ||
14860		(val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
14861}
14862
14863static void
14864intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
14865{
14866	struct intel_crtc *crtc;
14867
14868	if (INTEL_GEN(dev_priv) >= 4)
14869		return;
14870
14871	for_each_intel_crtc(&dev_priv->drm, crtc) {
14872		struct intel_plane *plane =
14873			to_intel_plane(crtc->base.primary);
14874
14875		if (intel_plane_mapping_ok(crtc, plane))
14876			continue;
14877
14878		DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
14879			      plane->base.name);
14880		intel_plane_disable_noatomic(crtc, plane);
14881	}
14882}
14883
/* Return true if at least one encoder is currently linked to this crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Returning from the first loop iteration: any match suffices. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
14894
/*
 * Return the first connector currently attached to @encoder, or NULL
 * if none is. If several connectors share the encoder, only the first
 * one found is returned.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
14905
14906static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
14907			      enum pipe pch_transcoder)
14908{
14909	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
14910		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
14911}
14912
/*
 * Bring a crtc read out from the BIOS into a state our driver can
 * work with: clear debug frame-start delays, reset vblank bookkeeping,
 * disable non-primary planes, turn off encoder-less pipes, and mark
 * fifo underrun reporting as disabled for bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
14979
/*
 * Fix up an encoder whose read-out state is inconsistent: if a
 * connector is attached but no active pipe drives the encoder, shut
 * the encoder down manually and clamp the connector to DPMS off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}
}
15020
15021void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15022{
15023	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15024
15025	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15026		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15027		i915_disable_vga(dev_priv);
15028	}
15029}
15030
15031void i915_redisable_vga(struct drm_i915_private *dev_priv)
15032{
15033	/* This function can be called both from intel_modeset_setup_hw_state or
15034	 * at a very early point in our resume sequence, where the power well
15035	 * structures are not yet restored. Since this function is at a very
15036	 * paranoid "someone might have enabled VGA while we were not looking"
15037	 * level, just check if the power well is enabled instead of trying to
15038	 * follow the "don't touch the power well if we don't need it" policy
15039	 * the rest of the driver uses. */
15040	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15041		return;
15042
15043	i915_redisable_vga_power_on(dev_priv);
15044
15045	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15046}
15047
/* FIXME read out full plane state for all planes */
/*
 * Read each plane's enabled/disabled state from hardware and record
 * it as the plane's visibility in the crtc's software state.
 */
static void readout_plane_state(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		/* Hardware is the source of truth during readout. */
		bool visible = plane->get_hw_state(plane);

		intel_set_plane_visible(crtc_state, plane_state, visible);
	}
}
15064
/*
 * Read the current display hardware state (CRTCs, planes, shared DPLLs,
 * encoders and connectors) and rebuild the matching atomic software state
 * from scratch.  Runs at driver load and on resume, before any
 * sanitization; the readout must not touch the hardware itself.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/*
		 * Throw away any existing software state and rebuild the
		 * crtc state purely from what the hardware reports.
		 */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	/*
	 * Reconstruct each shared DPLL's crtc_mask from the set of active
	 * crtcs that reference it; active_mask mirrors crtc_mask after
	 * readout since nothing has been sanitized away yet.
	 */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
						  &pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->state.crtc_mask, pll->on);
	}

	/*
	 * Link each active encoder to the crtc driving the pipe it is
	 * attached to, and let it fill in the crtc state it contributes to.
	 */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/*
	 * Final pass: derive the legacy mode, pixel rate, cdclk/voltage
	 * bookkeeping and vblank timestamping constants for each crtc.
	 */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
15217
15218static void
15219get_encoder_power_domains(struct drm_i915_private *dev_priv)
15220{
15221	struct intel_encoder *encoder;
15222
15223	for_each_intel_encoder(&dev_priv->drm, encoder) {
15224		u64 get_domains;
15225		enum intel_display_power_domain domain;
15226
15227		if (!encoder->get_power_domains)
15228			continue;
15229
15230		get_domains = encoder->get_power_domains(encoder);
15231		for_each_power_domain(domain, get_domains)
15232			intel_display_power_get(dev_priv, domain);
15233	}
15234}
15235
15236static void intel_early_display_was(struct drm_i915_private *dev_priv)
15237{
15238	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
15239	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
15240		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
15241			   DARBF_GATING_DIS);
15242
15243	if (IS_HASWELL(dev_priv)) {
15244		/*
15245		 * WaRsPkgCStateDisplayPMReq:hsw
15246		 * System hang if this isn't done before disabling all planes!
15247		 */
15248		I915_WRITE(CHICKEN_PAR1_1,
15249			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
15250	}
15251}
15252
/*
 * Scan out the current hardware modeset state and sanitize it: read
 * everything back from the hardware, then fix up whatever disagrees with
 * what the driver supports (BIOS leftovers, orphaned planes, PLLs that
 * are on but unused, ...).
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	/* Early workarounds must land before we touch anything else. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is running but has no user. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back (and on some platforms sanitize) watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev);
	}

	/*
	 * Settle the power domain references: any crtc that still needs
	 * domains here indicates a sanitization bug, hence the WARN.
	 */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_power_domains_verify_state(dev_priv);

	intel_fbc_init_pipe_state(dev_priv);
}
15325
/*
 * Restore the display state that was stashed away at suspend time (if
 * any) under all modeset locks, following the standard drm deadlock
 * backoff protocol.  Consumes and drops dev_priv->modeset_restore_state.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry until we hold every modeset lock without deadlocking. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
15359
/*
 * Late connector registration hook: register the backlight device for
 * this connector.  Returns 0 on success or a negative error code.
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = intel_backlight_device_register(intel_connector);
	if (ret)
		return ret;

	return 0;
}
15374
/*
 * Early connector unregistration hook: tear down the backlight sysfs
 * device and the driver-side backlight state for this connector.
 */
void intel_connector_unregister(struct drm_connector *connector)
{
	intel_backlight_device_unregister(to_intel_connector(connector));
	intel_panel_destroy_backlight(connector);
}
15382
15383static void intel_hpd_poll_fini(struct drm_device *dev)
15384{
15385	struct intel_connector *connector;
15386	struct drm_connector_list_iter conn_iter;
15387
15388	/* Kill all the work that may have been queued by hpd. */
15389	drm_connector_list_iter_begin(dev, &conn_iter);
15390	for_each_intel_connector_iter(connector, &conn_iter) {
15391		if (connector->modeset_retry_work.func)
15392			cancel_work_sync(&connector->modeset_retry_work);
15393		if (connector->hdcp_shim) {
15394			cancel_delayed_work_sync(&connector->hdcp_check_work);
15395			cancel_work_sync(&connector->hdcp_prop_work);
15396		}
15397	}
15398	drm_connector_list_iter_end(&conn_iter);
15399}
15400
/*
 * Tear down all display/modeset state.  The ordering below is delicate:
 * interrupts before polling, polling before fbdev, and the drm mode
 * config core cleanup only after every user of it is gone.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Make sure no deferred atomic-state frees are still in flight. */
	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);
}
15443
15444void intel_connector_attach_encoder(struct intel_connector *connector,
15445				    struct intel_encoder *encoder)
15446{
15447	connector->encoder = encoder;
15448	drm_mode_connector_attach_encoder(&connector->base,
15449					  &encoder->base);
15450}
15451
15452/*
15453 * set vga decode state - true == enable VGA decode
15454 */
15455int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
15456{
15457	unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
15458	u16 gmch_ctrl;
15459
15460	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15461		DRM_ERROR("failed to read control word\n");
15462		return -EIO;
15463	}
15464
15465	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15466		return 0;
15467
15468	if (state)
15469		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
15470	else
15471		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
15472
15473	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
15474		DRM_ERROR("failed to write control word\n");
15475		return -EIO;
15476	}
15477
15478	return 0;
15479}
15480
15481#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
15482
/*
 * Snapshot of display registers captured at GPU error time, filled in by
 * intel_display_capture_error_state() and printed by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW/BDW power well control register (PWR_WELL_CTL2) */
	u32 power_well_driver;

	/* Number of valid entries in transcoder[]; excludes DSI transcoders */
	int num_transcoders;

	/* Per-pipe legacy cursor plane registers */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	/* Per-pipe state; registers only read when power_domain_on */
	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers (gen-dependent subset valid) */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	/* Per-transcoder timing registers; sized for max pipes plus eDP */
	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
15526
/*
 * Capture a snapshot of the display registers for the GPU error state.
 * May be called from atomic context (GFP_ATOMIC allocation); powered-down
 * pipes/transcoders are skipped to avoid unclaimed register accesses.
 * Returns NULL if there are no pipes or the allocation fails; the caller
 * owns (and must kfree) the returned snapshot.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver =
			I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL));

	for_each_pipe(dev_priv, i) {
		/* Only touch registers whose power domain is actually up. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
15607
/* Shorthand for printing into the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
15609
15610void
15611intel_display_print_error_state(struct drm_i915_error_state_buf *m,
15612				struct intel_display_error_state *error)
15613{
15614	struct drm_i915_private *dev_priv = m->i915;
15615	int i;
15616
15617	if (!error)
15618		return;
15619
15620	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
15621	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
15622		err_printf(m, "PWR_WELL_CTL2: %08x\n",
15623			   error->power_well_driver);
15624	for_each_pipe(dev_priv, i) {
15625		err_printf(m, "Pipe [%d]:\n", i);
15626		err_printf(m, "  Power: %s\n",
15627			   onoff(error->pipe[i].power_domain_on));
15628		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
15629		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
15630
15631		err_printf(m, "Plane [%d]:\n", i);
15632		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
15633		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
15634		if (INTEL_GEN(dev_priv) <= 3) {
15635			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
15636			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
15637		}
15638		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
15639			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
15640		if (INTEL_GEN(dev_priv) >= 4) {
15641			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
15642			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
15643		}
15644
15645		err_printf(m, "Cursor [%d]:\n", i);
15646		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
15647		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
15648		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
15649	}
15650
15651	for (i = 0; i < error->num_transcoders; i++) {
15652		err_printf(m, "CPU transcoder: %s\n",
15653			   transcoder_name(error->transcoder[i].cpu_transcoder));
15654		err_printf(m, "  Power: %s\n",
15655			   onoff(error->transcoder[i].power_domain_on));
15656		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
15657		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
15658		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
15659		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
15660		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
15661		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
15662		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
15663	}
15664}
15665
15666#endif