/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_display_power.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_drrs.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_snps_phy.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "gt/gen8_ppgtt.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_dpt.h"
#include "intel_dsb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_modeset_verify.h"
#include "intel_modeset_setup.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @dev_priv: i915 device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
	if (dev_priv->display.funcs.wm->update_wm)
		dev_priv->display.funcs.wm->update_wm(dev_priv);
}

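/*
 * Worked example for the normal watermark formula above (illustrative
 * numbers only, not taken from any particular platform's bspec): with a
 * 148.5 MHz dotclock, 4 bytes per pixel and an assumed 30 us worst-case
 * memory latency,
 *
 *   watermark = 148,500,000 px/s * 4 B/px * 30e-6 s = 17,820 bytes
 *
 * i.e. roughly 279 64-byte FIFO entries before rounding up and adding
 * the extra 2 entries for clock crossings.
 */
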
static int intel_compute_pipe_wm(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->compute_pipe_wm)
		return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
	return 0;
}

static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
		return 0;
	if (drm_WARN_ON(&dev_priv->drm,
			!dev_priv->display.funcs.wm->compute_pipe_wm))
		return 0;
	return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
}

static bool intel_initial_watermarks(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->initial_watermarks) {
		dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
		return true;
	}
	return false;
}

static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->atomic_update_watermarks)
		dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
}

static void intel_optimize_watermarks(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->optimize_watermarks)
		dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
}

static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->compute_global_watermarks)
		return dev_priv->display.funcs.wm->compute_global_watermarks(state);
	return 0;
}

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

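/*
 * Example (assumed values): with a 1.6 GHz HPLL VCO, i.e. ref_freq ==
 * 1,600,000 kHz, and a CCK divider field of 3, the formula above gives
 * DIV_ROUND_CLOSEST(1,600,000 << 1, 3 + 1) == 800,000 kHz.
 */
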
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

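/*
 * In other words, is_hdr_mode() is true when every active plane is
 * either part of icl_hdr_plane_mask() or the cursor; a single active
 * plane outside that mask clears the condition.
 */
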
/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}

u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_pipes)
		return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe != bigjoiner_master_pipe(crtc_state);
}

bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe == bigjoiner_master_pipe(crtc_state);
}

static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->bigjoiner_pipes);
}

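/*
 * Example of the bigjoiner bitmask helpers above (hypothetical
 * configuration): with bigjoiner_pipes == BIT(PIPE_A) | BIT(PIPE_B),
 * bigjoiner_master_pipe() returns PIPE_A (the lowest set bit),
 * intel_crtc_bigjoiner_slave_pipes() returns BIT(PIPE_B), and
 * intel_bigjoiner_num_pipes() returns 2.
 */
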
struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;

	line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), str_on_off(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
					    PIPECONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			str_on_off(state), str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Never disable the pipe on i830; both pipes must stay enabled */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 14)
		intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
	else if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

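/*
 * Example (made-up numbers): with plane_alignment == 16, a linear
 * plane 0 of size 16 and a remapped plane 1 with dst_stride == 4 and
 * height == 8, the loop above accumulates ALIGN(0, 16) + 16 == 16,
 * then ALIGN(16, 16) + 4 * 8 == 48.
 */
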
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}

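/*
 * Example for the conversion above (illustrative values): with a
 * 4 bytes-per-pixel format, a mapping_stride of 16384 bytes and
 * (x, y) == (100, 10), the linear offset is
 * 10 * 16384 + 100 * 4 == 164240 bytes.
 */
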
/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A happens to be disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

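/*
 * Example (hypothetical state): if plane_mask names the pipe's primary
 * plane and cursor, the loop above reconstructs
 * enabled_planes == active_planes == BIT(PLANE_PRIMARY) | BIT(PLANE_CURSOR).
 */
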
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static int
intel_display_commit_duplicated_state(struct intel_atomic_state *state,
				      struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	int ret;

	ret = drm_atomic_helper_commit_duplicated_state(&state->base, ctx);

	drm_WARN_ON(&i915->drm, ret == -EDEADLK);

	return ret;
}

static int
__intel_display_resume(struct drm_i915_private *i915,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	intel_modeset_setup_hw_state(i915, ctx);
	intel_vga_redisable(i915);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state; pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(i915))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	return intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx);
}

static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
		intel_has_gpu_reset(to_gt(dev_priv)));
}

void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock; defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(to_gt(dev_priv));
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->display.restore.modeset_state = state;
	state->acquire_ctx = ctx;
}

void intel_display_finish_reset(struct drm_i915_private *i915)
{
	struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
		return;

	state = fetch_and_zero(&i915->display.restore.modeset_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(i915)) {
		/* for testing only restore the display */
		ret = intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx);
		if (ret)
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(i915);
		intel_modeset_init_hw(i915);
		intel_init_clock_gating(i915);
		intel_hpd_init(i915);

		ret = __intel_display_resume(i915, state, ctx);
		if (ret)
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(i915);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&i915->drm.mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *master_crtc;
	int num_encoders = 0;
	int i;

	master_crtc = intel_master_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &master_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(encoder->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(master_crtc->pipe));

	return encoder;
}

static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
		new_crtc_state->active_planes;
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->active_planes &&
		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
}

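/*
 * Note that a full modeset counts on both sides of the predicates
 * above: with old active planes it is "planes disabling" (they get
 * torn down), and with new active planes it is also "planes enabling"
 * (they are brought back up afterwards).
 */
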
1234static void intel_post_plane_update(struct intel_atomic_state *state,
1235				    struct intel_crtc *crtc)
1236{
1237	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1238	const struct intel_crtc_state *old_crtc_state =
1239		intel_atomic_get_old_crtc_state(state, crtc);
1240	const struct intel_crtc_state *new_crtc_state =
1241		intel_atomic_get_new_crtc_state(state, crtc);
1242	enum pipe pipe = crtc->pipe;
1243
1244	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
1245
1246	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
1247		intel_update_watermarks(dev_priv);
1248
1249	hsw_ips_post_update(state, crtc);
1250	intel_fbc_post_update(state, crtc);
1251
1252	if (needs_async_flip_vtd_wa(old_crtc_state) &&
1253	    !needs_async_flip_vtd_wa(new_crtc_state))
1254		intel_async_flip_vtd_wa(dev_priv, pipe, false);
1255
1256	if (needs_nv12_wa(old_crtc_state) &&
1257	    !needs_nv12_wa(new_crtc_state))
1258		skl_wa_827(dev_priv, pipe, false);
1259
1260	if (needs_scalerclk_wa(old_crtc_state) &&
1261	    !needs_scalerclk_wa(new_crtc_state))
1262		icl_wa_scalerclkgating(dev_priv, pipe, false);
1263
1264	if (needs_cursorclk_wa(old_crtc_state) &&
1265	    !needs_cursorclk_wa(new_crtc_state))
1266		icl_wa_cursorclkgating(dev_priv, pipe, false);
1267}
1268
1269static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1270					struct intel_crtc *crtc)
1271{
1272	const struct intel_crtc_state *crtc_state =
1273		intel_atomic_get_new_crtc_state(state, crtc);
1274	u8 update_planes = crtc_state->update_planes;
1275	const struct intel_plane_state *plane_state;
1276	struct intel_plane *plane;
1277	int i;
1278
1279	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1280		if (plane->pipe == crtc->pipe &&
1281		    update_planes & BIT(plane->id))
1282			plane->enable_flip_done(plane);
1283	}
1284}
1285
1286static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1287					 struct intel_crtc *crtc)
1288{
1289	const struct intel_crtc_state *crtc_state =
1290		intel_atomic_get_new_crtc_state(state, crtc);
1291	u8 update_planes = crtc_state->update_planes;
1292	const struct intel_plane_state *plane_state;
1293	struct intel_plane *plane;
1294	int i;
1295
1296	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1297		if (plane->pipe == crtc->pipe &&
1298		    update_planes & BIT(plane->id))
1299			plane->disable_flip_done(plane);
1300	}
1301}
1302
1303static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
1304					     struct intel_crtc *crtc)
1305{
1306	const struct intel_crtc_state *old_crtc_state =
1307		intel_atomic_get_old_crtc_state(state, crtc);
1308	const struct intel_crtc_state *new_crtc_state =
1309		intel_atomic_get_new_crtc_state(state, crtc);
1310	u8 update_planes = new_crtc_state->update_planes;
1311	const struct intel_plane_state *old_plane_state;
1312	struct intel_plane *plane;
1313	bool need_vbl_wait = false;
1314	int i;
1315
1316	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1317		if (plane->need_async_flip_disable_wa &&
1318		    plane->pipe == crtc->pipe &&
1319		    update_planes & BIT(plane->id)) {
1320			/*
1321			 * Apart from the async flip bit we want to
1322			 * preserve the old state for the plane.
1323			 */
1324			plane->async_flip(plane, old_crtc_state,
1325					  old_plane_state, false);
1326			need_vbl_wait = true;
1327		}
1328	}
1329
1330	if (need_vbl_wait)
1331		intel_crtc_wait_for_next_vblank(crtc);
1332}
1333
1334static void intel_pre_plane_update(struct intel_atomic_state *state,
1335				   struct intel_crtc *crtc)
1336{
1337	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1338	const struct intel_crtc_state *old_crtc_state =
1339		intel_atomic_get_old_crtc_state(state, crtc);
1340	const struct intel_crtc_state *new_crtc_state =
1341		intel_atomic_get_new_crtc_state(state, crtc);
1342	enum pipe pipe = crtc->pipe;
1343
1344	intel_drrs_deactivate(old_crtc_state);
1345
1346	intel_psr_pre_plane_update(state, crtc);
1347
1348	if (hsw_ips_pre_update(state, crtc))
1349		intel_crtc_wait_for_next_vblank(crtc);
1350
1351	if (intel_fbc_pre_update(state, crtc))
1352		intel_crtc_wait_for_next_vblank(crtc);
1353
1354	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
1355	    needs_async_flip_vtd_wa(new_crtc_state))
1356		intel_async_flip_vtd_wa(dev_priv, pipe, true);
1357
1358	/* Display WA 827 */
1359	if (!needs_nv12_wa(old_crtc_state) &&
1360	    needs_nv12_wa(new_crtc_state))
1361		skl_wa_827(dev_priv, pipe, true);
1362
1363	/* Wa_2006604312:icl,ehl */
1364	if (!needs_scalerclk_wa(old_crtc_state) &&
1365	    needs_scalerclk_wa(new_crtc_state))
1366		icl_wa_scalerclkgating(dev_priv, pipe, true);
1367
1368	/* Wa_1604331009:icl,jsl,ehl */
1369	if (!needs_cursorclk_wa(old_crtc_state) &&
1370	    needs_cursorclk_wa(new_crtc_state))
1371		icl_wa_cursorclkgating(dev_priv, pipe, true);
1372
1373	/*
1374	 * Vblank time updates from the shadow to live plane control register
1375	 * are blocked if the memory self-refresh mode is active at that
1376	 * moment. So to make sure the plane gets truly disabled, disable
1377	 * first the self-refresh mode. The self-refresh enable bit in turn
1378	 * will be checked/applied by the HW only at the next frame start
1379	 * event which is after the vblank start event, so we need to have a
1380	 * wait-for-vblank between disabling the plane and the pipe.
1381	 */
1382	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
1383	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
1384		intel_crtc_wait_for_next_vblank(crtc);
1385
1386	/*
1387	 * IVB workaround: must disable low power watermarks for at least
1388	 * one frame before enabling scaling.  LP watermarks can be re-enabled
1389	 * when scaling is disabled.
1390	 *
1391	 * WaCxSRDisabledForSpriteScaling:ivb
1392	 */
1393	if (old_crtc_state->hw.active &&
1394	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
1395		intel_crtc_wait_for_next_vblank(crtc);
1396
1397	/*
1398	 * If we're doing a modeset we don't need to do any
1399	 * pre-vblank watermark programming here.
1400	 */
1401	if (!intel_crtc_needs_modeset(new_crtc_state)) {
1402		/*
1403		 * For platforms that support atomic watermarks, program the
1404		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
1405		 * will be the intermediate values that are safe for both pre- and
1406		 * post- vblank; when vblank happens, the 'active' values will be set
1407		 * to the final 'target' values and we'll do this again to get the
1408		 * optimal watermarks.  For gen9+ platforms, the values we program here
1409		 * will be the final target values which will get automatically latched
1410		 * at vblank time; no further programming will be necessary.
1411		 *
1412		 * If a platform hasn't been transitioned to atomic watermarks yet,
1413		 * we'll continue to update watermarks the old way, if flags tell
1414		 * us to.
1415		 */
1416		if (!intel_initial_watermarks(state, crtc))
1417			if (new_crtc_state->update_wm_pre)
1418				intel_update_watermarks(dev_priv);
1419	}
1420
1421	/*
1422	 * Gen2 reports pipe underruns whenever all planes are disabled.
1423	 * So disable underrun reporting before all the planes get disabled.
1424	 *
1425	 * We do this after .initial_watermarks() so that we have a
1426	 * chance of catching underruns with the intermediate watermarks
1427	 * vs. the old plane configuration.
1428	 */
1429	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
1430		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1431
1432	/*
1433	 * WA for platforms where async address update enable bit
1434	 * is double buffered and only latched at start of vblank.
1435	 */
1436	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
1437		intel_crtc_async_flip_disable_wa(state, crtc);
1438}
1439
1440static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1441				      struct intel_crtc *crtc)
1442{
1443	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1444	const struct intel_crtc_state *new_crtc_state =
1445		intel_atomic_get_new_crtc_state(state, crtc);
1446	unsigned int update_mask = new_crtc_state->update_planes;
1447	const struct intel_plane_state *old_plane_state;
1448	struct intel_plane *plane;
1449	unsigned fb_bits = 0;
1450	int i;
1451
1452	intel_crtc_dpms_overlay_disable(crtc);
1453
1454	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1455		if (crtc->pipe != plane->pipe ||
1456		    !(update_mask & BIT(plane->id)))
1457			continue;
1458
1459		intel_plane_disable_arm(plane, new_crtc_state);
1460
1461		if (old_plane_state->uapi.visible)
1462			fb_bits |= plane->frontbuffer_bit;
1463	}
1464
1465	intel_frontbuffer_flip(dev_priv, fb_bits);
1466}
1467
1468/*
1469 * intel_connector_primary_encoder - get the primary encoder for a connector
1470 * @connector: connector for which to return the encoder
1471 *
1472 * Returns the primary encoder for a connector. There is a 1:1 mapping from
1473 * all connectors to their encoder, except for DP-MST connectors which have
1474 * both a virtual and a primary encoder. These DP-MST primary encoders can be
1475 * pointed to by as many DP-MST connectors as there are pipes.
1476 */
1477static struct intel_encoder *
1478intel_connector_primary_encoder(struct intel_connector *connector)
1479{
1480	struct intel_encoder *encoder;
1481
1482	if (connector->mst_port)
1483		return &dp_to_dig_port(connector->mst_port)->base;
1484
1485	encoder = intel_attached_encoder(connector);
1486	drm_WARN_ON(connector->base.dev, !encoder);
1487
1488	return encoder;
1489}
1490
1491static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1492{
1493	struct drm_i915_private *i915 = to_i915(state->base.dev);
1494	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1495	struct intel_crtc *crtc;
1496	struct drm_connector_state *new_conn_state;
1497	struct drm_connector *connector;
1498	int i;
1499
1500	/*
1501	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1502	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1503	 */
1504	if (i915->display.dpll.mgr) {
1505		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1506			if (intel_crtc_needs_modeset(new_crtc_state))
1507				continue;
1508
1509			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1510			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
1511		}
1512	}
1513
1514	if (!state->modeset)
1515		return;
1516
1517	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1518					i) {
1519		struct intel_connector *intel_connector;
1520		struct intel_encoder *encoder;
1521		struct intel_crtc *crtc;
1522
1523		if (!intel_connector_needs_modeset(state, connector))
1524			continue;
1525
1526		intel_connector = to_intel_connector(connector);
1527		encoder = intel_connector_primary_encoder(intel_connector);
1528		if (!encoder->update_prepare)
1529			continue;
1530
1531		crtc = new_conn_state->crtc ?
1532			to_intel_crtc(new_conn_state->crtc) : NULL;
1533		encoder->update_prepare(state, encoder, crtc);
1534	}
1535}
1536
1537static void intel_encoders_update_complete(struct intel_atomic_state *state)
1538{
1539	struct drm_connector_state *new_conn_state;
1540	struct drm_connector *connector;
1541	int i;
1542
1543	if (!state->modeset)
1544		return;
1545
1546	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1547					i) {
1548		struct intel_connector *intel_connector;
1549		struct intel_encoder *encoder;
1550		struct intel_crtc *crtc;
1551
1552		if (!intel_connector_needs_modeset(state, connector))
1553			continue;
1554
1555		intel_connector = to_intel_connector(connector);
1556		encoder = intel_connector_primary_encoder(intel_connector);
1557		if (!encoder->update_complete)
1558			continue;
1559
1560		crtc = new_conn_state->crtc ?
1561			to_intel_crtc(new_conn_state->crtc) : NULL;
1562		encoder->update_complete(state, encoder, crtc);
1563	}
1564}
1565
1566static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1567					  struct intel_crtc *crtc)
1568{
1569	const struct intel_crtc_state *crtc_state =
1570		intel_atomic_get_new_crtc_state(state, crtc);
1571	const struct drm_connector_state *conn_state;
1572	struct drm_connector *conn;
1573	int i;
1574
1575	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1576		struct intel_encoder *encoder =
1577			to_intel_encoder(conn_state->best_encoder);
1578
1579		if (conn_state->crtc != &crtc->base)
1580			continue;
1581
1582		if (encoder->pre_pll_enable)
1583			encoder->pre_pll_enable(state, encoder,
1584						crtc_state, conn_state);
1585	}
1586}
1587
1588static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1589				      struct intel_crtc *crtc)
1590{
1591	const struct intel_crtc_state *crtc_state =
1592		intel_atomic_get_new_crtc_state(state, crtc);
1593	const struct drm_connector_state *conn_state;
1594	struct drm_connector *conn;
1595	int i;
1596
1597	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1598		struct intel_encoder *encoder =
1599			to_intel_encoder(conn_state->best_encoder);
1600
1601		if (conn_state->crtc != &crtc->base)
1602			continue;
1603
1604		if (encoder->pre_enable)
1605			encoder->pre_enable(state, encoder,
1606					    crtc_state, conn_state);
1607	}
1608}
1609
1610static void intel_encoders_enable(struct intel_atomic_state *state,
1611				  struct intel_crtc *crtc)
1612{
1613	const struct intel_crtc_state *crtc_state =
1614		intel_atomic_get_new_crtc_state(state, crtc);
1615	const struct drm_connector_state *conn_state;
1616	struct drm_connector *conn;
1617	int i;
1618
1619	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1620		struct intel_encoder *encoder =
1621			to_intel_encoder(conn_state->best_encoder);
1622
1623		if (conn_state->crtc != &crtc->base)
1624			continue;
1625
1626		if (encoder->enable)
1627			encoder->enable(state, encoder,
1628					crtc_state, conn_state);
1629		intel_opregion_notify_encoder(encoder, true);
1630	}
1631}
1632
1633static void intel_encoders_disable(struct intel_atomic_state *state,
1634				   struct intel_crtc *crtc)
1635{
1636	const struct intel_crtc_state *old_crtc_state =
1637		intel_atomic_get_old_crtc_state(state, crtc);
1638	const struct drm_connector_state *old_conn_state;
1639	struct drm_connector *conn;
1640	int i;
1641
1642	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1643		struct intel_encoder *encoder =
1644			to_intel_encoder(old_conn_state->best_encoder);
1645
1646		if (old_conn_state->crtc != &crtc->base)
1647			continue;
1648
1649		intel_opregion_notify_encoder(encoder, false);
1650		if (encoder->disable)
1651			encoder->disable(state, encoder,
1652					 old_crtc_state, old_conn_state);
1653	}
1654}
1655
1656static void intel_encoders_post_disable(struct intel_atomic_state *state,
1657					struct intel_crtc *crtc)
1658{
1659	const struct intel_crtc_state *old_crtc_state =
1660		intel_atomic_get_old_crtc_state(state, crtc);
1661	const struct drm_connector_state *old_conn_state;
1662	struct drm_connector *conn;
1663	int i;
1664
1665	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1666		struct intel_encoder *encoder =
1667			to_intel_encoder(old_conn_state->best_encoder);
1668
1669		if (old_conn_state->crtc != &crtc->base)
1670			continue;
1671
1672		if (encoder->post_disable)
1673			encoder->post_disable(state, encoder,
1674					      old_crtc_state, old_conn_state);
1675	}
1676}
1677
1678static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1679					    struct intel_crtc *crtc)
1680{
1681	const struct intel_crtc_state *old_crtc_state =
1682		intel_atomic_get_old_crtc_state(state, crtc);
1683	const struct drm_connector_state *old_conn_state;
1684	struct drm_connector *conn;
1685	int i;
1686
1687	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1688		struct intel_encoder *encoder =
1689			to_intel_encoder(old_conn_state->best_encoder);
1690
1691		if (old_conn_state->crtc != &crtc->base)
1692			continue;
1693
1694		if (encoder->post_pll_disable)
1695			encoder->post_pll_disable(state, encoder,
1696						  old_crtc_state, old_conn_state);
1697	}
1698}
1699
1700static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1701				       struct intel_crtc *crtc)
1702{
1703	const struct intel_crtc_state *crtc_state =
1704		intel_atomic_get_new_crtc_state(state, crtc);
1705	const struct drm_connector_state *conn_state;
1706	struct drm_connector *conn;
1707	int i;
1708
1709	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1710		struct intel_encoder *encoder =
1711			to_intel_encoder(conn_state->best_encoder);
1712
1713		if (conn_state->crtc != &crtc->base)
1714			continue;
1715
1716		if (encoder->update_pipe)
1717			encoder->update_pipe(state, encoder,
1718					     crtc_state, conn_state);
1719	}
1720}
1721
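/*
 * Write the primary plane registers as disabled. Callers use this to
 * get the pipe gamma/csc enable bits in DSPCNTR applied for the pipe
 * bottom color (see the comments at the call sites).
 */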
1722static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1723{
1724	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1725	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1726
1727	plane->disable_arm(plane, crtc_state);
1728}
1729
1730static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1731{
1732	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1733	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1734
1735	if (crtc_state->has_pch_encoder) {
1736		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1737					       &crtc_state->fdi_m_n);
1738	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1739		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1740					       &crtc_state->dp_m_n);
1741		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1742					       &crtc_state->dp_m2_n2);
1743	}
1744
1745	intel_set_transcoder_timings(crtc_state);
1746
1747	ilk_set_pipeconf(crtc_state);
1748}
1749
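/*
 * Pipe enable sequence for ILK-style platforms: suppress underrun
 * reporting around the fragile FDI/PCH steps, bring up the CPU
 * transcoder and (if present) the PCH side, and re-enable underrun
 * reporting only once things have settled.
 */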
1750static void ilk_crtc_enable(struct intel_atomic_state *state,
1751			    struct intel_crtc *crtc)
1752{
1753	const struct intel_crtc_state *new_crtc_state =
1754		intel_atomic_get_new_crtc_state(state, crtc);
1755	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1756	enum pipe pipe = crtc->pipe;
1757
1758	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1759		return;
1760
1761	/*
1762	 * Sometimes spurious CPU pipe underruns happen during FDI
1763	 * training, at least with VGA+HDMI cloning. Suppress them.
1764	 *
1765	 * On ILK we get occasional spurious CPU pipe underruns
1766	 * between eDP port A enable and vdd enable. PCH port
1767	 * enable also seems to result in occasional CPU pipe underruns.
1768	 *
1769	 * Spurious PCH underruns also occur during PCH enabling.
1770	 */
1771	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1772	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1773
1774	ilk_configure_cpu_transcoder(new_crtc_state);
1775
1776	intel_set_pipe_src_size(new_crtc_state);
1777
1778	crtc->active = true;
1779
1780	intel_encoders_pre_enable(state, crtc);
1781
1782	if (new_crtc_state->has_pch_encoder) {
1783		ilk_pch_pre_enable(state, crtc);
1784	} else {
1785		assert_fdi_tx_disabled(dev_priv, pipe);
1786		assert_fdi_rx_disabled(dev_priv, pipe);
1787	}
1788
1789	ilk_pfit_enable(new_crtc_state);
1790
1791	/*
1792	 * On ILK+ the LUT must be loaded before the pipe is running,
1793	 * but with clocks enabled
1794	 */
1795	intel_color_load_luts(new_crtc_state);
1796	intel_color_commit_noarm(new_crtc_state);
1797	intel_color_commit_arm(new_crtc_state);
1798	/* update DSPCNTR to configure gamma for pipe bottom color */
1799	intel_disable_primary_plane(new_crtc_state);
1800
1801	intel_initial_watermarks(state, crtc);
1802	intel_enable_transcoder(new_crtc_state);
1803
1804	if (new_crtc_state->has_pch_encoder)
1805		ilk_pch_enable(state, crtc);
1806
1807	intel_crtc_vblank_on(new_crtc_state);
1808
1809	intel_encoders_enable(state, crtc);
1810
1811	if (HAS_PCH_CPT(dev_priv))
1812		cpt_verify_modeset(dev_priv, pipe);
1813
1814	/*
1815	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
1816	 * And a second vblank wait is needed at least on ILK with
1817	 * some interlaced HDMI modes. Let's do the double wait always
1818	 * in case there are more corner cases we don't know about.
1819	 */
1820	if (new_crtc_state->has_pch_encoder) {
1821		intel_crtc_wait_for_next_vblank(crtc);
1822		intel_crtc_wait_for_next_vblank(crtc);
1823	}
1824	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1825	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1826}
1827
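/*
 * Display WA #1180 (glk): toggle the DPF/DPFR clock gating disable
 * bits in CLKGATE_DIS_PSL while the pipe scaler is being enabled.
 */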
1828static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1829					    enum pipe pipe, bool apply)
1830{
1831	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1832	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1833
1834	if (apply)
1835		val |= mask;
1836	else
1837		val &= ~mask;
1838
1839	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1840}
1841
1842static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1843{
1844	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1845	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1846
1847	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1848		       HSW_LINETIME(crtc_state->linetime) |
1849		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
1850}
1851
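/*
 * Program the frame start delay in the transcoder chicken register;
 * the value is stored as "delay in scanlines - 1". MTL moved the
 * register to a new offset, hence the DISPLAY_VER() >= 14 check.
 */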
1852static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1853{
1854	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1855	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1856	enum transcoder transcoder = crtc_state->cpu_transcoder;
1857	i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
1858			 CHICKEN_TRANS(transcoder);
1859	u32 val;
1860
1861	val = intel_de_read(dev_priv, reg);
1862	val &= ~HSW_FRAME_START_DELAY_MASK;
1863	val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
1864	intel_de_write(dev_priv, reg, val);
1865}
1866
1867static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1868					 const struct intel_crtc_state *crtc_state)
1869{
1870	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
1871
1872	/*
1873	 * Enable sequence steps 1-7 on bigjoiner master
1874	 */
1875	if (intel_crtc_is_bigjoiner_slave(crtc_state))
1876		intel_encoders_pre_pll_enable(state, master_crtc);
1877
1878	if (crtc_state->shared_dpll)
1879		intel_enable_shared_dpll(crtc_state);
1880
1881	if (intel_crtc_is_bigjoiner_slave(crtc_state))
1882		intel_encoders_pre_enable(state, master_crtc);
1883}
1884
1885static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1886{
1887	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1888	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1889	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1890
1891	if (crtc_state->has_pch_encoder) {
1892		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1893					       &crtc_state->fdi_m_n);
1894	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1895		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1896					       &crtc_state->dp_m_n);
1897		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1898					       &crtc_state->dp_m2_n2);
1899	}
1900
1901	intel_set_transcoder_timings(crtc_state);
1902
1903	if (cpu_transcoder != TRANSCODER_EDP)
1904		intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
1905			       crtc_state->pixel_multiplier - 1);
1906
1907	hsw_set_frame_start_delay(crtc_state);
1908
1909	hsw_set_transconf(crtc_state);
1910}
1911
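/*
 * Pipe enable sequence for HSW and later. With bigjoiner the encoder
 * hooks are run against the master CRTC only; see
 * icl_ddi_bigjoiner_pre_enable().
 */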
1912static void hsw_crtc_enable(struct intel_atomic_state *state,
1913			    struct intel_crtc *crtc)
1914{
1915	const struct intel_crtc_state *new_crtc_state =
1916		intel_atomic_get_new_crtc_state(state, crtc);
1917	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1918	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
1919	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1920	bool psl_clkgate_wa;
1921
1922	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1923		return;
1924
1925	if (!new_crtc_state->bigjoiner_pipes) {
1926		intel_encoders_pre_pll_enable(state, crtc);
1927
1928		if (new_crtc_state->shared_dpll)
1929			intel_enable_shared_dpll(new_crtc_state);
1930
1931		intel_encoders_pre_enable(state, crtc);
1932	} else {
1933		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
1934	}
1935
1936	intel_dsc_enable(new_crtc_state);
1937
1938	if (DISPLAY_VER(dev_priv) >= 13)
1939		intel_uncompressed_joiner_enable(new_crtc_state);
1940
1941	intel_set_pipe_src_size(new_crtc_state);
1942	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
1943		bdw_set_pipemisc(new_crtc_state);
1944
1945	if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
1946	    !transcoder_is_dsi(cpu_transcoder))
1947		hsw_configure_cpu_transcoder(new_crtc_state);
1948
1949	crtc->active = true;
1950
1951	/* Display WA #1180: WaDisableScalarClockGating: glk */
1952	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
1953		new_crtc_state->pch_pfit.enabled;
1954	if (psl_clkgate_wa)
1955		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
1956
1957	if (DISPLAY_VER(dev_priv) >= 9)
1958		skl_pfit_enable(new_crtc_state);
1959	else
1960		ilk_pfit_enable(new_crtc_state);
1961
1962	/*
1963	 * On ILK+ the LUT must be loaded before the pipe is running,
1964	 * but with clocks enabled
1965	 */
1966	intel_color_load_luts(new_crtc_state);
1967	intel_color_commit_noarm(new_crtc_state);
1968	intel_color_commit_arm(new_crtc_state);
1969	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
1970	if (DISPLAY_VER(dev_priv) < 9)
1971		intel_disable_primary_plane(new_crtc_state);
1972
1973	hsw_set_linetime_wm(new_crtc_state);
1974
1975	if (DISPLAY_VER(dev_priv) >= 11)
1976		icl_set_pipe_chicken(new_crtc_state);
1977
1978	intel_initial_watermarks(state, crtc);
1979
1980	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
1981		intel_crtc_vblank_on(new_crtc_state);
1982
1983	intel_encoders_enable(state, crtc);
1984
1985	if (psl_clkgate_wa) {
1986		intel_crtc_wait_for_next_vblank(crtc);
1987		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
1988	}
1989
1990	/* If we change the relative order between pipe/plane enabling, we need
1991	 * to change the workaround. */
1992	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
1993	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
1994		struct intel_crtc *wa_crtc;
1995
1996		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
1997
1998		intel_crtc_wait_for_next_vblank(wa_crtc);
1999		intel_crtc_wait_for_next_vblank(wa_crtc);
2000	}
2001}
2002
2003void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2004{
2005	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2006	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2007	enum pipe pipe = crtc->pipe;
2008
2009	/* To avoid upsetting the power well on Haswell, only disable the pfit
2010	 * if it's in use. The hw state code will make sure we get this right. */
2011	if (!old_crtc_state->pch_pfit.enabled)
2012		return;
2013
2014	intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
2015	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
2016	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
2017}
2018
2019static void ilk_crtc_disable(struct intel_atomic_state *state,
2020			     struct intel_crtc *crtc)
2021{
2022	const struct intel_crtc_state *old_crtc_state =
2023		intel_atomic_get_old_crtc_state(state, crtc);
2024	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2025	enum pipe pipe = crtc->pipe;
2026
2027	/*
2028	 * Sometimes spurious CPU pipe underruns happen when the
2029	 * pipe is already disabled, but FDI RX/TX is still enabled.
2030	 * Happens at least with VGA+HDMI cloning. Suppress them.
2031	 */
2032	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2033	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2034
2035	intel_encoders_disable(state, crtc);
2036
2037	intel_crtc_vblank_off(old_crtc_state);
2038
2039	intel_disable_transcoder(old_crtc_state);
2040
2041	ilk_pfit_disable(old_crtc_state);
2042
2043	if (old_crtc_state->has_pch_encoder)
2044		ilk_pch_disable(state, crtc);
2045
2046	intel_encoders_post_disable(state, crtc);
2047
2048	if (old_crtc_state->has_pch_encoder)
2049		ilk_pch_post_disable(state, crtc);
2050
2051	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2052	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
2053}
2054
2055static void hsw_crtc_disable(struct intel_atomic_state *state,
2056			     struct intel_crtc *crtc)
2057{
2058	const struct intel_crtc_state *old_crtc_state =
2059		intel_atomic_get_old_crtc_state(state, crtc);
2060
2061	/*
2062	 * FIXME collapse everything to one hook.
2063	 * Need care with mst->ddi interactions.
2064	 */
2065	if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
2066		intel_encoders_disable(state, crtc);
2067		intel_encoders_post_disable(state, crtc);
2068	}
2069}
2070
2071static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
2072{
2073	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2074	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2075
2076	if (!crtc_state->gmch_pfit.control)
2077		return;
2078
2079	/*
2080	 * The panel fitter should only be adjusted whilst the pipe is disabled,
2081	 * according to the register description and the PRM.
2082	 */
2083	drm_WARN_ON(&dev_priv->drm,
2084		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
2085	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2086
2087	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
2088		       crtc_state->gmch_pfit.pgm_ratios);
2089	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
2090
2091	/* Border color in case we don't scale up to the full screen. Black by
2092	 * default, change to something else for debugging. */
2093	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
2094}
2095
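/*
 * PHY classification helpers: depending on the platform each PHY is a
 * combo PHY, a Type-C PHY or (on DG2) a Synopsys PHY; PHY_NONE is
 * never any of them.
 */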
2096bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2097{
2098	if (phy == PHY_NONE)
2099		return false;
2100	else if (IS_ALDERLAKE_S(dev_priv))
2101		return phy <= PHY_E;
2102	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2103		return phy <= PHY_D;
2104	else if (IS_JSL_EHL(dev_priv))
2105		return phy <= PHY_C;
2106	else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
2107		return phy <= PHY_B;
2108	else
2109		/*
2110		 * DG2 outputs labelled as "combo PHY" in the bspec use
2111		 * SNPS PHYs with completely different programming,
2112		 * hence we always return false here.
2113		 */
2114		return false;
2115}
2116
2117bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2118{
2119	if (IS_DG2(dev_priv))
2120		/* DG2's "TC1" output uses a SNPS PHY */
2121		return false;
2122	else if (IS_ALDERLAKE_P(dev_priv))
2123		return phy >= PHY_F && phy <= PHY_I;
2124	else if (IS_TIGERLAKE(dev_priv))
2125		return phy >= PHY_D && phy <= PHY_I;
2126	else if (IS_ICELAKE(dev_priv))
2127		return phy >= PHY_C && phy <= PHY_F;
2128	else
2129		return false;
2130}
2131
2132bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2133{
2134	if (phy == PHY_NONE)
2135		return false;
2136	else if (IS_DG2(dev_priv))
2137		/*
2138		 * All four "combo" ports and the TC1 port (PHY E) use
2139		 * Synopsis PHYs.
2140		 */
2141		return phy <= PHY_E;
2142
2143	return false;
2144}
2145
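/*
 * Map a port to its PHY. The default is the identity mapping
 * (PORT_A -> PHY_A etc.); platforms whose Type-C or XELPD ports start
 * at a different PHY adjust the offset, e.g. ADL-S maps PORT_TC1 to
 * PHY_B.
 */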
2146enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2147{
2148	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2149		return PHY_D + port - PORT_D_XELPD;
2150	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2151		return PHY_F + port - PORT_TC1;
2152	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2153		return PHY_B + port - PORT_TC1;
2154	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2155		return PHY_C + port - PORT_TC1;
2156	else if (IS_JSL_EHL(i915) && port == PORT_D)
2157		return PHY_A;
2158
2159	return PHY_A + port - PORT_A;
2160}
2161
2162enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2163{
2164	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2165		return TC_PORT_NONE;
2166
2167	if (DISPLAY_VER(dev_priv) >= 12)
2168		return TC_PORT_1 + port - PORT_TC1;
2169	else
2170		return TC_PORT_1 + port - PORT_C;
2171}
2172
2173enum intel_display_power_domain
2174intel_aux_power_domain(struct intel_digital_port *dig_port)
2175{
2176	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
2177
2178	if (intel_tc_port_in_tbt_alt_mode(dig_port))
2179		return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
2180
2181	return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
2182}
2183
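/*
 * Compute the bitmask of power domains needed by this CRTC state:
 * pipe, transcoder, panel fitter, every attached encoder's domain,
 * audio MMIO, display core (for a shared DPLL) and the VDSC domain.
 */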
2184static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2185				   struct intel_power_domain_mask *mask)
2186{
2187	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2188	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2189	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2190	struct drm_encoder *encoder;
2191	enum pipe pipe = crtc->pipe;
2192
2193	bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
2194
2195	if (!crtc_state->hw.active)
2196		return;
2197
2198	set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
2199	set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
2200	if (crtc_state->pch_pfit.enabled ||
2201	    crtc_state->pch_pfit.force_thru)
2202		set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
2203
2204	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2205				  crtc_state->uapi.encoder_mask) {
2206		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2207
2208		set_bit(intel_encoder->power_domain, mask->bits);
2209	}
2210
2211	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2212		set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
2213
2214	if (crtc_state->shared_dpll)
2215		set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
2216
2217	if (crtc_state->dsc.compression_enable)
2218		set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
2219}
2220
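/*
 * Grab references on the domains that are newly needed and return the
 * no-longer-needed ones in @old_domains so the caller can release them
 * with intel_modeset_put_crtc_power_domains() after the commit.
 */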
2221void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2222					  struct intel_power_domain_mask *old_domains)
2223{
2224	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2225	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2226	enum intel_display_power_domain domain;
2227	struct intel_power_domain_mask domains, new_domains;
2228
2229	get_crtc_power_domains(crtc_state, &domains);
2230
2231	bitmap_andnot(new_domains.bits,
2232		      domains.bits,
2233		      crtc->enabled_power_domains.mask.bits,
2234		      POWER_DOMAIN_NUM);
2235	bitmap_andnot(old_domains->bits,
2236		      crtc->enabled_power_domains.mask.bits,
2237		      domains.bits,
2238		      POWER_DOMAIN_NUM);
2239
2240	for_each_power_domain(domain, &new_domains)
2241		intel_display_power_get_in_set(dev_priv,
2242					       &crtc->enabled_power_domains,
2243					       domain);
2244}
2245
2246void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2247					  struct intel_power_domain_mask *domains)
2248{
2249	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2250					    &crtc->enabled_power_domains,
2251					    domains);
2252}
2253
2254static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
2255{
2256	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2257	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2258
2259	if (intel_crtc_has_dp_encoder(crtc_state)) {
2260		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
2261					       &crtc_state->dp_m_n);
2262		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
2263					       &crtc_state->dp_m2_n2);
2264	}
2265
2266	intel_set_transcoder_timings(crtc_state);
2267
2268	i9xx_set_pipeconf(crtc_state);
2269}
2270
2271static void valleyview_crtc_enable(struct intel_atomic_state *state,
2272				   struct intel_crtc *crtc)
2273{
2274	const struct intel_crtc_state *new_crtc_state =
2275		intel_atomic_get_new_crtc_state(state, crtc);
2276	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2277	enum pipe pipe = crtc->pipe;
2278
2279	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2280		return;
2281
2282	i9xx_configure_cpu_transcoder(new_crtc_state);
2283
2284	intel_set_pipe_src_size(new_crtc_state);
2285
2286	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2287		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
2288		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
2289	}
2290
2291	crtc->active = true;
2292
2293	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2294
2295	intel_encoders_pre_pll_enable(state, crtc);
2296
2297	if (IS_CHERRYVIEW(dev_priv))
2298		chv_enable_pll(new_crtc_state);
2299	else
2300		vlv_enable_pll(new_crtc_state);
2301
2302	intel_encoders_pre_enable(state, crtc);
2303
2304	i9xx_pfit_enable(new_crtc_state);
2305
2306	intel_color_load_luts(new_crtc_state);
2307	intel_color_commit_noarm(new_crtc_state);
2308	intel_color_commit_arm(new_crtc_state);
2309	/* update DSPCNTR to configure gamma for pipe bottom color */
2310	intel_disable_primary_plane(new_crtc_state);
2311
2312	intel_initial_watermarks(state, crtc);
2313	intel_enable_transcoder(new_crtc_state);
2314
2315	intel_crtc_vblank_on(new_crtc_state);
2316
2317	intel_encoders_enable(state, crtc);
2318}
2319
2320static void i9xx_crtc_enable(struct intel_atomic_state *state,
2321			     struct intel_crtc *crtc)
2322{
2323	const struct intel_crtc_state *new_crtc_state =
2324		intel_atomic_get_new_crtc_state(state, crtc);
2325	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2326	enum pipe pipe = crtc->pipe;
2327
2328	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2329		return;
2330
2331	i9xx_configure_cpu_transcoder(new_crtc_state);
2332
2333	intel_set_pipe_src_size(new_crtc_state);
2334
2335	crtc->active = true;
2336
2337	if (DISPLAY_VER(dev_priv) != 2)
2338		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2339
2340	intel_encoders_pre_enable(state, crtc);
2341
2342	i9xx_enable_pll(new_crtc_state);
2343
2344	i9xx_pfit_enable(new_crtc_state);
2345
2346	intel_color_load_luts(new_crtc_state);
2347	intel_color_commit_noarm(new_crtc_state);
2348	intel_color_commit_arm(new_crtc_state);
2349	/* update DSPCNTR to configure gamma for pipe bottom color */
2350	intel_disable_primary_plane(new_crtc_state);
2351
2352	if (!intel_initial_watermarks(state, crtc))
2353		intel_update_watermarks(dev_priv);
2354	intel_enable_transcoder(new_crtc_state);
2355
2356	intel_crtc_vblank_on(new_crtc_state);
2357
2358	intel_encoders_enable(state, crtc);
2359
2360	/* prevents spurious underruns */
2361	if (DISPLAY_VER(dev_priv) == 2)
2362		intel_crtc_wait_for_next_vblank(crtc);
2363}
2364
2365static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2366{
2367	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2368	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2369
2370	if (!old_crtc_state->gmch_pfit.control)
2371		return;
2372
2373	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2374
2375	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2376		    intel_de_read(dev_priv, PFIT_CONTROL));
2377	intel_de_write(dev_priv, PFIT_CONTROL, 0);
2378}
2379
2380static void i9xx_crtc_disable(struct intel_atomic_state *state,
2381			      struct intel_crtc *crtc)
2382{
2383	struct intel_crtc_state *old_crtc_state =
2384		intel_atomic_get_old_crtc_state(state, crtc);
2385	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2386	enum pipe pipe = crtc->pipe;
2387
2388	/*
2389	 * On gen2 planes are double buffered but the pipe isn't, so we must
2390	 * wait for planes to fully turn off before disabling the pipe.
2391	 */
2392	if (DISPLAY_VER(dev_priv) == 2)
2393		intel_crtc_wait_for_next_vblank(crtc);
2394
2395	intel_encoders_disable(state, crtc);
2396
2397	intel_crtc_vblank_off(old_crtc_state);
2398
2399	intel_disable_transcoder(old_crtc_state);
2400
2401	i9xx_pfit_disable(old_crtc_state);
2402
2403	intel_encoders_post_disable(state, crtc);
2404
2405	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2406		if (IS_CHERRYVIEW(dev_priv))
2407			chv_disable_pll(dev_priv, pipe);
2408		else if (IS_VALLEYVIEW(dev_priv))
2409			vlv_disable_pll(dev_priv, pipe);
2410		else
2411			i9xx_disable_pll(old_crtc_state);
2412	}
2413
2414	intel_encoders_post_pll_disable(state, crtc);
2415
2416	if (DISPLAY_VER(dev_priv) != 2)
2417		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2418
2419	if (!dev_priv->display.funcs.wm->initial_watermarks)
2420		intel_update_watermarks(dev_priv);
2421
2422	/* clock the pipe down to 640x480@60 to potentially save power */
2423	if (IS_I830(dev_priv))
2424		i830_enable_pipe(dev_priv, pipe);
2425}
2426
2427
2428/*
2429 * Turn all CRTCs off, but do not adjust state.
2430 * This has to be paired with a call to intel_modeset_setup_hw_state().
2431 */
2432int intel_display_suspend(struct drm_device *dev)
2433{
2434	struct drm_i915_private *dev_priv = to_i915(dev);
2435	struct drm_atomic_state *state;
2436	int ret;
2437
2438	if (!HAS_DISPLAY(dev_priv))
2439		return 0;
2440
2441	state = drm_atomic_helper_suspend(dev);
2442	ret = PTR_ERR_OR_ZERO(state);
2443	if (ret)
2444		drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n",
2445			ret);
2446	else
2447		dev_priv->display.restore.modeset_state = state;
2448	return ret;
2449}
2450
2451void intel_encoder_destroy(struct drm_encoder *encoder)
2452{
2453	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2454
2455	drm_encoder_cleanup(encoder);
2456	kfree(intel_encoder);
2457}
2458
2459static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2460{
2461	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2462
2463	/* GDG double wide on either pipe, otherwise pipe A only */
2464	return DISPLAY_VER(dev_priv) < 4 &&
2465		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2466}
2467
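/*
 * Pipe pixel rate with pch pfit downscaling taken into account: when
 * the pfit shrinks the pipe output the pipe has to fetch pixels faster
 * than the nominal dotclock, hence the src/dst ratio adjustment.
 */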
2468static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2469{
2470	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2471	struct drm_rect src;
2472
2473	/*
2474	 * We only use IF-ID interlacing. If we ever use
2475	 * PF-ID we'll need to adjust the pixel_rate here.
2476	 */
2477
2478	if (!crtc_state->pch_pfit.enabled)
2479		return pixel_rate;
2480
2481	drm_rect_init(&src, 0, 0,
2482		      drm_rect_width(&crtc_state->pipe_src) << 16,
2483		      drm_rect_height(&crtc_state->pipe_src) << 16);
2484
2485	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2486				   pixel_rate);
2487}
2488
2489static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2490					 const struct drm_display_mode *timings)
2491{
2492	mode->hdisplay = timings->crtc_hdisplay;
2493	mode->htotal = timings->crtc_htotal;
2494	mode->hsync_start = timings->crtc_hsync_start;
2495	mode->hsync_end = timings->crtc_hsync_end;
2496
2497	mode->vdisplay = timings->crtc_vdisplay;
2498	mode->vtotal = timings->crtc_vtotal;
2499	mode->vsync_start = timings->crtc_vsync_start;
2500	mode->vsync_end = timings->crtc_vsync_end;
2501
2502	mode->flags = timings->flags;
2503	mode->type = DRM_MODE_TYPE_DRIVER;
2504
2505	mode->clock = timings->crtc_clock;
2506
2507	drm_mode_set_name(mode);
2508}
2509
2510static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2511{
2512	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2513
2514	if (HAS_GMCH(dev_priv))
2515		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
2516		crtc_state->pixel_rate =
2517			crtc_state->hw.pipe_mode.crtc_clock;
2518	else
2519		crtc_state->pixel_rate =
2520			ilk_pipe_pixel_rate(crtc_state);
2521}
2522
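/*
 * With bigjoiner each pipe handles only a 1/num_pipes wide slice of
 * the full mode, so the horizontal timings and the clock are divided
 * down accordingly.
 */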
2523static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2524					   struct drm_display_mode *mode)
2525{
2526	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2527
2528	if (num_pipes < 2)
2529		return;
2530
2531	mode->crtc_clock /= num_pipes;
2532	mode->crtc_hdisplay /= num_pipes;
2533	mode->crtc_hblank_start /= num_pipes;
2534	mode->crtc_hblank_end /= num_pipes;
2535	mode->crtc_hsync_start /= num_pipes;
2536	mode->crtc_hsync_end /= num_pipes;
2537	mode->crtc_htotal /= num_pipes;
2538}
2539
2540static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2541					  struct drm_display_mode *mode)
2542{
2543	int overlap = crtc_state->splitter.pixel_overlap;
2544	int n = crtc_state->splitter.link_count;
2545
2546	if (!crtc_state->splitter.enable)
2547		return;
2548
2549	/*
2550	 * eDP MSO uses segment timings from EDID for transcoder
2551	 * timings, but full mode for everything else.
2552	 *
2553	 * h_full = (h_segment - pixel_overlap) * link_count
2554	 */
2555	mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2556	mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2557	mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2558	mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2559	mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2560	mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2561	mode->crtc_clock *= n;
2562}
2563
2564static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2565{
2566	struct drm_display_mode *mode = &crtc_state->hw.mode;
2567	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2568	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2569
2570	/*
2571	 * Start with the adjusted_mode crtc timings, which
2572	 * have been filled with the transcoder timings.
2573	 */
2574	drm_mode_copy(pipe_mode, adjusted_mode);
2575
2576	/* Expand MSO per-segment transcoder timings to full */
2577	intel_splitter_adjust_timings(crtc_state, pipe_mode);
2578
2579	/*
2580	 * We want the full numbers in adjusted_mode normal timings,
2581	 * adjusted_mode crtc timings are left with the raw transcoder
2582	 * timings.
2583	 */
2584	intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2585
2586	/* Populate the "user" mode with full numbers */
2587	drm_mode_copy(mode, pipe_mode);
2588	intel_mode_from_crtc_timings(mode, mode);
2589	mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
2590		(intel_bigjoiner_num_pipes(crtc_state) ?: 1);
2591	mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
2592
2593	/* Derive per-pipe timings in case bigjoiner is used */
2594	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2595	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2596
2597	intel_crtc_compute_pixel_rate(crtc_state);
2598}
2599
2600void intel_encoder_get_config(struct intel_encoder *encoder,
2601			      struct intel_crtc_state *crtc_state)
2602{
2603	encoder->get_config(encoder, crtc_state);
2604
2605	intel_crtc_readout_derived_state(crtc_state);
2606}
2607
2608static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2609{
2610	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2611	int width, height;
2612
2613	if (num_pipes < 2)
2614		return;
2615
2616	width = drm_rect_width(&crtc_state->pipe_src);
2617	height = drm_rect_height(&crtc_state->pipe_src);
2618
2619	drm_rect_init(&crtc_state->pipe_src, 0, 0,
2620		      width / num_pipes, height);
2621}
2622
2623static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
2624{
2625	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2626	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2627
2628	intel_bigjoiner_compute_pipe_src(crtc_state);
2629
2630	/*
2631	 * Pipe horizontal size must be even in:
2632	 * - DVO ganged mode
2633	 * - LVDS dual channel mode
2634	 * - Double wide pipe
2635	 */
2636	if (drm_rect_width(&crtc_state->pipe_src) & 1) {
2637		if (crtc_state->double_wide) {
2638			drm_dbg_kms(&i915->drm,
2639				    "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
2640				    crtc->base.base.id, crtc->base.name);
2641			return -EINVAL;
2642		}
2643
2644		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
2645		    intel_is_dual_link_lvds(i915)) {
2646			drm_dbg_kms(&i915->drm,
2647				    "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
2648				    crtc->base.base.id, crtc->base.name);
2649			return -EINVAL;
2650		}
2651	}
2652
2653	return 0;
2654}
2655
2656static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
2657{
2658	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2659	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2660	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2661	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2662	int clock_limit = i915->max_dotclk_freq;
2663
2664	/*
2665	 * Start with the adjusted_mode crtc timings, which
2666	 * have been filled with the transcoder timings.
2667	 */
2668	drm_mode_copy(pipe_mode, adjusted_mode);
2669
2670	/* Expand MSO per-segment transcoder timings to full */
2671	intel_splitter_adjust_timings(crtc_state, pipe_mode);
2672
2673	/* Derive per-pipe timings in case bigjoiner is used */
2674	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2675	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2676
2677	if (DISPLAY_VER(i915) < 4) {
2678		clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
2679
2680		/*
2681		 * Enable double wide mode when the dot clock
2682		 * is > 90% of the (display) core speed.
2683		 */
2684		if (intel_crtc_supports_double_wide(crtc) &&
2685		    pipe_mode->crtc_clock > clock_limit) {
2686			clock_limit = i915->max_dotclk_freq;
2687			crtc_state->double_wide = true;
2688		}
2689	}
2690
2691	if (pipe_mode->crtc_clock > clock_limit) {
2692		drm_dbg_kms(&i915->drm,
2693			    "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
2694			    crtc->base.base.id, crtc->base.name,
2695			    pipe_mode->crtc_clock, clock_limit,
2696			    str_yes_no(crtc_state->double_wide));
2697		return -EINVAL;
2698	}
2699
2700	return 0;
2701}
2702
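/*
 * Per-CRTC compute step: clock/DPLL state first, then the pipe source
 * rectangle, the per-pipe mode and pixel rate, and finally the FDI
 * configuration when a PCH encoder is involved.
 */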
2703static int intel_crtc_compute_config(struct intel_atomic_state *state,
2704				     struct intel_crtc *crtc)
2705{
2706	struct intel_crtc_state *crtc_state =
2707		intel_atomic_get_new_crtc_state(state, crtc);
2708	int ret;
2709
2710	ret = intel_dpll_crtc_compute_clock(state, crtc);
2711	if (ret)
2712		return ret;
2713
2714	ret = intel_crtc_compute_pipe_src(crtc_state);
2715	if (ret)
2716		return ret;
2717
2718	ret = intel_crtc_compute_pipe_mode(crtc_state);
2719	if (ret)
2720		return ret;
2721
2722	intel_crtc_compute_pixel_rate(crtc_state);
2723
2724	if (crtc_state->has_pch_encoder)
2725		return ilk_fdi_compute_config(crtc, crtc_state);
2726
2727	return 0;
2728}
2729
2730static void
2731intel_reduce_m_n_ratio(u32 *num, u32 *den)
2732{
2733	while (*num > DATA_LINK_M_N_MASK ||
2734	       *den > DATA_LINK_M_N_MASK) {
2735		*num >>= 1;
2736		*den >>= 1;
2737	}
2738}
2739
2740static void compute_m_n(u32 *ret_m, u32 *ret_n,
2741			u32 m, u32 n, u32 constant_n)
2742{
2743	if (constant_n)
2744		*ret_n = constant_n;
2745	else
2746		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2747
2748	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2749	intel_reduce_m_n_ratio(ret_m, ret_n);
2750}
2751
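/*
 * Worked example (numbers purely for illustration): at HBR
 * (link_clock = 270000, i.e. a 270 MHz symbol clock per lane) with
 * 4 lanes the link carries 270000 * 4 * 8 = 8640000 kbps of payload,
 * so a 24 bpp mode at 148500 kHz yields data M/N = 3564000 / 8640000
 * and link M/N = 148500 / 270000, each then reduced to fit the
 * 24-bit M/N register fields.
 */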
2752void
2753intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
2754		       int pixel_clock, int link_clock,
2755		       struct intel_link_m_n *m_n,
2756		       bool fec_enable)
2757{
2758	u32 data_clock = bits_per_pixel * pixel_clock;
2759
2760	if (fec_enable)
2761		data_clock = intel_dp_mode_to_fec_clock(data_clock);
2762
2763	/*
2764	 * Windows/BIOS uses fixed M/N values always. Follow suit.
2765	 *
2766	 * Also several DP dongles in particular seem to be fussy
2767	 * about too large link M/N values. Presumably the 20bit
2768	 * value used by Windows/BIOS is acceptable to everyone.
2769	 */
2770	m_n->tu = 64;
2771	compute_m_n(&m_n->data_m, &m_n->data_n,
2772		    data_clock, link_clock * nlanes * 8,
2773		    0x8000000);
2774
2775	compute_m_n(&m_n->link_m, &m_n->link_n,
2776		    pixel_clock, link_clock,
2777		    0x80000);
2778}
2779
2780static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2781{
2782	 * There may be no VBT, and if the BIOS enabled SSC we can
2783	 * There may be no VBT; and if the BIOS enabled SSC we can
2784	 * just keep using it to avoid unnecessary flicker.  Whereas if the
2785	 * BIOS isn't using it, don't assume it will work even if the VBT
2786	 * indicates as much.
2787	 */
2788	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2789		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2790						       PCH_DREF_CONTROL) &
2791			DREF_SSC1_ENABLE;
2792
2793		if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2794			drm_dbg_kms(&dev_priv->drm,
2795				    "SSC %s by BIOS, overriding VBT which says %s\n",
2796				    str_enabled_disabled(bios_lvds_use_ssc),
2797				    str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
2798			dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
2799		}
2800	}
2801}
2802
2803void intel_zero_m_n(struct intel_link_m_n *m_n)
2804{
2805	/* corresponds to 0 register value */
2806	memset(m_n, 0, sizeof(*m_n));
2807	m_n->tu = 1;
2808}
2809
2810void intel_set_m_n(struct drm_i915_private *i915,
2811		   const struct intel_link_m_n *m_n,
2812		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
2813		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
2814{
2815	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
2816	intel_de_write(i915, data_n_reg, m_n->data_n);
2817	intel_de_write(i915, link_m_reg, m_n->link_m);
2818	/*
2819	 * On BDW+ writing LINK_N arms the double buffered update
2820	 * of all the M/N registers, so it must be written last.
2821	 */
2822	intel_de_write(i915, link_n_reg, m_n->link_n);
2823}
2824
2825bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
2826				    enum transcoder transcoder)
2827{
2828	if (IS_HASWELL(dev_priv))
2829		return transcoder == TRANSCODER_EDP;
2830
2831	return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
2832}
2833
2834void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
2835				    enum transcoder transcoder,
2836				    const struct intel_link_m_n *m_n)
2837{
2838	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2839	enum pipe pipe = crtc->pipe;
2840
2841	if (DISPLAY_VER(dev_priv) >= 5)
2842		intel_set_m_n(dev_priv, m_n,
2843			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
2844			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
2845	else
2846		intel_set_m_n(dev_priv, m_n,
2847			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
2848			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
2849}
2850
2851void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
2852				    enum transcoder transcoder,
2853				    const struct intel_link_m_n *m_n)
2854{
2855	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2856
2857	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
2858		return;
2859
2860	intel_set_m_n(dev_priv, m_n,
2861		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
2862		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
2863}
2864
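/*
 * Program the transcoder H/V timing registers. Each register packs
 * "active/start - 1" in the low 16 bits and "total/end - 1" in the
 * high 16 bits.
 */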
2865static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
2866{
2867	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2868	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2869	enum pipe pipe = crtc->pipe;
2870	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2871	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2872	u32 crtc_vtotal, crtc_vblank_end;
2873	int vsyncshift = 0;
2874
2875	/* We need to be careful not to change the adjusted mode, as otherwise
2876	 * the hw state checker will get angry at the mismatch. */
2877	crtc_vtotal = adjusted_mode->crtc_vtotal;
2878	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
2879
2880	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
2881		/* the chip adds 2 halflines automatically */
2882		crtc_vtotal -= 1;
2883		crtc_vblank_end -= 1;
2884
2885		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
2886			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
2887		else
2888			vsyncshift = adjusted_mode->crtc_hsync_start -
2889				adjusted_mode->crtc_htotal / 2;
2890		if (vsyncshift < 0)
2891			vsyncshift += adjusted_mode->crtc_htotal;
2892	}
2893
2894	if (DISPLAY_VER(dev_priv) > 3)
2895		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
2896		               vsyncshift);
2897
2898	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
2899		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
2900	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
2901		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
2902	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
2903		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
2904
2905	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
2906		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
2907	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
2908		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
2909	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
2910		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
2911
2912	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
2913	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
2914	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
2915	 * bits. */
2916	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
2917	    (pipe == PIPE_B || pipe == PIPE_C))
2918		intel_de_write(dev_priv, VTOTAL(pipe),
2919		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2921}
2922
2923static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
2924{
2925	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2926	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2927	int width = drm_rect_width(&crtc_state->pipe_src);
2928	int height = drm_rect_height(&crtc_state->pipe_src);
2929	enum pipe pipe = crtc->pipe;
2930
2931	/* pipesrc controls the size that is scaled from, which should
2932	 * always be the user's requested size.
2933	 */
2934	intel_de_write(dev_priv, PIPESRC(pipe),
2935		       PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
2936}
2937
2938static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
2939{
2940	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2941	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2942
2943	if (DISPLAY_VER(dev_priv) == 2)
2944		return false;
2945
2946	if (DISPLAY_VER(dev_priv) >= 9 ||
2947	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2948		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
2949	else
2950		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
2951}
2952
2953static void intel_get_transcoder_timings(struct intel_crtc *crtc,
2954					 struct intel_crtc_state *pipe_config)
2955{
2956	struct drm_device *dev = crtc->base.dev;
2957	struct drm_i915_private *dev_priv = to_i915(dev);
2958	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
2959	u32 tmp;
2960
2961	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
2962	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
2963	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
2964
2965	if (!transcoder_is_dsi(cpu_transcoder)) {
2966		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
2967		pipe_config->hw.adjusted_mode.crtc_hblank_start =
2968							(tmp & 0xffff) + 1;
2969		pipe_config->hw.adjusted_mode.crtc_hblank_end =
2970						((tmp >> 16) & 0xffff) + 1;
2971	}
2972	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
2973	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
2974	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
2975
2976	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
2977	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
2978	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
2979
2980	if (!transcoder_is_dsi(cpu_transcoder)) {
2981		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
2982		pipe_config->hw.adjusted_mode.crtc_vblank_start =
2983							(tmp & 0xffff) + 1;
2984		pipe_config->hw.adjusted_mode.crtc_vblank_end =
2985						((tmp >> 16) & 0xffff) + 1;
2986	}
2987	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
2988	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
2989	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
2990
2991	if (intel_pipe_is_interlaced(pipe_config)) {
2992		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
2993		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
2994		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
2995	}
2996}
2997
2998static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
2999{
3000	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3001	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
3002	enum pipe master_pipe, pipe = crtc->pipe;
3003	int width;
3004
3005	if (num_pipes < 2)
3006		return;
3007
3008	master_pipe = bigjoiner_master_pipe(crtc_state);
3009	width = drm_rect_width(&crtc_state->pipe_src);
3010
3011	drm_rect_translate_to(&crtc_state->pipe_src,
3012			      (pipe - master_pipe) * width, 0);
3013}
3014
3015static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3016				    struct intel_crtc_state *pipe_config)
3017{
3018	struct drm_device *dev = crtc->base.dev;
3019	struct drm_i915_private *dev_priv = to_i915(dev);
3020	u32 tmp;
3021
3022	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3023
3024	drm_rect_init(&pipe_config->pipe_src, 0, 0,
3025		      REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
3026		      REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
3027
3028	intel_bigjoiner_adjust_pipe_src(pipe_config);
3029}
3030
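/*
 * Build and write PIPECONF for gmch platforms: pipe enable policy for
 * modeset vs. fastset, double wide, bpc/dithering (g4x+), interlace
 * mode, color range (vlv/chv), gamma mode and frame start delay.
 */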
3031void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
3032{
3033	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3034	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3035	u32 pipeconf = 0;
3036
3037	/*
3038	 * - We keep both pipes enabled on 830
3039	 * - During modeset the pipe is still disabled and must remain so
3040	 * - During fastset the pipe is already enabled and must remain so
3041	 */
3042	if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
3043		pipeconf |= PIPECONF_ENABLE;
3044
3045	if (crtc_state->double_wide)
3046		pipeconf |= PIPECONF_DOUBLE_WIDE;
3047
3048	/* only g4x and later have fancy bpc/dither controls */
3049	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3050	    IS_CHERRYVIEW(dev_priv)) {
3051		/* Bspec claims that we can't use dithering for 30bpp pipes. */
3052		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
3053			pipeconf |= PIPECONF_DITHER_EN |
3054				    PIPECONF_DITHER_TYPE_SP;
3055
3056		switch (crtc_state->pipe_bpp) {
3057		default:
3058			/* Case prevented by intel_choose_pipe_bpp_dither. */
3059			MISSING_CASE(crtc_state->pipe_bpp);
3060			fallthrough;
3061		case 18:
3062			pipeconf |= PIPECONF_BPC_6;
3063			break;
3064		case 24:
3065			pipeconf |= PIPECONF_BPC_8;
3066			break;
3067		case 30:
3068			pipeconf |= PIPECONF_BPC_10;
3069			break;
3070		}
3071	}
3072
3073	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
3074		if (DISPLAY_VER(dev_priv) < 4 ||
3075		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3076			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3077		else
3078			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
3079	} else {
3080		pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
3081	}
3082
3083	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3084	     crtc_state->limited_color_range)
3085		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
3086
3087	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3088
3089	pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
3090
3091	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
3092	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
3093}
3094
3095static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3096{
3097	if (IS_I830(dev_priv))
3098		return false;
3099
3100	return DISPLAY_VER(dev_priv) >= 4 ||
3101		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
3102}
3103
3104static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
3105{
3106	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3107	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3108	u32 tmp;
3109
3110	if (!i9xx_has_pfit(dev_priv))
3111		return;
3112
3113	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
3114	if (!(tmp & PFIT_ENABLE))
3115		return;
3116
3117	/* Check whether the pfit is attached to our pipe. */
3118	if (DISPLAY_VER(dev_priv) < 4) {
3119		if (crtc->pipe != PIPE_B)
3120			return;
3121	} else {
3122		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3123			return;
3124	}
3125
3126	crtc_state->gmch_pfit.control = tmp;
3127	crtc_state->gmch_pfit.pgm_ratios =
3128		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
3129}
3130
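/*
 * Read the DPLL dividers back via DPIO and recompute the resulting
 * port clock (skipped for DSI, where the DPLL is unused).
 */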
3131static void vlv_crtc_clock_get(struct intel_crtc *crtc,
3132			       struct intel_crtc_state *pipe_config)
3133{
3134	struct drm_device *dev = crtc->base.dev;
3135	struct drm_i915_private *dev_priv = to_i915(dev);
3136	enum pipe pipe = crtc->pipe;
3137	struct dpll clock;
3138	u32 mdiv;
3139	int refclk = 100000;
3140
3141	/* In case of DSI, DPLL will not be used */
3142	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3143		return;
3144
3145	vlv_dpio_get(dev_priv);
3146	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
3147	vlv_dpio_put(dev_priv);
3148
3149	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
3150	clock.m2 = mdiv & DPIO_M2DIV_MASK;
3151	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
3152	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
3153	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
3154
3155	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
3156}
3157
3158static void chv_crtc_clock_get(struct intel_crtc *crtc,
3159			       struct intel_crtc_state *pipe_config)
3160{
3161	struct drm_device *dev = crtc->base.dev;
3162	struct drm_i915_private *dev_priv = to_i915(dev);
3163	enum pipe pipe = crtc->pipe;
3164	enum dpio_channel port = vlv_pipe_to_channel(pipe);
3165	struct dpll clock;
3166	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
3167	int refclk = 100000;
3168
3169	/* In case of DSI, DPLL will not be used */
3170	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3171		return;
3172
3173	vlv_dpio_get(dev_priv);
3174	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
3175	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
3176	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
3177	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
3178	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
3179	vlv_dpio_put(dev_priv);
3180
3181	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
3182	clock.m2 = (pll_dw0 & 0xff) << 22;
3183	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
3184		clock.m2 |= pll_dw2 & 0x3fffff;
3185	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
3186	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
3187	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
3188
3189	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
3190}
3191
3192static enum intel_output_format
3193bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3194{
3195	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3196	u32 tmp;
3197
3198	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3199
3200	if (tmp & PIPEMISC_YUV420_ENABLE) {
3201		/* We support 4:2:0 in full blend mode only */
3202		drm_WARN_ON(&dev_priv->drm,
3203			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3204
3205		return INTEL_OUTPUT_FORMAT_YCBCR420;
3206	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3207		return INTEL_OUTPUT_FORMAT_YCBCR444;
3208	} else {
3209		return INTEL_OUTPUT_FORMAT_RGB;
3210	}
3211}
3212
3213static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3214{
3215	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3216	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3217	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3218	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3219	u32 tmp;
3220
3221	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3222
3223	if (tmp & DISP_PIPE_GAMMA_ENABLE)
3224		crtc_state->gamma_enable = true;
3225
3226	if (!HAS_GMCH(dev_priv) &&
3227	    tmp & DISP_PIPE_CSC_ENABLE)
3228		crtc_state->csc_enable = true;
3229}
3230
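/*
 * Read the full hw state of a gmch pipe back into @pipe_config.
 * Returns false if the pipe's power domain is off or PIPECONF has the
 * pipe disabled.
 */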
3231static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
3232				 struct intel_crtc_state *pipe_config)
3233{
3234	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3235	enum intel_display_power_domain power_domain;
3236	intel_wakeref_t wakeref;
3237	u32 tmp;
3238	bool ret;
3239
3240	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3241	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3242	if (!wakeref)
3243		return false;
3244
3245	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3246	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3247	pipe_config->shared_dpll = NULL;
3248
3249	ret = false;
3250
3251	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
3252	if (!(tmp & PIPECONF_ENABLE))
3253		goto out;
3254
3255	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3256	    IS_CHERRYVIEW(dev_priv)) {
3257		switch (tmp & PIPECONF_BPC_MASK) {
3258		case PIPECONF_BPC_6:
3259			pipe_config->pipe_bpp = 18;
3260			break;
3261		case PIPECONF_BPC_8:
3262			pipe_config->pipe_bpp = 24;
3263			break;
3264		case PIPECONF_BPC_10:
3265			pipe_config->pipe_bpp = 30;
3266			break;
3267		default:
3268			MISSING_CASE(tmp);
3269			break;
3270		}
3271	}
3272
3273	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3274	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
3275		pipe_config->limited_color_range = true;
3276
3277	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
3278
3279	pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
3280
3281	if (IS_CHERRYVIEW(dev_priv))
3282		pipe_config->cgm_mode = intel_de_read(dev_priv,
3283						      CGM_PIPE_MODE(crtc->pipe));
3284
3285	i9xx_get_pipe_color_config(pipe_config);
3286	intel_color_get_config(pipe_config);
3287
3288	if (DISPLAY_VER(dev_priv) < 4)
3289		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
3290
3291	intel_get_transcoder_timings(crtc, pipe_config);
3292	intel_get_pipe_src_size(crtc, pipe_config);
3293
3294	i9xx_get_pfit_config(pipe_config);
3295
3296	if (DISPLAY_VER(dev_priv) >= 4) {
3297		/* No way to read it out on pipes B and C */
3298		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
3299			tmp = dev_priv->chv_dpll_md[crtc->pipe];
3300		else
3301			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
3302		pipe_config->pixel_multiplier =
3303			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
3304			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
3305		pipe_config->dpll_hw_state.dpll_md = tmp;
3306	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
3307		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
3308		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
3309		pipe_config->pixel_multiplier =
3310			((tmp & SDVO_MULTIPLIER_MASK)
3311			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
3312	} else {
3313	/* Note that on i915G/GM the pixel multiplier is in the SDVO
3314	 * port register and will be fixed up in the encoder->get_config
3315	 * function. */
3316		pipe_config->pixel_multiplier = 1;
3317	}
3318	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
3319							DPLL(crtc->pipe));
3320	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
3321		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
3322							       FP0(crtc->pipe));
3323		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
3324							       FP1(crtc->pipe));
3325	} else {
3326		/* Mask out read-only status bits. */
3327		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
3328						     DPLL_PORTC_READY_MASK |
3329						     DPLL_PORTB_READY_MASK);
3330	}
3331
3332	if (IS_CHERRYVIEW(dev_priv))
3333		chv_crtc_clock_get(crtc, pipe_config);
3334	else if (IS_VALLEYVIEW(dev_priv))
3335		vlv_crtc_clock_get(crtc, pipe_config);
3336	else
3337		i9xx_crtc_clock_get(crtc, pipe_config);
3338
3339	/*
3340	 * Normally the dotclock is filled in by the encoder .get_config()
3341	 * but in case the pipe is enabled w/o any ports we need a sane
3342	 * default.
3343	 */
3344	pipe_config->hw.adjusted_mode.crtc_clock =
3345		pipe_config->port_clock / pipe_config->pixel_multiplier;
3346
3347	ret = true;
3348
3349out:
3350	intel_display_power_put(dev_priv, power_domain, wakeref);
3351
3352	return ret;
3353}
3354
3355void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
3356{
3357	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3358	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3359	enum pipe pipe = crtc->pipe;
3360	u32 val = 0;
3361
3362	/*
3363	 * - During modeset the pipe is still disabled and must remain so
3364	 * - During fastset the pipe is already enabled and must remain so
3365	 */
3366	if (!intel_crtc_needs_modeset(crtc_state))
3367		val |= PIPECONF_ENABLE;
3368
3369	switch (crtc_state->pipe_bpp) {
3370	default:
3371		/* Case prevented by intel_choose_pipe_bpp_dither. */
3372		MISSING_CASE(crtc_state->pipe_bpp);
3373		fallthrough;
3374	case 18:
3375		val |= PIPECONF_BPC_6;
3376		break;
3377	case 24:
3378		val |= PIPECONF_BPC_8;
3379		break;
3380	case 30:
3381		val |= PIPECONF_BPC_10;
3382		break;
3383	case 36:
3384		val |= PIPECONF_BPC_12;
3385		break;
3386	}
3387
3388	if (crtc_state->dither)
3389		val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
3390
3391	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3392		val |= PIPECONF_INTERLACE_IF_ID_ILK;
3393	else
3394		val |= PIPECONF_INTERLACE_PF_PD_ILK;
3395
3396	/*
3397	 * This would end up with an odd purple hue over
3398	 * the entire display. Make sure we don't do it.
3399	 */
3400	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
3401		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
3402
3403	if (crtc_state->limited_color_range &&
3404	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3405		val |= PIPECONF_COLOR_RANGE_SELECT;
3406
3407	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3408		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
3409
3410	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3411
3412	val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
3413	val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
3414
3415	intel_de_write(dev_priv, PIPECONF(pipe), val);
3416	intel_de_posting_read(dev_priv, PIPECONF(pipe));
3417}
3418
3419static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3420{
3421	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3422	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3423	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3424	u32 val = 0;
3425
3426	/*
3427	 * - During modeset the pipe is still disabled and must remain so
3428	 * - During fastset the pipe is already enabled and must remain so
3429	 */
3430	if (!intel_crtc_needs_modeset(crtc_state))
3431		val |= PIPECONF_ENABLE;
3432
3433	if (IS_HASWELL(dev_priv) && crtc_state->dither)
3434		val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
3435
3436	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3437		val |= PIPECONF_INTERLACE_IF_ID_ILK;
3438	else
3439		val |= PIPECONF_INTERLACE_PF_PD_ILK;
3440
3441	if (IS_HASWELL(dev_priv) &&
3442	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3443		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
3444
3445	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
3446	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
3447}
3448
3449static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
3450{
3451	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3452	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3453	u32 val = 0;
3454
3455	switch (crtc_state->pipe_bpp) {
3456	case 18:
3457		val |= PIPEMISC_BPC_6;
3458		break;
3459	case 24:
3460		val |= PIPEMISC_BPC_8;
3461		break;
3462	case 30:
3463		val |= PIPEMISC_BPC_10;
3464		break;
3465	case 36:
3466		/* Port output 12BPC defined for ADLP+ */
3467		if (DISPLAY_VER(dev_priv) > 12)
3468			val |= PIPEMISC_BPC_12_ADLP;
3469		break;
3470	default:
3471		MISSING_CASE(crtc_state->pipe_bpp);
3472		break;
3473	}
3474
3475	if (crtc_state->dither)
3476		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
3477
3478	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3479	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3480		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
3481
3482	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3483		val |= PIPEMISC_YUV420_ENABLE |
3484			PIPEMISC_YUV420_MODE_FULL_BLEND;
3485
3486	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3487		val |= PIPEMISC_HDR_MODE_PRECISION;
3488
3489	if (DISPLAY_VER(dev_priv) >= 12)
3490		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
3491
3492	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
3493}
3494
3495int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
3496{
3497	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3498	u32 tmp;
3499
3500	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3501
3502	switch (tmp & PIPEMISC_BPC_MASK) {
3503	case PIPEMISC_BPC_6:
3504		return 18;
3505	case PIPEMISC_BPC_8:
3506		return 24;
3507	case PIPEMISC_BPC_10:
3508		return 30;
3509	/*
3510	 * PORT OUTPUT 12 BPC defined for ADLP+.
3511	 *
3512	 * TODO:
3513	 * For previous platforms with DSI interface, bits 5:7
3514	 * are used for storing pipe_bpp irrespective of dithering.
3515	 * Since the value of 12 BPC is not defined for these bits
3516	 * on older platforms, we need to find a workaround for 12 BPC
3517	 * MIPI DSI HW readout.
3518	 */
3519	case PIPEMISC_BPC_12_ADLP:
3520		if (DISPLAY_VER(dev_priv) > 12)
3521			return 36;
3522		fallthrough;
3523	default:
3524		MISSING_CASE(tmp);
3525		return 0;
3526	}
3527}
3528
3529int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3530{
3531	/*
3532	 * Account for spread spectrum to avoid
3533	 * oversubscribing the link. Max center spread
3534	 * is 2.5%; use 5% for safety's sake.
3535	 */
3536	u32 bps = target_clock * bpp * 21 / 20;
3537	return DIV_ROUND_UP(bps, link_bw * 8);
3538}
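	/*
	 * Editor's worked example (values illustrative, not from the source):
	 * a 148500 kHz dotclock at 24 bpp on a 270000 kHz link gives
	 *   bps   = 148500 * 24 * 21 / 20 = 3742200
	 *   lanes = DIV_ROUND_UP(3742200, 270000 * 8) = 2
	 * i.e. the 5% spread-spectrum headroom is folded into bps before
	 * dividing by the per-lane bit rate.
	 */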
3539
3540void intel_get_m_n(struct drm_i915_private *i915,
3541		   struct intel_link_m_n *m_n,
3542		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
3543		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
3544{
3545	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
3546	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
3547	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
3548	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
3549	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
3550}
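	/*
	 * Editor's note: the TU size shares a register with data M, sitting
	 * above the 24-bit DATA_LINK_M_N_MASK field, which is why data_m_reg
	 * is read a second time for the REG_FIELD_GET() above.
	 */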
3551
3552void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
3553				    enum transcoder transcoder,
3554				    struct intel_link_m_n *m_n)
3555{
3556	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3557	enum pipe pipe = crtc->pipe;
3558
3559	if (DISPLAY_VER(dev_priv) >= 5)
3560		intel_get_m_n(dev_priv, m_n,
3561			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3562			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3563	else
3564		intel_get_m_n(dev_priv, m_n,
3565			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3566			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3567}
3568
3569void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
3570				    enum transcoder transcoder,
3571				    struct intel_link_m_n *m_n)
3572{
3573	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3574
3575	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3576		return;
3577
3578	intel_get_m_n(dev_priv, m_n,
3579		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3580		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
3581}
3582
3583static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3584				  u32 pos, u32 size)
3585{
3586	drm_rect_init(&crtc_state->pch_pfit.dst,
3587		      pos >> 16, pos & 0xffff,
3588		      size >> 16, size & 0xffff);
3589}
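	/*
	 * Editor's note: both registers pack x/width into the high 16 bits
	 * and y/height into the low 16. E.g. (illustrative values)
	 * pos = 0x00100008 and size = 0x07800438 decode to a 1920x1080
	 * window at (16, 8).
	 */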
3590
3591static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
3592{
3593	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3594	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3595	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
3596	int id = -1;
3597	int i;
3598
3599	/* find scaler attached to this pipe */
3600	for (i = 0; i < crtc->num_scalers; i++) {
3601		u32 ctl, pos, size;
3602
3603		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
3604		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
3605			continue;
3606
3607		id = i;
3608		crtc_state->pch_pfit.enabled = true;
3609
3610		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
3611		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
3612
3613		ilk_get_pfit_pos_size(crtc_state, pos, size);
3614
3615		scaler_state->scalers[i].in_use = true;
3616		break;
3617	}
3618
3619	scaler_state->scaler_id = id;
3620	if (id >= 0)
3621		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
3622	else
3623		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
3624}
3625
3626static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3627{
3628	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3629	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3630	u32 ctl, pos, size;
3631
3632	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3633	if ((ctl & PF_ENABLE) == 0)
3634		return;
3635
3636	crtc_state->pch_pfit.enabled = true;
3637
3638	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
3639	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
3640
3641	ilk_get_pfit_pos_size(crtc_state, pos, size);
3642
3643	/*
3644	 * We currently do not free assignments of panel fitters on
3645	 * ivb/hsw (since we don't use the higher upscaling modes which
3646	 * differentiate them) so just WARN about this case for now.
3647	 */
3648	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
3649		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
3650}
3651
3652static bool ilk_get_pipe_config(struct intel_crtc *crtc,
3653				struct intel_crtc_state *pipe_config)
3654{
3655	struct drm_device *dev = crtc->base.dev;
3656	struct drm_i915_private *dev_priv = to_i915(dev);
3657	enum intel_display_power_domain power_domain;
3658	intel_wakeref_t wakeref;
3659	u32 tmp;
3660	bool ret;
3661
3662	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3663	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3664	if (!wakeref)
3665		return false;
3666
3667	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3668	pipe_config->shared_dpll = NULL;
3669
3670	ret = false;
3671	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
3672	if (!(tmp & PIPECONF_ENABLE))
3673		goto out;
3674
3675	switch (tmp & PIPECONF_BPC_MASK) {
3676	case PIPECONF_BPC_6:
3677		pipe_config->pipe_bpp = 18;
3678		break;
3679	case PIPECONF_BPC_8:
3680		pipe_config->pipe_bpp = 24;
3681		break;
3682	case PIPECONF_BPC_10:
3683		pipe_config->pipe_bpp = 30;
3684		break;
3685	case PIPECONF_BPC_12:
3686		pipe_config->pipe_bpp = 36;
3687		break;
3688	default:
3689		break;
3690	}
3691
3692	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
3693		pipe_config->limited_color_range = true;
3694
3695	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
3696	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
3697	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
3698		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
3699		break;
3700	default:
3701		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3702		break;
3703	}
3704
3705	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
3706
3707	pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
3708
3709	pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);
3710
3711	pipe_config->csc_mode = intel_de_read(dev_priv,
3712					      PIPE_CSC_MODE(crtc->pipe));
3713
3714	i9xx_get_pipe_color_config(pipe_config);
3715	intel_color_get_config(pipe_config);
3716
3717	pipe_config->pixel_multiplier = 1;
3718
3719	ilk_pch_get_config(pipe_config);
3720
3721	intel_get_transcoder_timings(crtc, pipe_config);
3722	intel_get_pipe_src_size(crtc, pipe_config);
3723
3724	ilk_get_pfit_config(pipe_config);
3725
3726	ret = true;
3727
3728out:
3729	intel_display_power_put(dev_priv, power_domain, wakeref);
3730
3731	return ret;
3732}
3733
3734static u8 bigjoiner_pipes(struct drm_i915_private *i915)
3735{
3736	u8 pipes;
3737
3738	if (DISPLAY_VER(i915) >= 12)
3739		pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
3740	else if (DISPLAY_VER(i915) >= 11)
3741		pipes = BIT(PIPE_B) | BIT(PIPE_C);
3742	else
3743		pipes = 0;
3744
3745	return pipes & RUNTIME_INFO(i915)->pipe_mask;
3746}
3747
3748static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
3749					   enum transcoder cpu_transcoder)
3750{
3751	enum intel_display_power_domain power_domain;
3752	intel_wakeref_t wakeref;
3753	u32 tmp = 0;
3754
3755	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
3756
3757	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
3758		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
3759
3760	return tmp & TRANS_DDI_FUNC_ENABLE;
3761}
3762
3763static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
3764				    u8 *master_pipes, u8 *slave_pipes)
3765{
3766	struct intel_crtc *crtc;
3767
3768	*master_pipes = 0;
3769	*slave_pipes = 0;
3770
3771	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
3772					 bigjoiner_pipes(dev_priv)) {
3773		enum intel_display_power_domain power_domain;
3774		enum pipe pipe = crtc->pipe;
3775		intel_wakeref_t wakeref;
3776
3777		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
3778		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
3779			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
3780
3781			if (!(tmp & BIG_JOINER_ENABLE))
3782				continue;
3783
3784			if (tmp & MASTER_BIG_JOINER_ENABLE)
3785				*master_pipes |= BIT(pipe);
3786			else
3787				*slave_pipes |= BIT(pipe);
3788		}
3789
3790		if (DISPLAY_VER(dev_priv) < 13)
3791			continue;
3792
3793		power_domain = POWER_DOMAIN_PIPE(pipe);
3794		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
3795			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
3796
3797			if (tmp & UNCOMPRESSED_JOINER_MASTER)
3798				*master_pipes |= BIT(pipe);
3799			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
3800				*slave_pipes |= BIT(pipe);
3801		}
3802	}
3803
3804	/* Bigjoiner pipes should always be consecutive master and slave */
3805	drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
3806		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
3807		 *master_pipes, *slave_pipes);
3808}
3809
3810static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
3811{
3812	if ((slave_pipes & BIT(pipe)) == 0)
3813		return pipe;
3814
3815	/* ignore our pipe and everything above it */
3816	master_pipes &= ~GENMASK(7, pipe);
3817
3818	/* highest remaining bit should be our master pipe */
3819	return fls(master_pipes) - 1;
3820}
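	/*
	 * Editor's worked example (masks illustrative): with
	 * master_pipes = 0b0101 (A, C) and slave_pipes = 0b1010 (B, D),
	 * asking for PIPE_D clears bits 3..7 of master_pipes (a no-op for
	 * 0b0101) and fls() then picks bit 2, so PIPE_C is D's master.
	 */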
3821
3822static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
3823{
3824	enum pipe master_pipe, next_master_pipe;
3825
3826	master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);
3827
3828	if ((master_pipes & BIT(master_pipe)) == 0)
3829		return 0;
3830
3831	/* ignore our master pipe and everything below it */
3832	master_pipes &= ~GENMASK(master_pipe, 0);
3833	/* make sure a high bit is set for the ffs() */
3834	master_pipes |= BIT(7);
3835	/* lowest remaining bit should be the next master pipe */
3836	next_master_pipe = ffs(master_pipes) - 1;
3837
3838	return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
3839}
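	/*
	 * Editor's note, continuing the example above: for PIPE_A with
	 * master_pipes = 0b0101 and slave_pipes = 0b1010, the master is A
	 * itself; masking A and below leaves 0b0100, BIT(7) serves as an
	 * ffs() sentinel, the next master is PIPE_C, and
	 * slave_pipes & GENMASK(1, 0) yields BIT(PIPE_B), A's only slave.
	 */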
3840
3841static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
3842{
3843	u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
3844
3845	if (DISPLAY_VER(i915) >= 11)
3846		panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
3847
3848	return panel_transcoder_mask;
3849}
3850
3851static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
3852{
3853	struct drm_device *dev = crtc->base.dev;
3854	struct drm_i915_private *dev_priv = to_i915(dev);
3855	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
3856	enum transcoder cpu_transcoder;
3857	u8 master_pipes, slave_pipes;
3858	u8 enabled_transcoders = 0;
3859
3860	/*
3861	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
3862	 * consistency and less surprising code; it's in the always-on power well).
3863	 */
3864	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
3865				       panel_transcoder_mask) {
3866		enum intel_display_power_domain power_domain;
3867		intel_wakeref_t wakeref;
3868		enum pipe trans_pipe;
3869		u32 tmp = 0;
3870
3871		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
3872		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
3873			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
3874
3875		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
3876			continue;
3877
3878		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
3879		default:
3880			drm_WARN(dev, 1,
3881				 "unknown pipe linked to transcoder %s\n",
3882				 transcoder_name(cpu_transcoder));
3883			fallthrough;
3884		case TRANS_DDI_EDP_INPUT_A_ONOFF:
3885		case TRANS_DDI_EDP_INPUT_A_ON:
3886			trans_pipe = PIPE_A;
3887			break;
3888		case TRANS_DDI_EDP_INPUT_B_ONOFF:
3889			trans_pipe = PIPE_B;
3890			break;
3891		case TRANS_DDI_EDP_INPUT_C_ONOFF:
3892			trans_pipe = PIPE_C;
3893			break;
3894		case TRANS_DDI_EDP_INPUT_D_ONOFF:
3895			trans_pipe = PIPE_D;
3896			break;
3897		}
3898
3899		if (trans_pipe == crtc->pipe)
3900			enabled_transcoders |= BIT(cpu_transcoder);
3901	}
3902
3903	/* single pipe or bigjoiner master */
3904	cpu_transcoder = (enum transcoder) crtc->pipe;
3905	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
3906		enabled_transcoders |= BIT(cpu_transcoder);
3907
3908	/* bigjoiner slave -> consider the master pipe's transcoder as well */
3909	enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
3910	if (slave_pipes & BIT(crtc->pipe)) {
3911		cpu_transcoder = (enum transcoder)
3912			get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
3913		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
3914			enabled_transcoders |= BIT(cpu_transcoder);
3915	}
3916
3917	return enabled_transcoders;
3918}
3919
3920static bool has_edp_transcoders(u8 enabled_transcoders)
3921{
3922	return enabled_transcoders & BIT(TRANSCODER_EDP);
3923}
3924
3925static bool has_dsi_transcoders(u8 enabled_transcoders)
3926{
3927	return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
3928				      BIT(TRANSCODER_DSI_1));
3929}
3930
3931static bool has_pipe_transcoders(u8 enabled_transcoders)
3932{
3933	return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
3934				       BIT(TRANSCODER_DSI_0) |
3935				       BIT(TRANSCODER_DSI_1));
3936}
3937
3938static void assert_enabled_transcoders(struct drm_i915_private *i915,
3939				       u8 enabled_transcoders)
3940{
3941	/* Only one type of transcoder please */
3942	drm_WARN_ON(&i915->drm,
3943		    has_edp_transcoders(enabled_transcoders) +
3944		    has_dsi_transcoders(enabled_transcoders) +
3945		    has_pipe_transcoders(enabled_transcoders) > 1);
3946
3947	/* Only DSI transcoders can be ganged */
3948	drm_WARN_ON(&i915->drm,
3949		    !has_dsi_transcoders(enabled_transcoders) &&
3950		    !is_power_of_2(enabled_transcoders));
3951}
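	/*
	 * Editor's note (illustrative): BIT(TRANSCODER_A) passes both checks,
	 * as does BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1) (ganged DSI
	 * being the one legal multi-bit case), while BIT(TRANSCODER_EDP) |
	 * BIT(TRANSCODER_A) trips the "one type only" warning.
	 */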
3952
3953static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
3954				     struct intel_crtc_state *pipe_config,
3955				     struct intel_display_power_domain_set *power_domain_set)
3956{
3957	struct drm_device *dev = crtc->base.dev;
3958	struct drm_i915_private *dev_priv = to_i915(dev);
3959	unsigned long enabled_transcoders;
3960	u32 tmp;
3961
3962	enabled_transcoders = hsw_enabled_transcoders(crtc);
3963	if (!enabled_transcoders)
3964		return false;
3965
3966	assert_enabled_transcoders(dev_priv, enabled_transcoders);
3967
3968	/*
3969	 * With the exception of DSI we should only ever have
3970	 * a single enabled transcoder. With DSI let's just
3971	 * pick the first one.
3972	 */
3973	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
3974
3975	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
3976						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
3977		return false;
3978
3979	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
3980		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
3981
3982		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
3983			pipe_config->pch_pfit.force_thru = true;
3984	}
3985
3986	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
3987
3988	return tmp & PIPECONF_ENABLE;
3989}
3990
3991static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
3992					 struct intel_crtc_state *pipe_config,
3993					 struct intel_display_power_domain_set *power_domain_set)
3994{
3995	struct drm_device *dev = crtc->base.dev;
3996	struct drm_i915_private *dev_priv = to_i915(dev);
3997	enum transcoder cpu_transcoder;
3998	enum port port;
3999	u32 tmp;
4000
4001	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
4002		if (port == PORT_A)
4003			cpu_transcoder = TRANSCODER_DSI_A;
4004		else
4005			cpu_transcoder = TRANSCODER_DSI_C;
4006
4007		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4008							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
4009			continue;
4010
4011		/*
4012		 * The PLL needs to be enabled with a valid divider
4013		 * configuration, otherwise accessing DSI registers will hang
4014		 * the machine. See BSpec North Display Engine
4015		 * registers/MIPI[BXT]. We can break out here early, since we
4016		 * need the same DSI PLL to be enabled for both DSI ports.
4017		 */
4018		if (!bxt_dsi_pll_is_enabled(dev_priv))
4019			break;
4020
4021		/* XXX: this works for video mode only */
4022		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
4023		if (!(tmp & DPI_ENABLE))
4024			continue;
4025
4026		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
4027		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
4028			continue;
4029
4030		pipe_config->cpu_transcoder = cpu_transcoder;
4031		break;
4032	}
4033
4034	return transcoder_is_dsi(pipe_config->cpu_transcoder);
4035}
4036
4037static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
4038{
4039	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4040	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4041	u8 master_pipes, slave_pipes;
4042	enum pipe pipe = crtc->pipe;
4043
4044	enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);
4045
4046	if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
4047		return;
4048
4049	crtc_state->bigjoiner_pipes =
4050		BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
4051		get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
4052}
4053
4054static bool hsw_get_pipe_config(struct intel_crtc *crtc,
4055				struct intel_crtc_state *pipe_config)
4056{
4057	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4058	bool active;
4059	u32 tmp;
4060
4061	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
4062						       POWER_DOMAIN_PIPE(crtc->pipe)))
4063		return false;
4064
4065	pipe_config->shared_dpll = NULL;
4066
4067	active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);
4068
4069	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4070	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
4071		drm_WARN_ON(&dev_priv->drm, active);
4072		active = true;
4073	}
4074
4075	if (!active)
4076		goto out;
4077
4078	intel_dsc_get_config(pipe_config);
4079	intel_bigjoiner_get_config(pipe_config);
4080
4081	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
4082	    DISPLAY_VER(dev_priv) >= 11)
4083		intel_get_transcoder_timings(crtc, pipe_config);
4084
4085	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
4086		intel_vrr_get_config(crtc, pipe_config);
4087
4088	intel_get_pipe_src_size(crtc, pipe_config);
4089
4090	if (IS_HASWELL(dev_priv)) {
4091		u32 tmp = intel_de_read(dev_priv,
4092					PIPECONF(pipe_config->cpu_transcoder));
4093
4094		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
4095			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
4096		else
4097			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4098	} else {
4099		pipe_config->output_format =
4100			bdw_get_pipemisc_output_format(crtc);
4101	}
4102
4103	pipe_config->gamma_mode = intel_de_read(dev_priv,
4104						GAMMA_MODE(crtc->pipe));
4105
4106	pipe_config->csc_mode = intel_de_read(dev_priv,
4107					      PIPE_CSC_MODE(crtc->pipe));
4108
4109	if (DISPLAY_VER(dev_priv) >= 9) {
4110		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
4111
4112		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
4113			pipe_config->gamma_enable = true;
4114
4115		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
4116			pipe_config->csc_enable = true;
4117	} else {
4118		i9xx_get_pipe_color_config(pipe_config);
4119	}
4120
4121	intel_color_get_config(pipe_config);
4122
4123	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
4124	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
4125	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4126		pipe_config->ips_linetime =
4127			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
4128
4129	if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
4130						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
4131		if (DISPLAY_VER(dev_priv) >= 9)
4132			skl_get_pfit_config(pipe_config);
4133		else
4134			ilk_get_pfit_config(pipe_config);
4135	}
4136
4137	hsw_ips_get_config(pipe_config);
4138
4139	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
4140	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
4141		pipe_config->pixel_multiplier =
4142			intel_de_read(dev_priv,
4143				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
4144	} else {
4145		pipe_config->pixel_multiplier = 1;
4146	}
4147
4148	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
4149		tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
4150				    MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
4151				    CHICKEN_TRANS(pipe_config->cpu_transcoder));
4152
4153		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
4154	} else {
4155		/* no idea if this is correct */
4156		pipe_config->framestart_delay = 1;
4157	}
4158
4159out:
4160	intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);
4161
4162	return active;
4163}
4164
4165bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
4166{
4167	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4168	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4169
4170	if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
4171		return false;
4172
4173	crtc_state->hw.active = true;
4174
4175	intel_crtc_readout_derived_state(crtc_state);
4176
4177	return true;
4178}
4179
4180/* VESA 640x480x72Hz mode to set on the pipe */
4181static const struct drm_display_mode load_detect_mode = {
4182	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
4183		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
4184};
4185
4186static int intel_modeset_disable_planes(struct drm_atomic_state *state,
4187					struct drm_crtc *crtc)
4188{
4189	struct drm_plane *plane;
4190	struct drm_plane_state *plane_state;
4191	int ret, i;
4192
4193	ret = drm_atomic_add_affected_planes(state, crtc);
4194	if (ret)
4195		return ret;
4196
4197	for_each_new_plane_in_state(state, plane, plane_state, i) {
4198		if (plane_state->crtc != crtc)
4199			continue;
4200
4201		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
4202		if (ret)
4203			return ret;
4204
4205		drm_atomic_set_fb_for_plane(plane_state, NULL);
4206	}
4207
4208	return 0;
4209}
4210
4211int intel_get_load_detect_pipe(struct drm_connector *connector,
4212			       struct intel_load_detect_pipe *old,
4213			       struct drm_modeset_acquire_ctx *ctx)
4214{
4215	struct intel_encoder *encoder =
4216		intel_attached_encoder(to_intel_connector(connector));
4217	struct intel_crtc *possible_crtc;
4218	struct intel_crtc *crtc = NULL;
4219	struct drm_device *dev = encoder->base.dev;
4220	struct drm_i915_private *dev_priv = to_i915(dev);
4221	struct drm_mode_config *config = &dev->mode_config;
4222	struct drm_atomic_state *state = NULL, *restore_state = NULL;
4223	struct drm_connector_state *connector_state;
4224	struct intel_crtc_state *crtc_state;
4225	int ret;
4226
4227	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4228		    connector->base.id, connector->name,
4229		    encoder->base.base.id, encoder->base.name);
4230
4231	old->restore_state = NULL;
4232
4233	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
4234
4235	/*
4236	 * Algorithm gets a little messy:
4237	 *
4238	 *   - if the connector already has an assigned crtc, use it (but make
4239	 *     sure it's on first)
4240	 *
4241	 *   - try to find the first unused crtc that can drive this connector,
4242	 *     and use that if we find one
4243	 */
4244
4245	/* See if we already have a CRTC for this connector */
4246	if (connector->state->crtc) {
4247		crtc = to_intel_crtc(connector->state->crtc);
4248
4249		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4250		if (ret)
4251			goto fail;
4252
4253		/* Make sure the crtc and connector are running */
4254		goto found;
4255	}
4256
4257	/* Find an unused one (if possible) */
4258	for_each_intel_crtc(dev, possible_crtc) {
4259		if (!(encoder->base.possible_crtcs &
4260		      drm_crtc_mask(&possible_crtc->base)))
4261			continue;
4262
4263		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
4264		if (ret)
4265			goto fail;
4266
4267		if (possible_crtc->base.state->enable) {
4268			drm_modeset_unlock(&possible_crtc->base.mutex);
4269			continue;
4270		}
4271
4272		crtc = possible_crtc;
4273		break;
4274	}
4275
4276	/*
4277	 * If we didn't find an unused CRTC, don't use any.
4278	 */
4279	if (!crtc) {
4280		drm_dbg_kms(&dev_priv->drm,
4281			    "no pipe available for load-detect\n");
4282		ret = -ENODEV;
4283		goto fail;
4284	}
4285
4286found:
4287	state = drm_atomic_state_alloc(dev);
4288	restore_state = drm_atomic_state_alloc(dev);
4289	if (!state || !restore_state) {
4290		ret = -ENOMEM;
4291		goto fail;
4292	}
4293
4294	state->acquire_ctx = ctx;
4295	restore_state->acquire_ctx = ctx;
4296
4297	connector_state = drm_atomic_get_connector_state(state, connector);
4298	if (IS_ERR(connector_state)) {
4299		ret = PTR_ERR(connector_state);
4300		goto fail;
4301	}
4302
4303	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
4304	if (ret)
4305		goto fail;
4306
4307	crtc_state = intel_atomic_get_crtc_state(state, crtc);
4308	if (IS_ERR(crtc_state)) {
4309		ret = PTR_ERR(crtc_state);
4310		goto fail;
4311	}
4312
4313	crtc_state->uapi.active = true;
4314
4315	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
4316					   &load_detect_mode);
4317	if (ret)
4318		goto fail;
4319
4320	ret = intel_modeset_disable_planes(state, &crtc->base);
4321	if (ret)
4322		goto fail;
4323
4324	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
4325	if (!ret)
4326		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
4327	if (!ret)
4328		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
4329	if (ret) {
4330		drm_dbg_kms(&dev_priv->drm,
4331			    "Failed to create a copy of old state to restore: %i\n",
4332			    ret);
4333		goto fail;
4334	}
4335
4336	ret = drm_atomic_commit(state);
4337	if (ret) {
4338		drm_dbg_kms(&dev_priv->drm,
4339			    "failed to set mode on load-detect pipe\n");
4340		goto fail;
4341	}
4342
4343	old->restore_state = restore_state;
4344	drm_atomic_state_put(state);
4345
4346	/* let the connector get through one full cycle before testing */
4347	intel_crtc_wait_for_next_vblank(crtc);
4348
4349	return true;
4350
4351fail:
4352	if (state) {
4353		drm_atomic_state_put(state);
4354		state = NULL;
4355	}
4356	if (restore_state) {
4357		drm_atomic_state_put(restore_state);
4358		restore_state = NULL;
4359	}
4360
4361	if (ret == -EDEADLK)
4362		return ret;
4363
4364	return false;
4365}
4366
4367void intel_release_load_detect_pipe(struct drm_connector *connector,
4368				    struct intel_load_detect_pipe *old,
4369				    struct drm_modeset_acquire_ctx *ctx)
4370{
4371	struct intel_encoder *intel_encoder =
4372		intel_attached_encoder(to_intel_connector(connector));
4373	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
4374	struct drm_encoder *encoder = &intel_encoder->base;
4375	struct drm_atomic_state *state = old->restore_state;
4376	int ret;
4377
4378	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4379		    connector->base.id, connector->name,
4380		    encoder->base.id, encoder->name);
4381
4382	if (!state)
4383		return;
4384
4385	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4386	if (ret)
4387		drm_dbg_kms(&i915->drm,
4388			    "Couldn't release load detect pipe: %i\n", ret);
4389	drm_atomic_state_put(state);
4390}
4391
4392static int i9xx_pll_refclk(struct drm_device *dev,
4393			   const struct intel_crtc_state *pipe_config)
4394{
4395	struct drm_i915_private *dev_priv = to_i915(dev);
4396	u32 dpll = pipe_config->dpll_hw_state.dpll;
4397
4398	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4399		return dev_priv->display.vbt.lvds_ssc_freq;
4400	else if (HAS_PCH_SPLIT(dev_priv))
4401		return 120000;
4402	else if (DISPLAY_VER(dev_priv) != 2)
4403		return 96000;
4404	else
4405		return 48000;
4406}
4407
4408/* Returns the clock of the currently programmed mode of the given pipe. */
4409void i9xx_crtc_clock_get(struct intel_crtc *crtc,
4410			 struct intel_crtc_state *pipe_config)
4411{
4412	struct drm_device *dev = crtc->base.dev;
4413	struct drm_i915_private *dev_priv = to_i915(dev);
4414	u32 dpll = pipe_config->dpll_hw_state.dpll;
4415	u32 fp;
4416	struct dpll clock;
4417	int port_clock;
4418	int refclk = i9xx_pll_refclk(dev, pipe_config);
4419
4420	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
4421		fp = pipe_config->dpll_hw_state.fp0;
4422	else
4423		fp = pipe_config->dpll_hw_state.fp1;
4424
4425	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
4426	if (IS_PINEVIEW(dev_priv)) {
4427		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
4428		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
4429	} else {
4430		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
4431		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
4432	}
4433
4434	if (DISPLAY_VER(dev_priv) != 2) {
4435		if (IS_PINEVIEW(dev_priv))
4436			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
4437				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
4438		else
4439			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
4440			       DPLL_FPA01_P1_POST_DIV_SHIFT);
4441
4442		switch (dpll & DPLL_MODE_MASK) {
4443		case DPLLB_MODE_DAC_SERIAL:
4444			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
4445				5 : 10;
4446			break;
4447		case DPLLB_MODE_LVDS:
4448			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
4449				7 : 14;
4450			break;
4451		default:
4452			drm_dbg_kms(&dev_priv->drm,
4453				    "Unknown DPLL mode %08x in programmed "
4454				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
4455			return;
4456		}
4457
4458		if (IS_PINEVIEW(dev_priv))
4459			port_clock = pnv_calc_dpll_params(refclk, &clock);
4460		else
4461			port_clock = i9xx_calc_dpll_params(refclk, &clock);
4462	} else {
4463		enum pipe lvds_pipe;
4464
4465		if (IS_I85X(dev_priv) &&
4466		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
4467		    lvds_pipe == crtc->pipe) {
4468			u32 lvds = intel_de_read(dev_priv, LVDS);
4469
4470			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
4471				       DPLL_FPA01_P1_POST_DIV_SHIFT);
4472
4473			if (lvds & LVDS_CLKB_POWER_UP)
4474				clock.p2 = 7;
4475			else
4476				clock.p2 = 14;
4477		} else {
4478			if (dpll & PLL_P1_DIVIDE_BY_TWO)
4479				clock.p1 = 2;
4480			else {
4481				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
4482					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
4483			}
4484			if (dpll & PLL_P2_DIVIDE_BY_4)
4485				clock.p2 = 4;
4486			else
4487				clock.p2 = 2;
4488		}
4489
4490		port_clock = i9xx_calc_dpll_params(refclk, &clock);
4491	}
4492
4493	/*
4494	 * This value includes pixel_multiplier. We will use
4495	 * port_clock to compute adjusted_mode.crtc_clock in the
4496	 * encoder's get_config() function.
4497	 */
4498	pipe_config->port_clock = port_clock;
4499}
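	/*
	 * Editor's note: schematically the recovered clock is
	 *   port_clock ~= refclk * m / (n * p1 * p2)
	 * with the platform-specific field encodings (the +2 offsets etc.)
	 * applied inside i9xx_calc_dpll_params()/pnv_calc_dpll_params().
	 */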
4500
4501int intel_dotclock_calculate(int link_freq,
4502			     const struct intel_link_m_n *m_n)
4503{
4504	/*
4505	 * The calculation for the data clock is:
4506	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4507	 * But we want to avoid losing precision if possible, so:
4508	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4509	 *
4510	 * and recovering the pixel clock from the link M/N alone is simpler:
4511	 * pixel_clock = (m * link_clock) / n
4512	 */
4513
4514	if (!m_n->link_n)
4515		return 0;
4516
4517	return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
4518				m_n->link_n);
4519}
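	/*
	 * Editor's worked example with an illustrative, reduced ratio:
	 * link_m/link_n = 22/40 on a 270000 kHz link gives
	 * DIV_ROUND_UP_ULL(22 * 270000, 40) = 148500 kHz. The M/N values the
	 * hardware actually holds are much larger, but only the ratio
	 * matters here.
	 */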
4520
4521int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
4522{
4523	int dotclock;
4524
4525	if (intel_crtc_has_dp_encoder(pipe_config))
4526		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
4527						    &pipe_config->dp_m_n);
4528	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
4529		dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
4530					     pipe_config->pipe_bpp);
4531	else
4532		dotclock = pipe_config->port_clock;
4533
4534	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
4535	    !intel_crtc_has_dp_encoder(pipe_config))
4536		dotclock *= 2;
4537
4538	if (pipe_config->pixel_multiplier)
4539		dotclock /= pipe_config->pixel_multiplier;
4540
4541	return dotclock;
4542}
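	/*
	 * Editor's note (illustrative): for an HDMI sink in 10 bpc deep color
	 * (pipe_bpp = 30) the port runs 25% faster than the pixel stream, so
	 * a port_clock of 371250 kHz maps back to 371250 * 24 / 30 = 297000
	 * kHz.
	 */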
4543
4544/* Returns the currently programmed mode of the given encoder. */
4545struct drm_display_mode *
4546intel_encoder_current_mode(struct intel_encoder *encoder)
4547{
4548	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4549	struct intel_crtc_state *crtc_state;
4550	struct drm_display_mode *mode;
4551	struct intel_crtc *crtc;
4552	enum pipe pipe;
4553
4554	if (!encoder->get_hw_state(encoder, &pipe))
4555		return NULL;
4556
4557	crtc = intel_crtc_for_pipe(dev_priv, pipe);
4558
4559	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
4560	if (!mode)
4561		return NULL;
4562
4563	crtc_state = intel_crtc_state_alloc(crtc);
4564	if (!crtc_state) {
4565		kfree(mode);
4566		return NULL;
4567	}
4568
4569	if (!intel_crtc_get_pipe_config(crtc_state)) {
4570		kfree(crtc_state);
4571		kfree(mode);
4572		return NULL;
4573	}
4574
4575	intel_encoder_get_config(encoder, crtc_state);
4576
4577	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
4578
4579	kfree(crtc_state);
4580
4581	return mode;
4582}
4583
4584static bool encoders_cloneable(const struct intel_encoder *a,
4585			       const struct intel_encoder *b)
4586{
4587	/* masks could be asymmetric, so check both ways */
4588	return a == b || (a->cloneable & BIT(b->type) &&
4589			  b->cloneable & BIT(a->type));
4590}
4591
4592static bool check_single_encoder_cloning(struct intel_atomic_state *state,
4593					 struct intel_crtc *crtc,
4594					 struct intel_encoder *encoder)
4595{
4596	struct intel_encoder *source_encoder;
4597	struct drm_connector *connector;
4598	struct drm_connector_state *connector_state;
4599	int i;
4600
4601	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4602		if (connector_state->crtc != &crtc->base)
4603			continue;
4604
4605		source_encoder =
4606			to_intel_encoder(connector_state->best_encoder);
4607		if (!encoders_cloneable(encoder, source_encoder))
4608			return false;
4609	}
4610
4611	return true;
4612}
4613
4614static int icl_add_linked_planes(struct intel_atomic_state *state)
4615{
4616	struct intel_plane *plane, *linked;
4617	struct intel_plane_state *plane_state, *linked_plane_state;
4618	int i;
4619
4620	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4621		linked = plane_state->planar_linked_plane;
4622
4623		if (!linked)
4624			continue;
4625
4626		linked_plane_state = intel_atomic_get_plane_state(state, linked);
4627		if (IS_ERR(linked_plane_state))
4628			return PTR_ERR(linked_plane_state);
4629
4630		drm_WARN_ON(state->base.dev,
4631			    linked_plane_state->planar_linked_plane != plane);
4632		drm_WARN_ON(state->base.dev,
4633			    linked_plane_state->planar_slave == plane_state->planar_slave);
4634	}
4635
4636	return 0;
4637}
4638
4639static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
4640{
4641	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4642	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4643	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
4644	struct intel_plane *plane, *linked;
4645	struct intel_plane_state *plane_state;
4646	int i;
4647
4648	if (DISPLAY_VER(dev_priv) < 11)
4649		return 0;
4650
4651	/*
4652	 * Destroy all old plane links and make the slave plane invisible
4653	 * in the crtc_state->active_planes mask.
4654	 */
4655	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4656		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
4657			continue;
4658
4659		plane_state->planar_linked_plane = NULL;
4660		if (plane_state->planar_slave && !plane_state->uapi.visible) {
4661			crtc_state->enabled_planes &= ~BIT(plane->id);
4662			crtc_state->active_planes &= ~BIT(plane->id);
4663			crtc_state->update_planes |= BIT(plane->id);
4664			crtc_state->data_rate[plane->id] = 0;
4665			crtc_state->rel_data_rate[plane->id] = 0;
4666		}
4667
4668		plane_state->planar_slave = false;
4669	}
4670
4671	if (!crtc_state->nv12_planes)
4672		return 0;
4673
4674	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4675		struct intel_plane_state *linked_state = NULL;
4676
4677		if (plane->pipe != crtc->pipe ||
4678		    !(crtc_state->nv12_planes & BIT(plane->id)))
4679			continue;
4680
4681		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
4682			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
4683				continue;
4684
4685			if (crtc_state->active_planes & BIT(linked->id))
4686				continue;
4687
4688			linked_state = intel_atomic_get_plane_state(state, linked);
4689			if (IS_ERR(linked_state))
4690				return PTR_ERR(linked_state);
4691
4692			break;
4693		}
4694
4695		if (!linked_state) {
4696			drm_dbg_kms(&dev_priv->drm,
4697				    "Need %d free Y planes for planar YUV\n",
4698				    hweight8(crtc_state->nv12_planes));
4699
4700			return -EINVAL;
4701		}
4702
4703		plane_state->planar_linked_plane = linked;
4704
4705		linked_state->planar_slave = true;
4706		linked_state->planar_linked_plane = plane;
4707		crtc_state->enabled_planes |= BIT(linked->id);
4708		crtc_state->active_planes |= BIT(linked->id);
4709		crtc_state->update_planes |= BIT(linked->id);
4710		crtc_state->data_rate[linked->id] =
4711			crtc_state->data_rate_y[plane->id];
4712		crtc_state->rel_data_rate[linked->id] =
4713			crtc_state->rel_data_rate_y[plane->id];
4714		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
4715			    linked->base.name, plane->base.name);
4716
4717		/* Copy parameters to slave plane */
4718		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
4719		linked_state->color_ctl = plane_state->color_ctl;
4720		linked_state->view = plane_state->view;
4721		linked_state->decrypt = plane_state->decrypt;
4722
4723		intel_plane_copy_hw_state(linked_state, plane_state);
4724		linked_state->uapi.src = plane_state->uapi.src;
4725		linked_state->uapi.dst = plane_state->uapi.dst;
4726
4727		if (icl_is_hdr_plane(dev_priv, plane->id)) {
4728			if (linked->id == PLANE_SPRITE5)
4729				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
4730			else if (linked->id == PLANE_SPRITE4)
4731				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
4732			else if (linked->id == PLANE_SPRITE3)
4733				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
4734			else if (linked->id == PLANE_SPRITE2)
4735				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
4736			else
4737				MISSING_CASE(linked->id);
4738		}
4739	}
4740
4741	return 0;
4742}
4743
4744static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
4745{
4746	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
4747	struct intel_atomic_state *state =
4748		to_intel_atomic_state(new_crtc_state->uapi.state);
4749	const struct intel_crtc_state *old_crtc_state =
4750		intel_atomic_get_old_crtc_state(state, crtc);
4751
4752	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
4753}
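	/*
	 * Editor's note: "!old != !new" is a boolean XOR on emptiness; it
	 * fires when the set of C8 planes goes from empty to non-empty or
	 * back, regardless of which individual planes changed.
	 */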
4754
4755static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
4756{
4757	const struct drm_display_mode *pipe_mode =
4758		&crtc_state->hw.pipe_mode;
4759	int linetime_wm;
4760
4761	if (!crtc_state->hw.enable)
4762		return 0;
4763
4764	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4765					pipe_mode->crtc_clock);
4766
4767	return min(linetime_wm, 0x1ff);
4768}
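	/*
	 * Editor's worked example (illustrative 1080p timings): with
	 * crtc_htotal = 2200 and crtc_clock = 148500 kHz,
	 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e. a scanline
	 * time of about 14.9 us in the register's 1/8 us units.
	 */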
4769
4770static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
4771			       const struct intel_cdclk_state *cdclk_state)
4772{
4773	const struct drm_display_mode *pipe_mode =
4774		&crtc_state->hw.pipe_mode;
4775	int linetime_wm;
4776
4777	if (!crtc_state->hw.enable)
4778		return 0;
4779
4780	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4781					cdclk_state->logical.cdclk);
4782
4783	return min(linetime_wm, 0x1ff);
4784}
4785
4786static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
4787{
4788	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4789	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4790	const struct drm_display_mode *pipe_mode =
4791		&crtc_state->hw.pipe_mode;
4792	int linetime_wm;
4793
4794	if (!crtc_state->hw.enable)
4795		return 0;
4796
4797	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
4798				   crtc_state->pixel_rate);
4799
4800	/* Display WA #1135: BXT:ALL GLK:ALL */
4801	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4802	    skl_watermark_ipc_enabled(dev_priv))
4803		linetime_wm /= 2;
4804
4805	return min(linetime_wm, 0x1ff);
4806}
4807
4808static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
4809				   struct intel_crtc *crtc)
4810{
4811	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4812	struct intel_crtc_state *crtc_state =
4813		intel_atomic_get_new_crtc_state(state, crtc);
4814	const struct intel_cdclk_state *cdclk_state;
4815
4816	if (DISPLAY_VER(dev_priv) >= 9)
4817		crtc_state->linetime = skl_linetime_wm(crtc_state);
4818	else
4819		crtc_state->linetime = hsw_linetime_wm(crtc_state);
4820
4821	if (!hsw_crtc_supports_ips(crtc))
4822		return 0;
4823
4824	cdclk_state = intel_atomic_get_cdclk_state(state);
4825	if (IS_ERR(cdclk_state))
4826		return PTR_ERR(cdclk_state);
4827
4828	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
4829						       cdclk_state);
4830
4831	return 0;
4832}
4833
4834static int intel_crtc_atomic_check(struct intel_atomic_state *state,
4835				   struct intel_crtc *crtc)
4836{
4837	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4838	struct intel_crtc_state *crtc_state =
4839		intel_atomic_get_new_crtc_state(state, crtc);
4840	int ret;
4841
4842	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
4843	    intel_crtc_needs_modeset(crtc_state) &&
4844	    !crtc_state->hw.active)
4845		crtc_state->update_wm_post = true;
4846
4847	if (intel_crtc_needs_modeset(crtc_state)) {
4848		ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
4849		if (ret)
4850			return ret;
4851	}
4852
4853	/*
4854	 * May need to update pipe gamma enable bits
4855	 * when C8 planes are getting enabled/disabled.
4856	 */
4857	if (c8_planes_changed(crtc_state))
4858		crtc_state->uapi.color_mgmt_changed = true;
4859
4860	if (intel_crtc_needs_color_update(crtc_state)) {
4861		ret = intel_color_check(crtc_state);
4862		if (ret)
4863			return ret;
4864	}
4865
4866	ret = intel_compute_pipe_wm(state, crtc);
4867	if (ret) {
4868		drm_dbg_kms(&dev_priv->drm,
4869			    "Target pipe watermarks are invalid\n");
4870		return ret;
4871	}
4872
4873	/*
4874	 * Calculate 'intermediate' watermarks that satisfy both the
4875	 * old state and the new state.  We can program these
4876	 * immediately.
4877	 */
4878	ret = intel_compute_intermediate_wm(state, crtc);
4879	if (ret) {
4880		drm_dbg_kms(&dev_priv->drm,
4881			    "No valid intermediate pipe watermarks are possible\n");
4882		return ret;
4883	}
4884
4885	if (DISPLAY_VER(dev_priv) >= 9) {
4886		if (intel_crtc_needs_modeset(crtc_state) ||
4887		    intel_crtc_needs_fastset(crtc_state)) {
4888			ret = skl_update_scaler_crtc(crtc_state);
4889			if (ret)
4890				return ret;
4891		}
4892
4893		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
4894		if (ret)
4895			return ret;
4896	}
4897
4898	if (HAS_IPS(dev_priv)) {
4899		ret = hsw_ips_compute_config(state, crtc);
4900		if (ret)
4901			return ret;
4902	}
4903
4904	if (DISPLAY_VER(dev_priv) >= 9 ||
4905	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
4906		ret = hsw_compute_linetime_wm(state, crtc);
4907		if (ret)
4908			return ret;
4909
4910	}
4911
4912	ret = intel_psr2_sel_fetch_update(state, crtc);
4913	if (ret)
4914		return ret;
4915
4916	return 0;
4917}
4918
4919static int
4920compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
4921		      struct intel_crtc_state *crtc_state)
4922{
4923	struct drm_connector *connector = conn_state->connector;
4924	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4925	const struct drm_display_info *info = &connector->display_info;
4926	int bpp;
4927
4928	switch (conn_state->max_bpc) {
4929	case 6 ... 7:
4930		bpp = 6 * 3;
4931		break;
4932	case 8 ... 9:
4933		bpp = 8 * 3;
4934		break;
4935	case 10 ... 11:
4936		bpp = 10 * 3;
4937		break;
4938	case 12 ... 16:
4939		bpp = 12 * 3;
4940		break;
4941	default:
4942		MISSING_CASE(conn_state->max_bpc);
4943		return -EINVAL;
4944	}
4945
4946	if (bpp < crtc_state->pipe_bpp) {
4947		drm_dbg_kms(&i915->drm,
4948			    "[CONNECTOR:%d:%s] Limiting display bpp to %d "
4949			    "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
4950			    connector->base.id, connector->name,
4951			    bpp, 3 * info->bpc,
4952			    3 * conn_state->max_requested_bpc,
4953			    crtc_state->pipe_bpp);
4954
4955		crtc_state->pipe_bpp = bpp;
4956	}
4957
4958	return 0;
4959}
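	/*
	 * Editor's note (illustrative): a connector advertising max_bpc = 10
	 * lands in the "10 ... 11" case, so a platform baseline of 36 pipe
	 * bpp would be clamped to 30 here, with the debug message recording
	 * why.
	 */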
4960
4961static int
4962compute_baseline_pipe_bpp(struct intel_atomic_state *state,
4963			  struct intel_crtc *crtc)
4964{
4965	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4966	struct intel_crtc_state *crtc_state =
4967		intel_atomic_get_new_crtc_state(state, crtc);
4968	struct drm_connector *connector;
4969	struct drm_connector_state *connector_state;
4970	int bpp, i;
4971
4972	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4973	    IS_CHERRYVIEW(dev_priv)))
4974		bpp = 10*3;
4975	else if (DISPLAY_VER(dev_priv) >= 5)
4976		bpp = 12*3;
4977	else
4978		bpp = 8*3;
4979
4980	crtc_state->pipe_bpp = bpp;
4981
4982	/* Clamp display bpp to connector max bpp */
4983	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4984		int ret;
4985
4986		if (connector_state->crtc != &crtc->base)
4987			continue;
4988
4989		ret = compute_sink_pipe_bpp(connector_state, crtc_state);
4990		if (ret)
4991			return ret;
4992	}
4993
4994	return 0;
4995}
4996
4997static bool check_digital_port_conflicts(struct intel_atomic_state *state)
4998{
4999	struct drm_device *dev = state->base.dev;
5000	struct drm_connector *connector;
5001	struct drm_connector_list_iter conn_iter;
5002	unsigned int used_ports = 0;
5003	unsigned int used_mst_ports = 0;
5004	bool ret = true;
5005
5006	/*
5007	 * We're going to peek into connector->state,
5008	 * hence connection_mutex must be held.
5009	 */
5010	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
5011
5012	/*
5013	 * Walk the connector list instead of the encoder
5014	 * list to detect the problem on ddi platforms
5015	 * where there's just one encoder per digital port.
5016	 */
5017	drm_connector_list_iter_begin(dev, &conn_iter);
5018	drm_for_each_connector_iter(connector, &conn_iter) {
5019		struct drm_connector_state *connector_state;
5020		struct intel_encoder *encoder;
5021
5022		connector_state =
5023			drm_atomic_get_new_connector_state(&state->base,
5024							   connector);
5025		if (!connector_state)
5026			connector_state = connector->state;
5027
5028		if (!connector_state->best_encoder)
5029			continue;
5030
5031		encoder = to_intel_encoder(connector_state->best_encoder);
5032
5033		drm_WARN_ON(dev, !connector_state->crtc);
5034
5035		switch (encoder->type) {
5036		case INTEL_OUTPUT_DDI:
5037			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
5038				break;
5039			fallthrough;
5040		case INTEL_OUTPUT_DP:
5041		case INTEL_OUTPUT_HDMI:
5042		case INTEL_OUTPUT_EDP:
5043			/* the same port mustn't appear more than once */
5044			if (used_ports & BIT(encoder->port))
5045				ret = false;
5046
5047			used_ports |= BIT(encoder->port);
5048			break;
5049		case INTEL_OUTPUT_DP_MST:
5050			used_mst_ports |=
5051				1 << encoder->port;
5052			break;
5053		default:
5054			break;
5055		}
5056	}
5057	drm_connector_list_iter_end(&conn_iter);
5058
5059	/* can't mix MST and SST/HDMI on the same port */
5060	if (used_ports & used_mst_ports)
5061		return false;
5062
5063	return ret;
5064}
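	/*
	 * Editor's note (illustrative): two SST connectors both routed to
	 * port B make the second BIT(PORT_B) test clear ret, and an MST
	 * stream sharing a port with an SST/HDMI connector is caught by the
	 * final used_ports & used_mst_ports overlap check.
	 */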
5065
5066static void
5067intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
5068					   struct intel_crtc *crtc)
5069{
5070	struct intel_crtc_state *crtc_state =
5071		intel_atomic_get_new_crtc_state(state, crtc);
5072
5073	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
5074
5075	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
5076				  crtc_state->uapi.degamma_lut);
5077	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
5078				  crtc_state->uapi.gamma_lut);
5079	drm_property_replace_blob(&crtc_state->hw.ctm,
5080				  crtc_state->uapi.ctm);
5081}
5082
5083static void
5084intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
5085					 struct intel_crtc *crtc)
5086{
5087	struct intel_crtc_state *crtc_state =
5088		intel_atomic_get_new_crtc_state(state, crtc);
5089
5090	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
5091
5092	crtc_state->hw.enable = crtc_state->uapi.enable;
5093	crtc_state->hw.active = crtc_state->uapi.active;
5094	drm_mode_copy(&crtc_state->hw.mode,
5095		      &crtc_state->uapi.mode);
5096	drm_mode_copy(&crtc_state->hw.adjusted_mode,
5097		      &crtc_state->uapi.adjusted_mode);
5098	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
5099
5100	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
5101}
5102
5103static void
5104copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
5105				    struct intel_crtc *slave_crtc)
5106{
5107	struct intel_crtc_state *slave_crtc_state =
5108		intel_atomic_get_new_crtc_state(state, slave_crtc);
5109	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
5110	const struct intel_crtc_state *master_crtc_state =
5111		intel_atomic_get_new_crtc_state(state, master_crtc);
5112
5113	drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
5114				  master_crtc_state->hw.degamma_lut);
5115	drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
5116				  master_crtc_state->hw.gamma_lut);
5117	drm_property_replace_blob(&slave_crtc_state->hw.ctm,
5118				  master_crtc_state->hw.ctm);
5119
5120	slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
5121}
5122
5123static int
5124copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
5125				  struct intel_crtc *slave_crtc)
5126{
5127	struct intel_crtc_state *slave_crtc_state =
5128		intel_atomic_get_new_crtc_state(state, slave_crtc);
5129	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
5130	const struct intel_crtc_state *master_crtc_state =
5131		intel_atomic_get_new_crtc_state(state, master_crtc);
5132	struct intel_crtc_state *saved_state;
5133
5134	WARN_ON(master_crtc_state->bigjoiner_pipes !=
5135		slave_crtc_state->bigjoiner_pipes);
5136
5137	saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
5138	if (!saved_state)
5139		return -ENOMEM;
5140
5141	/* preserve some things from the slave's original crtc state */
5142	saved_state->uapi = slave_crtc_state->uapi;
5143	saved_state->scaler_state = slave_crtc_state->scaler_state;
5144	saved_state->shared_dpll = slave_crtc_state->shared_dpll;
5145	saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
5146	saved_state->crc_enabled = slave_crtc_state->crc_enabled;
5147
5148	intel_crtc_free_hw_state(slave_crtc_state);
5149	memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
5150	kfree(saved_state);
5151
5152	/* Re-init hw state */
5153	memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
5154	slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
5155	slave_crtc_state->hw.active = master_crtc_state->hw.active;
5156	drm_mode_copy(&slave_crtc_state->hw.mode,
5157		      &master_crtc_state->hw.mode);
5158	drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
5159		      &master_crtc_state->hw.pipe_mode);
5160	drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
5161		      &master_crtc_state->hw.adjusted_mode);
5162	slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
5163
5164	copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
5165
5166	slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
5167	slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
5168	slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;
5169
5170	WARN_ON(master_crtc_state->bigjoiner_pipes !=
5171		slave_crtc_state->bigjoiner_pipes);
5172
5173	return 0;
5174}
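/*
 * The kmemdup()-based save/copy/restore idiom above, in isolation
 * (a minimal sketch; struct foo and its fields are made up): duplicate
 * the template state, patch back the fields that must survive, then
 * overwrite the destination wholesale.
 *
 *	struct foo *saved = kmemdup(master, sizeof(*saved), GFP_KERNEL);
 *	if (!saved)
 *		return -ENOMEM;
 *	saved->uapi = slave->uapi;	// keep the slave's own uapi state
 *	memcpy(slave, saved, sizeof(*slave));
 *	kfree(saved);
 */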
5175
5176static int
5177intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
5178				 struct intel_crtc *crtc)
5179{
5180	struct intel_crtc_state *crtc_state =
5181		intel_atomic_get_new_crtc_state(state, crtc);
5182	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5183	struct intel_crtc_state *saved_state;
5184
5185	saved_state = intel_crtc_state_alloc(crtc);
5186	if (!saved_state)
5187		return -ENOMEM;
5188
5189	/* free the old crtc_state->hw members */
5190	intel_crtc_free_hw_state(crtc_state);
5191
5192	/* FIXME: before the switch to atomic started, a new pipe_config was
5193	 * kzalloc'd. Code that depends on any field being zero should be
5194	 * fixed, so that the crtc_state can be safely duplicated. For now,
5195	 * only fields that are known not to cause problems are preserved. */
5196
5197	saved_state->uapi = crtc_state->uapi;
5198	saved_state->scaler_state = crtc_state->scaler_state;
5199	saved_state->shared_dpll = crtc_state->shared_dpll;
5200	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5201	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
5202	       sizeof(saved_state->icl_port_dplls));
5203	saved_state->crc_enabled = crtc_state->crc_enabled;
5204	if (IS_G4X(dev_priv) ||
5205	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5206		saved_state->wm = crtc_state->wm;
5207
5208	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
5209	kfree(saved_state);
5210
5211	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
5212
5213	return 0;
5214}
5215
5216static int
5217intel_modeset_pipe_config(struct intel_atomic_state *state,
5218			  struct intel_crtc *crtc)
5219{
5220	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5221	struct intel_crtc_state *crtc_state =
5222		intel_atomic_get_new_crtc_state(state, crtc);
5223	struct drm_connector *connector;
5224	struct drm_connector_state *connector_state;
5225	int pipe_src_w, pipe_src_h;
5226	int base_bpp, ret, i;
5227	bool retry = true;
5228
5229	crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;
5230
5231	crtc_state->framestart_delay = 1;
5232
5233	/*
5234	 * Sanitize sync polarity flags based on requested ones. If neither
5235	 * positive nor negative polarity is requested, treat this as meaning
5236	 * negative polarity.
5237	 */
5238	if (!(crtc_state->hw.adjusted_mode.flags &
5239	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
5240		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
5241
5242	if (!(crtc_state->hw.adjusted_mode.flags &
5243	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
5244		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
5245
5246	ret = compute_baseline_pipe_bpp(state, crtc);
5247	if (ret)
5248		return ret;
5249
5250	base_bpp = crtc_state->pipe_bpp;
5251
5252	/*
5253	 * Determine the real pipe dimensions. Note that stereo modes can
5254	 * increase the actual pipe size due to the frame doubling and
5255	 * insertion of additional space for blanks between the frame. This
5256	 * is stored in the crtc timings. We use the requested mode to do this
5257	 * computation to clearly distinguish it from the adjusted mode, which
5258	 * can be changed by the connectors in the below retry loop.
5259	 */
5260	drm_mode_get_hv_timing(&crtc_state->hw.mode,
5261			       &pipe_src_w, &pipe_src_h);
5262	drm_rect_init(&crtc_state->pipe_src, 0, 0,
5263		      pipe_src_w, pipe_src_h);
5264
5265	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5266		struct intel_encoder *encoder =
5267			to_intel_encoder(connector_state->best_encoder);
5268
5269		if (connector_state->crtc != &crtc->base)
5270			continue;
5271
5272		if (!check_single_encoder_cloning(state, crtc, encoder)) {
5273			drm_dbg_kms(&i915->drm,
5274				    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
5275				    encoder->base.base.id, encoder->base.name);
5276			return -EINVAL;
5277		}
5278
5279		/*
5280		 * Determine output_types before calling the .compute_config()
5281		 * hooks so that the hooks can use this information safely.
5282		 */
5283		if (encoder->compute_output_type)
5284			crtc_state->output_types |=
5285				BIT(encoder->compute_output_type(encoder, crtc_state,
5286								 connector_state));
5287		else
5288			crtc_state->output_types |= BIT(encoder->type);
5289	}
5290
5291encoder_retry:
5292	/* Ensure the port clock defaults are reset when retrying. */
5293	crtc_state->port_clock = 0;
5294	crtc_state->pixel_multiplier = 1;
5295
5296	/* Fill in default crtc timings, allow encoders to overwrite them. */
5297	drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
5298			      CRTC_STEREO_DOUBLE);
5299
5300	/* Pass our mode to the connectors and the CRTC to give them a chance to
5301	 * adjust it according to limitations or connector properties, and also
5302	 * a chance to reject the mode entirely.
5303	 */
5304	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5305		struct intel_encoder *encoder =
5306			to_intel_encoder(connector_state->best_encoder);
5307
5308		if (connector_state->crtc != &crtc->base)
5309			continue;
5310
5311		ret = encoder->compute_config(encoder, crtc_state,
5312					      connector_state);
5313		if (ret == -EDEADLK)
5314			return ret;
5315		if (ret < 0) {
5316			drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
5317				    encoder->base.base.id, encoder->base.name, ret);
5318			return ret;
5319		}
5320	}
5321
5322	/* Set default port clock if not overwritten by the encoder. Needs to be
5323	 * done afterwards in case the encoder adjusts the mode. */
5324	if (!crtc_state->port_clock)
5325		crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
5326			* crtc_state->pixel_multiplier;
5327
5328	ret = intel_crtc_compute_config(state, crtc);
5329	if (ret == -EDEADLK)
5330		return ret;
5331	if (ret == -EAGAIN) {
5332		if (drm_WARN(&i915->drm, !retry,
5333			     "[CRTC:%d:%s] loop in pipe configuration computation\n",
5334			     crtc->base.base.id, crtc->base.name))
5335			return -EINVAL;
5336
5337		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n",
5338			    crtc->base.base.id, crtc->base.name);
5339		retry = false;
5340		goto encoder_retry;
5341	}
5342	if (ret < 0) {
5343		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
5344			    crtc->base.base.id, crtc->base.name, ret);
5345		return ret;
5346	}
5347
5348	/* Dithering seems not to pass bits through correctly when it should, so
5349	 * only enable it on 6bpc panels and when it's not a compliance
5350	 * test requesting a 6bpc video pattern.
5351	 */
5352	crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
5353		!crtc_state->dither_force_disable;
5354	drm_dbg_kms(&i915->drm,
5355		    "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
5356		    crtc->base.base.id, crtc->base.name,
5357		    base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
5358
5359	return 0;
5360}
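/*
 * Shape of the single-retry convergence loop implemented above, with
 * placeholder names (a sketch, not driver code): compute the config
 * once, and on -EAGAIN (e.g. bandwidth constrained) reset the derived
 * defaults and retry exactly once before giving up.
 *
 *	bool retry = true;
 * again:
 *	reset_derived_defaults(cfg);	// port_clock = 0, pixel_multiplier = 1
 *	ret = compute(cfg);
 *	if (ret == -EAGAIN) {
 *		if (WARN(!retry, "no convergence\n"))
 *			return -EINVAL;
 *		retry = false;
 *		goto again;
 *	}
 */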
5361
5362static int
5363intel_modeset_pipe_config_late(struct intel_atomic_state *state,
5364			       struct intel_crtc *crtc)
5365{
5366	struct intel_crtc_state *crtc_state =
5367		intel_atomic_get_new_crtc_state(state, crtc);
5368	struct drm_connector_state *conn_state;
5369	struct drm_connector *connector;
5370	int i;
5371
5372	intel_bigjoiner_adjust_pipe_src(crtc_state);
5373
5374	for_each_new_connector_in_state(&state->base, connector,
5375					conn_state, i) {
5376		struct intel_encoder *encoder =
5377			to_intel_encoder(conn_state->best_encoder);
5378		int ret;
5379
5380		if (conn_state->crtc != &crtc->base ||
5381		    !encoder->compute_config_late)
5382			continue;
5383
5384		ret = encoder->compute_config_late(encoder, crtc_state,
5385						   conn_state);
5386		if (ret)
5387			return ret;
5388	}
5389
5390	return 0;
5391}
5392
5393bool intel_fuzzy_clock_check(int clock1, int clock2)
5394{
5395	int diff;
5396
5397	if (clock1 == clock2)
5398		return true;
5399
5400	if (!clock1 || !clock2)
5401		return false;
5402
5403	diff = abs(clock1 - clock2);
5404
5405	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
5406		return true;
5407
5408	return false;
5409}
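/*
 * Worked example for the integer-only tolerance test above: two clocks
 * "fuzzily" match when their absolute difference is below 5% of their
 * sum (i.e. 10% of their average).
 *
 *	clock1 = 148500, clock2 = 148350:
 *		diff = 150
 *		(150 + 296850) * 100 / 296850 = 100 < 105   -> match
 *	clock1 = 100000, clock2 = 115000:
 *		diff = 15000
 *		(15000 + 215000) * 100 / 215000 = 106 >= 105 -> mismatch
 */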
5410
5411static bool
5412intel_compare_link_m_n(const struct intel_link_m_n *m_n,
5413		       const struct intel_link_m_n *m2_n2)
5414{
5415	return m_n->tu == m2_n2->tu &&
5416		m_n->data_m == m2_n2->data_m &&
5417		m_n->data_n == m2_n2->data_n &&
5418		m_n->link_m == m2_n2->link_m &&
5419		m_n->link_n == m2_n2->link_n;
5420}
5421
5422static bool
5423intel_compare_infoframe(const union hdmi_infoframe *a,
5424			const union hdmi_infoframe *b)
5425{
5426	return memcmp(a, b, sizeof(*a)) == 0;
5427}
5428
5429static bool
5430intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
5431			 const struct drm_dp_vsc_sdp *b)
5432{
5433	return memcmp(a, b, sizeof(*a)) == 0;
5434}
5435
5436static void
5437pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
5438			       bool fastset, const char *name,
5439			       const union hdmi_infoframe *a,
5440			       const union hdmi_infoframe *b)
5441{
5442	if (fastset) {
5443		if (!drm_debug_enabled(DRM_UT_KMS))
5444			return;
5445
5446		drm_dbg_kms(&dev_priv->drm,
5447			    "fastset mismatch in %s infoframe\n", name);
5448		drm_dbg_kms(&dev_priv->drm, "expected:\n");
5449		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
5450		drm_dbg_kms(&dev_priv->drm, "found:\n");
5451		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
5452	} else {
5453		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
5454		drm_err(&dev_priv->drm, "expected:\n");
5455		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
5456		drm_err(&dev_priv->drm, "found:\n");
5457		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
5458	}
5459}
5460
5461static void
5462pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
5463				bool fastset, const char *name,
5464				const struct drm_dp_vsc_sdp *a,
5465				const struct drm_dp_vsc_sdp *b)
5466{
5467	if (fastset) {
5468		if (!drm_debug_enabled(DRM_UT_KMS))
5469			return;
5470
5471		drm_dbg_kms(&dev_priv->drm,
5472			    "fastset mismatch in %s dp sdp\n", name);
5473		drm_dbg_kms(&dev_priv->drm, "expected:\n");
5474		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
5475		drm_dbg_kms(&dev_priv->drm, "found:\n");
5476		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
5477	} else {
5478		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
5479		drm_err(&dev_priv->drm, "expected:\n");
5480		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
5481		drm_err(&dev_priv->drm, "found:\n");
5482		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
5483	}
5484}
5485
5486static void __printf(4, 5)
5487pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
5488		     const char *name, const char *format, ...)
5489{
5490	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5491	struct va_format vaf;
5492	va_list args;
5493
5494	va_start(args, format);
5495	vaf.fmt = format;
5496	vaf.va = &args;
5497
5498	if (fastset)
5499		drm_dbg_kms(&i915->drm,
5500			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
5501			    crtc->base.base.id, crtc->base.name, name, &vaf);
5502	else
5503		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
5504			crtc->base.base.id, crtc->base.name, name, &vaf);
5505
5506	va_end(args);
5507}
5508
5509static bool fastboot_enabled(struct drm_i915_private *dev_priv)
5510{
5511	if (dev_priv->params.fastboot != -1)
5512		return dev_priv->params.fastboot;
5513
5514	/* Enable fastboot by default on Skylake and newer */
5515	if (DISPLAY_VER(dev_priv) >= 9)
5516		return true;
5517
5518	/* Enable fastboot by default on VLV and CHV */
5519	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5520		return true;
5521
5522	/* Disabled by default on all others */
5523	return false;
5524}
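/*
 * The tri-state module parameter pattern used above, reduced to a
 * sketch (resolve_tristate is a hypothetical helper, not driver code):
 * -1 means "auto, use the per-platform default", any other value is an
 * explicit user override.
 *
 *	static bool resolve_tristate(int param, bool platform_default)
 *	{
 *		if (param != -1)
 *			return param;		// explicit 0/1 from the user
 *		return platform_default;
 *	}
 */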
5525
5526bool
5527intel_pipe_config_compare(const struct intel_crtc_state *current_config,
5528			  const struct intel_crtc_state *pipe_config,
5529			  bool fastset)
5530{
5531	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
5532	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5533	bool ret = true;
5534	u32 bp_gamma = 0;
5535	bool fixup_inherited = fastset &&
5536		current_config->inherited && !pipe_config->inherited;
5537
5538	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
5539		drm_dbg_kms(&dev_priv->drm,
5540			    "initial modeset and fastboot not set\n");
5541		ret = false;
5542	}
5543
5544#define PIPE_CONF_CHECK_X(name) do { \
5545	if (current_config->name != pipe_config->name) { \
5546		pipe_config_mismatch(fastset, crtc, __stringify(name), \
5547				     "(expected 0x%08x, found 0x%08x)", \
5548				     current_config->name, \
5549				     pipe_config->name); \
5550		ret = false; \
5551	} \
5552} while (0)
5553
5554#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
5555	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
5556		pipe_config_mismatch(fastset, crtc, __stringify(name), \
5557				     "(expected 0x%08x, found 0x%08x)", \
5558				     current_config->name & (mask), \
5559				     pipe_config->name & (mask)); \
5560		ret = false; \
5561	} \
5562} while (0)
5563
5564#define PIPE_CONF_CHECK_I(name) do { \
5565	if (current_config->name != pipe_config->name) { \
5566		pipe_config_mismatch(fastset, crtc, __stringify(name), \
5567				     "(expected %i, found %i)", \
5568				     current_config->name, \
5569				     pipe_config->name); \
5570		ret = false; \
5571	} \
5572} while (0)
5573
5574#define PIPE_CONF_CHECK_BOOL(name) do { \
5575	if (current_config->name != pipe_config->name) { \
5576		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
5577				     "(expected %s, found %s)", \
5578				     str_yes_no(current_config->name), \
5579				     str_yes_no(pipe_config->name)); \
5580		ret = false; \
5581	} \
5582} while (0)
5583
5584/*
5585 * Checks state where we only read out the enabling, but not the entire
5586 * state itself (like full infoframes or ELD for audio). These states
5587 * require a full modeset on bootup to fix up.
5588 */
5589#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
5590	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
5591		PIPE_CONF_CHECK_BOOL(name); \
5592	} else { \
5593		pipe_config_mismatch(fastset, crtc, __stringify(name), \
5594				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
5595				     str_yes_no(current_config->name), \
5596				     str_yes_no(pipe_config->name)); \
5597		ret = false; \
5598	} \
5599} while (0)
5600
5601#define PIPE_CONF_CHECK_P(name) do { \
5602	if (current_config->name != pipe_config->name) { \
5603		pipe_config_mismatch(fastset, crtc, __stringify(name), \
5604				     "(expected %p, found %p)", \
5605				     current_config->name, \
5606				     pipe_config->name); \
5607		ret = false; \
5608	} \
5609} while (0)
5610
5611#define PIPE_CONF_CHECK_M_N(name) do { \
5612	if (!intel_compare_link_m_n(&current_config->name, \
5613				    &pipe_config->name)) { \
5614		pipe_config_mismatch(fastset, crtc, __stringify(name), \
5615				     "(expected tu %i data %i/%i link %i/%i, " \
5616				     "found tu %i data %i/%i link %i/%i)", \
5617				     current_config->name.tu, \
5618				     current_config->name.data_m, \
5619				     current_config->name.data_n, \
5620				     current_config->name.link_m, \
5621				     current_config->name.link_n, \
5622				     pipe_config->name.tu, \
5623				     pipe_config->name.data_m, \
5624				     pipe_config->name.data_n, \
5625				     pipe_config->name.link_m, \
5626				     pipe_config->name.link_n); \
5627		ret = false; \
5628	} \
5629} while (0)
5630
5631#define PIPE_CONF_CHECK_TIMINGS(name) do { \
5632	PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
5633	PIPE_CONF_CHECK_I(name.crtc_htotal); \
5634	PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
5635	PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
5636	PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
5637	PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
5638	PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
5639	PIPE_CONF_CHECK_I(name.crtc_vtotal); \
5640	PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
5641	PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
5642	PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
5643	PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
5644} while (0)
5645
5646#define PIPE_CONF_CHECK_RECT(name) do { \
5647	PIPE_CONF_CHECK_I(name.x1); \
5648	PIPE_CONF_CHECK_I(name.x2); \
5649	PIPE_CONF_CHECK_I(name.y1); \
5650	PIPE_CONF_CHECK_I(name.y2); \
5651} while (0)
5652
5653#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
5654	if ((current_config->name ^ pipe_config->name) & (mask)) { \
5655		pipe_config_mismatch(fastset, crtc, __stringify(name), \
5656				     "(%x) (expected %i, found %i)", \
5657				     (mask), \
5658				     current_config->name & (mask), \
5659				     pipe_config->name & (mask)); \
5660		ret = false; \
5661	} \
5662} while (0)
5663
5664#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
5665	if (!intel_compare_infoframe(&current_config->infoframes.name, \
5666				     &pipe_config->infoframes.name)) { \
5667		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
5668					       &current_config->infoframes.name, \
5669					       &pipe_config->infoframes.name); \
5670		ret = false; \
5671	} \
5672} while (0)
5673
5674#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
5675	if (!current_config->has_psr && !pipe_config->has_psr && \
5676	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
5677				      &pipe_config->infoframes.name)) { \
5678		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
5679						&current_config->infoframes.name, \
5680						&pipe_config->infoframes.name); \
5681		ret = false; \
5682	} \
5683} while (0)
5684
5685#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
5686	if (current_config->name1 != pipe_config->name1) { \
5687		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
5688				"(expected %i, found %i, won't compare lut values)", \
5689				current_config->name1, \
5690				pipe_config->name1); \
5691		ret = false;\
5692	} else { \
5693		if (!intel_color_lut_equal(current_config->name2, \
5694					pipe_config->name2, pipe_config->name1, \
5695					bit_precision)) { \
5696			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
5697					"hw_state doesn't match sw_state"); \
5698			ret = false; \
5699		} \
5700	} \
5701} while (0)
5702
5703#define PIPE_CONF_QUIRK(quirk) \
5704	((current_config->quirks | pipe_config->quirks) & (quirk))
5705
5706	PIPE_CONF_CHECK_I(hw.enable);
5707	PIPE_CONF_CHECK_I(hw.active);
5708
5709	PIPE_CONF_CHECK_I(cpu_transcoder);
5710	PIPE_CONF_CHECK_I(mst_master_transcoder);
5711
5712	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
5713	PIPE_CONF_CHECK_I(fdi_lanes);
5714	PIPE_CONF_CHECK_M_N(fdi_m_n);
5715
5716	PIPE_CONF_CHECK_I(lane_count);
5717	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
5718
5719	if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
5720		if (!fastset || !pipe_config->seamless_m_n)
5721			PIPE_CONF_CHECK_M_N(dp_m_n);
5722	} else {
5723		PIPE_CONF_CHECK_M_N(dp_m_n);
5724		PIPE_CONF_CHECK_M_N(dp_m2_n2);
5725	}
5726
5727	PIPE_CONF_CHECK_X(output_types);
5728
5729	PIPE_CONF_CHECK_I(framestart_delay);
5730	PIPE_CONF_CHECK_I(msa_timing_delay);
5731
5732	PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
5733	PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);
5734
5735	PIPE_CONF_CHECK_I(pixel_multiplier);
5736
5737	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5738			      DRM_MODE_FLAG_INTERLACE);
5739
5740	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
5741		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5742				      DRM_MODE_FLAG_PHSYNC);
5743		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5744				      DRM_MODE_FLAG_NHSYNC);
5745		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5746				      DRM_MODE_FLAG_PVSYNC);
5747		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5748				      DRM_MODE_FLAG_NVSYNC);
5749	}
5750
5751	PIPE_CONF_CHECK_I(output_format);
5752	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
5753	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
5754	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5755		PIPE_CONF_CHECK_BOOL(limited_color_range);
5756
5757	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
5758	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
5759	PIPE_CONF_CHECK_BOOL(has_infoframe);
5760	PIPE_CONF_CHECK_BOOL(fec_enable);
5761
5762	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
5763
5764	PIPE_CONF_CHECK_X(gmch_pfit.control);
5765	/* pfit ratios are autocomputed by the hw on gen4+ */
5766	if (DISPLAY_VER(dev_priv) < 4)
5767		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
5768	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
5769
5770	/*
5771	 * Changing the EDP transcoder input mux
5772	 * (A_ONOFF vs. A_ON) requires a full modeset.
5773	 */
5774	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
5775
5776	if (!fastset) {
5777		PIPE_CONF_CHECK_RECT(pipe_src);
5778
5779		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
5780		PIPE_CONF_CHECK_RECT(pch_pfit.dst);
5781
5782		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
5783		PIPE_CONF_CHECK_I(pixel_rate);
5784
5785		PIPE_CONF_CHECK_X(gamma_mode);
5786		if (IS_CHERRYVIEW(dev_priv))
5787			PIPE_CONF_CHECK_X(cgm_mode);
5788		else
5789			PIPE_CONF_CHECK_X(csc_mode);
5790		PIPE_CONF_CHECK_BOOL(gamma_enable);
5791		PIPE_CONF_CHECK_BOOL(csc_enable);
5792
5793		PIPE_CONF_CHECK_I(linetime);
5794		PIPE_CONF_CHECK_I(ips_linetime);
5795
5796		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
5797		if (bp_gamma)
5798			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, post_csc_lut, bp_gamma);
5799
5800		if (current_config->active_planes) {
5801			PIPE_CONF_CHECK_BOOL(has_psr);
5802			PIPE_CONF_CHECK_BOOL(has_psr2);
5803			PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
5804			PIPE_CONF_CHECK_I(dc3co_exitline);
5805		}
5806	}
5807
5808	PIPE_CONF_CHECK_BOOL(double_wide);
5809
5810	if (dev_priv->display.dpll.mgr) {
5811		PIPE_CONF_CHECK_P(shared_dpll);
5812
5813		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
5814		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
5815		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
5816		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5817		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
5818		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
5819		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
5820		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
5821		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
5822		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
5823		PIPE_CONF_CHECK_X(dpll_hw_state.div0);
5824		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
5825		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
5826		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
5827		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
5828		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
5829		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
5830		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
5831		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
5832		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
5833		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
5834		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
5835		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
5836		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
5837		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
5838		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
5839		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
5840		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
5841		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
5842		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
5843		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
5844		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
5845	}
5846
5847	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
5848	PIPE_CONF_CHECK_X(dsi_pll.div);
5849
5850	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
5851		PIPE_CONF_CHECK_I(pipe_bpp);
5852
5853	if (!fastset || !pipe_config->seamless_m_n) {
5854		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
5855		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
5856	}
5857	PIPE_CONF_CHECK_I(port_clock);
5858
5859	PIPE_CONF_CHECK_I(min_voltage_level);
5860
5861	if (current_config->has_psr || pipe_config->has_psr)
5862		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
5863					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
5864	else
5865		PIPE_CONF_CHECK_X(infoframes.enable);
5866
5867	PIPE_CONF_CHECK_X(infoframes.gcp);
5868	PIPE_CONF_CHECK_INFOFRAME(avi);
5869	PIPE_CONF_CHECK_INFOFRAME(spd);
5870	PIPE_CONF_CHECK_INFOFRAME(hdmi);
5871	PIPE_CONF_CHECK_INFOFRAME(drm);
5872	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
5873
5874	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
5875	PIPE_CONF_CHECK_I(master_transcoder);
5876	PIPE_CONF_CHECK_X(bigjoiner_pipes);
5877
5878	PIPE_CONF_CHECK_I(dsc.compression_enable);
5879	PIPE_CONF_CHECK_I(dsc.dsc_split);
5880	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
5881
5882	PIPE_CONF_CHECK_BOOL(splitter.enable);
5883	PIPE_CONF_CHECK_I(splitter.link_count);
5884	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
5885
5886	PIPE_CONF_CHECK_BOOL(vrr.enable);
5887	PIPE_CONF_CHECK_I(vrr.vmin);
5888	PIPE_CONF_CHECK_I(vrr.vmax);
5889	PIPE_CONF_CHECK_I(vrr.flipline);
5890	PIPE_CONF_CHECK_I(vrr.pipeline_full);
5891	PIPE_CONF_CHECK_I(vrr.guardband);
5892
5893#undef PIPE_CONF_CHECK_X
5894#undef PIPE_CONF_CHECK_I
5895#undef PIPE_CONF_CHECK_BOOL
5896#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
5897#undef PIPE_CONF_CHECK_P
5898#undef PIPE_CONF_CHECK_FLAGS
5899#undef PIPE_CONF_CHECK_COLOR_LUT
5900#undef PIPE_CONF_CHECK_TIMINGS
5901#undef PIPE_CONF_CHECK_RECT
5902#undef PIPE_CONF_QUIRK
5903
5904	return ret;
5905}
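/*
 * All of the PIPE_CONF_CHECK_* macros above share one skeleton, shown
 * here with hypothetical names: compare a single field of two structs,
 * log a mismatch with the stringified field name, and fold the result
 * into one bool rather than returning early, so a single pass reports
 * every mismatching field.
 *
 *	#define CHECK_FIELD(name) do { \
 *		if (a->name != b->name) { \
 *			pr_debug("mismatch in " __stringify(name) "\n"); \
 *			ret = false; \
 *		} \
 *	} while (0)
 */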
5906
5907static void
5908intel_verify_planes(struct intel_atomic_state *state)
5909{
5910	struct intel_plane *plane;
5911	const struct intel_plane_state *plane_state;
5912	int i;
5913
5914	for_each_new_intel_plane_in_state(state, plane,
5915					  plane_state, i)
5916		assert_plane(plane, plane_state->planar_slave ||
5917			     plane_state->uapi.visible);
5918}
5919
5920int intel_modeset_all_pipes(struct intel_atomic_state *state,
5921			    const char *reason)
5922{
5923	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5924	struct intel_crtc *crtc;
5925
5926	/*
5927	 * Add all pipes to the state, and force
5928	 * a modeset on all the active ones.
5929	 */
5930	for_each_intel_crtc(&dev_priv->drm, crtc) {
5931		struct intel_crtc_state *crtc_state;
5932		int ret;
5933
5934		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5935		if (IS_ERR(crtc_state))
5936			return PTR_ERR(crtc_state);
5937
5938		if (!crtc_state->hw.active ||
5939		    intel_crtc_needs_modeset(crtc_state))
5940			continue;
5941
5942		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
5943			    crtc->base.base.id, crtc->base.name, reason);
5944
5945		crtc_state->uapi.mode_changed = true;
5946		crtc_state->update_pipe = false;
5947
5948		ret = drm_atomic_add_affected_connectors(&state->base,
5949							 &crtc->base);
5950		if (ret)
5951			return ret;
5952
5953		ret = intel_atomic_add_affected_planes(state, crtc);
5954		if (ret)
5955			return ret;
5956
5957		crtc_state->update_planes |= crtc_state->active_planes;
5958	}
5959
5960	return 0;
5961}
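/*
 * Usage sketch for intel_modeset_all_pipes(): callers pass a
 * human-readable reason that ends up in the debug log above, e.g.
 * (illustrative call, reason string chosen here as an example):
 *
 *	ret = intel_modeset_all_pipes(state, "CDCLK change");
 *	if (ret)
 *		return ret;
 */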
5962
5963void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
5964{
5965	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5966	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5967	struct drm_display_mode adjusted_mode;
5968
5969	drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
5970
5971	if (crtc_state->vrr.enable) {
5972		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
5973		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
5974		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
5975		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
5976	}
5977
5978	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
5979
5980	crtc->mode_flags = crtc_state->mode_flags;
5981
5982	/*
5983	 * The scanline counter increments at the leading edge of hsync.
5984	 *
5985	 * On most platforms it starts counting from vtotal-1 on the
5986	 * first active line. That means the scanline counter value is
5987	 * always one less than what we would expect. Ie. just after
5988	 * start of vblank, which also occurs at start of hsync (on the
5989	 * last active line), the scanline counter will read vblank_start-1.
5990	 *
5991	 * On gen2 the scanline counter starts counting from 1 instead
5992	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
5993	 * to keep the value positive), instead of adding one.
5994	 *
5995	 * On HSW+ the behaviour of the scanline counter depends on the output
5996	 * type. For DP ports it behaves like most other platforms, but on HDMI
5997	 * there's an extra 1 line difference. So we need to add two instead of
5998	 * one to the value.
5999	 *
6000	 * On VLV/CHV DSI the scanline counter would appear to increment
6001	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
6002	 * that means we can't tell whether we're in vblank or not while
6003	 * we're on that particular line. We must still set scanline_offset
6004	 * to 1 so that the vblank timestamps come out correct when we query
6005	 * the scanline counter from within the vblank interrupt handler.
6006	 * However if queried just before the start of vblank we'll get an
6007	 * answer that's slightly in the future.
6008	 */
6009	if (DISPLAY_VER(dev_priv) == 2) {
6010		int vtotal;
6011
6012		vtotal = adjusted_mode.crtc_vtotal;
6013		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6014			vtotal /= 2;
6015
6016		crtc->scanline_offset = vtotal - 1;
6017	} else if (HAS_DDI(dev_priv) &&
6018		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
6019		crtc->scanline_offset = 2;
6020	} else {
6021		crtc->scanline_offset = 1;
6022	}
6023}
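/*
 * The scanline_offset chosen above is applied when converting the raw
 * hardware scanline counter into a software scanline, roughly (a sketch
 * of the arithmetic, names simplified):
 *
 *	sw_scanline = (hw_scanline + crtc->scanline_offset) % vtotal;
 *
 * which maps the "counts from vtotal-1 on the first active line"
 * hardware behaviour back onto a 0-based count.
 */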
6024
6025/*
6026 * This implements the workaround described in the "notes" section of the mode
6027 * set sequence documentation. When going from no pipes or single pipe to
6028 * multiple pipes, and planes are enabled after the pipe, we need to wait at
6029 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
6030 */
6031static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
6032{
6033	struct intel_crtc_state *crtc_state;
6034	struct intel_crtc *crtc;
6035	struct intel_crtc_state *first_crtc_state = NULL;
6036	struct intel_crtc_state *other_crtc_state = NULL;
6037	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
6038	int i;
6039
6040	/* look at all crtcs that are going to be enabled during the modeset */
6041	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6042		if (!crtc_state->hw.active ||
6043		    !intel_crtc_needs_modeset(crtc_state))
6044			continue;
6045
6046		if (first_crtc_state) {
6047			other_crtc_state = crtc_state;
6048			break;
6049		} else {
6050			first_crtc_state = crtc_state;
6051			first_pipe = crtc->pipe;
6052		}
6053	}
6054
6055	/* No workaround needed? */
6056	if (!first_crtc_state)
6057		return 0;
6058
6059	/* w/a possibly needed, check how many crtcs are already enabled. */
6060	for_each_intel_crtc(state->base.dev, crtc) {
6061		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6062		if (IS_ERR(crtc_state))
6063			return PTR_ERR(crtc_state);
6064
6065		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
6066
6067		if (!crtc_state->hw.active ||
6068		    intel_crtc_needs_modeset(crtc_state))
6069			continue;
6070
6071		/* 2 or more enabled crtcs means no need for w/a */
6072		if (enabled_pipe != INVALID_PIPE)
6073			return 0;
6074
6075		enabled_pipe = crtc->pipe;
6076	}
6077
6078	if (enabled_pipe != INVALID_PIPE)
6079		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
6080	else if (other_crtc_state)
6081		other_crtc_state->hsw_workaround_pipe = first_pipe;
6082
6083	return 0;
6084}
6085
6086u8 intel_calc_active_pipes(struct intel_atomic_state *state,
6087			   u8 active_pipes)
6088{
6089	const struct intel_crtc_state *crtc_state;
6090	struct intel_crtc *crtc;
6091	int i;
6092
6093	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6094		if (crtc_state->hw.active)
6095			active_pipes |= BIT(crtc->pipe);
6096		else
6097			active_pipes &= ~BIT(crtc->pipe);
6098	}
6099
6100	return active_pipes;
6101}
6102
6103static int intel_modeset_checks(struct intel_atomic_state *state)
6104{
6105	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6106
6107	state->modeset = true;
6108
6109	if (IS_HASWELL(dev_priv))
6110		return hsw_mode_set_planes_workaround(state);
6111
6112	return 0;
6113}
6114
6115static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
6116				     struct intel_crtc_state *new_crtc_state)
6117{
6118	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
6119		return;
6120
6121	new_crtc_state->uapi.mode_changed = false;
6122	if (!intel_crtc_needs_modeset(new_crtc_state))
6123		new_crtc_state->update_pipe = true;
6124}
6125
6126static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
6127					  struct intel_crtc *crtc,
6128					  u8 plane_ids_mask)
6129{
6130	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6131	struct intel_plane *plane;
6132
6133	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6134		struct intel_plane_state *plane_state;
6135
6136		if ((plane_ids_mask & BIT(plane->id)) == 0)
6137			continue;
6138
6139		plane_state = intel_atomic_get_plane_state(state, plane);
6140		if (IS_ERR(plane_state))
6141			return PTR_ERR(plane_state);
6142	}
6143
6144	return 0;
6145}
6146
6147int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
6148				     struct intel_crtc *crtc)
6149{
6150	const struct intel_crtc_state *old_crtc_state =
6151		intel_atomic_get_old_crtc_state(state, crtc);
6152	const struct intel_crtc_state *new_crtc_state =
6153		intel_atomic_get_new_crtc_state(state, crtc);
6154
6155	return intel_crtc_add_planes_to_state(state, crtc,
6156					      old_crtc_state->enabled_planes |
6157					      new_crtc_state->enabled_planes);
6158}
6159
6160static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
6161{
6162	/* See {hsw,vlv,ivb}_plane_ratio() */
6163	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
6164		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
6165		IS_IVYBRIDGE(dev_priv);
6166}
6167
6168static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
6169					   struct intel_crtc *crtc,
6170					   struct intel_crtc *other)
6171{
6172	const struct intel_plane_state *plane_state;
6173	struct intel_plane *plane;
6174	u8 plane_ids = 0;
6175	int i;
6176
6177	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6178		if (plane->pipe == crtc->pipe)
6179			plane_ids |= BIT(plane->id);
6180	}
6181
6182	return intel_crtc_add_planes_to_state(state, other, plane_ids);
6183}
6184
6185static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
6186{
6187	struct drm_i915_private *i915 = to_i915(state->base.dev);
6188	const struct intel_crtc_state *crtc_state;
6189	struct intel_crtc *crtc;
6190	int i;
6191
6192	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6193		struct intel_crtc *other;
6194
6195		for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
6196						 crtc_state->bigjoiner_pipes) {
6197			int ret;
6198
6199			if (crtc == other)
6200				continue;
6201
6202			ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
6203			if (ret)
6204				return ret;
6205		}
6206	}
6207
6208	return 0;
6209}
6210
6211static int intel_atomic_check_planes(struct intel_atomic_state *state)
6212{
6213	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6214	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6215	struct intel_plane_state *plane_state;
6216	struct intel_plane *plane;
6217	struct intel_crtc *crtc;
6218	int i, ret;
6219
6220	ret = icl_add_linked_planes(state);
6221	if (ret)
6222		return ret;
6223
6224	ret = intel_bigjoiner_add_affected_planes(state);
6225	if (ret)
6226		return ret;
6227
6228	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6229		ret = intel_plane_atomic_check(state, plane);
6230		if (ret) {
6231			drm_dbg_atomic(&dev_priv->drm,
6232				       "[PLANE:%d:%s] atomic driver check failed\n",
6233				       plane->base.base.id, plane->base.name);
6234			return ret;
6235		}
6236	}
6237
6238	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6239					    new_crtc_state, i) {
6240		u8 old_active_planes, new_active_planes;
6241
6242		ret = icl_check_nv12_planes(new_crtc_state);
6243		if (ret)
6244			return ret;
6245
6246		/*
6247		 * On some platforms the number of active planes affects
6248		 * the planes' minimum cdclk calculation. Add such planes
6249		 * to the state before we compute the minimum cdclk.
6250		 */
6251		if (!active_planes_affects_min_cdclk(dev_priv))
6252			continue;
6253
6254		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
6255		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
6256
6257		if (hweight8(old_active_planes) == hweight8(new_active_planes))
6258			continue;
6259
6260		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
6261		if (ret)
6262			return ret;
6263	}
6264
6265	return 0;
6266}
6267
6268static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
6269{
6270	struct intel_crtc_state *crtc_state;
6271	struct intel_crtc *crtc;
6272	int i;
6273
6274	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6275		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6276		int ret;
6277
6278		ret = intel_crtc_atomic_check(state, crtc);
6279		if (ret) {
6280			drm_dbg_atomic(&i915->drm,
6281				       "[CRTC:%d:%s] atomic driver check failed\n",
6282				       crtc->base.base.id, crtc->base.name);
6283			return ret;
6284		}
6285	}
6286
6287	return 0;
6288}
6289
6290static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
6291					       u8 transcoders)
6292{
6293	const struct intel_crtc_state *new_crtc_state;
6294	struct intel_crtc *crtc;
6295	int i;
6296
6297	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6298		if (new_crtc_state->hw.enable &&
6299		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
6300		    intel_crtc_needs_modeset(new_crtc_state))
6301			return true;
6302	}
6303
6304	return false;
6305}
6306
6307static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
6308				     u8 pipes)
6309{
6310	const struct intel_crtc_state *new_crtc_state;
6311	struct intel_crtc *crtc;
6312	int i;
6313
6314	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6315		if (new_crtc_state->hw.enable &&
6316		    pipes & BIT(crtc->pipe) &&
6317		    intel_crtc_needs_modeset(new_crtc_state))
6318			return true;
6319	}
6320
6321	return false;
6322}
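/*
 * Usage sketch for the two helpers above, mirroring the fastset checks
 * later in intel_atomic_check(): if any transcoder/pipe this crtc
 * depends on is already doing a full modeset, force one here as well.
 *
 *	if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
 *		new_crtc_state->uapi.mode_changed = true;
 *		new_crtc_state->update_pipe = false;
 *	}
 */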
6323
6324static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
6325					struct intel_crtc *master_crtc)
6326{
6327	struct drm_i915_private *i915 = to_i915(state->base.dev);
6328	struct intel_crtc_state *master_crtc_state =
6329		intel_atomic_get_new_crtc_state(state, master_crtc);
6330	struct intel_crtc *slave_crtc;
6331
6332	if (!master_crtc_state->bigjoiner_pipes)
6333		return 0;
6334
6335	/* sanity check */
6336	if (drm_WARN_ON(&i915->drm,
6337			master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
6338		return -EINVAL;
6339
6340	if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
6341		drm_dbg_kms(&i915->drm,
6342			    "[CRTC:%d:%s] Cannot act as big joiner master "
6343			    "(need 0x%x as pipes, only 0x%x possible)\n",
6344			    master_crtc->base.base.id, master_crtc->base.name,
6345			    master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
6346		return -EINVAL;
6347	}
6348
6349	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
6350					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
6351		struct intel_crtc_state *slave_crtc_state;
6352		int ret;
6353
6354		slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
6355		if (IS_ERR(slave_crtc_state))
6356			return PTR_ERR(slave_crtc_state);
6357
6358		/* master being enabled, slave was already configured? */
6359		if (slave_crtc_state->uapi.enable) {
6360			drm_dbg_kms(&i915->drm,
6361				    "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
6362				    "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
6363				    slave_crtc->base.base.id, slave_crtc->base.name,
6364				    master_crtc->base.base.id, master_crtc->base.name);
6365			return -EINVAL;
6366		}
6367
6368		/*
6369		 * The state copy logic assumes the master crtc gets processed
6370		 * before the slave crtc during the main compute_config loop.
6371		 * This works because the crtcs are created in pipe order,
6372		 * and the hardware requires master pipe < slave pipe as well.
6373		 * Should that change, we need to rethink the logic.
6374		 */
6375		if (WARN_ON(drm_crtc_index(&master_crtc->base) >
6376			    drm_crtc_index(&slave_crtc->base)))
6377			return -EINVAL;
6378
6379		drm_dbg_kms(&i915->drm,
6380			    "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
6381			    slave_crtc->base.base.id, slave_crtc->base.name,
6382			    master_crtc->base.base.id, master_crtc->base.name);
6383
6384		slave_crtc_state->bigjoiner_pipes =
6385			master_crtc_state->bigjoiner_pipes;
6386
6387		ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
6388		if (ret)
6389			return ret;
6390	}
6391
6392	return 0;
6393}
6394
6395static void kill_bigjoiner_slave(struct intel_atomic_state *state,
6396				 struct intel_crtc *master_crtc)
6397{
6398	struct drm_i915_private *i915 = to_i915(state->base.dev);
6399	struct intel_crtc_state *master_crtc_state =
6400		intel_atomic_get_new_crtc_state(state, master_crtc);
6401	struct intel_crtc *slave_crtc;
6402
6403	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
6404					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
6405		struct intel_crtc_state *slave_crtc_state =
6406			intel_atomic_get_new_crtc_state(state, slave_crtc);
6407
6408		slave_crtc_state->bigjoiner_pipes = 0;
6409
6410		intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
6411	}
6412
6413	master_crtc_state->bigjoiner_pipes = 0;
6414}
6415
6416/**
6417 * DOC: asynchronous flip implementation
6418 *
6419 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
6420 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
6421 * Correspondingly, support is currently added for the primary plane only.
6422 *
6423 * Async flip can only change the plane surface address, so anything else
6424 * changing is rejected from the intel_async_flip_check_hw() function.
6425 * Once this check is cleared, flip done interrupt is enabled using
6426 * the intel_crtc_enable_flip_done() function.
6427 *
6428 * As soon as the surface address register is written, flip done interrupt is
6429 * generated and the requested events are sent to userspace in the interrupt
6430 * handler itself. The timestamp and sequence sent during the flip done event
6431 * correspond to the last vblank and have no relation to the actual time when
6432 * the flip done event was sent.
6433 */
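/*
 * Userspace view of the above (illustrative, not driver code): an async
 * flip is requested by passing DRM_MODE_PAGE_FLIP_ASYNC to the page
 * flip IOCTL, e.g. via libdrm:
 *
 *	ret = drmModePageFlip(fd, crtc_id, fb_id,
 *			      DRM_MODE_PAGE_FLIP_EVENT |
 *			      DRM_MODE_PAGE_FLIP_ASYNC,
 *			      user_data);
 */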
6434static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
6435				       struct intel_crtc *crtc)
6436{
6437	struct drm_i915_private *i915 = to_i915(state->base.dev);
6438	const struct intel_crtc_state *new_crtc_state =
6439		intel_atomic_get_new_crtc_state(state, crtc);
6440	const struct intel_plane_state *old_plane_state;
6441	struct intel_plane_state *new_plane_state;
6442	struct intel_plane *plane;
6443	int i;
6444
6445	if (!new_crtc_state->uapi.async_flip)
6446		return 0;
6447
6448	if (!new_crtc_state->uapi.active) {
6449		drm_dbg_kms(&i915->drm,
6450			    "[CRTC:%d:%s] not active\n",
6451			    crtc->base.base.id, crtc->base.name);
6452		return -EINVAL;
6453	}
6454
6455	if (intel_crtc_needs_modeset(new_crtc_state)) {
6456		drm_dbg_kms(&i915->drm,
6457			    "[CRTC:%d:%s] modeset required\n",
6458			    crtc->base.base.id, crtc->base.name);
6459		return -EINVAL;
6460	}
6461
6462	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6463					     new_plane_state, i) {
6464		if (plane->pipe != crtc->pipe)
6465			continue;
6466
6467		/*
6468		 * TODO: Async flip is only supported through the page flip IOCTL
6469		 * as of now, so support is currently added for the primary plane
6470		 * only. Support for other planes on platforms that support this
6471		 * (vlv/chv and icl+) should be added when async flip is enabled
6472		 * in the atomic IOCTL path.
6473		 */
6474		if (!plane->async_flip) {
6475			drm_dbg_kms(&i915->drm,
6476				    "[PLANE:%d:%s] async flip not supported\n",
6477				    plane->base.base.id, plane->base.name);
6478			return -EINVAL;
6479		}
6480
6481		if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
6482			drm_dbg_kms(&i915->drm,
6483				    "[PLANE:%d:%s] no old or new framebuffer\n",
6484				    plane->base.base.id, plane->base.name);
6485			return -EINVAL;
6486		}
6487	}
6488
6489	return 0;
6490}
6491
6492static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
6493{
6494	struct drm_i915_private *i915 = to_i915(state->base.dev);
6495	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6496	const struct intel_plane_state *new_plane_state, *old_plane_state;
6497	struct intel_plane *plane;
6498	int i;
6499
6500	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6501	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6502
6503	if (!new_crtc_state->uapi.async_flip)
6504		return 0;
6505
6506	if (!new_crtc_state->hw.active) {
6507		drm_dbg_kms(&i915->drm,
6508			    "[CRTC:%d:%s] not active\n",
6509			    crtc->base.base.id, crtc->base.name);
6510		return -EINVAL;
6511	}
6512
6513	if (intel_crtc_needs_modeset(new_crtc_state)) {
6514		drm_dbg_kms(&i915->drm,
6515			    "[CRTC:%d:%s] modeset required\n",
6516			    crtc->base.base.id, crtc->base.name);
6517		return -EINVAL;
6518	}
6519
6520	if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
6521		drm_dbg_kms(&i915->drm,
6522			    "[CRTC:%d:%s] Active planes cannot be in async flip\n",
6523			    crtc->base.base.id, crtc->base.name);
6524		return -EINVAL;
6525	}
6526
6527	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6528					     new_plane_state, i) {
6529		if (plane->pipe != crtc->pipe)
6530			continue;
6531
6532		/*
6533		 * Only async flip capable planes should be in the state
6534		 * if we're really about to ask the hardware to perform
6535		 * an async flip. We should never get this far otherwise.
6536		 */
6537		if (drm_WARN_ON(&i915->drm,
6538				new_crtc_state->do_async_flip && !plane->async_flip))
6539			return -EINVAL;
6540
6541		/*
6542		 * Only check async flip capable planes; other planes
6543		 * may be involved in the initial commit due to
6544		 * the wm0/ddb optimization.
6545		 *
6546		 * TODO maybe should track which planes actually
6547		 * were requested to do the async flip...
6548		 */
6549		if (!plane->async_flip)
6550			continue;
6551
6552		/*
6553		 * FIXME: This check is kept generic for all platforms.
6554		 * Need to verify this for all gen9 platforms to enable
6555		 * this selectively if required.
6556		 */
6557		switch (new_plane_state->hw.fb->modifier) {
6558		case I915_FORMAT_MOD_X_TILED:
6559		case I915_FORMAT_MOD_Y_TILED:
6560		case I915_FORMAT_MOD_Yf_TILED:
6561		case I915_FORMAT_MOD_4_TILED:
6562			break;
6563		default:
6564			drm_dbg_kms(&i915->drm,
6565				    "[PLANE:%d:%s] Modifier does not support async flips\n",
6566				    plane->base.base.id, plane->base.name);
6567			return -EINVAL;
6568		}
6569
6570		if (new_plane_state->hw.fb->format->num_planes > 1) {
6571			drm_dbg_kms(&i915->drm,
6572				    "[PLANE:%d:%s] Planar formats do not support async flips\n",
6573				    plane->base.base.id, plane->base.name);
6574			return -EINVAL;
6575		}
6576
6577		if (old_plane_state->view.color_plane[0].mapping_stride !=
6578		    new_plane_state->view.color_plane[0].mapping_stride) {
6579			drm_dbg_kms(&i915->drm,
6580				    "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
6581				    plane->base.base.id, plane->base.name);
6582			return -EINVAL;
6583		}
6584
6585		if (old_plane_state->hw.fb->modifier !=
6586		    new_plane_state->hw.fb->modifier) {
6587			drm_dbg_kms(&i915->drm,
6588				    "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
6589				    plane->base.base.id, plane->base.name);
6590			return -EINVAL;
6591		}
6592
6593		if (old_plane_state->hw.fb->format !=
6594		    new_plane_state->hw.fb->format) {
6595			drm_dbg_kms(&i915->drm,
6596				    "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
6597				    plane->base.base.id, plane->base.name);
6598			return -EINVAL;
6599		}
6600
6601		if (old_plane_state->hw.rotation !=
6602		    new_plane_state->hw.rotation) {
6603			drm_dbg_kms(&i915->drm,
6604				    "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
6605				    plane->base.base.id, plane->base.name);
6606			return -EINVAL;
6607		}
6608
6609		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
6610		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
6611			drm_dbg_kms(&i915->drm,
6612				    "[PLANE:%d:%s] Size/coordinates cannot be changed in async flip\n",
6613				    plane->base.base.id, plane->base.name);
6614			return -EINVAL;
6615		}
6616
6617		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
6618			drm_dbg_kms(&i915->drm,
6619				    "[PLANE:%d:%s] Alpha value cannot be changed in async flip\n",
6620				    plane->base.base.id, plane->base.name);
6621			return -EINVAL;
6622		}
6623
6624		if (old_plane_state->hw.pixel_blend_mode !=
6625		    new_plane_state->hw.pixel_blend_mode) {
6626			drm_dbg_kms(&i915->drm,
6627				    "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
6628				    plane->base.base.id, plane->base.name);
6629			return -EINVAL;
6630		}
6631
6632		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
6633			drm_dbg_kms(&i915->drm,
6634				    "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
6635				    plane->base.base.id, plane->base.name);
6636			return -EINVAL;
6637		}
6638
6639		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
6640			drm_dbg_kms(&i915->drm,
6641				    "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
6642				    plane->base.base.id, plane->base.name);
6643			return -EINVAL;
6644		}
6645
6646		/* plane decryption is allowed to change only in synchronous flips */
6647		if (old_plane_state->decrypt != new_plane_state->decrypt) {
6648			drm_dbg_kms(&i915->drm,
6649				    "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
6650				    plane->base.base.id, plane->base.name);
6651			return -EINVAL;
6652		}
6653	}
6654
6655	return 0;
6656}
6657
6658static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
6659{
6660	struct drm_i915_private *i915 = to_i915(state->base.dev);
6661	struct intel_crtc_state *crtc_state;
6662	struct intel_crtc *crtc;
6663	u8 affected_pipes = 0;
6664	u8 modeset_pipes = 0;
6665	int i;
6666
6667	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6668		affected_pipes |= crtc_state->bigjoiner_pipes;
6669		if (intel_crtc_needs_modeset(crtc_state))
6670			modeset_pipes |= crtc_state->bigjoiner_pipes;
6671	}
6672
6673	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
6674		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6675		if (IS_ERR(crtc_state))
6676			return PTR_ERR(crtc_state);
6677	}
6678
6679	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
6680		int ret;
6681
6682		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6683
6684		crtc_state->uapi.mode_changed = true;
6685
6686		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
6687		if (ret)
6688			return ret;
6689
6690		ret = intel_atomic_add_affected_planes(state, crtc);
6691		if (ret)
6692			return ret;
6693	}
6694
6695	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6696		/* Kill old bigjoiner link, we may re-establish afterwards */
6697		if (intel_crtc_needs_modeset(crtc_state) &&
6698		    intel_crtc_is_bigjoiner_master(crtc_state))
6699			kill_bigjoiner_slave(state, crtc);
6700	}
6701
6702	return 0;
6703}
6704
6705/**
6706 * intel_atomic_check - validate state object
6707 * @dev: drm device
6708 * @_state: state to validate
6709 */
6710static int intel_atomic_check(struct drm_device *dev,
6711			      struct drm_atomic_state *_state)
6712{
6713	struct drm_i915_private *dev_priv = to_i915(dev);
6714	struct intel_atomic_state *state = to_intel_atomic_state(_state);
6715	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6716	struct intel_crtc *crtc;
6717	int ret, i;
6718	bool any_ms = false;
6719
6720	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6721					    new_crtc_state, i) {
6722		if (new_crtc_state->inherited != old_crtc_state->inherited)
6723			new_crtc_state->uapi.mode_changed = true;
6724
6725		if (new_crtc_state->uapi.scaling_filter !=
6726		    old_crtc_state->uapi.scaling_filter)
6727			new_crtc_state->uapi.mode_changed = true;
6728	}
6729
6730	intel_vrr_check_modeset(state);
6731
6732	ret = drm_atomic_helper_check_modeset(dev, &state->base);
6733	if (ret)
6734		goto fail;
6735
6736	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6737		ret = intel_async_flip_check_uapi(state, crtc);
6738		if (ret)
6739			return ret;
6740	}
6741
6742	ret = intel_bigjoiner_add_affected_crtcs(state);
6743	if (ret)
6744		goto fail;
6745
6746	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6747					    new_crtc_state, i) {
6748		if (!intel_crtc_needs_modeset(new_crtc_state)) {
6749			if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
6750				copy_bigjoiner_crtc_state_nomodeset(state, crtc);
6751			else
6752				intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
6753			continue;
6754		}
6755
6756		if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
6757			drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
6758			continue;
6759		}
6760
6761		ret = intel_crtc_prepare_cleared_state(state, crtc);
6762		if (ret)
6763			goto fail;
6764
6765		if (!new_crtc_state->hw.enable)
6766			continue;
6767
6768		ret = intel_modeset_pipe_config(state, crtc);
6769		if (ret)
6770			goto fail;
6771
6772		ret = intel_atomic_check_bigjoiner(state, crtc);
6773		if (ret)
6774			goto fail;
6775	}
6776
6777	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6778					    new_crtc_state, i) {
6779		if (!intel_crtc_needs_modeset(new_crtc_state))
6780			continue;
6781
6782		if (new_crtc_state->hw.enable) {
6783			ret = intel_modeset_pipe_config_late(state, crtc);
6784			if (ret)
6785				goto fail;
6786		}
6787
6788		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
6789	}
6790
	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a full modeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a full modeset, in which case all slaves also need a full
	 * modeset. Similarly, in case of port synced CRTCs, if one of the
	 * synced CRTCs needs a full modeset, all the other synced CRTCs
	 * are forced to do a full modeset as well.
	 */
6802	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6803		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
6804			continue;
6805
6806		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
6807			enum transcoder master = new_crtc_state->mst_master_transcoder;
6808
6809			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
6810				new_crtc_state->uapi.mode_changed = true;
6811				new_crtc_state->update_pipe = false;
6812			}
6813		}
6814
6815		if (is_trans_port_sync_mode(new_crtc_state)) {
6816			u8 trans = new_crtc_state->sync_mode_slaves_mask;
6817
6818			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
6819				trans |= BIT(new_crtc_state->master_transcoder);
6820
6821			if (intel_cpu_transcoders_need_modeset(state, trans)) {
6822				new_crtc_state->uapi.mode_changed = true;
6823				new_crtc_state->update_pipe = false;
6824			}
6825		}
6826
6827		if (new_crtc_state->bigjoiner_pipes) {
6828			if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
6829				new_crtc_state->uapi.mode_changed = true;
6830				new_crtc_state->update_pipe = false;
6831			}
6832		}
6833	}
6834
6835	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6836					    new_crtc_state, i) {
6837		if (!intel_crtc_needs_modeset(new_crtc_state))
6838			continue;
6839
6840		any_ms = true;
6841
6842		intel_release_shared_dplls(state, crtc);
6843	}
6844
6845	if (any_ms && !check_digital_port_conflicts(state)) {
6846		drm_dbg_kms(&dev_priv->drm,
6847			    "rejecting conflicting digital port configuration\n");
6848		ret = -EINVAL;
6849		goto fail;
6850	}
6851
6852	ret = drm_dp_mst_atomic_check(&state->base);
6853	if (ret)
6854		goto fail;
6855
6856	ret = intel_atomic_check_planes(state);
6857	if (ret)
6858		goto fail;
6859
6860	ret = intel_compute_global_watermarks(state);
6861	if (ret)
6862		goto fail;
6863
6864	ret = intel_bw_atomic_check(state);
6865	if (ret)
6866		goto fail;
6867
6868	ret = intel_cdclk_atomic_check(state, &any_ms);
6869	if (ret)
6870		goto fail;
6871
6872	if (intel_any_crtc_needs_modeset(state))
6873		any_ms = true;
6874
6875	if (any_ms) {
6876		ret = intel_modeset_checks(state);
6877		if (ret)
6878			goto fail;
6879
6880		ret = intel_modeset_calc_cdclk(state);
6881		if (ret)
6882			return ret;
6883	}
6884
6885	ret = intel_atomic_check_crtcs(state);
6886	if (ret)
6887		goto fail;
6888
6889	ret = intel_fbc_atomic_check(state);
6890	if (ret)
6891		goto fail;
6892
6893	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6894					    new_crtc_state, i) {
6895		intel_color_assert_luts(new_crtc_state);
6896
6897		ret = intel_async_flip_check_hw(state, crtc);
6898		if (ret)
6899			goto fail;
6900
6901		/* Either full modeset or fastset (or neither), never both */
6902		drm_WARN_ON(&dev_priv->drm,
6903			    intel_crtc_needs_modeset(new_crtc_state) &&
6904			    intel_crtc_needs_fastset(new_crtc_state));
6905
6906		if (!intel_crtc_needs_modeset(new_crtc_state) &&
6907		    !intel_crtc_needs_fastset(new_crtc_state))
6908			continue;
6909
6910		intel_crtc_state_dump(new_crtc_state, state,
6911				      intel_crtc_needs_modeset(new_crtc_state) ?
6912				      "modeset" : "fastset");
6913	}
6914
6915	return 0;
6916
6917 fail:
6918	if (ret == -EDEADLK)
6919		return ret;
6920
6921	/*
6922	 * FIXME would probably be nice to know which crtc specifically
6923	 * caused the failure, in cases where we can pinpoint it.
6924	 */
6925	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6926					    new_crtc_state, i)
6927		intel_crtc_state_dump(new_crtc_state, state, "failed");
6928
6929	return ret;
6930}
6931
6932static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
6933{
6934	struct intel_crtc_state *crtc_state;
6935	struct intel_crtc *crtc;
6936	int i, ret;
6937
6938	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
6939	if (ret < 0)
6940		return ret;
6941
6942	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6943		if (intel_crtc_needs_color_update(crtc_state))
6944			intel_dsb_prepare(crtc_state);
6945	}
6946
6947	return 0;
6948}
6949
6950void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
6951				  struct intel_crtc_state *crtc_state)
6952{
6953	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6954
6955	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
6956		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
6957
6958	if (crtc_state->has_pch_encoder) {
6959		enum pipe pch_transcoder =
6960			intel_crtc_pch_transcoder(crtc);
6961
6962		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
6963	}
6964}
6965
6966static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
6967			       const struct intel_crtc_state *new_crtc_state)
6968{
6969	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6970	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6971
6972	/*
6973	 * Update pipe size and adjust fitter if needed: the reason for this is
6974	 * that in compute_mode_changes we check the native mode (not the pfit
6975	 * mode) to see if we can flip rather than do a full mode set. In the
6976	 * fastboot case, we'll flip, but if we don't update the pipesrc and
6977	 * pfit state, we'll end up with a big fb scanned out into the wrong
6978	 * sized surface.
6979	 */
6980	intel_set_pipe_src_size(new_crtc_state);
6981
6982	/* on skylake this is done by detaching scalers */
6983	if (DISPLAY_VER(dev_priv) >= 9) {
6984		if (new_crtc_state->pch_pfit.enabled)
6985			skl_pfit_enable(new_crtc_state);
6986	} else if (HAS_PCH_SPLIT(dev_priv)) {
6987		if (new_crtc_state->pch_pfit.enabled)
6988			ilk_pfit_enable(new_crtc_state);
6989		else if (old_crtc_state->pch_pfit.enabled)
6990			ilk_pfit_disable(old_crtc_state);
6991	}
6992
	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjusted pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
7001	if (DISPLAY_VER(dev_priv) >= 9 ||
7002	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
7003		hsw_set_linetime_wm(new_crtc_state);
7004
7005	if (new_crtc_state->seamless_m_n)
7006		intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
7007					       &new_crtc_state->dp_m_n);
7008}
7009
7010static void commit_pipe_pre_planes(struct intel_atomic_state *state,
7011				   struct intel_crtc *crtc)
7012{
7013	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7014	const struct intel_crtc_state *old_crtc_state =
7015		intel_atomic_get_old_crtc_state(state, crtc);
7016	const struct intel_crtc_state *new_crtc_state =
7017		intel_atomic_get_new_crtc_state(state, crtc);
7018	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7019
	/*
	 * During modesets the pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
7024	if (!modeset) {
7025		if (intel_crtc_needs_color_update(new_crtc_state))
7026			intel_color_commit_arm(new_crtc_state);
7027
7028		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
7029			bdw_set_pipemisc(new_crtc_state);
7030
7031		if (intel_crtc_needs_fastset(new_crtc_state))
7032			intel_pipe_fastset(old_crtc_state, new_crtc_state);
7033	}
7034
7035	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
7036
7037	intel_atomic_update_watermarks(state, crtc);
7038}
7039
7040static void commit_pipe_post_planes(struct intel_atomic_state *state,
7041				    struct intel_crtc *crtc)
7042{
7043	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7044	const struct intel_crtc_state *new_crtc_state =
7045		intel_atomic_get_new_crtc_state(state, crtc);
7046
7047	/*
7048	 * Disable the scaler(s) after the plane(s) so that we don't
7049	 * get a catastrophic underrun even if the two operations
7050	 * end up happening in two different frames.
7051	 */
7052	if (DISPLAY_VER(dev_priv) >= 9 &&
7053	    !intel_crtc_needs_modeset(new_crtc_state))
7054		skl_detach_scalers(new_crtc_state);
7055}
7056
7057static void intel_enable_crtc(struct intel_atomic_state *state,
7058			      struct intel_crtc *crtc)
7059{
7060	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7061	const struct intel_crtc_state *new_crtc_state =
7062		intel_atomic_get_new_crtc_state(state, crtc);
7063
7064	if (!intel_crtc_needs_modeset(new_crtc_state))
7065		return;
7066
7067	intel_crtc_update_active_timings(new_crtc_state);
7068
7069	dev_priv->display.funcs.display->crtc_enable(state, crtc);
7070
7071	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
7072		return;
7073
7074	/* vblanks work again, re-enable pipe CRC. */
7075	intel_crtc_enable_pipe_crc(crtc);
7076}
7077
7078static void intel_update_crtc(struct intel_atomic_state *state,
7079			      struct intel_crtc *crtc)
7080{
7081	struct drm_i915_private *i915 = to_i915(state->base.dev);
7082	const struct intel_crtc_state *old_crtc_state =
7083		intel_atomic_get_old_crtc_state(state, crtc);
7084	struct intel_crtc_state *new_crtc_state =
7085		intel_atomic_get_new_crtc_state(state, crtc);
7086	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7087
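	/* For fastsets, commit the changes that don't require a full modeset */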
7088	if (!modeset) {
7089		if (new_crtc_state->preload_luts &&
7090		    intel_crtc_needs_color_update(new_crtc_state))
7091			intel_color_load_luts(new_crtc_state);
7092
7093		intel_pre_plane_update(state, crtc);
7094
7095		if (intel_crtc_needs_fastset(new_crtc_state))
7096			intel_encoders_update_pipe(state, crtc);
7097
7098		if (DISPLAY_VER(i915) >= 11 &&
7099		    intel_crtc_needs_fastset(new_crtc_state))
7100			icl_set_pipe_chicken(new_crtc_state);
7101	}
7102
7103	intel_fbc_update(state, crtc);
7104
7105	if (!modeset &&
7106	    intel_crtc_needs_color_update(new_crtc_state))
7107		intel_color_commit_noarm(new_crtc_state);
7108
7109	intel_crtc_planes_update_noarm(state, crtc);
7110
7111	/* Perform vblank evasion around commit operation */
7112	intel_pipe_update_start(new_crtc_state);
7113
7114	commit_pipe_pre_planes(state, crtc);
7115
7116	intel_crtc_planes_update_arm(state, crtc);
7117
7118	commit_pipe_post_planes(state, crtc);
7119
7120	intel_pipe_update_end(new_crtc_state);
7121
7122	/*
7123	 * We usually enable FIFO underrun interrupts as part of the
7124	 * CRTC enable sequence during modesets.  But when we inherit a
7125	 * valid pipe configuration from the BIOS we need to take care
7126	 * of enabling them on the CRTC's first fastset.
7127	 */
7128	if (intel_crtc_needs_fastset(new_crtc_state) && !modeset &&
7129	    old_crtc_state->inherited)
7130		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
7131}
7132
7133static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
7134					  struct intel_crtc_state *old_crtc_state,
7135					  struct intel_crtc_state *new_crtc_state,
7136					  struct intel_crtc *crtc)
7137{
7138	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7139
7140	/*
7141	 * We need to disable pipe CRC before disabling the pipe,
7142	 * or we race against vblank off.
7143	 */
7144	intel_crtc_disable_pipe_crc(crtc);
7145
7146	dev_priv->display.funcs.display->crtc_disable(state, crtc);
7147	crtc->active = false;
7148	intel_fbc_disable(crtc);
7149	intel_disable_shared_dpll(old_crtc_state);
7150
7151	if (!new_crtc_state->hw.active)
7152		intel_initial_watermarks(state, crtc);
7153}
7154
7155static void intel_commit_modeset_disables(struct intel_atomic_state *state)
7156{
7157	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7158	struct intel_crtc *crtc;
7159	u32 handled = 0;
7160	int i;
7161
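	/* First disable the planes on every pipe that is about to be modeset */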
7162	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7163					    new_crtc_state, i) {
7164		if (!intel_crtc_needs_modeset(new_crtc_state))
7165			continue;
7166
7167		if (!old_crtc_state->hw.active)
7168			continue;
7169
7170		intel_pre_plane_update(state, crtc);
7171		intel_crtc_disable_planes(state, crtc);
7172	}
7173
7174	/* Only disable port sync and MST slaves */
7175	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7176					    new_crtc_state, i) {
7177		if (!intel_crtc_needs_modeset(new_crtc_state))
7178			continue;
7179
7180		if (!old_crtc_state->hw.active)
7181			continue;
7182
		/*
		 * In case of Transcoder Port Sync, master and slave CRTCs can
		 * be assigned in any order, and we need to make sure that the
		 * slave CRTCs are disabled first and then the master CRTC,
		 * since slave vblanks are masked until the master's vblank.
		 */
7188		if (!is_trans_port_sync_slave(old_crtc_state) &&
7189		    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
7190		    !intel_crtc_is_bigjoiner_slave(old_crtc_state))
7191			continue;
7192
7193		intel_old_crtc_state_disables(state, old_crtc_state,
7194					      new_crtc_state, crtc);
7195		handled |= BIT(crtc->pipe);
7196	}
7197
7198	/* Disable everything else left on */
7199	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7200					    new_crtc_state, i) {
7201		if (!intel_crtc_needs_modeset(new_crtc_state) ||
7202		    (handled & BIT(crtc->pipe)))
7203			continue;
7204
7205		if (!old_crtc_state->hw.active)
7206			continue;
7207
7208		intel_old_crtc_state_disables(state, old_crtc_state,
7209					      new_crtc_state, crtc);
7210	}
7211}
7212
7213static void intel_commit_modeset_enables(struct intel_atomic_state *state)
7214{
7215	struct intel_crtc_state *new_crtc_state;
7216	struct intel_crtc *crtc;
7217	int i;
7218
7219	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7220		if (!new_crtc_state->hw.active)
7221			continue;
7222
7223		intel_enable_crtc(state, crtc);
7224		intel_update_crtc(state, crtc);
7225	}
7226}
7227
7228static void skl_commit_modeset_enables(struct intel_atomic_state *state)
7229{
7230	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7231	struct intel_crtc *crtc;
7232	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7233	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
7234	u8 update_pipes = 0, modeset_pipes = 0;
7235	int i;
7236
7237	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7238		enum pipe pipe = crtc->pipe;
7239
7240		if (!new_crtc_state->hw.active)
7241			continue;
7242
		/* ignore allocations for CRTCs that have been turned off. */
7244		if (!intel_crtc_needs_modeset(new_crtc_state)) {
7245			entries[pipe] = old_crtc_state->wm.skl.ddb;
7246			update_pipes |= BIT(pipe);
7247		} else {
7248			modeset_pipes |= BIT(pipe);
7249		}
7250	}
7251
	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their DDB allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first let's update all pipes that do not need a full modeset, as
	 * those don't have any external dependencies.
	 */
7261	while (update_pipes) {
7262		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7263						    new_crtc_state, i) {
7264			enum pipe pipe = crtc->pipe;
7265
7266			if ((update_pipes & BIT(pipe)) == 0)
7267				continue;
7268
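			/*
			 * Defer this pipe while its new DDB allocation still
			 * overlaps one of the not-yet-updated pipes.
			 */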
7269			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
7270							entries, I915_MAX_PIPES, pipe))
7271				continue;
7272
7273			entries[pipe] = new_crtc_state->wm.skl.ddb;
7274			update_pipes &= ~BIT(pipe);
7275
7276			intel_update_crtc(state, crtc);
7277
			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating,
			 * then we need to wait for a vblank to pass for the
			 * new DDB allocation to take effect.
			 */
7284			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
7285						 &old_crtc_state->wm.skl.ddb) &&
7286			    (update_pipes | modeset_pipes))
7287				intel_crtc_wait_for_next_vblank(crtc);
7288		}
7289	}
7290
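	/* The modeset pipes get their plane updates after being enabled */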
7291	update_pipes = modeset_pipes;
7292
	/*
	 * Enable all pipes that need a modeset and do not depend on other
	 * pipes.
	 */
7297	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7298		enum pipe pipe = crtc->pipe;
7299
7300		if ((modeset_pipes & BIT(pipe)) == 0)
7301			continue;
7302
7303		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
7304		    is_trans_port_sync_master(new_crtc_state) ||
7305		    intel_crtc_is_bigjoiner_master(new_crtc_state))
7306			continue;
7307
7308		modeset_pipes &= ~BIT(pipe);
7309
7310		intel_enable_crtc(state, crtc);
7311	}
7312
	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves, port sync masters and big joiner masters.
	 */
7317	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7318		enum pipe pipe = crtc->pipe;
7319
7320		if ((modeset_pipes & BIT(pipe)) == 0)
7321			continue;
7322
7323		modeset_pipes &= ~BIT(pipe);
7324
7325		intel_enable_crtc(state, crtc);
7326	}
7327
7328	/*
7329	 * Finally we do the plane updates/etc. for all pipes that got enabled.
7330	 */
7331	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7332		enum pipe pipe = crtc->pipe;
7333
7334		if ((update_pipes & BIT(pipe)) == 0)
7335			continue;
7336
7337		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
7338									entries, I915_MAX_PIPES, pipe));
7339
7340		entries[pipe] = new_crtc_state->wm.skl.ddb;
7341		update_pipes &= ~BIT(pipe);
7342
7343		intel_update_crtc(state, crtc);
7344	}
7345
7346	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
7347	drm_WARN_ON(&dev_priv->drm, update_pipes);
7348}
7349
7350static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
7351{
7352	struct intel_atomic_state *state, *next;
7353	struct llist_node *freed;
7354
7355	freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
7356	llist_for_each_entry_safe(state, next, freed, freed)
7357		drm_atomic_state_put(&state->base);
7358}
7359
7360static void intel_atomic_helper_free_state_worker(struct work_struct *work)
7361{
7362	struct drm_i915_private *dev_priv =
7363		container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
7364
7365	intel_atomic_helper_free_state(dev_priv);
7366}
7367
7368static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
7369{
7370	struct wait_queue_entry wait_fence, wait_reset;
7371	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
7372
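	/*
	 * Wait for the commit fence to signal, but also wake up if a GPU
	 * reset comes along and needs the modeset locks.
	 */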
7373	init_wait_entry(&wait_fence, 0);
7374	init_wait_entry(&wait_reset, 0);
7375	for (;;) {
7376		prepare_to_wait(&intel_state->commit_ready.wait,
7377				&wait_fence, TASK_UNINTERRUPTIBLE);
7378		prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
7379					      I915_RESET_MODESET),
7380				&wait_reset, TASK_UNINTERRUPTIBLE);
7381
7382
7383		if (i915_sw_fence_done(&intel_state->commit_ready) ||
7384		    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
7385			break;
7386
7387		schedule();
7388	}
7389	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
7390	finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
7391				  I915_RESET_MODESET),
7392		    &wait_reset);
7393}
7394
7395static void intel_cleanup_dsbs(struct intel_atomic_state *state)
7396{
7397	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7398	struct intel_crtc *crtc;
7399	int i;
7400
7401	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7402					    new_crtc_state, i)
7403		intel_dsb_cleanup(old_crtc_state);
7404}
7405
7406static void intel_atomic_cleanup_work(struct work_struct *work)
7407{
7408	struct intel_atomic_state *state =
7409		container_of(work, struct intel_atomic_state, base.commit_work);
7410	struct drm_i915_private *i915 = to_i915(state->base.dev);
7411
7412	intel_cleanup_dsbs(state);
7413	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
7414	drm_atomic_helper_commit_cleanup_done(&state->base);
7415	drm_atomic_state_put(&state->base);
7416
7417	intel_atomic_helper_free_state(i915);
7418}
7419
7420static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
7421{
7422	struct drm_i915_private *i915 = to_i915(state->base.dev);
7423	struct intel_plane *plane;
7424	struct intel_plane_state *plane_state;
7425	int i;
7426
7427	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7428		struct drm_framebuffer *fb = plane_state->hw.fb;
7429		int cc_plane;
7430		int ret;
7431
7432		if (!fb)
7433			continue;
7434
7435		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
7436		if (cc_plane < 0)
7437			continue;
7438
		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requires this value to be located in the fb at
		 * offset 0 of the cc plane; plane #2 on previous generations,
		 * or plane #1 for flat CCS):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that the FB obj
		 * is pinned, and the caller made sure that the object is synced
		 * wrt. the related color clear value GPU write on it.
		 */
7454		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
7455						     fb->offsets[cc_plane] + 16,
7456						     &plane_state->ccval,
7457						     sizeof(plane_state->ccval));
7458		/* The above could only fail if the FB obj has an unexpected backing store type. */
7459		drm_WARN_ON(&i915->drm, ret);
7460	}
7461}
7462
7463static void intel_atomic_commit_tail(struct intel_atomic_state *state)
7464{
7465	struct drm_device *dev = state->base.dev;
7466	struct drm_i915_private *dev_priv = to_i915(dev);
7467	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7468	struct intel_crtc *crtc;
7469	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
7470	intel_wakeref_t wakeref = 0;
7471	int i;
7472
7473	intel_atomic_commit_fence_wait(state);
7474
7475	drm_atomic_helper_wait_for_dependencies(&state->base);
7476	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
7477
7478	if (state->modeset)
7479		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
7480
7481	intel_atomic_prepare_plane_clear_colors(state);
7482
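	/*
	 * Grab the power domains each modified CRTC needs for the duration
	 * of the update; they are released again near the end of this
	 * function.
	 */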
7483	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7484					    new_crtc_state, i) {
7485		if (intel_crtc_needs_modeset(new_crtc_state) ||
7486		    intel_crtc_needs_fastset(new_crtc_state))
7487			intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
7488	}
7489
7490	intel_commit_modeset_disables(state);
7491
7492	/* FIXME: Eventually get rid of our crtc->config pointer */
7493	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7494		crtc->config = new_crtc_state;
7495
7496	if (state->modeset) {
7497		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
7498
7499		intel_set_cdclk_pre_plane_update(state);
7500
7501		intel_modeset_verify_disabled(dev_priv, state);
7502	}
7503
7504	intel_sagv_pre_plane_update(state);
7505
7506	/* Complete the events for pipes that have now been disabled */
7507	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7508		bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7509
7511		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
7512			spin_lock_irq(&dev->event_lock);
7513			drm_crtc_send_vblank_event(&crtc->base,
7514						   new_crtc_state->uapi.event);
7515			spin_unlock_irq(&dev->event_lock);
7516
7517			new_crtc_state->uapi.event = NULL;
7518		}
7519	}
7520
7521	intel_encoders_update_prepare(state);
7522
7523	intel_dbuf_pre_plane_update(state);
7524	intel_mbus_dbox_update(state);
7525
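	/* Async flips complete via the flip done interrupt, so arm it here */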
7526	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7527		if (new_crtc_state->do_async_flip)
7528			intel_crtc_enable_flip_done(state, crtc);
7529	}
7530
	/* Now enable the clocks, planes, pipes, and connectors that we set up. */
7532	dev_priv->display.funcs.display->commit_modeset_enables(state);
7533
7534	intel_encoders_update_complete(state);
7535
7536	if (state->modeset)
7537		intel_set_cdclk_post_plane_update(state);
7538
7539	intel_wait_for_vblank_workers(state);
7540
	/*
	 * FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously_
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
7550	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
7551
7552	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7553		if (new_crtc_state->do_async_flip)
7554			intel_crtc_disable_flip_done(state, crtc);
7555	}
7556
7557	/*
7558	 * Now that the vblank has passed, we can go ahead and program the
7559	 * optimal watermarks on platforms that need two-step watermark
7560	 * programming.
7561	 *
7562	 * TODO: Move this (and other cleanup) to an async worker eventually.
7563	 */
7564	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7565					    new_crtc_state, i) {
7566		/*
7567		 * Gen2 reports pipe underruns whenever all planes are disabled.
7568		 * So re-enable underrun reporting after some planes get enabled.
7569		 *
7570		 * We do this before .optimize_watermarks() so that we have a
7571		 * chance of catching underruns with the intermediate watermarks
7572		 * vs. the new plane configuration.
7573		 */
7574		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
7575			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
7576
7577		intel_optimize_watermarks(state, crtc);
7578	}
7579
7580	intel_dbuf_post_plane_update(state);
7581	intel_psr_post_plane_update(state);
7582
7583	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7584		intel_post_plane_update(state, crtc);
7585
7586		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
7587
7588		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
7589
7590		/*
7591		 * Activate DRRS after state readout to avoid
7592		 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
7593		 */
7594		intel_drrs_activate(new_crtc_state);
7595
		/*
		 * DSB cleanup is done in cleanup_work, aligning with
		 * framebuffer cleanup. So copy and reset the DSB structure
		 * here to sync with commit_done, and do the actual DSB
		 * cleanup later in cleanup_work.
		 */
7601		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
7602	}
7603
7604	/* Underruns don't always raise interrupts, so check manually */
7605	intel_check_cpu_fifo_underruns(dev_priv);
7606	intel_check_pch_fifo_underruns(dev_priv);
7607
7608	if (state->modeset)
7609		intel_verify_planes(state);
7610
7611	intel_sagv_post_plane_update(state);
7612
7613	drm_atomic_helper_commit_hw_done(&state->base);
7614
7615	if (state->modeset) {
7616		/* As one of the primary mmio accessors, KMS has a high
7617		 * likelihood of triggering bugs in unclaimed access. After we
7618		 * finish modesetting, see if an error has been flagged, and if
7619		 * so enable debugging for the next modeset - and hope we catch
7620		 * the culprit.
7621		 */
7622		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
7623		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
7624	}
7625	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7626
	/*
	 * Defer the cleanup of the old state to a separate worker so as not
	 * to impede the current task (userspace for blocking modesets), which
	 * is executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
7635	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
7636	queue_work(system_highpri_wq, &state->base.commit_work);
7637}
7638
7639static void intel_atomic_commit_work(struct work_struct *work)
7640{
7641	struct intel_atomic_state *state =
7642		container_of(work, struct intel_atomic_state, base.commit_work);
7643
7644	intel_atomic_commit_tail(state);
7645}
7646
7647static int
7648intel_atomic_commit_ready(struct i915_sw_fence *fence,
7649			  enum i915_sw_fence_notify notify)
7650{
7651	struct intel_atomic_state *state =
7652		container_of(fence, struct intel_atomic_state, commit_ready);
7653
7654	switch (notify) {
7655	case FENCE_COMPLETE:
7656		/* we do blocking waits in the worker, nothing to do here */
7657		break;
7658	case FENCE_FREE:
7659		{
7660			struct intel_atomic_helper *helper =
7661				&to_i915(state->base.dev)->display.atomic_helper;
7662
7663			if (llist_add(&state->freed, &helper->free_list))
7664				schedule_work(&helper->free_work);
7665			break;
7666		}
7667	}
7668
7669	return NOTIFY_DONE;
7670}
7671
7672static void intel_atomic_track_fbs(struct intel_atomic_state *state)
7673{
7674	struct intel_plane_state *old_plane_state, *new_plane_state;
7675	struct intel_plane *plane;
7676	int i;
7677
7678	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7679					     new_plane_state, i)
7680		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
7681					to_intel_frontbuffer(new_plane_state->hw.fb),
7682					plane->frontbuffer_bit);
7683}
7684
7685static int intel_atomic_commit(struct drm_device *dev,
7686			       struct drm_atomic_state *_state,
7687			       bool nonblock)
7688{
7689	struct intel_atomic_state *state = to_intel_atomic_state(_state);
7690	struct drm_i915_private *dev_priv = to_i915(dev);
7691	int ret = 0;
7692
7693	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
7694
7695	drm_atomic_state_get(&state->base);
7696	i915_sw_fence_init(&state->commit_ready,
7697			   intel_atomic_commit_ready);
7698
7699	/*
7700	 * The intel_legacy_cursor_update() fast path takes care
7701	 * of avoiding the vblank waits for simple cursor
7702	 * movement and flips. For cursor on/off and size changes,
7703	 * we want to perform the vblank waits so that watermark
7704	 * updates happen during the correct frames. Gen9+ have
7705	 * double buffered watermarks and so shouldn't need this.
7706	 *
7707	 * Unset state->legacy_cursor_update before the call to
7708	 * drm_atomic_helper_setup_commit() because otherwise
7709	 * drm_atomic_helper_wait_for_flip_done() is a noop and
7710	 * we get FIFO underruns because we didn't wait
7711	 * for vblank.
7712	 *
7713	 * FIXME doing watermarks and fb cleanup from a vblank worker
7714	 * (assuming we had any) would solve these problems.
7715	 */
7716	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
7717		struct intel_crtc_state *new_crtc_state;
7718		struct intel_crtc *crtc;
7719		int i;
7720
7721		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7722			if (new_crtc_state->wm.need_postvbl_update ||
7723			    new_crtc_state->update_wm_post)
7724				state->base.legacy_cursor_update = false;
7725	}
7726
7727	ret = intel_atomic_prepare_commit(state);
7728	if (ret) {
7729		drm_dbg_atomic(&dev_priv->drm,
7730			       "Preparing state failed with %i\n", ret);
7731		i915_sw_fence_commit(&state->commit_ready);
7732		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7733		return ret;
7734	}
7735
7736	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
7737	if (!ret)
7738		ret = drm_atomic_helper_swap_state(&state->base, true);
7739	if (!ret)
7740		intel_atomic_swap_global_state(state);
7741
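	/*
	 * On failure, tear down the DSBs prepared earlier by hand, since
	 * commit_tail will never run to clean them up.
	 */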
7742	if (ret) {
7743		struct intel_crtc_state *new_crtc_state;
7744		struct intel_crtc *crtc;
7745		int i;
7746
7747		i915_sw_fence_commit(&state->commit_ready);
7748
7749		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7750			intel_dsb_cleanup(new_crtc_state);
7751
7752		drm_atomic_helper_cleanup_planes(dev, &state->base);
7753		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7754		return ret;
7755	}
7756	intel_shared_dpll_swap_state(state);
7757	intel_atomic_track_fbs(state);
7758
7759	drm_atomic_state_get(&state->base);
7760	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
7761
7762	i915_sw_fence_commit(&state->commit_ready);
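	/*
	 * Nonblocking modesets go to the ordered modeset workqueue and
	 * plain nonblocking flips to the flip workqueue, while blocking
	 * commits run inline after flushing any pending modesets.
	 */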
7763	if (nonblock && state->modeset) {
7764		queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
7765	} else if (nonblock) {
7766		queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
7767	} else {
7768		if (state->modeset)
7769			flush_workqueue(dev_priv->display.wq.modeset);
7770		intel_atomic_commit_tail(state);
7771	}
7772
7773	return 0;
7774}
7775
7776/**
7777 * intel_plane_destroy - destroy a plane
7778 * @plane: plane to destroy
7779 *
7780 * Common destruction function for all types of planes (primary, cursor,
7781 * sprite).
7782 */
7783void intel_plane_destroy(struct drm_plane *plane)
7784{
7785	drm_plane_cleanup(plane);
7786	kfree(to_intel_plane(plane));
7787}
7788
7789static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
7790{
7791	struct intel_plane *plane;
7792
7793	for_each_intel_plane(&dev_priv->drm, plane) {
7794		struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
7795							      plane->pipe);
7796
7797		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
7798	}
7799}
7800
7802int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
7803				      struct drm_file *file)
7804{
7805	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7806	struct drm_crtc *drmmode_crtc;
7807	struct intel_crtc *crtc;
7808
7809	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
7810	if (!drmmode_crtc)
7811		return -ENOENT;
7812
7813	crtc = to_intel_crtc(drmmode_crtc);
7814	pipe_from_crtc_id->pipe = crtc->pipe;
7815
7816	return 0;
7817}
7818
7819static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
7820{
7821	struct drm_device *dev = encoder->base.dev;
7822	struct intel_encoder *source_encoder;
7823	u32 possible_clones = 0;
7824
7825	for_each_intel_encoder(dev, source_encoder) {
7826		if (encoders_cloneable(encoder, source_encoder))
7827			possible_clones |= drm_encoder_mask(&source_encoder->base);
7828	}
7829
7830	return possible_clones;
7831}
7832
7833static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
7834{
7835	struct drm_device *dev = encoder->base.dev;
7836	struct intel_crtc *crtc;
7837	u32 possible_crtcs = 0;
7838
7839	for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
7840		possible_crtcs |= drm_crtc_mask(&crtc->base);
7841
7842	return possible_crtcs;
7843}
7844
7845static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
7846{
7847	if (!IS_MOBILE(dev_priv))
7848		return false;
7849
7850	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
7851		return false;
7852
7853	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
7854		return false;
7855
7856	return true;
7857}
7858
7859static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
7860{
7861	if (DISPLAY_VER(dev_priv) >= 9)
7862		return false;
7863
7864	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
7865		return false;
7866
7867	if (HAS_PCH_LPT_H(dev_priv) &&
7868	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
7869		return false;
7870
7871	/* DDI E can't be used if DDI A requires 4 lanes */
7872	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
7873		return false;
7874
7875	if (!dev_priv->display.vbt.int_crt_support)
7876		return false;
7877
7878	return true;
7879}
7880
7881static void intel_setup_outputs(struct drm_i915_private *dev_priv)
7882{
7883	struct intel_encoder *encoder;
7884	bool dpd_is_edp = false;
7885
7886	intel_pps_unlock_regs_wa(dev_priv);
7887
7888	if (!HAS_DISPLAY(dev_priv))
7889		return;
7890
7891	if (IS_DG2(dev_priv)) {
7892		intel_ddi_init(dev_priv, PORT_A);
7893		intel_ddi_init(dev_priv, PORT_B);
7894		intel_ddi_init(dev_priv, PORT_C);
7895		intel_ddi_init(dev_priv, PORT_D_XELPD);
7896		intel_ddi_init(dev_priv, PORT_TC1);
7897	} else if (IS_ALDERLAKE_P(dev_priv)) {
7898		intel_ddi_init(dev_priv, PORT_A);
7899		intel_ddi_init(dev_priv, PORT_B);
7900		intel_ddi_init(dev_priv, PORT_TC1);
7901		intel_ddi_init(dev_priv, PORT_TC2);
7902		intel_ddi_init(dev_priv, PORT_TC3);
7903		intel_ddi_init(dev_priv, PORT_TC4);
7904		icl_dsi_init(dev_priv);
7905	} else if (IS_ALDERLAKE_S(dev_priv)) {
7906		intel_ddi_init(dev_priv, PORT_A);
7907		intel_ddi_init(dev_priv, PORT_TC1);
7908		intel_ddi_init(dev_priv, PORT_TC2);
7909		intel_ddi_init(dev_priv, PORT_TC3);
7910		intel_ddi_init(dev_priv, PORT_TC4);
7911	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
7912		intel_ddi_init(dev_priv, PORT_A);
7913		intel_ddi_init(dev_priv, PORT_B);
7914		intel_ddi_init(dev_priv, PORT_TC1);
7915		intel_ddi_init(dev_priv, PORT_TC2);
7916	} else if (DISPLAY_VER(dev_priv) >= 12) {
7917		intel_ddi_init(dev_priv, PORT_A);
7918		intel_ddi_init(dev_priv, PORT_B);
7919		intel_ddi_init(dev_priv, PORT_TC1);
7920		intel_ddi_init(dev_priv, PORT_TC2);
7921		intel_ddi_init(dev_priv, PORT_TC3);
7922		intel_ddi_init(dev_priv, PORT_TC4);
7923		intel_ddi_init(dev_priv, PORT_TC5);
7924		intel_ddi_init(dev_priv, PORT_TC6);
7925		icl_dsi_init(dev_priv);
7926	} else if (IS_JSL_EHL(dev_priv)) {
7927		intel_ddi_init(dev_priv, PORT_A);
7928		intel_ddi_init(dev_priv, PORT_B);
7929		intel_ddi_init(dev_priv, PORT_C);
7930		intel_ddi_init(dev_priv, PORT_D);
7931		icl_dsi_init(dev_priv);
7932	} else if (DISPLAY_VER(dev_priv) == 11) {
7933		intel_ddi_init(dev_priv, PORT_A);
7934		intel_ddi_init(dev_priv, PORT_B);
7935		intel_ddi_init(dev_priv, PORT_C);
7936		intel_ddi_init(dev_priv, PORT_D);
7937		intel_ddi_init(dev_priv, PORT_E);
7938		intel_ddi_init(dev_priv, PORT_F);
7939		icl_dsi_init(dev_priv);
7940	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
7941		intel_ddi_init(dev_priv, PORT_A);
7942		intel_ddi_init(dev_priv, PORT_B);
7943		intel_ddi_init(dev_priv, PORT_C);
7944		vlv_dsi_init(dev_priv);
7945	} else if (DISPLAY_VER(dev_priv) >= 9) {
7946		intel_ddi_init(dev_priv, PORT_A);
7947		intel_ddi_init(dev_priv, PORT_B);
7948		intel_ddi_init(dev_priv, PORT_C);
7949		intel_ddi_init(dev_priv, PORT_D);
7950		intel_ddi_init(dev_priv, PORT_E);
7951	} else if (HAS_DDI(dev_priv)) {
7952		u32 found;
7953
7954		if (intel_ddi_crt_present(dev_priv))
7955			intel_crt_init(dev_priv);
7956
7957		/* Haswell uses DDI functions to detect digital outputs. */
7958		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
7959		if (found)
7960			intel_ddi_init(dev_priv, PORT_A);
7961
7962		found = intel_de_read(dev_priv, SFUSE_STRAP);
7963		if (found & SFUSE_STRAP_DDIB_DETECTED)
7964			intel_ddi_init(dev_priv, PORT_B);
7965		if (found & SFUSE_STRAP_DDIC_DETECTED)
7966			intel_ddi_init(dev_priv, PORT_C);
7967		if (found & SFUSE_STRAP_DDID_DETECTED)
7968			intel_ddi_init(dev_priv, PORT_D);
7969		if (found & SFUSE_STRAP_DDIF_DETECTED)
7970			intel_ddi_init(dev_priv, PORT_F);
7971	} else if (HAS_PCH_SPLIT(dev_priv)) {
7972		int found;
7973
7974		/*
7975		 * intel_edp_init_connector() depends on this completing first,
7976		 * to prevent the registration of both eDP and LVDS and the
7977		 * incorrect sharing of the PPS.
7978		 */
7979		intel_lvds_init(dev_priv);
7980		intel_crt_init(dev_priv);
7981
7982		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
7983
7984		if (ilk_has_edp_a(dev_priv))
7985			g4x_dp_init(dev_priv, DP_A, PORT_A);
7986
7987		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB is multiplexed with HDMIB */
7989			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
7990			if (!found)
7991				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
7992			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
7993				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
7994		}
7995
7996		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
7997			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
7998
7999		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
8000			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
8001
8002		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
8003			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
8004
8005		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
8006			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
8007	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
8008		bool has_edp, has_port;
8009
8010		if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
8011			intel_crt_init(dev_priv);
8012
8013		/*
8014		 * The DP_DETECTED bit is the latched state of the DDC
8015		 * SDA pin at boot. However since eDP doesn't require DDC
8016		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
8017		 * eDP ports may have been muxed to an alternate function.
8018		 * Thus we can't rely on the DP_DETECTED bit alone to detect
8019		 * eDP ports. Consult the VBT as well as DP_DETECTED to
8020		 * detect eDP ports.
8021		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claims are DP or eDP.
8027		 */
8028		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
8029		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
8030		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
8031			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
8032		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
8033			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
8034
8035		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
8036		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
8037		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
8038			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
8039		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
8040			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
8041
8042		if (IS_CHERRYVIEW(dev_priv)) {
8043			/*
8044			 * eDP not supported on port D,
8045			 * so no need to worry about it
8046			 */
8047			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
8048			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
8049				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
8050			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
8051				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
8052		}
8053
8054		vlv_dsi_init(dev_priv);
8055	} else if (IS_PINEVIEW(dev_priv)) {
8056		intel_lvds_init(dev_priv);
8057		intel_crt_init(dev_priv);
8058	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
8059		bool found = false;
8060
8061		if (IS_MOBILE(dev_priv))
8062			intel_lvds_init(dev_priv);
8063
8064		intel_crt_init(dev_priv);
8065
8066		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
8067			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
8068			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
8069			if (!found && IS_G4X(dev_priv)) {
8070				drm_dbg_kms(&dev_priv->drm,
8071					    "probing HDMI on SDVOB\n");
8072				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
8073			}
8074
8075			if (!found && IS_G4X(dev_priv))
8076				g4x_dp_init(dev_priv, DP_B, PORT_B);
8077		}
8078
		/* Before G4X, SDVOC doesn't have its own detect register */
8080
8081		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
8082			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
8083			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
8084		}
8085
		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
			if (IS_G4X(dev_priv)) {
8089				drm_dbg_kms(&dev_priv->drm,
8090					    "probing HDMI on SDVOC\n");
8091				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
8092			}
8093			if (IS_G4X(dev_priv))
8094				g4x_dp_init(dev_priv, DP_C, PORT_C);
8095		}
8096
8097		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
8098			g4x_dp_init(dev_priv, DP_D, PORT_D);
8099
8100		if (SUPPORTS_TV(dev_priv))
8101			intel_tv_init(dev_priv);
8102	} else if (DISPLAY_VER(dev_priv) == 2) {
8103		if (IS_I85X(dev_priv))
8104			intel_lvds_init(dev_priv);
8105
8106		intel_crt_init(dev_priv);
8107		intel_dvo_init(dev_priv);
8108	}
8109
8110	for_each_intel_encoder(&dev_priv->drm, encoder) {
8111		encoder->base.possible_crtcs =
8112			intel_encoder_possible_crtcs(encoder);
8113		encoder->base.possible_clones =
8114			intel_encoder_possible_clones(encoder);
8115	}
8116
8117	intel_init_pch_refclk(dev_priv);
8118
8119	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
8120}
8121
8122static int max_dotclock(struct drm_i915_private *i915)
8123{
8124	int max_dotclock = i915->max_dotclk_freq;
8125
8126	/* icl+ might use bigjoiner */
8127	if (DISPLAY_VER(i915) >= 11)
8128		max_dotclock *= 2;
8129
8130	return max_dotclock;
8131}
8132
8133static enum drm_mode_status
8134intel_mode_valid(struct drm_device *dev,
8135		 const struct drm_display_mode *mode)
8136{
8137	struct drm_i915_private *dev_priv = to_i915(dev);
8138	int hdisplay_max, htotal_max;
8139	int vdisplay_max, vtotal_max;
8140
8141	/*
8142	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
8143	 * of DBLSCAN modes to the output's mode list when they detect
8144	 * the scaling mode property on the connector. And they don't
8145	 * ask the kernel to validate those modes in any way until
8146	 * modeset time at which point the client gets a protocol error.
8147	 * So in order to not upset those clients we silently ignore the
8148	 * DBLSCAN flag on such connectors. For other connectors we will
8149	 * reject modes with the DBLSCAN flag in encoder->compute_config().
8150	 * And we always reject DBLSCAN modes in connector->mode_valid()
8151	 * as we never want such modes on the connector's mode list.
8152	 */
8153
8154	if (mode->vscan > 1)
8155		return MODE_NO_VSCAN;
8156
8157	if (mode->flags & DRM_MODE_FLAG_HSKEW)
8158		return MODE_H_ILLEGAL;
8159
8160	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
8161			   DRM_MODE_FLAG_NCSYNC |
8162			   DRM_MODE_FLAG_PCSYNC))
8163		return MODE_HSYNC;
8164
8165	if (mode->flags & (DRM_MODE_FLAG_BCAST |
8166			   DRM_MODE_FLAG_PIXMUX |
8167			   DRM_MODE_FLAG_CLKDIV2))
8168		return MODE_BAD;
8169
8170	/*
8171	 * Reject clearly excessive dotclocks early to
8172	 * avoid having to worry about huge integers later.
8173	 */
8174	if (mode->clock > max_dotclock(dev_priv))
8175		return MODE_CLOCK_HIGH;
8176
8177	/* Transcoder timing limits */
8178	if (DISPLAY_VER(dev_priv) >= 11) {
8179		hdisplay_max = 16384;
8180		vdisplay_max = 8192;
8181		htotal_max = 16384;
8182		vtotal_max = 8192;
8183	} else if (DISPLAY_VER(dev_priv) >= 9 ||
8184		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
8185		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
8186		vdisplay_max = 4096;
8187		htotal_max = 8192;
8188		vtotal_max = 8192;
8189	} else if (DISPLAY_VER(dev_priv) >= 3) {
8190		hdisplay_max = 4096;
8191		vdisplay_max = 4096;
8192		htotal_max = 8192;
8193		vtotal_max = 8192;
8194	} else {
8195		hdisplay_max = 2048;
8196		vdisplay_max = 2048;
8197		htotal_max = 4096;
8198		vtotal_max = 4096;
8199	}
8200
8201	if (mode->hdisplay > hdisplay_max ||
8202	    mode->hsync_start > htotal_max ||
8203	    mode->hsync_end > htotal_max ||
8204	    mode->htotal > htotal_max)
8205		return MODE_H_ILLEGAL;
8206
8207	if (mode->vdisplay > vdisplay_max ||
8208	    mode->vsync_start > vtotal_max ||
8209	    mode->vsync_end > vtotal_max ||
8210	    mode->vtotal > vtotal_max)
8211		return MODE_V_ILLEGAL;
8212
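	/* Enforce minimum blanking periods (and a minimum active width on ILK+) */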
8213	if (DISPLAY_VER(dev_priv) >= 5) {
8214		if (mode->hdisplay < 64 ||
8215		    mode->htotal - mode->hdisplay < 32)
8216			return MODE_H_ILLEGAL;
8217
8218		if (mode->vtotal - mode->vdisplay < 5)
8219			return MODE_V_ILLEGAL;
8220	} else {
8221		if (mode->htotal - mode->hdisplay < 32)
8222			return MODE_H_ILLEGAL;
8223
8224		if (mode->vtotal - mode->vdisplay < 3)
8225			return MODE_V_ILLEGAL;
8226	}
8227
8228	/*
8229	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
8230	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
8231	 */
8232	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
8233	    mode->hsync_start == mode->hdisplay)
8234		return MODE_H_ILLEGAL;
8235
8236	return MODE_OK;
8237}
8238
8239enum drm_mode_status
8240intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
8241				const struct drm_display_mode *mode,
8242				bool bigjoiner)
8243{
8244	int plane_width_max, plane_height_max;
8245
8246	/*
8247	 * intel_mode_valid() should be
8248	 * sufficient on older platforms.
8249	 */
8250	if (DISPLAY_VER(dev_priv) < 9)
8251		return MODE_OK;
8252
	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
8258	if (DISPLAY_VER(dev_priv) >= 11) {
8259		plane_width_max = 5120 << bigjoiner;
8260		plane_height_max = 4320;
8261	} else {
8262		plane_width_max = 5120;
8263		plane_height_max = 4096;
8264	}
8265
8266	if (mode->hdisplay > plane_width_max)
8267		return MODE_H_ILLEGAL;
8268
8269	if (mode->vdisplay > plane_height_max)
8270		return MODE_V_ILLEGAL;
8271
8272	return MODE_OK;
8273}
8274
8275static const struct drm_mode_config_funcs intel_mode_funcs = {
8276	.fb_create = intel_user_framebuffer_create,
8277	.get_format_info = intel_fb_get_format_info,
8278	.output_poll_changed = intel_fbdev_output_poll_changed,
8279	.mode_valid = intel_mode_valid,
8280	.atomic_check = intel_atomic_check,
8281	.atomic_commit = intel_atomic_commit,
8282	.atomic_state_alloc = intel_atomic_state_alloc,
8283	.atomic_state_clear = intel_atomic_state_clear,
8284	.atomic_state_free = intel_atomic_state_free,
8285};
8286
8287static const struct intel_display_funcs skl_display_funcs = {
8288	.get_pipe_config = hsw_get_pipe_config,
8289	.crtc_enable = hsw_crtc_enable,
8290	.crtc_disable = hsw_crtc_disable,
8291	.commit_modeset_enables = skl_commit_modeset_enables,
8292	.get_initial_plane_config = skl_get_initial_plane_config,
8293};
8294
8295static const struct intel_display_funcs ddi_display_funcs = {
8296	.get_pipe_config = hsw_get_pipe_config,
8297	.crtc_enable = hsw_crtc_enable,
8298	.crtc_disable = hsw_crtc_disable,
8299	.commit_modeset_enables = intel_commit_modeset_enables,
8300	.get_initial_plane_config = i9xx_get_initial_plane_config,
8301};
8302
8303static const struct intel_display_funcs pch_split_display_funcs = {
8304	.get_pipe_config = ilk_get_pipe_config,
8305	.crtc_enable = ilk_crtc_enable,
8306	.crtc_disable = ilk_crtc_disable,
8307	.commit_modeset_enables = intel_commit_modeset_enables,
8308	.get_initial_plane_config = i9xx_get_initial_plane_config,
8309};
8310
8311static const struct intel_display_funcs vlv_display_funcs = {
8312	.get_pipe_config = i9xx_get_pipe_config,
8313	.crtc_enable = valleyview_crtc_enable,
8314	.crtc_disable = i9xx_crtc_disable,
8315	.commit_modeset_enables = intel_commit_modeset_enables,
8316	.get_initial_plane_config = i9xx_get_initial_plane_config,
8317};
8318
8319static const struct intel_display_funcs i9xx_display_funcs = {
8320	.get_pipe_config = i9xx_get_pipe_config,
8321	.crtc_enable = i9xx_crtc_enable,
8322	.crtc_disable = i9xx_crtc_disable,
8323	.commit_modeset_enables = intel_commit_modeset_enables,
8324	.get_initial_plane_config = i9xx_get_initial_plane_config,
8325};
8326
8327/**
8328 * intel_init_display_hooks - initialize the display modesetting hooks
8329 * @dev_priv: device private
8330 */
8331void intel_init_display_hooks(struct drm_i915_private *dev_priv)
8332{
8333	if (!HAS_DISPLAY(dev_priv))
8334		return;
8335
8336	intel_color_init_hooks(dev_priv);
8337	intel_init_cdclk_hooks(dev_priv);
8338	intel_audio_hooks_init(dev_priv);
8339
8340	intel_dpll_init_clock_hook(dev_priv);
8341
8342	if (DISPLAY_VER(dev_priv) >= 9) {
8343		dev_priv->display.funcs.display = &skl_display_funcs;
8344	} else if (HAS_DDI(dev_priv)) {
8345		dev_priv->display.funcs.display = &ddi_display_funcs;
8346	} else if (HAS_PCH_SPLIT(dev_priv)) {
8347		dev_priv->display.funcs.display = &pch_split_display_funcs;
8348	} else if (IS_CHERRYVIEW(dev_priv) ||
8349		   IS_VALLEYVIEW(dev_priv)) {
8350		dev_priv->display.funcs.display = &vlv_display_funcs;
8351	} else {
8352		dev_priv->display.funcs.display = &i9xx_display_funcs;
8353	}
8354
8355	intel_fdi_init_hook(dev_priv);
8356}
8357
8358void intel_modeset_init_hw(struct drm_i915_private *i915)
8359{
8360	struct intel_cdclk_state *cdclk_state;
8361
8362	if (!HAS_DISPLAY(i915))
8363		return;
8364
8365	cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
8366
8367	intel_update_cdclk(i915);
8368	intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
8369	cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
8370}
8371
8372static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
8373{
8374	struct drm_plane *plane;
8375	struct intel_crtc *crtc;
8376
8377	for_each_intel_crtc(state->dev, crtc) {
8378		struct intel_crtc_state *crtc_state;
8379
8380		crtc_state = intel_atomic_get_crtc_state(state, crtc);
8381		if (IS_ERR(crtc_state))
8382			return PTR_ERR(crtc_state);
8383
8384		if (crtc_state->hw.active) {
8385			/*
8386			 * Preserve the inherited flag to avoid
8387			 * taking the full modeset path.
8388			 */
8389			crtc_state->inherited = true;
8390		}
8391	}
8392
8393	drm_for_each_plane(plane, state->dev) {
8394		struct drm_plane_state *plane_state;
8395
8396		plane_state = drm_atomic_get_plane_state(state, plane);
8397		if (IS_ERR(plane_state))
8398			return PTR_ERR(plane_state);
8399	}
8400
8401	return 0;
8402}
8403
8404/*
8405 * Calculate what we think the watermarks should be for the state we've read
8406 * out of the hardware and then immediately program those watermarks so that
8407 * we ensure the hardware settings match our internal state.
8408 *
 * We can calculate what we think WMs should be by creating a duplicate of the
8410 * current state (which was constructed during hardware readout) and running it
8411 * through the atomic check code to calculate new watermark values in the
8412 * state object.
8413 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.funcs.wm->optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		intel_optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

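/*
 * intel_initial_commit() below follows the same deadlock-avoidance dance
 * as sanitize_watermarks() above. As a reading aid, the canonical shape
 * of that DRM pattern is roughly (a generic sketch, not driver code):
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *	state->acquire_ctx = &ctx;
 *	ret = operation_taking_modeset_locks(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *	drm_atomic_state_put(state);
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */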
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio, infoframes, etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

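/*
 * Set up the drm_mode_config limits for this device: framebuffer size
 * caps that track the render engine surface limits of each display
 * generation, and the platform-specific hardware cursor dimensions.
 */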
static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);
	INIT_LIST_HEAD(&i915->display.global.obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;
	mode_config->helper_private = &intel_mode_config_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(i915) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(i915) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(i915) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	if (IS_I845G(i915) || IS_I865G(i915)) {
		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (IS_I830(i915) || IS_I85X(i915) ||
		   IS_I915G(i915) || IS_I915GM(i915)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

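/* Tear down in reverse: global atomic objects first, then the core mode config. */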
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}

/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_color_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->display.atomic_helper.free_list);
	INIT_WORK(&i915->display.atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}

/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(i915);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->display.cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	intel_hti_init(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;
		intel_crtc_initial_plane_config(crtc);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fbs,
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}

/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states, so that on
	 * mode_setcrtc after probe all the intel_plane_state variables are
	 * already calculated and there are no assert_plane warnings during
	 * bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	skl_watermark_ipc_init(i915);

	return 0;
}

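/*
 * Force a pipe on with a hardcoded 640x480@60Hz DVO 2x mode. Used by the
 * i830 pipe force quirk handling (see the drm_dbg below), where a pipe
 * apparently has to be kept running even with nothing attached to it.
 */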
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/*
	 * The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

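/*
 * Counterpart to i830_enable_pipe(): warn if any plane or cursor is still
 * scanning out, then shut the pipe and its DPLL down.
 */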
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

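/*
 * Restore the display state saved across suspend: reacquire all modeset
 * locks (backing off on -EDEADLK), replay the atomic state stashed in
 * i915->display.restore.modeset_state, and bring IPC back up to date.
 */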
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_atomic_state *state = i915->display.restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	i915->display.restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(i915, state, &ctx);

	skl_watermark_ipc_update(i915);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&i915->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	flush_workqueue(i915->display.wq.flip);
	flush_workqueue(i915->display.wq.modeset);

	flush_work(&i915->display.atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup().
	 */
	intel_dp_mst_suspend(i915);
}

/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->display.wq.flip);
	destroy_workqueue(i915->display.wq.modeset);

	intel_fbc_cleanup(i915);
}

/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}

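/**
 * intel_modeset_probe_defer - check whether driver probing should be deferred
 * @pdev: the display PCI device
 *
 * Returns true if probing must wait for another driver to show up first:
 * either vga_switcheroo (e.g. apple-gmux on dual-GPU MacBook Pros) or the
 * provider of the LCD panel's privacy screen.
 */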
bool intel_modeset_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}

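/*
 * Register the userspace-facing display interfaces: debugfs, opregion and
 * ACPI video, audio, the async fbdev initial config and the connector poll
 * helper. Called once outputs have been probed and hpd irqs are enabled.
 */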
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	intel_acpi_video_register(i915);

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (otherwise they report a ghost
	 * connected connector status), e.g. VGA on gm45. Hence we can
	 * only set up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}

void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After the fbdev has been flushed (including a late async config,
	 * which will have queued a delayed hotplug event), flush the
	 * remaining hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}

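/*
 * Report whether the VT-d scanout workaround applies: display version 6+
 * with VT-d/DMA remapping active.
 */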
bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
}