/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"
#include "display/intel_dp.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	drm_WARN(&dev_priv->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
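
/*
 * A minimal sketch of how the pair above balances (hypothetical call
 * sequence): the enable/disable hooks only run on the 0 <-> 1 count
 * transitions, so every get must be matched by exactly one put:
 *
 *	intel_power_well_get(dev_priv, power_well);	(0 -> 1, enables)
 *	intel_power_well_get(dev_priv, power_well);	(1 -> 2, no-op)
 *	intel_power_well_put(dev_priv, power_well);	(2 -> 1, no-op)
 *	intel_power_well_put(dev_priv, power_well);	(1 -> 0, disables)
 */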

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
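
/*
 * A minimal usage sketch (hypothetical readout caller), assuming the
 * relevant modeset locks are already held as required above:
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		...safe to read out pipe A register state here...
 */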

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}
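
/*
 * Worked example of the mappings above, following the macro arithmetic:
 * a combo AUX well with pw_idx == ICL_PW_CTL_IDX_AUX_B yields
 * ICL_AUX_PW_TO_CH() == AUX_CH_B, while a TBT AUX well with
 * pw_idx == ICL_PW_CTL_IDX_AUX_TBT2 yields
 * ICL_TBT_AUX_PW_TO_CH() == AUX_CH_D, since the TBT range starts at
 * AUX_CH_C.
 */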

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
		       enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (!dig_port)
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	return dig_port;
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1: 10 us, AUX: not specified, other PWs: 20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    power_well->desc->name);

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}
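
/*
 * The returned value packs one requester per bit, matching the debug
 * message in hsw_wait_for_power_well_disable() below: bit 0 = BIOS,
 * bit 1 = driver, bit 2 = KVMr, bit 3 = debug. For example, a return
 * value of 0x5 means the BIOS and KVMr request registers still have
 * their request bit set for this power well.
 */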

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    power_well->desc->name,
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		u32 val;

		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
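
/*
 * Per the definition above, combo PHY AUX power well indices map 1:1
 * onto PHYs starting at PHY A, e.g.
 * ICL_AUX_PW_TO_PHY(ICL_PW_CTL_IDX_AUX_B) == PHY_B. Note that this
 * only holds for non-TC/non-TBT AUX wells, as flagged at the uses
 * below.
 */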

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (INTEL_GEN(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
		       val & ~ICL_LANE_ENABLE_AUX);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
	int ret, tries = 0;

	while (1) {
		ret = sandybridge_pcode_write_timeout(i915,
						      ICL_PCODE_EXIT_TCCOLD,
						      0, 250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}

static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
	bool timeout_expected;
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (is_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or if we
	 * need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt;
	if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
		icl_tc_cold_exit(dev_priv);
		timeout_expected = true;
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting for TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	hsw_power_well_disable(dev_priv, power_well);
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
			  struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;

	if (is_tbt || intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_enable(dev_priv,
							   power_well);
	else
		return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;

	if (is_tbt || intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_disable(dev_priv,
							    power_well);
	else
		return hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}
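
/*
 * In other words: since the mask covers both the REQ and the STATE
 * bit, the well only counts as enabled when the hardware reports it
 * powered *and* a request (from the driver, or on the GEN9 big core
 * path above, the BIOS) is still in place; either bit alone is a
 * transient or inconsistent state.
 */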

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 *  set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks enough times and also force a rewrite until
	 * we are confident that the state is exactly what we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time we need one retry; avoid log spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (INTEL_GEN(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (IS_GEN(dev_priv, 11))
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}
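
/*
 * Example of the resulting masks, following the cascade above: gen12
 * covers DC3CO | DC5 | DC6 | DC9, gen11 covers DC5 | DC6 | DC9,
 * gen9 LP (BXT/GLK) covers DC5 | DC9, and everything else covers
 * DC5 | DC6 only.
 */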

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

	drm_dbg_kms(&dev_priv->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back when exiting
 * that state to a shallower power state (lower in number). The HW will decide
 * when to actually enter a given state on an on-demand basis, for instance
 * depending on the active state of display pipes. The state of display
 * registers backed by affected power rails are saved/restored as needed.
 *
 * Based on the above, entering a deeper DC power state is asynchronous wrt.
 * enabling it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->csr.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}
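
/*
 * A worked example of the fallback above: on a platform whose
 * allowed_dc_mask permits only DC5, a requested DC_STATE_EN_UPTO_DC6
 * is rejected at the first step and the loop falls back to
 * DC_STATE_EN_UPTO_DC5, which is allowed and becomes the sanitized
 * target. DC_STATE_DISABLE is the final fallback and is always
 * accepted.
 */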

static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	val = intel_de_read(dev_priv, DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	intel_de_write(dev_priv, DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us for the DC3CO exit time, per Bspec 49196.
	 */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      !intel_de_read(dev_priv, CSR_PROGRAM(0)),
		      "CSR program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
		      "CSR SSP Base Not fine\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
		      "CSR HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&dev_priv->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->csr.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If the DC off power well is disabled, we need to enable and disable
	 * it to make the target DC state take effect.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->csr.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
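
/*
 * A minimal caller sketch (hypothetical), e.g. to cap the platform at
 * DC3CO around a feature that cannot tolerate deeper DC states, and to
 * restore the deeper target afterwards:
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *	...run the DC3CO-only sequence...
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 *
 * Unsupported requests are sanitized to the nearest shallower allowed
 * state before being applied.
 */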

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (INTEL_GEN(dev_priv) >= 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_csr_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	switch (dev_priv->csr.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
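
/*
 * Note that the COND macro above is deliberately scoped to this
 * function: it re-reads the Punit status register on every evaluation,
 * so the early-out check and the post-write wait_for() poll share a
 * single definition of "requested state reached".
 */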

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}
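
/*
 * The RAWCLK_FREQ_VLV write above converts rawclk_freq (tracked in
 * kHz) to MHz with round-to-nearest: e.g. a 200000 kHz rawclk is
 * programmed as DIV_ROUND_CLOSEST(200000, 1000) == 200.
 */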

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
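
/*
 * BITS_SET() checks that *all* of the given bits are set in the value,
 * not just any of them: e.g. BITS_SET(0x6, 0x2) is true, while
 * BITS_SET(0x6, 0x3) is false because bit 0 is clear in the value.
 */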

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

1724	/*
1725	 * The BIOS can leave the PHY in some weird state
1726	 * where it doesn't fully power down some parts.
1727	 * Disable the asserts until the PHY has been fully
1728	 * reset (i.e. the power well has been disabled at
1729	 * least once).
1730	 */
1731	if (!dev_priv->chv_phy_assert[phy])
1732		return;
1733
1734	if (ch == DPIO_CH0)
1735		reg = _CHV_CMN_DW0_CH0;
1736	else
1737		reg = _CHV_CMN_DW6_CH1;
1738
1739	vlv_dpio_get(dev_priv);
1740	val = vlv_dpio_read(dev_priv, pipe, reg);
1741	vlv_dpio_put(dev_priv);
1742
1743	/*
1744	 * This assumes !override is only used when the port is disabled.
1745	 * All lanes should power down even without the override when
1746	 * the port is disabled.
1747	 */
1748	if (!override || mask == 0xf) {
1749		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1750		/*
1751		 * If CH1 common lane is not active anymore
1752		 * (e.g. for pipe B DPLL) the entire channel will
1753		 * shut down, which causes the common lane registers
1754		 * to read as 0. That means we can't actually check
1755		 * the lane power down status bits, but as the entire
1756		 * register reads as 0 it's a good indication that the
1757		 * channel is indeed entirely powered down.
1758		 */
1759		if (ch == DPIO_CH1 && val == 0)
1760			expected = 0;
1761	} else if (mask != 0x0) {
1762		expected = DPIO_ANYDL_POWERDOWN;
1763	} else {
1764		expected = 0;
1765	}
1766
1767	if (ch == DPIO_CH0)
1768		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1769	else
1770		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1771	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1772
1773	drm_WARN(&dev_priv->drm, actual != expected,
1774		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1775		 !!(actual & DPIO_ALLDL_POWERDOWN),
1776		 !!(actual & DPIO_ANYDL_POWERDOWN),
1777		 !!(expected & DPIO_ALLDL_POWERDOWN),
1778		 !!(expected & DPIO_ANYDL_POWERDOWN),
1779		 reg, val);
1780}
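
/*
 * Summary of the expectations enforced above (descriptive only):
 *
 *	!override or mask == 0xf  ->  DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN
 *	override, 0 < mask < 0xf  ->  DPIO_ANYDL_POWERDOWN
 *	override, mask == 0x0     ->  no power down bits
 *
 * with the special case that a fully powered down CH1 reads back as all 0.
 */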
1781
1782bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1783			  enum dpio_channel ch, bool override)
1784{
1785	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1786	bool was_override;
1787
1788	mutex_lock(&power_domains->lock);
1789
1790	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1791
1792	if (override == was_override)
1793		goto out;
1794
1795	if (override)
1796		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1797	else
1798		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1799
1800	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1801		       dev_priv->chv_phy_control);
1802
1803	drm_dbg_kms(&dev_priv->drm,
1804		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1805		    phy, ch, dev_priv->chv_phy_control);
1806
1807	assert_chv_phy_status(dev_priv);
1808
1809out:
1810	mutex_unlock(&power_domains->lock);
1811
1812	return was_override;
1813}
1814
1815void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1816			     bool override, unsigned int mask)
1817{
1818	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1819	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1820	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1821	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
1822
1823	mutex_lock(&power_domains->lock);
1824
1825	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1826	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1827
1828	if (override)
1829		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1830	else
1831		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1832
1833	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1834		       dev_priv->chv_phy_control);
1835
1836	drm_dbg_kms(&dev_priv->drm,
1837		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1838		    phy, ch, mask, dev_priv->chv_phy_control);
1839
1840	assert_chv_phy_status(dev_priv);
1841
1842	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1843
1844	mutex_unlock(&power_domains->lock);
1845}
1846
1847static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1848					struct i915_power_well *power_well)
1849{
1850	enum pipe pipe = PIPE_A;
1851	bool enabled;
1852	u32 state, ctrl;
1853
1854	vlv_punit_get(dev_priv);
1855
1856	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1857	/*
1858	 * We only ever set the power-on and power-gate states; anything
1859	 * else is unexpected.
1860	 */
1861	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1862		    state != DP_SSS_PWR_GATE(pipe));
1863	enabled = state == DP_SSS_PWR_ON(pipe);
1864
1865	/*
1866	 * A transient state at this point would mean some unexpected party
1867	 * is poking at the power controls too.
1868	 */
1869	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1870	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1871
1872	vlv_punit_put(dev_priv);
1873
1874	return enabled;
1875}
1876
1877static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1878				    struct i915_power_well *power_well,
1879				    bool enable)
1880{
1881	enum pipe pipe = PIPE_A;
1882	u32 state;
1883	u32 ctrl;
1884
1885	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1886
1887	vlv_punit_get(dev_priv);
1888
1889#define COND \
1890	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1891
1892	if (COND)
1893		goto out;
1894
1895	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1896	ctrl &= ~DP_SSC_MASK(pipe);
1897	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1898	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1899
1900	if (wait_for(COND, 100))
1901		drm_err(&dev_priv->drm,
1902			"timeout setting power well state %08x (%08x)\n",
1903			state,
1904			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1905
1906#undef COND
1907
1908out:
1909	vlv_punit_put(dev_priv);
1910}
1911
1912static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1913					struct i915_power_well *power_well)
1914{
1915	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1916		       dev_priv->chv_phy_control);
1917}
1918
1919static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1920				       struct i915_power_well *power_well)
1921{
1922	chv_set_pipe_power_well(dev_priv, power_well, true);
1923
1924	vlv_display_power_well_init(dev_priv);
1925}
1926
1927static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1928					struct i915_power_well *power_well)
1929{
1930	vlv_display_power_well_deinit(dev_priv);
1931
1932	chv_set_pipe_power_well(dev_priv, power_well, false);
1933}
1934
1935static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1936{
1937	return power_domains->async_put_domains[0] |
1938	       power_domains->async_put_domains[1];
1939}
1940
1941#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1942
1943static bool
1944assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1945{
1946	struct drm_i915_private *i915 = container_of(power_domains,
1947						     struct drm_i915_private,
1948						     power_domains);
1949	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
1950			    power_domains->async_put_domains[1]);
1951}
1952
1953static bool
1954__async_put_domains_state_ok(struct i915_power_domains *power_domains)
1955{
1956	struct drm_i915_private *i915 = container_of(power_domains,
1957						     struct drm_i915_private,
1958						     power_domains);
1959	enum intel_display_power_domain domain;
1960	bool err = false;
1961
1962	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1963	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
1964			   !!__async_put_domains_mask(power_domains));
1965
1966	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1967		err |= drm_WARN_ON(&i915->drm,
1968				   power_domains->domain_use_count[domain] != 1);
1969
1970	return !err;
1971}
1972
1973static void print_power_domains(struct i915_power_domains *power_domains,
1974				const char *prefix, u64 mask)
1975{
1976	struct drm_i915_private *i915 = container_of(power_domains,
1977						     struct drm_i915_private,
1978						     power_domains);
1979	enum intel_display_power_domain domain;
1980
1981	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1982	for_each_power_domain(domain, mask)
1983		drm_dbg(&i915->drm, "%s use_count %d\n",
1984			intel_display_power_domain_str(domain),
1985			power_domains->domain_use_count[domain]);
1986}
1987
1988static void
1989print_async_put_domains_state(struct i915_power_domains *power_domains)
1990{
1991	struct drm_i915_private *i915 = container_of(power_domains,
1992						     struct drm_i915_private,
1993						     power_domains);
1994
1995	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
1996		power_domains->async_put_wakeref);
1997
1998	print_power_domains(power_domains, "async_put_domains[0]",
1999			    power_domains->async_put_domains[0]);
2000	print_power_domains(power_domains, "async_put_domains[1]",
2001			    power_domains->async_put_domains[1]);
2002}
2003
2004static void
2005verify_async_put_domains_state(struct i915_power_domains *power_domains)
2006{
2007	if (!__async_put_domains_state_ok(power_domains))
2008		print_async_put_domains_state(power_domains);
2009}
2010
2011#else
2012
2013static void
2014assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
2015{
2016}
2017
2018static void
2019verify_async_put_domains_state(struct i915_power_domains *power_domains)
2020{
2021}
2022
2023#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2024
2025static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2026{
2027	assert_async_put_domain_masks_disjoint(power_domains);
2028
2029	return __async_put_domains_mask(power_domains);
2030}
2031
2032static void
2033async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2034			       enum intel_display_power_domain domain)
2035{
2036	assert_async_put_domain_masks_disjoint(power_domains);
2037
2038	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2039	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2040}
2041
2042static bool
2043intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2044				       enum intel_display_power_domain domain)
2045{
2046	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2047	bool ret = false;
2048
2049	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2050		goto out_verify;
2051
2052	async_put_domains_clear_domain(power_domains, domain);
2053
2054	ret = true;
2055
2056	if (async_put_domains_mask(power_domains))
2057		goto out_verify;
2058
2059	cancel_delayed_work(&power_domains->async_put_work);
2060	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2061				 fetch_and_zero(&power_domains->async_put_wakeref));
2062out_verify:
2063	verify_async_put_domains_state(power_domains);
2064
2065	return ret;
2066}
2067
2068static void
2069__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2070				 enum intel_display_power_domain domain)
2071{
2072	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2073	struct i915_power_well *power_well;
2074
2075	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2076		return;
2077
2078	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2079		intel_power_well_get(dev_priv, power_well);
2080
2081	power_domains->domain_use_count[domain]++;
2082}
2083
2084/**
2085 * intel_display_power_get - grab a power domain reference
2086 * @dev_priv: i915 device instance
2087 * @domain: power domain to reference
2088 *
2089 * This function grabs a power domain reference for @domain and ensures that the
2090 * power domain and all its parents are powered up. Therefore users should only
2091 * grab a reference to the innermost power domain they need.
2092 *
2093 * Any power domain reference obtained by this function must have a symmetric
2094 * call to intel_display_power_put() to release the reference again.
2095 */
2096intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2097					enum intel_display_power_domain domain)
2098{
2099	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2100	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2101
2102	mutex_lock(&power_domains->lock);
2103	__intel_display_power_get_domain(dev_priv, domain);
2104	mutex_unlock(&power_domains->lock);
2105
2106	return wakeref;
2107}
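
/*
 * Usage sketch (illustrative only, not part of this file):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_PIPE_A);
 *	... access the hardware behind POWER_DOMAIN_PIPE_A ...
 *	intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);
 */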
2108
2109/**
2110 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2111 * @dev_priv: i915 device instance
2112 * @domain: power domain to reference
2113 *
2114 * This function grabs a power domain reference for @domain if the domain is
2115 * already enabled, keeping it and all its parents powered up for as long as
2116 * the reference is held. It returns 0 without taking a reference otherwise.
2117 *
2118 * Any power domain reference obtained by this function must have a symmetric
2119 * call to intel_display_power_put() to release the reference again.
2120 */
2121intel_wakeref_t
2122intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2123				   enum intel_display_power_domain domain)
2124{
2125	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2126	intel_wakeref_t wakeref;
2127	bool is_enabled;
2128
2129	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2130	if (!wakeref)
2131		return 0;
2132
2133	mutex_lock(&power_domains->lock);
2134
2135	if (__intel_display_power_is_enabled(dev_priv, domain)) {
2136		__intel_display_power_get_domain(dev_priv, domain);
2137		is_enabled = true;
2138	} else {
2139		is_enabled = false;
2140	}
2141
2142	mutex_unlock(&power_domains->lock);
2143
2144	if (!is_enabled) {
2145		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2146		wakeref = 0;
2147	}
2148
2149	return wakeref;
2150}
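
/*
 * Usage sketch (illustrative only): callers must handle the case where the
 * domain was not enabled and no reference was taken:
 *
 *	wakeref = intel_display_power_get_if_enabled(i915, domain);
 *	if (!wakeref)
 *		return;	/* domain is off, skip the HW access */
 *	... read HW state ...
 *	intel_display_power_put(i915, domain, wakeref);
 */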
2151
2152static void
2153__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2154				 enum intel_display_power_domain domain)
2155{
2156	struct i915_power_domains *power_domains;
2157	struct i915_power_well *power_well;
2158	const char *name = intel_display_power_domain_str(domain);
2159
2160	power_domains = &dev_priv->power_domains;
2161
2162	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2163		 "Use count on domain %s is already zero\n",
2164		 name);
2165	drm_WARN(&dev_priv->drm,
2166		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2167		 "Async disabling of domain %s is pending\n",
2168		 name);
2169
2170	power_domains->domain_use_count[domain]--;
2171
2172	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2173		intel_power_well_put(dev_priv, power_well);
2174}
2175
2176static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2177				      enum intel_display_power_domain domain)
2178{
2179	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2180
2181	mutex_lock(&power_domains->lock);
2182	__intel_display_power_put_domain(dev_priv, domain);
2183	mutex_unlock(&power_domains->lock);
2184}
2185
2186/**
2187 * intel_display_power_put_unchecked - release an unchecked power domain reference
2188 * @dev_priv: i915 device instance
2189 * @domain: power domain to put the reference for
2190 *
2191 * This function drops the power domain reference obtained by
2192 * intel_display_power_get() and might power down the corresponding hardware
2193 * block right away if this is the last reference.
2194 *
2195 * This function exists only for historical reasons and should be avoided in
2196 * new code, as the correctness of its use cannot be checked. Always use
2197 * intel_display_power_put() instead.
2198 */
2199void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2200				       enum intel_display_power_domain domain)
2201{
2202	__intel_display_power_put(dev_priv, domain);
2203	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2204}
2205
2206static void
2207queue_async_put_domains_work(struct i915_power_domains *power_domains,
2208			     intel_wakeref_t wakeref)
2209{
2210	struct drm_i915_private *i915 = container_of(power_domains,
2211						     struct drm_i915_private,
2212						     power_domains);
2213	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2214	power_domains->async_put_wakeref = wakeref;
2215	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
2216						    &power_domains->async_put_work,
2217						    msecs_to_jiffies(100)));
2218}
2219
2220static void
2221release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2222{
2223	struct drm_i915_private *dev_priv =
2224		container_of(power_domains, struct drm_i915_private,
2225			     power_domains);
2226	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2227	enum intel_display_power_domain domain;
2228	intel_wakeref_t wakeref;
2229
2230	/*
2231	 * The caller must already hold a raw wakeref; upgrade that to a proper
2232	 * wakeref to make the state checker happy about the HW access during
2233	 * power well disabling.
2234	 */
2235	assert_rpm_raw_wakeref_held(rpm);
2236	wakeref = intel_runtime_pm_get(rpm);
2237
2238	for_each_power_domain(domain, mask) {
2239		/* Clear before put, so put's sanity check is happy. */
2240		async_put_domains_clear_domain(power_domains, domain);
2241		__intel_display_power_put_domain(dev_priv, domain);
2242	}
2243
2244	intel_runtime_pm_put(rpm, wakeref);
2245}
2246
2247static void
2248intel_display_power_put_async_work(struct work_struct *work)
2249{
2250	struct drm_i915_private *dev_priv =
2251		container_of(work, struct drm_i915_private,
2252			     power_domains.async_put_work.work);
2253	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2254	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2255	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2256	intel_wakeref_t old_work_wakeref = 0;
2257
2258	mutex_lock(&power_domains->lock);
2259
2260	/*
2261	 * Bail out if all the domain refs pending to be released were grabbed
2262	 * by subsequent gets or a flush_work.
2263	 */
2264	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2265	if (!old_work_wakeref)
2266		goto out_verify;
2267
2268	release_async_put_domains(power_domains,
2269				  power_domains->async_put_domains[0]);
2270
2271	/* Requeue the work if more domains were async put meanwhile. */
2272	if (power_domains->async_put_domains[1]) {
2273		power_domains->async_put_domains[0] =
2274			fetch_and_zero(&power_domains->async_put_domains[1]);
2275		queue_async_put_domains_work(power_domains,
2276					     fetch_and_zero(&new_work_wakeref));
2277	}
2278
2279out_verify:
2280	verify_async_put_domains_state(power_domains);
2281
2282	mutex_unlock(&power_domains->lock);
2283
2284	if (old_work_wakeref)
2285		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2286	if (new_work_wakeref)
2287		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2288}
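
/*
 * Two-slot handoff in the work function above (descriptive summary): domains
 * put while a work item is already pending land in async_put_domains[1]; the
 * work releases async_put_domains[0], then promotes [1] to [0] and requeues
 * itself with the freshly acquired raw wakeref.
 */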
2289
2290/**
2291 * intel_display_power_put_async - release a power domain reference asynchronously
2292 * @i915: i915 device instance
2293 * @domain: power domain to put the reference for
2294 * @wakeref: wakeref acquired for the reference that is being released
2295 *
2296 * This function drops the power domain reference obtained by
2297 * intel_display_power_get*() and schedules a work item to power down the
2298 * corresponding hardware block if this is the last reference.
2299 */
2300void __intel_display_power_put_async(struct drm_i915_private *i915,
2301				     enum intel_display_power_domain domain,
2302				     intel_wakeref_t wakeref)
2303{
2304	struct i915_power_domains *power_domains = &i915->power_domains;
2305	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2306	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2307
2308	mutex_lock(&power_domains->lock);
2309
2310	if (power_domains->domain_use_count[domain] > 1) {
2311		__intel_display_power_put_domain(i915, domain);
2312
2313		goto out_verify;
2314	}
2315
2316	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2317
2318	/* Let a pending work requeue itself or queue a new one. */
2319	if (power_domains->async_put_wakeref) {
2320		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2321	} else {
2322		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2323		queue_async_put_domains_work(power_domains,
2324					     fetch_and_zero(&work_wakeref));
2325	}
2326
2327out_verify:
2328	verify_async_put_domains_state(power_domains);
2329
2330	mutex_unlock(&power_domains->lock);
2331
2332	if (work_wakeref)
2333		intel_runtime_pm_put_raw(rpm, work_wakeref);
2334
2335	intel_runtime_pm_put(rpm, wakeref);
2336}
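
/*
 * Usage sketch (illustrative only; assumes the intel_display_power_put_async()
 * wrapper declared in intel_display_power.h):
 *
 *	wakeref = intel_display_power_get(i915, domain);
 *	... brief HW access ...
 *	intel_display_power_put_async(i915, domain, wakeref);
 *
 * The actual power down is deferred by ~100ms (see
 * queue_async_put_domains_work()), so a quickly following get for the same
 * domain avoids a needless power cycle.
 */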
2337
2338/**
2339 * intel_display_power_flush_work - flushes the async display power disabling work
2340 * @i915: i915 device instance
2341 *
2342 * Flushes any pending work that was scheduled by a preceding
2343 * intel_display_power_put_async() call, completing the disabling of the
2344 * corresponding power domains.
2345 *
2346 * Note that the work handler function may still be running after this
2347 * function returns; to ensure that the work handler isn't running, use
2348 * intel_display_power_flush_work_sync() instead.
2349 */
2350void intel_display_power_flush_work(struct drm_i915_private *i915)
2351{
2352	struct i915_power_domains *power_domains = &i915->power_domains;
2353	intel_wakeref_t work_wakeref;
2354
2355	mutex_lock(&power_domains->lock);
2356
2357	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2358	if (!work_wakeref)
2359		goto out_verify;
2360
2361	release_async_put_domains(power_domains,
2362				  async_put_domains_mask(power_domains));
2363	cancel_delayed_work(&power_domains->async_put_work);
2364
2365out_verify:
2366	verify_async_put_domains_state(power_domains);
2367
2368	mutex_unlock(&power_domains->lock);
2369
2370	if (work_wakeref)
2371		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2372}
2373
2374/**
2375 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2376 * @i915: i915 device instance
2377 *
2378 * Like intel_display_power_flush_work(), but also ensures that the work
2379 * handler function is not running any more when this function returns.
2380 */
2381static void
2382intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2383{
2384	struct i915_power_domains *power_domains = &i915->power_domains;
2385
2386	intel_display_power_flush_work(i915);
2387	cancel_delayed_work_sync(&power_domains->async_put_work);
2388
2389	verify_async_put_domains_state(power_domains);
2390
2391	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2392}
2393
2394#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2395/**
2396 * intel_display_power_put - release a power domain reference
2397 * @dev_priv: i915 device instance
2398 * @domain: power domain to put the reference for
2399 * @wakeref: wakeref acquired for the reference that is being released
2400 *
2401 * This function drops the power domain reference obtained by
2402 * intel_display_power_get() and might power down the corresponding hardware
2403 * block right away if this is the last reference.
2404 */
2405void intel_display_power_put(struct drm_i915_private *dev_priv,
2406			     enum intel_display_power_domain domain,
2407			     intel_wakeref_t wakeref)
2408{
2409	__intel_display_power_put(dev_priv, domain);
2410	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2411}
2412#endif
2413
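/*
 * The per-platform masks below list, for each power well, the domains the
 * well serves. __intel_display_power_get_domain() enables every well whose
 * .domains mask contains BIT_ULL(domain), so a domain listed in several
 * wells keeps all of them powered.
 */
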
2414#define I830_PIPES_POWER_DOMAINS (		\
2415	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2416	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2417	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2418	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2419	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2420	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2421	BIT_ULL(POWER_DOMAIN_INIT))
2422
2423#define VLV_DISPLAY_POWER_DOMAINS (		\
2424	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2425	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2426	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2427	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2428	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2429	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2430	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2431	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2432	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2433	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2434	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2435	BIT_ULL(POWER_DOMAIN_VGA) |			\
2436	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2437	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2438	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2439	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2440	BIT_ULL(POWER_DOMAIN_INIT))
2441
2442#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2443	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2444	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2445	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2446	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2447	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2448	BIT_ULL(POWER_DOMAIN_INIT))
2449
2450#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2451	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2452	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2453	BIT_ULL(POWER_DOMAIN_INIT))
2454
2455#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2456	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2457	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2458	BIT_ULL(POWER_DOMAIN_INIT))
2459
2460#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2461	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2462	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2463	BIT_ULL(POWER_DOMAIN_INIT))
2464
2465#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2466	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2467	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2468	BIT_ULL(POWER_DOMAIN_INIT))
2469
2470#define CHV_DISPLAY_POWER_DOMAINS (		\
2471	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2472	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2473	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2474	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2475	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2476	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2477	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2478	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2479	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2480	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2481	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2482	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2483	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2484	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2485	BIT_ULL(POWER_DOMAIN_VGA) |			\
2486	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2487	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2488	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2489	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2490	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2491	BIT_ULL(POWER_DOMAIN_INIT))
2492
2493#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2494	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2495	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2496	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2497	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2498	BIT_ULL(POWER_DOMAIN_INIT))
2499
2500#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2501	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2502	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2503	BIT_ULL(POWER_DOMAIN_INIT))
2504
2505#define HSW_DISPLAY_POWER_DOMAINS (			\
2506	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2507	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2508	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2509	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2510	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2511	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2512	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2513	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2514	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2515	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2516	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2517	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2518	BIT_ULL(POWER_DOMAIN_VGA) |				\
2519	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2520	BIT_ULL(POWER_DOMAIN_INIT))
2521
2522#define BDW_DISPLAY_POWER_DOMAINS (			\
2523	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2524	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2525	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2526	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2527	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2528	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2529	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2530	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2531	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2532	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2533	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2534	BIT_ULL(POWER_DOMAIN_VGA) |				\
2535	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2536	BIT_ULL(POWER_DOMAIN_INIT))
2537
2538#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2539	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2540	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2541	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2542	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2543	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2544	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2545	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2546	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2547	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2548	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2549	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2550	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2551	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2552	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2553	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2554	BIT_ULL(POWER_DOMAIN_VGA) |				\
2555	BIT_ULL(POWER_DOMAIN_INIT))
2556#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2557	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2558	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2559	BIT_ULL(POWER_DOMAIN_INIT))
2560#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2561	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2562	BIT_ULL(POWER_DOMAIN_INIT))
2563#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2564	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2565	BIT_ULL(POWER_DOMAIN_INIT))
2566#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2567	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2568	BIT_ULL(POWER_DOMAIN_INIT))
2569#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2570	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2571	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2572	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2573	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2574	BIT_ULL(POWER_DOMAIN_INIT))
2575
2576#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2577	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2578	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2579	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2580	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2581	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2582	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2583	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2584	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2585	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2586	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2587	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2588	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2589	BIT_ULL(POWER_DOMAIN_VGA) |				\
2590	BIT_ULL(POWER_DOMAIN_INIT))
2591#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2592	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2593	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2594	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2595	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2596	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2597	BIT_ULL(POWER_DOMAIN_INIT))
2598#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2599	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2600	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2601	BIT_ULL(POWER_DOMAIN_INIT))
2602#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2603	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2604	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2605	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2606	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2607	BIT_ULL(POWER_DOMAIN_INIT))
2608
2609#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2610	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2611	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2612	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2613	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2614	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2615	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2616	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2617	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2618	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2619	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2620	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2621	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2622	BIT_ULL(POWER_DOMAIN_VGA) |				\
2623	BIT_ULL(POWER_DOMAIN_INIT))
2624#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2625	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2626#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2627	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2628#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2629	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2630#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2631	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2632	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2633	BIT_ULL(POWER_DOMAIN_INIT))
2634#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2635	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2636	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2637	BIT_ULL(POWER_DOMAIN_INIT))
2638#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2639	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2640	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2641	BIT_ULL(POWER_DOMAIN_INIT))
2642#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2643	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2644	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2645	BIT_ULL(POWER_DOMAIN_INIT))
2646#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2647	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2648	BIT_ULL(POWER_DOMAIN_INIT))
2649#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2650	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2651	BIT_ULL(POWER_DOMAIN_INIT))
2652#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2653	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2654	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2655	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2656	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2657	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2658	BIT_ULL(POWER_DOMAIN_INIT))
2659
2660#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2661	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2662	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2663	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2664	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2665	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2666	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2667	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2668	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2669	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2670	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2671	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2672	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2673	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2674	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2675	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2676	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2677	BIT_ULL(POWER_DOMAIN_VGA) |				\
2678	BIT_ULL(POWER_DOMAIN_INIT))
2679#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2680	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2681	BIT_ULL(POWER_DOMAIN_INIT))
2682#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2683	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2684	BIT_ULL(POWER_DOMAIN_INIT))
2685#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2686	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2687	BIT_ULL(POWER_DOMAIN_INIT))
2688#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2689	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2690	BIT_ULL(POWER_DOMAIN_INIT))
2691#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2692	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2693	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2694	BIT_ULL(POWER_DOMAIN_INIT))
2695#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2696	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2697	BIT_ULL(POWER_DOMAIN_INIT))
2698#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2699	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2700	BIT_ULL(POWER_DOMAIN_INIT))
2701#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2702	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2703	BIT_ULL(POWER_DOMAIN_INIT))
2704#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2705	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2706	BIT_ULL(POWER_DOMAIN_INIT))
2707#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2708	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2709	BIT_ULL(POWER_DOMAIN_INIT))
2710#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2711	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2712	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2713	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2714	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2715	BIT_ULL(POWER_DOMAIN_INIT))
2716
2717/*
2718 * ICL PW_0/PG_0 domains (HW/DMC control):
2719 * - PCI
2720 * - clocks except port PLL
2721 * - central power except FBC
2722 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2723 * ICL PW_1/PG_1 domains (HW/DMC control):
2724 * - DBUF function
2725 * - PIPE_A and its planes, except VGA
2726 * - transcoder EDP + PSR
2727 * - transcoder DSI
2728 * - DDI_A
2729 * - FBC
2730 */
2731#define ICL_PW_4_POWER_DOMAINS (			\
2732	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2733	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2734	BIT_ULL(POWER_DOMAIN_INIT))
2735	/* VDSC/joining */
2736#define ICL_PW_3_POWER_DOMAINS (			\
2737	ICL_PW_4_POWER_DOMAINS |			\
2738	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2739	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2740	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2741	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2742	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2743	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2744	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2745	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2746	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2747	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2748	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2749	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2750	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2751	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2752	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2753	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2754	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2755	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2756	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2757	BIT_ULL(POWER_DOMAIN_VGA) |			\
2758	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2759	BIT_ULL(POWER_DOMAIN_INIT))
2760	/*
2761	 * - transcoder WD
2762	 * - KVMR (HW control)
2763	 */
2764#define ICL_PW_2_POWER_DOMAINS (			\
2765	ICL_PW_3_POWER_DOMAINS |			\
2766	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2767	BIT_ULL(POWER_DOMAIN_INIT))
2768	/*
2769	 * - KVMR (HW control)
2770	 */
2771#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2772	ICL_PW_2_POWER_DOMAINS |			\
2773	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2774	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2775	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2776	BIT_ULL(POWER_DOMAIN_INIT))
2777
2778#define ICL_DDI_IO_A_POWER_DOMAINS (			\
2779	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2780#define ICL_DDI_IO_B_POWER_DOMAINS (			\
2781	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2782#define ICL_DDI_IO_C_POWER_DOMAINS (			\
2783	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2784#define ICL_DDI_IO_D_POWER_DOMAINS (			\
2785	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2786#define ICL_DDI_IO_E_POWER_DOMAINS (			\
2787	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2788#define ICL_DDI_IO_F_POWER_DOMAINS (			\
2789	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2790
2791#define ICL_AUX_A_IO_POWER_DOMAINS (			\
2792	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2793	BIT_ULL(POWER_DOMAIN_AUX_A))
2794#define ICL_AUX_B_IO_POWER_DOMAINS (			\
2795	BIT_ULL(POWER_DOMAIN_AUX_B))
2796#define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2797	BIT_ULL(POWER_DOMAIN_AUX_C))
2798#define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2799	BIT_ULL(POWER_DOMAIN_AUX_D))
2800#define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2801	BIT_ULL(POWER_DOMAIN_AUX_E))
2802#define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2803	BIT_ULL(POWER_DOMAIN_AUX_F))
2804#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2805	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2806#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2807	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2808#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2809	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2810#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2811	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2812
2813#define TGL_PW_5_POWER_DOMAINS (			\
2814	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2815	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2816	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2817	BIT_ULL(POWER_DOMAIN_INIT))
2818
2819#define TGL_PW_4_POWER_DOMAINS (			\
2820	TGL_PW_5_POWER_DOMAINS |			\
2821	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2822	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2823	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2824	BIT_ULL(POWER_DOMAIN_INIT))
2825
2826#define TGL_PW_3_POWER_DOMAINS (			\
2827	TGL_PW_4_POWER_DOMAINS |			\
2828	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2829	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2830	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2831	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2832	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2833	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2834	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
2835	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
2836	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
2837	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2838	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2839	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2840	BIT_ULL(POWER_DOMAIN_AUX_G) |			\
2841	BIT_ULL(POWER_DOMAIN_AUX_H) |			\
2842	BIT_ULL(POWER_DOMAIN_AUX_I) |			\
2843	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2844	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2845	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2846	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |		\
2847	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |		\
2848	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |		\
2849	BIT_ULL(POWER_DOMAIN_VGA) |			\
2850	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2851	BIT_ULL(POWER_DOMAIN_INIT))
2852
2853#define TGL_PW_2_POWER_DOMAINS (			\
2854	TGL_PW_3_POWER_DOMAINS |			\
2855	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2856	BIT_ULL(POWER_DOMAIN_INIT))
2857
2858#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2859	TGL_PW_3_POWER_DOMAINS |			\
2860	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2861	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2862	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2863	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2864	BIT_ULL(POWER_DOMAIN_INIT))
2865
2866#define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
2867	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2868#define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
2869	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2870#define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
2871	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2872#define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
2873	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2874#define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
2875	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2876#define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
2877	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2878
2879#define TGL_AUX_A_IO_POWER_DOMAINS (		\
2880	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2881	BIT_ULL(POWER_DOMAIN_AUX_A))
2882#define TGL_AUX_B_IO_POWER_DOMAINS (		\
2883	BIT_ULL(POWER_DOMAIN_AUX_B))
2884#define TGL_AUX_C_IO_POWER_DOMAINS (		\
2885	BIT_ULL(POWER_DOMAIN_AUX_C))
2886#define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
2887	BIT_ULL(POWER_DOMAIN_AUX_D))
2888#define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
2889	BIT_ULL(POWER_DOMAIN_AUX_E))
2890#define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
2891	BIT_ULL(POWER_DOMAIN_AUX_F))
2892#define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
2893	BIT_ULL(POWER_DOMAIN_AUX_G))
2894#define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
2895	BIT_ULL(POWER_DOMAIN_AUX_H))
2896#define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
2897	BIT_ULL(POWER_DOMAIN_AUX_I))
2898#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
2899	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2900#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
2901	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2902#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
2903	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2904#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
2905	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2906#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
2907	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2908#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
2909	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2910
2911#define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
2912	BIT_ULL(POWER_DOMAIN_AUX_D)	|	\
2913	BIT_ULL(POWER_DOMAIN_AUX_E)	|	\
2914	BIT_ULL(POWER_DOMAIN_AUX_F)	|	\
2915	BIT_ULL(POWER_DOMAIN_AUX_G)	|	\
2916	BIT_ULL(POWER_DOMAIN_AUX_H)	|	\
2917	BIT_ULL(POWER_DOMAIN_AUX_I)	|	\
2918	BIT_ULL(POWER_DOMAIN_AUX_D_TBT)	|	\
2919	BIT_ULL(POWER_DOMAIN_AUX_E_TBT)	|	\
2920	BIT_ULL(POWER_DOMAIN_AUX_F_TBT)	|	\
2921	BIT_ULL(POWER_DOMAIN_AUX_G_TBT)	|	\
2922	BIT_ULL(POWER_DOMAIN_AUX_H_TBT)	|	\
2923	BIT_ULL(POWER_DOMAIN_AUX_I_TBT)	|	\
2924	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
2925
2926#define RKL_PW_4_POWER_DOMAINS (			\
2927	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2928	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2929	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2930	BIT_ULL(POWER_DOMAIN_INIT))
2931
2932#define RKL_PW_3_POWER_DOMAINS (			\
2933	RKL_PW_4_POWER_DOMAINS |			\
2934	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2935	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2936	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2937	BIT_ULL(POWER_DOMAIN_VGA) |			\
2938	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2939	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2940	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2941	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2942	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2943	BIT_ULL(POWER_DOMAIN_INIT))
2944
2945/*
2946 * There is no PW_2/PG_2 on RKL.
2947 *
2948 * RKL PW_1/PG_1 domains (under HW/DMC control):
2949 * - DBUF function (note: registers are in PW0)
2950 * - PIPE_A and its planes and VDSC/joining, except VGA
2951 * - transcoder A
2952 * - DDI_A and DDI_B
2953 * - FBC
2954 *
2955 * RKL PW_0/PG_0 domains (under HW/DMC control):
2956 * - PCI
2957 * - clocks except port PLL
2958 * - shared functions:
2959 *     * interrupts except pipe interrupts
2960 *     * MBus except PIPE_MBUS_DBOX_CTL
2961 *     * DBUF registers
2962 * - central power except FBC
2963 * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
2964 */
2965
2966#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2967	RKL_PW_3_POWER_DOMAINS |			\
2968	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2969	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2970	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2971	BIT_ULL(POWER_DOMAIN_INIT))
2972
2973static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2974	.sync_hw = i9xx_power_well_sync_hw_noop,
2975	.enable = i9xx_always_on_power_well_noop,
2976	.disable = i9xx_always_on_power_well_noop,
2977	.is_enabled = i9xx_always_on_power_well_enabled,
2978};
2979
2980static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2981	.sync_hw = chv_pipe_power_well_sync_hw,
2982	.enable = chv_pipe_power_well_enable,
2983	.disable = chv_pipe_power_well_disable,
2984	.is_enabled = chv_pipe_power_well_enabled,
2985};
2986
2987static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2988	.sync_hw = i9xx_power_well_sync_hw_noop,
2989	.enable = chv_dpio_cmn_power_well_enable,
2990	.disable = chv_dpio_cmn_power_well_disable,
2991	.is_enabled = vlv_power_well_enabled,
2992};
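
/*
 * How the ops tables above are consumed (a sketch of the refcounted dispatch
 * done by the intel_power_well_get()/put() helpers earlier in this file; the
 * exact helper bodies are assumed, not quoted):
 *
 *	if (!power_well->count++)
 *		power_well->desc->ops->enable(dev_priv, power_well);
 */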
2993
2994static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2995	{
2996		.name = "always-on",
2997		.always_on = true,
2998		.domains = POWER_DOMAIN_MASK,
2999		.ops = &i9xx_always_on_power_well_ops,
3000		.id = DISP_PW_ID_NONE,
3001	},
3002};
3003
3004static const struct i915_power_well_ops i830_pipes_power_well_ops = {
3005	.sync_hw = i830_pipes_power_well_sync_hw,
3006	.enable = i830_pipes_power_well_enable,
3007	.disable = i830_pipes_power_well_disable,
3008	.is_enabled = i830_pipes_power_well_enabled,
3009};
3010
3011static const struct i915_power_well_desc i830_power_wells[] = {
3012	{
3013		.name = "always-on",
3014		.always_on = true,
3015		.domains = POWER_DOMAIN_MASK,
3016		.ops = &i9xx_always_on_power_well_ops,
3017		.id = DISP_PW_ID_NONE,
3018	},
3019	{
3020		.name = "pipes",
3021		.domains = I830_PIPES_POWER_DOMAINS,
3022		.ops = &i830_pipes_power_well_ops,
3023		.id = DISP_PW_ID_NONE,
3024	},
3025};
3026
3027static const struct i915_power_well_ops hsw_power_well_ops = {
3028	.sync_hw = hsw_power_well_sync_hw,
3029	.enable = hsw_power_well_enable,
3030	.disable = hsw_power_well_disable,
3031	.is_enabled = hsw_power_well_enabled,
3032};
3033
3034static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
3035	.sync_hw = i9xx_power_well_sync_hw_noop,
3036	.enable = gen9_dc_off_power_well_enable,
3037	.disable = gen9_dc_off_power_well_disable,
3038	.is_enabled = gen9_dc_off_power_well_enabled,
3039};
3040
3041static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
3042	.sync_hw = i9xx_power_well_sync_hw_noop,
3043	.enable = bxt_dpio_cmn_power_well_enable,
3044	.disable = bxt_dpio_cmn_power_well_disable,
3045	.is_enabled = bxt_dpio_cmn_power_well_enabled,
3046};
3047
3048static const struct i915_power_well_regs hsw_power_well_regs = {
3049	.bios	= HSW_PWR_WELL_CTL1,
3050	.driver	= HSW_PWR_WELL_CTL2,
3051	.kvmr	= HSW_PWR_WELL_CTL3,
3052	.debug	= HSW_PWR_WELL_CTL4,
3053};
3054
3055static const struct i915_power_well_desc hsw_power_wells[] = {
3056	{
3057		.name = "always-on",
3058		.always_on = true,
3059		.domains = POWER_DOMAIN_MASK,
3060		.ops = &i9xx_always_on_power_well_ops,
3061		.id = DISP_PW_ID_NONE,
3062	},
3063	{
3064		.name = "display",
3065		.domains = HSW_DISPLAY_POWER_DOMAINS,
3066		.ops = &hsw_power_well_ops,
3067		.id = HSW_DISP_PW_GLOBAL,
3068		{
3069			.hsw.regs = &hsw_power_well_regs,
3070			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3071			.hsw.has_vga = true,
3072		},
3073	},
3074};
3075
3076static const struct i915_power_well_desc bdw_power_wells[] = {
3077	{
3078		.name = "always-on",
3079		.always_on = true,
3080		.domains = POWER_DOMAIN_MASK,
3081		.ops = &i9xx_always_on_power_well_ops,
3082		.id = DISP_PW_ID_NONE,
3083	},
3084	{
3085		.name = "display",
3086		.domains = BDW_DISPLAY_POWER_DOMAINS,
3087		.ops = &hsw_power_well_ops,
3088		.id = HSW_DISP_PW_GLOBAL,
3089		{
3090			.hsw.regs = &hsw_power_well_regs,
3091			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3092			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3093			.hsw.has_vga = true,
3094		},
3095	},
3096};
3097
3098static const struct i915_power_well_ops vlv_display_power_well_ops = {
3099	.sync_hw = i9xx_power_well_sync_hw_noop,
3100	.enable = vlv_display_power_well_enable,
3101	.disable = vlv_display_power_well_disable,
3102	.is_enabled = vlv_power_well_enabled,
3103};
3104
3105static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3106	.sync_hw = i9xx_power_well_sync_hw_noop,
3107	.enable = vlv_dpio_cmn_power_well_enable,
3108	.disable = vlv_dpio_cmn_power_well_disable,
3109	.is_enabled = vlv_power_well_enabled,
3110};
3111
3112static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3113	.sync_hw = i9xx_power_well_sync_hw_noop,
3114	.enable = vlv_power_well_enable,
3115	.disable = vlv_power_well_disable,
3116	.is_enabled = vlv_power_well_enabled,
3117};
3118
3119static const struct i915_power_well_desc vlv_power_wells[] = {
3120	{
3121		.name = "always-on",
3122		.always_on = true,
3123		.domains = POWER_DOMAIN_MASK,
3124		.ops = &i9xx_always_on_power_well_ops,
3125		.id = DISP_PW_ID_NONE,
3126	},
3127	{
3128		.name = "display",
3129		.domains = VLV_DISPLAY_POWER_DOMAINS,
3130		.ops = &vlv_display_power_well_ops,
3131		.id = VLV_DISP_PW_DISP2D,
3132		{
3133			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3134		},
3135	},
3136	{
3137		.name = "dpio-tx-b-01",
3138		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3139			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3140			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3141			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3142		.ops = &vlv_dpio_power_well_ops,
3143		.id = DISP_PW_ID_NONE,
3144		{
3145			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3146		},
3147	},
3148	{
3149		.name = "dpio-tx-b-23",
3150		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3151			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3152			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3153			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3154		.ops = &vlv_dpio_power_well_ops,
3155		.id = DISP_PW_ID_NONE,
3156		{
3157			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3158		},
3159	},
3160	{
3161		.name = "dpio-tx-c-01",
3162		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3163			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3164			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3165			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3166		.ops = &vlv_dpio_power_well_ops,
3167		.id = DISP_PW_ID_NONE,
3168		{
3169			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3170		},
3171	},
3172	{
3173		.name = "dpio-tx-c-23",
3174		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3175			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3176			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3177			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3178		.ops = &vlv_dpio_power_well_ops,
3179		.id = DISP_PW_ID_NONE,
3180		{
3181			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3182		},
3183	},
3184	{
3185		.name = "dpio-common",
3186		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3187		.ops = &vlv_dpio_cmn_power_well_ops,
3188		.id = VLV_DISP_PW_DPIO_CMN_BC,
3189		{
3190			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3191		},
3192	},
3193};
3194
3195static const struct i915_power_well_desc chv_power_wells[] = {
3196	{
3197		.name = "always-on",
3198		.always_on = true,
3199		.domains = POWER_DOMAIN_MASK,
3200		.ops = &i9xx_always_on_power_well_ops,
3201		.id = DISP_PW_ID_NONE,
3202	},
3203	{
3204		.name = "display",
3205		/*
3206		 * Pipe A power well is the new disp2d well. Pipe B and C
3207		 * power wells don't actually exist. Pipe A power well is
3208		 * required for any pipe to work.
3209		 */
3210		.domains = CHV_DISPLAY_POWER_DOMAINS,
3211		.ops = &chv_pipe_power_well_ops,
3212		.id = DISP_PW_ID_NONE,
3213	},
3214	{
3215		.name = "dpio-common-bc",
3216		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3217		.ops = &chv_dpio_cmn_power_well_ops,
3218		.id = VLV_DISP_PW_DPIO_CMN_BC,
3219		{
3220			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3221		},
3222	},
3223	{
3224		.name = "dpio-common-d",
3225		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3226		.ops = &chv_dpio_cmn_power_well_ops,
3227		.id = CHV_DISP_PW_DPIO_CMN_D,
3228		{
3229			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3230		},
3231	},
3232};
3233
3234bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3235					 enum i915_power_well_id power_well_id)
3236{
3237	struct i915_power_well *power_well;
3238	bool ret;
3239
3240	power_well = lookup_power_well(dev_priv, power_well_id);
3241	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3242
3243	return ret;
3244}
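
/*
 * Usage sketch (illustrative only):
 *
 *	if (intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2))
 *		... registers behind power well 2 are accessible ...
 */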
3245
3246static const struct i915_power_well_desc skl_power_wells[] = {
3247	{
3248		.name = "always-on",
3249		.always_on = true,
3250		.domains = POWER_DOMAIN_MASK,
3251		.ops = &i9xx_always_on_power_well_ops,
3252		.id = DISP_PW_ID_NONE,
3253	},
3254	{
3255		.name = "power well 1",
3256		/* Handled by the DMC firmware */
3257		.always_on = true,
3258		.domains = 0,
3259		.ops = &hsw_power_well_ops,
3260		.id = SKL_DISP_PW_1,
3261		{
3262			.hsw.regs = &hsw_power_well_regs,
3263			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3264			.hsw.has_fuses = true,
3265		},
3266	},
3267	{
3268		.name = "MISC IO power well",
3269		/* Handled by the DMC firmware */
3270		.always_on = true,
3271		.domains = 0,
3272		.ops = &hsw_power_well_ops,
3273		.id = SKL_DISP_PW_MISC_IO,
3274		{
3275			.hsw.regs = &hsw_power_well_regs,
3276			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3277		},
3278	},
3279	{
3280		.name = "DC off",
3281		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3282		.ops = &gen9_dc_off_power_well_ops,
3283		.id = SKL_DISP_DC_OFF,
3284	},
3285	{
3286		.name = "power well 2",
3287		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3288		.ops = &hsw_power_well_ops,
3289		.id = SKL_DISP_PW_2,
3290		{
3291			.hsw.regs = &hsw_power_well_regs,
3292			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3293			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3294			.hsw.has_vga = true,
3295			.hsw.has_fuses = true,
3296		},
3297	},
3298	{
3299		.name = "DDI A/E IO power well",
3300		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3301		.ops = &hsw_power_well_ops,
3302		.id = DISP_PW_ID_NONE,
3303		{
3304			.hsw.regs = &hsw_power_well_regs,
3305			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3306		},
3307	},
3308	{
3309		.name = "DDI B IO power well",
3310		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3311		.ops = &hsw_power_well_ops,
3312		.id = DISP_PW_ID_NONE,
3313		{
3314			.hsw.regs = &hsw_power_well_regs,
3315			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3316		},
3317	},
3318	{
3319		.name = "DDI C IO power well",
3320		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3321		.ops = &hsw_power_well_ops,
3322		.id = DISP_PW_ID_NONE,
3323		{
3324			.hsw.regs = &hsw_power_well_regs,
3325			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3326		},
3327	},
3328	{
3329		.name = "DDI D IO power well",
3330		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3331		.ops = &hsw_power_well_ops,
3332		.id = DISP_PW_ID_NONE,
3333		{
3334			.hsw.regs = &hsw_power_well_regs,
3335			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3336		},
3337	},
3338};
3339
3340static const struct i915_power_well_desc bxt_power_wells[] = {
3341	{
3342		.name = "always-on",
3343		.always_on = true,
3344		.domains = POWER_DOMAIN_MASK,
3345		.ops = &i9xx_always_on_power_well_ops,
3346		.id = DISP_PW_ID_NONE,
3347	},
3348	{
3349		.name = "power well 1",
3350		/* Handled by the DMC firmware */
3351		.always_on = true,
3352		.domains = 0,
3353		.ops = &hsw_power_well_ops,
3354		.id = SKL_DISP_PW_1,
3355		{
3356			.hsw.regs = &hsw_power_well_regs,
3357			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3358			.hsw.has_fuses = true,
3359		},
3360	},
3361	{
3362		.name = "DC off",
3363		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3364		.ops = &gen9_dc_off_power_well_ops,
3365		.id = SKL_DISP_DC_OFF,
3366	},
3367	{
3368		.name = "power well 2",
3369		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3370		.ops = &hsw_power_well_ops,
3371		.id = SKL_DISP_PW_2,
3372		{
3373			.hsw.regs = &hsw_power_well_regs,
3374			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3375			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3376			.hsw.has_vga = true,
3377			.hsw.has_fuses = true,
3378		},
3379	},
3380	{
3381		.name = "dpio-common-a",
3382		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3383		.ops = &bxt_dpio_cmn_power_well_ops,
3384		.id = BXT_DISP_PW_DPIO_CMN_A,
3385		{
3386			.bxt.phy = DPIO_PHY1,
3387		},
3388	},
3389	{
3390		.name = "dpio-common-bc",
3391		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3392		.ops = &bxt_dpio_cmn_power_well_ops,
3393		.id = VLV_DISP_PW_DPIO_CMN_BC,
3394		{
3395			.bxt.phy = DPIO_PHY0,
3396		},
3397	},
3398};
3399
3400static const struct i915_power_well_desc glk_power_wells[] = {
3401	{
3402		.name = "always-on",
3403		.always_on = true,
3404		.domains = POWER_DOMAIN_MASK,
3405		.ops = &i9xx_always_on_power_well_ops,
3406		.id = DISP_PW_ID_NONE,
3407	},
3408	{
3409		.name = "power well 1",
3410		/* Handled by the DMC firmware */
3411		.always_on = true,
3412		.domains = 0,
3413		.ops = &hsw_power_well_ops,
3414		.id = SKL_DISP_PW_1,
3415		{
3416			.hsw.regs = &hsw_power_well_regs,
3417			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3418			.hsw.has_fuses = true,
3419		},
3420	},
3421	{
3422		.name = "DC off",
3423		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3424		.ops = &gen9_dc_off_power_well_ops,
3425		.id = SKL_DISP_DC_OFF,
3426	},
3427	{
3428		.name = "power well 2",
3429		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3430		.ops = &hsw_power_well_ops,
3431		.id = SKL_DISP_PW_2,
3432		{
3433			.hsw.regs = &hsw_power_well_regs,
3434			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3435			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3436			.hsw.has_vga = true,
3437			.hsw.has_fuses = true,
3438		},
3439	},
3440	{
3441		.name = "dpio-common-a",
3442		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3443		.ops = &bxt_dpio_cmn_power_well_ops,
3444		.id = BXT_DISP_PW_DPIO_CMN_A,
3445		{
3446			.bxt.phy = DPIO_PHY1,
3447		},
3448	},
3449	{
3450		.name = "dpio-common-b",
3451		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3452		.ops = &bxt_dpio_cmn_power_well_ops,
3453		.id = VLV_DISP_PW_DPIO_CMN_BC,
3454		{
3455			.bxt.phy = DPIO_PHY0,
3456		},
3457	},
3458	{
3459		.name = "dpio-common-c",
3460		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3461		.ops = &bxt_dpio_cmn_power_well_ops,
3462		.id = GLK_DISP_PW_DPIO_CMN_C,
3463		{
3464			.bxt.phy = DPIO_PHY2,
3465		},
3466	},
3467	{
3468		.name = "AUX A",
3469		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3470		.ops = &hsw_power_well_ops,
3471		.id = DISP_PW_ID_NONE,
3472		{
3473			.hsw.regs = &hsw_power_well_regs,
3474			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3475		},
3476	},
3477	{
3478		.name = "AUX B",
3479		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3480		.ops = &hsw_power_well_ops,
3481		.id = DISP_PW_ID_NONE,
3482		{
3483			.hsw.regs = &hsw_power_well_regs,
3484			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3485		},
3486	},
3487	{
3488		.name = "AUX C",
3489		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3490		.ops = &hsw_power_well_ops,
3491		.id = DISP_PW_ID_NONE,
3492		{
3493			.hsw.regs = &hsw_power_well_regs,
3494			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3495		},
3496	},
3497	{
3498		.name = "DDI A IO power well",
3499		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3500		.ops = &hsw_power_well_ops,
3501		.id = DISP_PW_ID_NONE,
3502		{
3503			.hsw.regs = &hsw_power_well_regs,
3504			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3505		},
3506	},
3507	{
3508		.name = "DDI B IO power well",
3509		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3510		.ops = &hsw_power_well_ops,
3511		.id = DISP_PW_ID_NONE,
3512		{
3513			.hsw.regs = &hsw_power_well_regs,
3514			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3515		},
3516	},
3517	{
3518		.name = "DDI C IO power well",
3519		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3520		.ops = &hsw_power_well_ops,
3521		.id = DISP_PW_ID_NONE,
3522		{
3523			.hsw.regs = &hsw_power_well_regs,
3524			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3525		},
3526	},
3527};
3528
3529static const struct i915_power_well_desc cnl_power_wells[] = {
3530	{
3531		.name = "always-on",
3532		.always_on = true,
3533		.domains = POWER_DOMAIN_MASK,
3534		.ops = &i9xx_always_on_power_well_ops,
3535		.id = DISP_PW_ID_NONE,
3536	},
3537	{
3538		.name = "power well 1",
3539		/* Handled by the DMC firmware */
3540		.always_on = true,
3541		.domains = 0,
3542		.ops = &hsw_power_well_ops,
3543		.id = SKL_DISP_PW_1,
3544		{
3545			.hsw.regs = &hsw_power_well_regs,
3546			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3547			.hsw.has_fuses = true,
3548		},
3549	},
3550	{
3551		.name = "AUX A",
3552		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3553		.ops = &hsw_power_well_ops,
3554		.id = DISP_PW_ID_NONE,
3555		{
3556			.hsw.regs = &hsw_power_well_regs,
3557			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3558		},
3559	},
3560	{
3561		.name = "AUX B",
3562		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3563		.ops = &hsw_power_well_ops,
3564		.id = DISP_PW_ID_NONE,
3565		{
3566			.hsw.regs = &hsw_power_well_regs,
3567			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3568		},
3569	},
3570	{
3571		.name = "AUX C",
3572		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3573		.ops = &hsw_power_well_ops,
3574		.id = DISP_PW_ID_NONE,
3575		{
3576			.hsw.regs = &hsw_power_well_regs,
3577			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3578		},
3579	},
3580	{
3581		.name = "AUX D",
3582		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3583		.ops = &hsw_power_well_ops,
3584		.id = DISP_PW_ID_NONE,
3585		{
3586			.hsw.regs = &hsw_power_well_regs,
3587			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3588		},
3589	},
3590	{
3591		.name = "DC off",
3592		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3593		.ops = &gen9_dc_off_power_well_ops,
3594		.id = SKL_DISP_DC_OFF,
3595	},
3596	{
3597		.name = "power well 2",
3598		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3599		.ops = &hsw_power_well_ops,
3600		.id = SKL_DISP_PW_2,
3601		{
3602			.hsw.regs = &hsw_power_well_regs,
3603			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3604			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3605			.hsw.has_vga = true,
3606			.hsw.has_fuses = true,
3607		},
3608	},
3609	{
3610		.name = "DDI A IO power well",
3611		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3612		.ops = &hsw_power_well_ops,
3613		.id = DISP_PW_ID_NONE,
3614		{
3615			.hsw.regs = &hsw_power_well_regs,
3616			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3617		},
3618	},
3619	{
3620		.name = "DDI B IO power well",
3621		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3622		.ops = &hsw_power_well_ops,
3623		.id = DISP_PW_ID_NONE,
3624		{
3625			.hsw.regs = &hsw_power_well_regs,
3626			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3627		},
3628	},
3629	{
3630		.name = "DDI C IO power well",
3631		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3632		.ops = &hsw_power_well_ops,
3633		.id = DISP_PW_ID_NONE,
3634		{
3635			.hsw.regs = &hsw_power_well_regs,
3636			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3637		},
3638	},
3639	{
3640		.name = "DDI D IO power well",
3641		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3642		.ops = &hsw_power_well_ops,
3643		.id = DISP_PW_ID_NONE,
3644		{
3645			.hsw.regs = &hsw_power_well_regs,
3646			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3647		},
3648	},
3649	{
3650		.name = "DDI F IO power well",
3651		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3652		.ops = &hsw_power_well_ops,
3653		.id = DISP_PW_ID_NONE,
3654		{
3655			.hsw.regs = &hsw_power_well_regs,
3656			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3657		},
3658	},
3659	{
3660		.name = "AUX F",
3661		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3662		.ops = &hsw_power_well_ops,
3663		.id = DISP_PW_ID_NONE,
3664		{
3665			.hsw.regs = &hsw_power_well_regs,
3666			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3667		},
3668	},
3669};
3670
3671static const struct i915_power_well_ops icl_aux_power_well_ops = {
3672	.sync_hw = hsw_power_well_sync_hw,
3673	.enable = icl_aux_power_well_enable,
3674	.disable = icl_aux_power_well_disable,
3675	.is_enabled = hsw_power_well_enabled,
3676};
3677
3678static const struct i915_power_well_regs icl_aux_power_well_regs = {
3679	.bios	= ICL_PWR_WELL_CTL_AUX1,
3680	.driver	= ICL_PWR_WELL_CTL_AUX2,
3681	.debug	= ICL_PWR_WELL_CTL_AUX4,
3682};
3683
3684static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3685	.bios	= ICL_PWR_WELL_CTL_DDI1,
3686	.driver	= ICL_PWR_WELL_CTL_DDI2,
3687	.debug	= ICL_PWR_WELL_CTL_DDI4,
3688};
3689
3690static const struct i915_power_well_desc icl_power_wells[] = {
3691	{
3692		.name = "always-on",
3693		.always_on = true,
3694		.domains = POWER_DOMAIN_MASK,
3695		.ops = &i9xx_always_on_power_well_ops,
3696		.id = DISP_PW_ID_NONE,
3697	},
3698	{
3699		.name = "power well 1",
3700		/* Handled by the DMC firmware */
3701		.always_on = true,
3702		.domains = 0,
3703		.ops = &hsw_power_well_ops,
3704		.id = SKL_DISP_PW_1,
3705		{
3706			.hsw.regs = &hsw_power_well_regs,
3707			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3708			.hsw.has_fuses = true,
3709		},
3710	},
3711	{
3712		.name = "DC off",
3713		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3714		.ops = &gen9_dc_off_power_well_ops,
3715		.id = SKL_DISP_DC_OFF,
3716	},
3717	{
3718		.name = "power well 2",
3719		.domains = ICL_PW_2_POWER_DOMAINS,
3720		.ops = &hsw_power_well_ops,
3721		.id = SKL_DISP_PW_2,
3722		{
3723			.hsw.regs = &hsw_power_well_regs,
3724			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3725			.hsw.has_fuses = true,
3726		},
3727	},
3728	{
3729		.name = "power well 3",
3730		.domains = ICL_PW_3_POWER_DOMAINS,
3731		.ops = &hsw_power_well_ops,
3732		.id = ICL_DISP_PW_3,
3733		{
3734			.hsw.regs = &hsw_power_well_regs,
3735			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3736			.hsw.irq_pipe_mask = BIT(PIPE_B),
3737			.hsw.has_vga = true,
3738			.hsw.has_fuses = true,
3739		},
3740	},
3741	{
3742		.name = "DDI A IO",
3743		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3744		.ops = &hsw_power_well_ops,
3745		.id = DISP_PW_ID_NONE,
3746		{
3747			.hsw.regs = &icl_ddi_power_well_regs,
3748			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3749		},
3750	},
3751	{
3752		.name = "DDI B IO",
3753		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3754		.ops = &hsw_power_well_ops,
3755		.id = DISP_PW_ID_NONE,
3756		{
3757			.hsw.regs = &icl_ddi_power_well_regs,
3758			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3759		},
3760	},
3761	{
3762		.name = "DDI C IO",
3763		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3764		.ops = &hsw_power_well_ops,
3765		.id = DISP_PW_ID_NONE,
3766		{
3767			.hsw.regs = &icl_ddi_power_well_regs,
3768			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3769		},
3770	},
3771	{
3772		.name = "DDI D IO",
3773		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3774		.ops = &hsw_power_well_ops,
3775		.id = DISP_PW_ID_NONE,
3776		{
3777			.hsw.regs = &icl_ddi_power_well_regs,
3778			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3779		},
3780	},
3781	{
3782		.name = "DDI E IO",
3783		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3784		.ops = &hsw_power_well_ops,
3785		.id = DISP_PW_ID_NONE,
3786		{
3787			.hsw.regs = &icl_ddi_power_well_regs,
3788			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3789		},
3790	},
3791	{
3792		.name = "DDI F IO",
3793		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3794		.ops = &hsw_power_well_ops,
3795		.id = DISP_PW_ID_NONE,
3796		{
3797			.hsw.regs = &icl_ddi_power_well_regs,
3798			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3799		},
3800	},
3801	{
3802		.name = "AUX A",
3803		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3804		.ops = &icl_aux_power_well_ops,
3805		.id = DISP_PW_ID_NONE,
3806		{
3807			.hsw.regs = &icl_aux_power_well_regs,
3808			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3809		},
3810	},
3811	{
3812		.name = "AUX B",
3813		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3814		.ops = &icl_aux_power_well_ops,
3815		.id = DISP_PW_ID_NONE,
3816		{
3817			.hsw.regs = &icl_aux_power_well_regs,
3818			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3819		},
3820	},
3821	{
3822		.name = "AUX C TC1",
3823		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3824		.ops = &icl_aux_power_well_ops,
3825		.id = DISP_PW_ID_NONE,
3826		{
3827			.hsw.regs = &icl_aux_power_well_regs,
3828			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3829			.hsw.is_tc_tbt = false,
3830		},
3831	},
3832	{
3833		.name = "AUX D TC2",
3834		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3835		.ops = &icl_aux_power_well_ops,
3836		.id = DISP_PW_ID_NONE,
3837		{
3838			.hsw.regs = &icl_aux_power_well_regs,
3839			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3840			.hsw.is_tc_tbt = false,
3841		},
3842	},
3843	{
3844		.name = "AUX E TC3",
3845		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3846		.ops = &icl_aux_power_well_ops,
3847		.id = DISP_PW_ID_NONE,
3848		{
3849			.hsw.regs = &icl_aux_power_well_regs,
3850			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3851			.hsw.is_tc_tbt = false,
3852		},
3853	},
3854	{
3855		.name = "AUX F TC4",
3856		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3857		.ops = &icl_aux_power_well_ops,
3858		.id = DISP_PW_ID_NONE,
3859		{
3860			.hsw.regs = &icl_aux_power_well_regs,
3861			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3862			.hsw.is_tc_tbt = false,
3863		},
3864	},
3865	{
3866		.name = "AUX C TBT1",
3867		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3868		.ops = &icl_aux_power_well_ops,
3869		.id = DISP_PW_ID_NONE,
3870		{
3871			.hsw.regs = &icl_aux_power_well_regs,
3872			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3873			.hsw.is_tc_tbt = true,
3874		},
3875	},
3876	{
3877		.name = "AUX D TBT2",
3878		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3879		.ops = &icl_aux_power_well_ops,
3880		.id = DISP_PW_ID_NONE,
3881		{
3882			.hsw.regs = &icl_aux_power_well_regs,
3883			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3884			.hsw.is_tc_tbt = true,
3885		},
3886	},
3887	{
3888		.name = "AUX E TBT3",
3889		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3890		.ops = &icl_aux_power_well_ops,
3891		.id = DISP_PW_ID_NONE,
3892		{
3893			.hsw.regs = &icl_aux_power_well_regs,
3894			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3895			.hsw.is_tc_tbt = true,
3896		},
3897	},
3898	{
3899		.name = "AUX F TBT4",
3900		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3901		.ops = &icl_aux_power_well_ops,
3902		.id = DISP_PW_ID_NONE,
3903		{
3904			.hsw.regs = &icl_aux_power_well_regs,
3905			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3906			.hsw.is_tc_tbt = true,
3907		},
3908	},
3909	{
3910		.name = "power well 4",
3911		.domains = ICL_PW_4_POWER_DOMAINS,
3912		.ops = &hsw_power_well_ops,
3913		.id = DISP_PW_ID_NONE,
3914		{
3915			.hsw.regs = &hsw_power_well_regs,
3916			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3917			.hsw.has_fuses = true,
3918			.hsw.irq_pipe_mask = BIT(PIPE_C),
3919		},
3920	},
3921};
3922
3923static void
3924tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
3925{
3926	u8 tries = 0;
3927	int ret;
3928
3929	while (1) {
3930		u32 low_val = 0, high_val;
3931
3932		if (block)
3933			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
3934		else
3935			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
3936
3937		/*
3938		 * The spec states that we should time out the request after
3939		 * 200us, but the function below will time out after 500us.
3940		 */
3941		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
3942					     &high_val);
3943		if (ret == 0) {
3944			if (block &&
3945			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
3946				ret = -EIO;
3947			else
3948				break;
3949		}
3950
3951		if (++tries == 3)
3952			break;
3953
3954		if (ret == -EAGAIN)
3955			msleep(1);
3956	}
3957
3958	if (ret)
3959		drm_err(&i915->drm, "TC cold %sblock failed\n",
3960			block ? "" : "un");
3961	else
3962		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
3963			    block ? "" : "un");
3964}
3965
3966static void
3967tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
3968				  struct i915_power_well *power_well)
3969{
3970	tgl_tc_cold_request(i915, true);
3971}
3972
3973static void
3974tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
3975				   struct i915_power_well *power_well)
3976{
3977	tgl_tc_cold_request(i915, false);
3978}
3979
3980static void
3981tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
3982				   struct i915_power_well *power_well)
3983{
3984	if (power_well->count > 0)
3985		tgl_tc_cold_off_power_well_enable(i915, power_well);
3986	else
3987		tgl_tc_cold_off_power_well_disable(i915, power_well);
3988}
3989
3990static bool
3991tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
3992				      struct i915_power_well *power_well)
3993{
3994	/*
3995	 * Not the correct implementation, but there is no way to just read it
3996	 * back from PCODE, so return the refcount to avoid state mismatch errors.
3997	 */
3998	return power_well->count;
3999}
4000
4001static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
4002	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
4003	.enable = tgl_tc_cold_off_power_well_enable,
4004	.disable = tgl_tc_cold_off_power_well_disable,
4005	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
4006};
4007
4008static const struct i915_power_well_desc tgl_power_wells[] = {
4009	{
4010		.name = "always-on",
4011		.always_on = true,
4012		.domains = POWER_DOMAIN_MASK,
4013		.ops = &i9xx_always_on_power_well_ops,
4014		.id = DISP_PW_ID_NONE,
4015	},
4016	{
4017		.name = "power well 1",
4018		/* Handled by the DMC firmware */
4019		.always_on = true,
4020		.domains = 0,
4021		.ops = &hsw_power_well_ops,
4022		.id = SKL_DISP_PW_1,
4023		{
4024			.hsw.regs = &hsw_power_well_regs,
4025			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4026			.hsw.has_fuses = true,
4027		},
4028	},
4029	{
4030		.name = "DC off",
4031		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
4032		.ops = &gen9_dc_off_power_well_ops,
4033		.id = SKL_DISP_DC_OFF,
4034	},
4035	{
4036		.name = "power well 2",
4037		.domains = TGL_PW_2_POWER_DOMAINS,
4038		.ops = &hsw_power_well_ops,
4039		.id = SKL_DISP_PW_2,
4040		{
4041			.hsw.regs = &hsw_power_well_regs,
4042			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4043			.hsw.has_fuses = true,
4044		},
4045	},
4046	{
4047		.name = "power well 3",
4048		.domains = TGL_PW_3_POWER_DOMAINS,
4049		.ops = &hsw_power_well_ops,
4050		.id = ICL_DISP_PW_3,
4051		{
4052			.hsw.regs = &hsw_power_well_regs,
4053			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4054			.hsw.irq_pipe_mask = BIT(PIPE_B),
4055			.hsw.has_vga = true,
4056			.hsw.has_fuses = true,
4057		},
4058	},
4059	{
4060		.name = "DDI A IO",
4061		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4062		.ops = &hsw_power_well_ops,
4063		.id = DISP_PW_ID_NONE,
4064		{
4065			.hsw.regs = &icl_ddi_power_well_regs,
4066			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4067		}
4068	},
4069	{
4070		.name = "DDI B IO",
4071		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4072		.ops = &hsw_power_well_ops,
4073		.id = DISP_PW_ID_NONE,
4074		{
4075			.hsw.regs = &icl_ddi_power_well_regs,
4076			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4077		}
4078	},
4079	{
4080		.name = "DDI C IO",
4081		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
4082		.ops = &hsw_power_well_ops,
4083		.id = DISP_PW_ID_NONE,
4084		{
4085			.hsw.regs = &icl_ddi_power_well_regs,
4086			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4087		}
4088	},
4089	{
4090		.name = "DDI D TC1 IO",
4091		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
4092		.ops = &hsw_power_well_ops,
4093		.id = DISP_PW_ID_NONE,
4094		{
4095			.hsw.regs = &icl_ddi_power_well_regs,
4096			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4097		},
4098	},
4099	{
4100		.name = "DDI E TC2 IO",
4101		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
4102		.ops = &hsw_power_well_ops,
4103		.id = DISP_PW_ID_NONE,
4104		{
4105			.hsw.regs = &icl_ddi_power_well_regs,
4106			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4107		},
4108	},
4109	{
4110		.name = "DDI F TC3 IO",
4111		.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
4112		.ops = &hsw_power_well_ops,
4113		.id = DISP_PW_ID_NONE,
4114		{
4115			.hsw.regs = &icl_ddi_power_well_regs,
4116			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4117		},
4118	},
4119	{
4120		.name = "DDI G TC4 IO",
4121		.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
4122		.ops = &hsw_power_well_ops,
4123		.id = DISP_PW_ID_NONE,
4124		{
4125			.hsw.regs = &icl_ddi_power_well_regs,
4126			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4127		},
4128	},
4129	{
4130		.name = "DDI H TC5 IO",
4131		.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
4132		.ops = &hsw_power_well_ops,
4133		.id = DISP_PW_ID_NONE,
4134		{
4135			.hsw.regs = &icl_ddi_power_well_regs,
4136			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4137		},
4138	},
4139	{
4140		.name = "DDI I TC6 IO",
4141		.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
4142		.ops = &hsw_power_well_ops,
4143		.id = DISP_PW_ID_NONE,
4144		{
4145			.hsw.regs = &icl_ddi_power_well_regs,
4146			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4147		},
4148	},
4149	{
4150		.name = "TC cold off",
4151		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4152		.ops = &tgl_tc_cold_off_ops,
4153		.id = DISP_PW_ID_NONE,
4154	},
4155	{
4156		.name = "AUX A",
4157		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4158		.ops = &icl_aux_power_well_ops,
4159		.id = DISP_PW_ID_NONE,
4160		{
4161			.hsw.regs = &icl_aux_power_well_regs,
4162			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4163		},
4164	},
4165	{
4166		.name = "AUX B",
4167		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4168		.ops = &icl_aux_power_well_ops,
4169		.id = DISP_PW_ID_NONE,
4170		{
4171			.hsw.regs = &icl_aux_power_well_regs,
4172			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4173		},
4174	},
4175	{
4176		.name = "AUX C",
4177		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4178		.ops = &icl_aux_power_well_ops,
4179		.id = DISP_PW_ID_NONE,
4180		{
4181			.hsw.regs = &icl_aux_power_well_regs,
4182			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4183		},
4184	},
4185	{
4186		.name = "AUX D TC1",
4187		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4188		.ops = &icl_aux_power_well_ops,
4189		.id = DISP_PW_ID_NONE,
4190		{
4191			.hsw.regs = &icl_aux_power_well_regs,
4192			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4193			.hsw.is_tc_tbt = false,
4194		},
4195	},
4196	{
4197		.name = "AUX E TC2",
4198		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4199		.ops = &icl_aux_power_well_ops,
4200		.id = DISP_PW_ID_NONE,
4201		{
4202			.hsw.regs = &icl_aux_power_well_regs,
4203			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4204			.hsw.is_tc_tbt = false,
4205		},
4206	},
4207	{
4208		.name = "AUX F TC3",
4209		.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4210		.ops = &icl_aux_power_well_ops,
4211		.id = DISP_PW_ID_NONE,
4212		{
4213			.hsw.regs = &icl_aux_power_well_regs,
4214			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4215			.hsw.is_tc_tbt = false,
4216		},
4217	},
4218	{
4219		.name = "AUX G TC4",
4220		.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4221		.ops = &icl_aux_power_well_ops,
4222		.id = DISP_PW_ID_NONE,
4223		{
4224			.hsw.regs = &icl_aux_power_well_regs,
4225			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4226			.hsw.is_tc_tbt = false,
4227		},
4228	},
4229	{
4230		.name = "AUX H TC5",
4231		.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4232		.ops = &icl_aux_power_well_ops,
4233		.id = DISP_PW_ID_NONE,
4234		{
4235			.hsw.regs = &icl_aux_power_well_regs,
4236			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4237			.hsw.is_tc_tbt = false,
4238		},
4239	},
4240	{
4241		.name = "AUX I TC6",
4242		.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4243		.ops = &icl_aux_power_well_ops,
4244		.id = DISP_PW_ID_NONE,
4245		{
4246			.hsw.regs = &icl_aux_power_well_regs,
4247			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4248			.hsw.is_tc_tbt = false,
4249		},
4250	},
4251	{
4252		.name = "AUX D TBT1",
4253		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4254		.ops = &icl_aux_power_well_ops,
4255		.id = DISP_PW_ID_NONE,
4256		{
4257			.hsw.regs = &icl_aux_power_well_regs,
4258			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4259			.hsw.is_tc_tbt = true,
4260		},
4261	},
4262	{
4263		.name = "AUX E TBT2",
4264		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4265		.ops = &icl_aux_power_well_ops,
4266		.id = DISP_PW_ID_NONE,
4267		{
4268			.hsw.regs = &icl_aux_power_well_regs,
4269			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4270			.hsw.is_tc_tbt = true,
4271		},
4272	},
4273	{
4274		.name = "AUX F TBT3",
4275		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4276		.ops = &icl_aux_power_well_ops,
4277		.id = DISP_PW_ID_NONE,
4278		{
4279			.hsw.regs = &icl_aux_power_well_regs,
4280			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4281			.hsw.is_tc_tbt = true,
4282		},
4283	},
4284	{
4285		.name = "AUX G TBT4",
4286		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4287		.ops = &icl_aux_power_well_ops,
4288		.id = DISP_PW_ID_NONE,
4289		{
4290			.hsw.regs = &icl_aux_power_well_regs,
4291			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4292			.hsw.is_tc_tbt = true,
4293		},
4294	},
4295	{
4296		.name = "AUX H TBT5",
4297		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4298		.ops = &icl_aux_power_well_ops,
4299		.id = DISP_PW_ID_NONE,
4300		{
4301			.hsw.regs = &icl_aux_power_well_regs,
4302			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4303			.hsw.is_tc_tbt = true,
4304		},
4305	},
4306	{
4307		.name = "AUX I TBT6",
4308		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4309		.ops = &icl_aux_power_well_ops,
4310		.id = DISP_PW_ID_NONE,
4311		{
4312			.hsw.regs = &icl_aux_power_well_regs,
4313			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4314			.hsw.is_tc_tbt = true,
4315		},
4316	},
4317	{
4318		.name = "power well 4",
4319		.domains = TGL_PW_4_POWER_DOMAINS,
4320		.ops = &hsw_power_well_ops,
4321		.id = DISP_PW_ID_NONE,
4322		{
4323			.hsw.regs = &hsw_power_well_regs,
4324			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4325			.hsw.has_fuses = true,
4326			.hsw.irq_pipe_mask = BIT(PIPE_C),
4327		}
4328	},
4329	{
4330		.name = "power well 5",
4331		.domains = TGL_PW_5_POWER_DOMAINS,
4332		.ops = &hsw_power_well_ops,
4333		.id = DISP_PW_ID_NONE,
4334		{
4335			.hsw.regs = &hsw_power_well_regs,
4336			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4337			.hsw.has_fuses = true,
4338			.hsw.irq_pipe_mask = BIT(PIPE_D),
4339		},
4340	},
4341};
4342
4343static const struct i915_power_well_desc rkl_power_wells[] = {
4344	{
4345		.name = "always-on",
4346		.always_on = true,
4347		.domains = POWER_DOMAIN_MASK,
4348		.ops = &i9xx_always_on_power_well_ops,
4349		.id = DISP_PW_ID_NONE,
4350	},
4351	{
4352		.name = "power well 1",
4353		/* Handled by the DMC firmware */
4354		.always_on = true,
4355		.domains = 0,
4356		.ops = &hsw_power_well_ops,
4357		.id = SKL_DISP_PW_1,
4358		{
4359			.hsw.regs = &hsw_power_well_regs,
4360			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4361			.hsw.has_fuses = true,
4362		},
4363	},
4364	{
4365		.name = "DC off",
4366		.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
4367		.ops = &gen9_dc_off_power_well_ops,
4368		.id = SKL_DISP_DC_OFF,
4369	},
4370	{
4371		.name = "power well 3",
4372		.domains = RKL_PW_3_POWER_DOMAINS,
4373		.ops = &hsw_power_well_ops,
4374		.id = ICL_DISP_PW_3,
4375		{
4376			.hsw.regs = &hsw_power_well_regs,
4377			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4378			.hsw.irq_pipe_mask = BIT(PIPE_B),
4379			.hsw.has_vga = true,
4380			.hsw.has_fuses = true,
4381		},
4382	},
4383	{
4384		.name = "power well 4",
4385		.domains = RKL_PW_4_POWER_DOMAINS,
4386		.ops = &hsw_power_well_ops,
4387		.id = DISP_PW_ID_NONE,
4388		{
4389			.hsw.regs = &hsw_power_well_regs,
4390			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4391			.hsw.has_fuses = true,
4392			.hsw.irq_pipe_mask = BIT(PIPE_C),
4393		}
4394	},
4395	{
4396		.name = "DDI A IO",
4397		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4398		.ops = &hsw_power_well_ops,
4399		.id = DISP_PW_ID_NONE,
4400		{
4401			.hsw.regs = &icl_ddi_power_well_regs,
4402			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4403		}
4404	},
4405	{
4406		.name = "DDI B IO",
4407		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4408		.ops = &hsw_power_well_ops,
4409		.id = DISP_PW_ID_NONE,
4410		{
4411			.hsw.regs = &icl_ddi_power_well_regs,
4412			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4413		}
4414	},
4415	{
4416		.name = "DDI D TC1 IO",
4417		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
4418		.ops = &hsw_power_well_ops,
4419		.id = DISP_PW_ID_NONE,
4420		{
4421			.hsw.regs = &icl_ddi_power_well_regs,
4422			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4423		},
4424	},
4425	{
4426		.name = "DDI E TC2 IO",
4427		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
4428		.ops = &hsw_power_well_ops,
4429		.id = DISP_PW_ID_NONE,
4430		{
4431			.hsw.regs = &icl_ddi_power_well_regs,
4432			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4433		},
4434	},
4435	{
4436		.name = "AUX A",
4437		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
4438		.ops = &icl_aux_power_well_ops,
4439		.id = DISP_PW_ID_NONE,
4440		{
4441			.hsw.regs = &icl_aux_power_well_regs,
4442			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4443		},
4444	},
4445	{
4446		.name = "AUX B",
4447		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
4448		.ops = &icl_aux_power_well_ops,
4449		.id = DISP_PW_ID_NONE,
4450		{
4451			.hsw.regs = &icl_aux_power_well_regs,
4452			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4453		},
4454	},
4455	{
4456		.name = "AUX D TC1",
4457		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4458		.ops = &icl_aux_power_well_ops,
4459		.id = DISP_PW_ID_NONE,
4460		{
4461			.hsw.regs = &icl_aux_power_well_regs,
4462			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4463		},
4464	},
4465	{
4466		.name = "AUX E TC2",
4467		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4468		.ops = &icl_aux_power_well_ops,
4469		.id = DISP_PW_ID_NONE,
4470		{
4471			.hsw.regs = &icl_aux_power_well_regs,
4472			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4473		},
4474	},
4475};
4476
4477static int
4478sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4479				   int disable_power_well)
4480{
4481	if (disable_power_well >= 0)
4482		return !!disable_power_well;
4483
4484	return 1;
4485}
4486
4487static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4488			       int enable_dc)
4489{
4490	u32 mask;
4491	int requested_dc;
4492	int max_dc;
4493
4494	if (INTEL_GEN(dev_priv) >= 12) {
4495		max_dc = 4;
4496		/*
4497		 * DC9 has a separate HW flow from the rest of the DC states,
4498		 * not depending on the DMC firmware. It's needed by system
4499		 * suspend/resume, so allow it unconditionally.
4500		 */
4501		mask = DC_STATE_EN_DC9;
4502	} else if (IS_GEN(dev_priv, 11)) {
4503		max_dc = 2;
4504		mask = DC_STATE_EN_DC9;
4505	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4506		max_dc = 2;
4507		mask = 0;
4508	} else if (IS_GEN9_LP(dev_priv)) {
4509		max_dc = 1;
4510		mask = DC_STATE_EN_DC9;
4511	} else {
4512		max_dc = 0;
4513		mask = 0;
4514	}
4515
4516	if (!dev_priv->params.disable_power_well)
4517		max_dc = 0;
4518
4519	if (enable_dc >= 0 && enable_dc <= max_dc) {
4520		requested_dc = enable_dc;
4521	} else if (enable_dc == -1) {
4522		requested_dc = max_dc;
4523	} else if (enable_dc > max_dc && enable_dc <= 4) {
4524		drm_dbg_kms(&dev_priv->drm,
4525			    "Adjusting requested max DC state (%d->%d)\n",
4526			    enable_dc, max_dc);
4527		requested_dc = max_dc;
4528	} else {
4529		drm_err(&dev_priv->drm,
4530			"Unexpected value for enable_dc (%d)\n", enable_dc);
4531		requested_dc = max_dc;
4532	}
4533
4534	switch (requested_dc) {
4535	case 4:
4536		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4537		break;
4538	case 3:
4539		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4540		break;
4541	case 2:
4542		mask |= DC_STATE_EN_UPTO_DC6;
4543		break;
4544	case 1:
4545		mask |= DC_STATE_EN_UPTO_DC5;
4546		break;
4547	}
4548
4549	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
4550
4551	return mask;
4552}
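
/*
 * Worked example of the computation above (derived directly from the
 * branches, assuming disable_power_well stays enabled): a gen12 part with
 * the default enable_dc=-1 gets max_dc = 4, so the returned mask is
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; the same
 * part booted with i915.enable_dc=2 instead returns
 * DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC6.
 */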
4553
4554static int
4555__set_power_wells(struct i915_power_domains *power_domains,
4556		  const struct i915_power_well_desc *power_well_descs,
4557		  int power_well_count)
4558{
4559	struct drm_i915_private *i915 = container_of(power_domains,
4560						     struct drm_i915_private,
4561						     power_domains);
4562	u64 power_well_ids = 0;
4563	int i;
4564
4565	power_domains->power_well_count = power_well_count;
4566	power_domains->power_wells =
4567				kcalloc(power_well_count,
4568					sizeof(*power_domains->power_wells),
4569					GFP_KERNEL);
4570	if (!power_domains->power_wells)
4571		return -ENOMEM;
4572
4573	for (i = 0; i < power_well_count; i++) {
4574		enum i915_power_well_id id = power_well_descs[i].id;
4575
4576		power_domains->power_wells[i].desc = &power_well_descs[i];
4577
4578		if (id == DISP_PW_ID_NONE)
4579			continue;
4580
4581		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
4582		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
4583		power_well_ids |= BIT_ULL(id);
4584	}
4585
4586	return 0;
4587}
4588
4589#define set_power_wells(power_domains, __power_well_descs) \
4590	__set_power_wells(power_domains, __power_well_descs, \
4591			  ARRAY_SIZE(__power_well_descs))
4592
4593/**
4594 * intel_power_domains_init - initializes the power domain structures
4595 * @dev_priv: i915 device instance
4596 *
4597 * Initializes the power domain structures for @dev_priv depending upon the
4598 * supported platform.
4599 */
4600int intel_power_domains_init(struct drm_i915_private *dev_priv)
4601{
4602	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4603	int err;
4604
4605	dev_priv->params.disable_power_well =
4606		sanitize_disable_power_well_option(dev_priv,
4607						   dev_priv->params.disable_power_well);
4608	dev_priv->csr.allowed_dc_mask =
4609		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
4610
4611	dev_priv->csr.target_dc_state =
4612		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4613
4614	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4615
4616	mutex_init(&power_domains->lock);
4617
4618	INIT_DELAYED_WORK(&power_domains->async_put_work,
4619			  intel_display_power_put_async_work);
4620
4621	/*
4622	 * The enabling order will be from lower to higher indexed wells,
4623	 * the disabling order is reversed.
4624	 */
4625	if (IS_ROCKETLAKE(dev_priv)) {
4626		err = set_power_wells(power_domains, rkl_power_wells);
4627	} else if (IS_GEN(dev_priv, 12)) {
4628		err = set_power_wells(power_domains, tgl_power_wells);
4629	} else if (IS_GEN(dev_priv, 11)) {
4630		err = set_power_wells(power_domains, icl_power_wells);
4631	} else if (IS_CANNONLAKE(dev_priv)) {
4632		err = set_power_wells(power_domains, cnl_power_wells);
4633
4634		/*
4635		 * DDI and AUX IO are getting enabled for all ports
4636		 * regardless of their presence or use. So, in order to
4637		 * avoid timeouts, let's remove them from the list for
4638		 * the SKUs without port F.
4639		 */
4640		if (!IS_CNL_WITH_PORT_F(dev_priv))
4641			power_domains->power_well_count -= 2;
4642	} else if (IS_GEMINILAKE(dev_priv)) {
4643		err = set_power_wells(power_domains, glk_power_wells);
4644	} else if (IS_BROXTON(dev_priv)) {
4645		err = set_power_wells(power_domains, bxt_power_wells);
4646	} else if (IS_GEN9_BC(dev_priv)) {
4647		err = set_power_wells(power_domains, skl_power_wells);
4648	} else if (IS_CHERRYVIEW(dev_priv)) {
4649		err = set_power_wells(power_domains, chv_power_wells);
4650	} else if (IS_BROADWELL(dev_priv)) {
4651		err = set_power_wells(power_domains, bdw_power_wells);
4652	} else if (IS_HASWELL(dev_priv)) {
4653		err = set_power_wells(power_domains, hsw_power_wells);
4654	} else if (IS_VALLEYVIEW(dev_priv)) {
4655		err = set_power_wells(power_domains, vlv_power_wells);
4656	} else if (IS_I830(dev_priv)) {
4657		err = set_power_wells(power_domains, i830_power_wells);
4658	} else {
4659		err = set_power_wells(power_domains, i9xx_always_on_power_well);
4660	}
4661
4662	return err;
4663}
4664
4665/**
4666 * intel_power_domains_cleanup - clean up power domains resources
4667 * @dev_priv: i915 device instance
4668 *
4669 * Release any resources acquired by intel_power_domains_init()
4670 */
4671void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4672{
4673	kfree(dev_priv->power_domains.power_wells);
4674}
4675
4676static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4677{
4678	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4679	struct i915_power_well *power_well;
4680
4681	mutex_lock(&power_domains->lock);
4682	for_each_power_well(dev_priv, power_well) {
4683		power_well->desc->ops->sync_hw(dev_priv, power_well);
4684		power_well->hw_enabled =
4685			power_well->desc->ops->is_enabled(dev_priv, power_well);
4686	}
4687	mutex_unlock(&power_domains->lock);
4688}
4689
4690static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
4691				enum dbuf_slice slice, bool enable)
4692{
4693	i915_reg_t reg = DBUF_CTL_S(slice);
4694	bool state;
4695	u32 val;
4696
4697	val = intel_de_read(dev_priv, reg);
4698	if (enable)
4699		val |= DBUF_POWER_REQUEST;
4700	else
4701		val &= ~DBUF_POWER_REQUEST;
4702	intel_de_write(dev_priv, reg, val);
4703	intel_de_posting_read(dev_priv, reg);
4704	udelay(10);
4705
4706	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
4707	drm_WARN(&dev_priv->drm, enable != state,
4708		 "DBuf slice %d power %s timeout!\n",
4709		 slice, enable ? "enable" : "disable");
4710}
4711
4712void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
4713			     u8 req_slices)
4714{
4715	int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4716	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4717	enum dbuf_slice slice;
4718
4719	drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
4720		 "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
4721		 req_slices, num_slices);
4722
4723	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
4724		    req_slices);
4725
4726	/*
4727	 * This might run in parallel to gen9_dc_off_power_well_enable
4728	 * being called from intel_dp_detect for instance, which would
4729	 * trigger an assertion due to a race condition:
4730	 * gen9_assert_dbuf_enabled might preempt this point after the
4731	 * registers were already updated, while dev_priv was not.
4732	 */
4733	mutex_lock(&power_domains->lock);
4734
4735	for (slice = DBUF_S1; slice < num_slices; slice++)
4736		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
4737
4738	dev_priv->dbuf.enabled_slices = req_slices;
4739
4740	mutex_unlock(&power_domains->lock);
4741}
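
/*
 * Hypothetical caller sketch: on a two-slice gen11/gen12 part the
 * watermark code could request both slices as below (DBUF_S2 is assumed
 * to follow DBUF_S1 in enum dbuf_slice):
 *
 *	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | BIT(DBUF_S2));
 */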
4742
4743static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4744{
4745	dev_priv->dbuf.enabled_slices =
4746		intel_enabled_dbuf_slices_mask(dev_priv);
4747
4748	/*
4749	 * Just power up at least one slice; we will figure out later
4750	 * which slices we have and which ones we need.
4751	 */
4752	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
4753				dev_priv->dbuf.enabled_slices);
4754}
4755
4756static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4757{
4758	gen9_dbuf_slices_update(dev_priv, 0);
4759}
4760
4761static void icl_mbus_init(struct drm_i915_private *dev_priv)
4762{
4763	unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
4764	u32 mask, val, i;
4765
4766	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
4767		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
4768		MBUS_ABOX_B_CREDIT_MASK |
4769		MBUS_ABOX_BW_CREDIT_MASK;
4770	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4771		MBUS_ABOX_BT_CREDIT_POOL2(16) |
4772		MBUS_ABOX_B_CREDIT(1) |
4773		MBUS_ABOX_BW_CREDIT(1);
4774
4775	/*
4776	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
4777	 * expect us to program the abox_ctl0 register as well, even though
4778	 * we don't have to program other instance-0 registers like BW_BUDDY.
4779	 */
4780	if (IS_GEN(dev_priv, 12))
4781		abox_regs |= BIT(0);
4782
4783	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
4784		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
4785}
4786
4787static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4788{
4789	u32 val = intel_de_read(dev_priv, LCPLL_CTL);
4790
4791	/*
4792	 * The LCPLL register should be turned on by the BIOS. For now
4793	 * let's just check its state and print errors in case
4794	 * something is wrong.  Don't even try to turn it on.
4795	 */
4796
4797	if (val & LCPLL_CD_SOURCE_FCLK)
4798		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
4799
4800	if (val & LCPLL_PLL_DISABLE)
4801		drm_err(&dev_priv->drm, "LCPLL is disabled\n");
4802
4803	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4804		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
4805}
4806
4807static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4808{
4809	struct drm_device *dev = &dev_priv->drm;
4810	struct intel_crtc *crtc;
4811
4812	for_each_intel_crtc(dev, crtc)
4813		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4814				pipe_name(crtc->pipe));
4815
4816	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
4817			"Display power well on\n");
4818	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
4819			"SPLL enabled\n");
4820	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4821			"WRPLL1 enabled\n");
4822	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4823			"WRPLL2 enabled\n");
4824	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
4825			"Panel power on\n");
4826	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4827			"CPU PWM1 enabled\n");
4828	if (IS_HASWELL(dev_priv))
4829		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4830				"CPU PWM2 enabled\n");
4831	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4832			"PCH PWM1 enabled\n");
4833	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4834			"Utility pin enabled\n");
4835	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
4836			"PCH GTC enabled\n");
4837
4838	/*
4839	 * In theory we can still leave IRQs enabled, as long as only the HPD
4840	 * interrupts remain enabled. We used to check for that, but since it's
4841	 * gen-specific and since we only disable LCPLL after we fully disable
4842	 * the interrupts, the check below should be enough.
4843	 */
4844	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4845}
4846
4847static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4848{
4849	if (IS_HASWELL(dev_priv))
4850		return intel_de_read(dev_priv, D_COMP_HSW);
4851	else
4852		return intel_de_read(dev_priv, D_COMP_BDW);
4853}
4854
4855static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4856{
4857	if (IS_HASWELL(dev_priv)) {
4858		if (sandybridge_pcode_write(dev_priv,
4859					    GEN6_PCODE_WRITE_D_COMP, val))
4860			drm_dbg_kms(&dev_priv->drm,
4861				    "Failed to write to D_COMP\n");
4862	} else {
4863		intel_de_write(dev_priv, D_COMP_BDW, val);
4864		intel_de_posting_read(dev_priv, D_COMP_BDW);
4865	}
4866}
4867
4868/*
4869 * This function implements pieces of two sequences from BSpec:
4870 * - Sequence for display software to disable LCPLL
4871 * - Sequence for display software to allow package C8+
4872 * The steps implemented here are just the steps that actually touch the LCPLL
4873 * register. Callers should take care of disabling all the display engine
4874 * functions, doing the mode unset, fixing interrupts, etc.
4875 */
4876static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4877			      bool switch_to_fclk, bool allow_power_down)
4878{
4879	u32 val;
4880
4881	assert_can_disable_lcpll(dev_priv);
4882
4883	val = intel_de_read(dev_priv, LCPLL_CTL);
4884
4885	if (switch_to_fclk) {
4886		val |= LCPLL_CD_SOURCE_FCLK;
4887		intel_de_write(dev_priv, LCPLL_CTL, val);
4888
4889		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4890				LCPLL_CD_SOURCE_FCLK_DONE, 1))
4891			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
4892
4893		val = intel_de_read(dev_priv, LCPLL_CTL);
4894	}
4895
4896	val |= LCPLL_PLL_DISABLE;
4897	intel_de_write(dev_priv, LCPLL_CTL, val);
4898	intel_de_posting_read(dev_priv, LCPLL_CTL);
4899
4900	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4901		drm_err(&dev_priv->drm, "LCPLL still locked\n");
4902
4903	val = hsw_read_dcomp(dev_priv);
4904	val |= D_COMP_COMP_DISABLE;
4905	hsw_write_dcomp(dev_priv, val);
4906	ndelay(100);
4907
4908	if (wait_for((hsw_read_dcomp(dev_priv) &
4909		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4910		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
4911
4912	if (allow_power_down) {
4913		val = intel_de_read(dev_priv, LCPLL_CTL);
4914		val |= LCPLL_POWER_DOWN_ALLOW;
4915		intel_de_write(dev_priv, LCPLL_CTL, val);
4916		intel_de_posting_read(dev_priv, LCPLL_CTL);
4917	}
4918}
4919
4920/*
4921 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4922 * source.
4923 */
4924static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4925{
4926	u32 val;
4927
4928	val = intel_de_read(dev_priv, LCPLL_CTL);
4929
4930	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4931		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4932		return;
4933
4934	/*
4935	 * Make sure we're not in PC8 state before disabling PC8, otherwise
4936	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4937	 */
4938	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4939
4940	if (val & LCPLL_POWER_DOWN_ALLOW) {
4941		val &= ~LCPLL_POWER_DOWN_ALLOW;
4942		intel_de_write(dev_priv, LCPLL_CTL, val);
4943		intel_de_posting_read(dev_priv, LCPLL_CTL);
4944	}
4945
4946	val = hsw_read_dcomp(dev_priv);
4947	val |= D_COMP_COMP_FORCE;
4948	val &= ~D_COMP_COMP_DISABLE;
4949	hsw_write_dcomp(dev_priv, val);
4950
4951	val = intel_de_read(dev_priv, LCPLL_CTL);
4952	val &= ~LCPLL_PLL_DISABLE;
4953	intel_de_write(dev_priv, LCPLL_CTL, val);
4954
4955	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4956		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
4957
4958	if (val & LCPLL_CD_SOURCE_FCLK) {
4959		val = intel_de_read(dev_priv, LCPLL_CTL);
4960		val &= ~LCPLL_CD_SOURCE_FCLK;
4961		intel_de_write(dev_priv, LCPLL_CTL, val);
4962
4963		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4964				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4965			drm_err(&dev_priv->drm,
4966				"Switching back to LCPLL failed\n");
4967	}
4968
4969	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4970
4971	intel_update_cdclk(dev_priv);
4972	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4973}
4974
4975/*
4976 * Package states C8 and deeper are really deep PC states that can only be
4977 * reached when all the devices on the system allow it, so even if the graphics
4978 * device allows PC8+, it doesn't mean the system will actually get to these
4979 * states. Our driver only allows PC8+ when going into runtime PM.
4980 *
4981 * The requirements for PC8+ are that all the outputs are disabled, the power
4982 * well is disabled and most interrupts are disabled, and these are also
4983 * requirements for runtime PM. When these conditions are met, we manually do
4984 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4985 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
4986 * hang the machine.
4987 *
4988 * When we really reach PC8 or deeper states (not just when we allow it) we lose
4989 * the state of some registers, so when we come back from PC8+ we need to
4990 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4991 * need to take care of the registers kept by RC6. Notice that this happens even
4992 * if we don't put the device in PCI D3 state (which is what currently happens
4993 * because of the runtime PM support).
4994 *
4995 * For more, read "Display Sequences for Package C8" in the hardware
4996 * documentation.
4997 */
4998static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4999{
5000	u32 val;
5001
5002	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
5003
5004	if (HAS_PCH_LPT_LP(dev_priv)) {
5005		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5006		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5007		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5008	}
5009
5010	lpt_disable_clkout_dp(dev_priv);
5011	hsw_disable_lcpll(dev_priv, true, true);
5012}
5013
5014static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
5015{
5016	u32 val;
5017
5018	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
5019
5020	hsw_restore_lcpll(dev_priv);
5021	intel_init_pch_refclk(dev_priv);
5022
5023	if (HAS_PCH_LPT_LP(dev_priv)) {
5024		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5025		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
5026		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5027	}
5028}
5029
5030static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
5031				      bool enable)
5032{
5033	i915_reg_t reg;
5034	u32 reset_bits, val;
5035
5036	if (IS_IVYBRIDGE(dev_priv)) {
5037		reg = GEN7_MSG_CTL;
5038		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
5039	} else {
5040		reg = HSW_NDE_RSTWRN_OPT;
5041		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
5042	}
5043
5044	val = intel_de_read(dev_priv, reg);
5045
5046	if (enable)
5047		val |= reset_bits;
5048	else
5049		val &= ~reset_bits;
5050
5051	intel_de_write(dev_priv, reg, val);
5052}
5053
5054static void skl_display_core_init(struct drm_i915_private *dev_priv,
5055				  bool resume)
5056{
5057	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5058	struct i915_power_well *well;
5059
5060	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5061
5062	/* enable PCH reset handshake */
5063	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5064
5065	/* enable PG1 and Misc I/O */
5066	mutex_lock(&power_domains->lock);
5067
5068	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5069	intel_power_well_enable(dev_priv, well);
5070
5071	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
5072	intel_power_well_enable(dev_priv, well);
5073
5074	mutex_unlock(&power_domains->lock);
5075
5076	intel_cdclk_init_hw(dev_priv);
5077
5078	gen9_dbuf_enable(dev_priv);
5079
5080	if (resume && dev_priv->csr.dmc_payload)
5081		intel_csr_load_program(dev_priv);
5082}
5083
5084static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
5085{
5086	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5087	struct i915_power_well *well;
5088
5089	gen9_disable_dc_states(dev_priv);
5090
5091	gen9_dbuf_disable(dev_priv);
5092
5093	intel_cdclk_uninit_hw(dev_priv);
5094
5095	/* The spec doesn't call for removing the reset handshake flag */
5096	/* disable PG1 and Misc I/O */
5097
5098	mutex_lock(&power_domains->lock);
5099
5100	/*
5101	 * BSpec says to keep the MISC IO power well enabled here, only
5102	 * remove our request for power well 1.
5103	 * Note that even though the driver's request is removed, power well 1
5104	 * may stay enabled after this due to DMC's own request on it.
5105	 */
5106	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5107	intel_power_well_disable(dev_priv, well);
5108
5109	mutex_unlock(&power_domains->lock);
5110
5111	usleep_range(10, 30);		/* 10 us delay per Bspec */
5112}
5113
5114static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5115{
5116	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5117	struct i915_power_well *well;
5118
5119	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5120
5121	/*
5122	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5123	 * or else the reset will hang because there is no PCH to respond.
5124	 * Move the handshake programming to the initialization sequence;
5125	 * it was previously left up to the BIOS.
5126	 */
5127	intel_pch_reset_handshake(dev_priv, false);
5128
5129	/* Enable PG1 */
5130	mutex_lock(&power_domains->lock);
5131
5132	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5133	intel_power_well_enable(dev_priv, well);
5134
5135	mutex_unlock(&power_domains->lock);
5136
5137	intel_cdclk_init_hw(dev_priv);
5138
5139	gen9_dbuf_enable(dev_priv);
5140
5141	if (resume && dev_priv->csr.dmc_payload)
5142		intel_csr_load_program(dev_priv);
5143}
5144
5145static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
5146{
5147	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5148	struct i915_power_well *well;
5149
5150	gen9_disable_dc_states(dev_priv);
5151
5152	gen9_dbuf_disable(dev_priv);
5153
5154	intel_cdclk_uninit_hw(dev_priv);
5155
5156	/* The spec doesn't call for removing the reset handshake flag */
5157
5158	/*
5159	 * Disable PW1 (PG1).
5160	 * Note that even though the driver's request is removed, power well 1
5161	 * may stay enabled after this due to DMC's own request on it.
5162	 */
5163	mutex_lock(&power_domains->lock);
5164
5165	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5166	intel_power_well_disable(dev_priv, well);
5167
5168	mutex_unlock(&power_domains->lock);
5169
5170	usleep_range(10, 30);		/* 10 us delay per Bspec */
5171}
5172
5173static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5174{
5175	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5176	struct i915_power_well *well;
5177
5178	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5179
5180	/* 1. Enable PCH Reset Handshake */
5181	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5182
5183	/* 2-3. */
5184	intel_combo_phy_init(dev_priv);
5185
5186	/*
5187	 * 4. Enable Power Well 1 (PG1).
5188	 *    The AUX IO power wells will be enabled on demand.
5189	 */
5190	mutex_lock(&power_domains->lock);
5191	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5192	intel_power_well_enable(dev_priv, well);
5193	mutex_unlock(&power_domains->lock);
5194
5195	/* 5. Enable CD clock */
5196	intel_cdclk_init_hw(dev_priv);
5197
5198	/* 6. Enable DBUF */
5199	gen9_dbuf_enable(dev_priv);
5200
5201	if (resume && dev_priv->csr.dmc_payload)
5202		intel_csr_load_program(dev_priv);
5203}
5204
5205static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
5206{
5207	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5208	struct i915_power_well *well;
5209
5210	gen9_disable_dc_states(dev_priv);
5211
5212	/* 1. Disable all display engine functions -> already done */
5213
5214	/* 2. Disable DBUF */
5215	gen9_dbuf_disable(dev_priv);
5216
5217	/* 3. Disable CD clock */
5218	intel_cdclk_uninit_hw(dev_priv);
5219
5220	/*
5221	 * 4. Disable Power Well 1 (PG1).
5222	 *    The AUX IO power wells are toggled on demand, so they are already
5223	 *    disabled at this point.
5224	 */
5225	mutex_lock(&power_domains->lock);
5226	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5227	intel_power_well_disable(dev_priv, well);
5228	mutex_unlock(&power_domains->lock);
5229
5230	usleep_range(10, 30);		/* 10 us delay per Bspec */
5231
5232	/* 5. */
5233	intel_combo_phy_uninit(dev_priv);
5234}
5235
5236struct buddy_page_mask {
5237	u32 page_mask;
5238	u8 type;
5239	u8 num_channels;
5240};
5241
5242static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5243	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
5244	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5245	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
5246	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
5247	{}
5248};
5249
5250static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5251	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5252	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
5253	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5254	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
5255	{}
5256};
5257
5258static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5259{
5260	enum intel_dram_type type = dev_priv->dram_info.type;
5261	u8 num_channels = dev_priv->dram_info.num_channels;
5262	const struct buddy_page_mask *table;
5263	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
5264	int config, i;
5265
5266	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
5267		/* Wa_1409767108: tgl */
5268		table = wa_1409767108_buddy_page_masks;
5269	else
5270		table = tgl_buddy_page_masks;
5271
5272	for (config = 0; table[config].page_mask != 0; config++)
5273		if (table[config].num_channels == num_channels &&
5274		    table[config].type == type)
5275			break;
5276
5277	if (table[config].page_mask == 0) {
5278		drm_dbg(&dev_priv->drm,
5279			"Unknown memory configuration; disabling address buddy logic.\n");
5280		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
5281			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
5282				       BW_BUDDY_DISABLE);
5283	} else {
5284		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
5285			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
5286				       table[config].page_mask);
5287
5288			/* Wa_22010178259:tgl,rkl */
5289			intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
5290				     BW_BUDDY_TLB_REQ_TIMER_MASK,
5291				     BW_BUDDY_TLB_REQ_TIMER(0x8));
5292		}
5293	}
5294}
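
/*
 * Editor's illustrative sketch (not part of the driver): the
 * sentinel-terminated table walk in tgl_bw_buddy_init() above, expressed
 * as a lookup that returns 0 for an unknown DRAM configuration. The helper
 * name is hypothetical.
 */
static u32 __maybe_unused
example_buddy_page_mask(const struct buddy_page_mask *table,
			enum intel_dram_type type, u8 num_channels)
{
	int i;

	for (i = 0; table[i].page_mask != 0; i++)
		if (table[i].num_channels == num_channels &&
		    table[i].type == type)
			return table[i].page_mask;

	/* Hit the zero-filled sentinel: configuration unknown. */
	return 0;
}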
5295
5296static void icl_display_core_init(struct drm_i915_private *dev_priv,
5297				  bool resume)
5298{
5299	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5300	struct i915_power_well *well;
5301	u32 val;
5302
5303	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5304
5305	/* 1. Enable PCH reset handshake. */
5306	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5307
5308	/* 2. Initialize all combo phys */
5309	intel_combo_phy_init(dev_priv);
5310
5311	/*
5312	 * 3. Enable Power Well 1 (PG1).
5313	 *    The AUX IO power wells will be enabled on demand.
5314	 */
5315	mutex_lock(&power_domains->lock);
5316	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5317	intel_power_well_enable(dev_priv, well);
5318	mutex_unlock(&power_domains->lock);
5319
5320	/* 4. Enable CDCLK. */
5321	intel_cdclk_init_hw(dev_priv);
5322
5323	/* 5. Enable DBUF. */
5324	gen9_dbuf_enable(dev_priv);
5325
5326	/* 6. Setup MBUS. */
5327	icl_mbus_init(dev_priv);
5328
5329	/* 7. Program arbiter BW_BUDDY registers */
5330	if (INTEL_GEN(dev_priv) >= 12)
5331		tgl_bw_buddy_init(dev_priv);
5332
5333	if (resume && dev_priv->csr.dmc_payload)
5334		intel_csr_load_program(dev_priv);
5335
5336	/* Wa_14011508470 */
5337	if (IS_GEN(dev_priv, 12)) {
5338		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
5339		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
5340		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
5341	}
5342}
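
/*
 * Editor's illustrative sketch (not part of the driver): with a zero clear
 * mask, as used for Wa_14011508470 above, intel_uncore_rmw() reduces to a
 * read-OR-write. The helper name is hypothetical.
 */
static void __maybe_unused
example_rmw_set_bits(struct drm_i915_private *dev_priv, i915_reg_t reg, u32 bits)
{
	u32 val = intel_uncore_read(&dev_priv->uncore, reg);

	intel_uncore_write(&dev_priv->uncore, reg, val | bits);
}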
5343
5344static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5345{
5346	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5347	struct i915_power_well *well;
5348
5349	gen9_disable_dc_states(dev_priv);
5350
5351	/* 1. Disable all display engine functions -> already done */
5352
5353	/* 2. Disable DBUF */
5354	gen9_dbuf_disable(dev_priv);
5355
5356	/* 3. Disable CD clock */
5357	intel_cdclk_uninit_hw(dev_priv);
5358
5359	/*
5360	 * 4. Disable Power Well 1 (PG1).
5361	 *    The AUX IO power wells are toggled on demand, so they are already
5362	 *    disabled at this point.
5363	 */
5364	mutex_lock(&power_domains->lock);
5365	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5366	intel_power_well_disable(dev_priv, well);
5367	mutex_unlock(&power_domains->lock);
5368
5369	/* 5. */
5370	intel_combo_phy_uninit(dev_priv);
5371}
5372
5373static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5374{
5375	struct i915_power_well *cmn_bc =
5376		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5377	struct i915_power_well *cmn_d =
5378		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5379
5380	/*
5381	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5382	 * workaround never ever read DISPLAY_PHY_CONTROL, and
5383	 * instead maintain a shadow copy ourselves. Use the actual
5384	 * power well state and lane status to reconstruct the
5385	 * expected initial value.
5386	 */
5387	dev_priv->chv_phy_control =
5388		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5389		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5390		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5391		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5392		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5393
5394	/*
5395	 * If all lanes are disabled we leave the override disabled
5396	 * with all power down bits cleared to match the state we
5397	 * would use after disabling the port. Otherwise enable the
5398	 * override and set the lane powerdown bits according to the
5399	 * current lane status.
5400	 */
5401	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5402		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5403		unsigned int mask;
5404
5405		mask = status & DPLL_PORTB_READY_MASK;
5406		if (mask == 0xf)
5407			mask = 0x0;
5408		else
5409			dev_priv->chv_phy_control |=
5410				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5411
5412		dev_priv->chv_phy_control |=
5413			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5414
5415		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5416		if (mask == 0xf)
5417			mask = 0x0;
5418		else
5419			dev_priv->chv_phy_control |=
5420				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5421
5422		dev_priv->chv_phy_control |=
5423			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5424
5425		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5426
5427		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5428	} else {
5429		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5430	}
5431
5432	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5433		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5434		unsigned int mask;
5435
5436		mask = status & DPLL_PORTD_READY_MASK;
5437
5438		if (mask == 0xf)
5439			mask = 0x0;
5440		else
5441			dev_priv->chv_phy_control |=
5442				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5443
5444		dev_priv->chv_phy_control |=
5445			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5446
5447		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5448
5449		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5450	} else {
5451		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5452	}
5453
5454	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5455		    dev_priv->chv_phy_control);
5456
5457	/* Defer applying the initial phy_control until the power well is enabled */
5458}
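
/*
 * Editor's illustrative sketch (not part of the driver): because
 * DISPLAY_PHY_CONTROL can get corrupted if read, every update follows a
 * write-only shadow pattern -- modify dev_priv->chv_phy_control and write
 * the whole value out, never reading the register back. The helper name
 * and its set/clear parameters are hypothetical.
 */
static void __maybe_unused
example_update_phy_control(struct drm_i915_private *dev_priv,
			   u32 set, u32 clear)
{
	dev_priv->chv_phy_control &= ~clear;
	dev_priv->chv_phy_control |= set;

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);
}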
5459
5460static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5461{
5462	struct i915_power_well *cmn =
5463		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5464	struct i915_power_well *disp2d =
5465		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5466
5467	/* If the display might already be active, skip this */
5468	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5469	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5470	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5471		return;
5472
5473	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5474
5475	/* cmnlane needs DPLL registers */
5476	disp2d->desc->ops->enable(dev_priv, disp2d);
5477
5478	/*
5479	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5480	 * Need to assert and de-assert PHY SB reset by gating the
5481	 * common lane power, then un-gating it.
5482	 * Simply ungating isn't sufficient to reset the PHY far enough
5483	 * to get the ports and lanes running.
5484	 */
5485	cmn->desc->ops->disable(dev_priv, cmn);
5486}
5487
5488static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5489{
5490	bool ret;
5491
5492	vlv_punit_get(dev_priv);
5493	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5494	vlv_punit_put(dev_priv);
5495
5496	return ret;
5497}
5498
5499static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5500{
5501	drm_WARN(&dev_priv->drm,
5502		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5503		 "VED not power gated\n");
5504}
5505
5506static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5507{
5508	static const struct pci_device_id isp_ids[] = {
5509		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5510		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5511		{}
5512	};
5513
5514	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
5515		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5516		 "ISP not power gated\n");
5517}
5518
5519static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5520
5521/**
5522 * intel_power_domains_init_hw - initialize hardware power domain state
5523 * @i915: i915 device instance
5524 * @resume: true if called from a resume code path
5525 *
5526 * This function initializes the hardware power domain state and enables all
5527 * power wells belonging to the INIT power domain. Power wells in other
5528 * domains are referenced or disabled by
5529 * intel_modeset_readout_hw_state(). After that the reference count of each
5530 * power well must match its HW enabled state, see
5531 * intel_power_domains_verify_state().
5532 *
5533 * It will return with power domains disabled (to be enabled later by
5534 * intel_power_domains_enable()) and must be paired with
5535 * intel_power_domains_driver_remove().
5536 */
5537void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5538{
5539	struct i915_power_domains *power_domains = &i915->power_domains;
5540
5541	power_domains->initializing = true;
5542
5543	if (INTEL_GEN(i915) >= 11) {
5544		icl_display_core_init(i915, resume);
5545	} else if (IS_CANNONLAKE(i915)) {
5546		cnl_display_core_init(i915, resume);
5547	} else if (IS_GEN9_BC(i915)) {
5548		skl_display_core_init(i915, resume);
5549	} else if (IS_GEN9_LP(i915)) {
5550		bxt_display_core_init(i915, resume);
5551	} else if (IS_CHERRYVIEW(i915)) {
5552		mutex_lock(&power_domains->lock);
5553		chv_phy_control_init(i915);
5554		mutex_unlock(&power_domains->lock);
5555		assert_isp_power_gated(i915);
5556	} else if (IS_VALLEYVIEW(i915)) {
5557		mutex_lock(&power_domains->lock);
5558		vlv_cmnlane_wa(i915);
5559		mutex_unlock(&power_domains->lock);
5560		assert_ved_power_gated(i915);
5561		assert_isp_power_gated(i915);
5562	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5563		hsw_assert_cdclk(i915);
5564		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5565	} else if (IS_IVYBRIDGE(i915)) {
5566		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5567	}
5568
5569	/*
5570	 * Keep all power wells enabled for any dependent HW access during
5571	 * initialization and to make sure we keep BIOS enabled display HW
5572	 * resources powered until display HW readout is complete. We drop
5573	 * this reference in intel_power_domains_enable().
5574	 */
5575	power_domains->wakeref =
5576		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5577
5578	/* Disable power well support if the user asked for it. */
5579	if (!i915->params.disable_power_well)
5580		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5581	intel_power_domains_sync_hw(i915);
5582
5583	power_domains->initializing = false;
5584}
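
/*
 * Editor's illustrative sketch (not part of the driver): the pairing the
 * kerneldoc above describes, in driver-load order. The function name is
 * hypothetical and the readout step is elided.
 */
static void __maybe_unused example_driver_load(struct drm_i915_private *i915)
{
	/* Power wells on, INIT wakeref held for the HW readout. */
	intel_power_domains_init_hw(i915, false);

	/* ... display HW state readout takes its own power references ... */

	/* Drop the INIT wakeref; wells may now toggle on demand. */
	intel_power_domains_enable(i915);
}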
5585
5586/**
5587 * intel_power_domains_driver_remove - deinitialize hw power domain state
5588 * @i915: i915 device instance
5589 *
5590 * De-initializes the display power domain HW state. It also ensures that the
5591 * device stays powered up so that the driver can be reloaded.
5592 *
5593 * It must be called with power domains already disabled (after a call to
5594 * intel_power_domains_disable()) and must be paired with
5595 * intel_power_domains_init_hw().
5596 */
5597void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5598{
5599	intel_wakeref_t wakeref __maybe_unused =
5600		fetch_and_zero(&i915->power_domains.wakeref);
5601
5602	/* Remove the refcount we took to keep power well support disabled. */
5603	if (!i915->params.disable_power_well)
5604		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5605
5606	intel_display_power_flush_work_sync(i915);
5607
5608	intel_power_domains_verify_state(i915);
5609
5610	/* Keep the power well enabled, but cancel its rpm wakeref. */
5611	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5612}
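
/*
 * Editor's illustrative sketch (not part of the driver): the unload-side
 * counterpart of the pairing above. The function name is hypothetical.
 */
static void __maybe_unused example_driver_unload(struct drm_i915_private *i915)
{
	/* Re-acquire the INIT wakeref before tearing the driver down. */
	intel_power_domains_disable(i915);

	/* Deinit SW state; the device itself stays powered for reload. */
	intel_power_domains_driver_remove(i915);
}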
5613
5614/**
5615 * intel_power_domains_enable - enable toggling of display power wells
5616 * @i915: i915 device instance
5617 *
5618 * Enable the on-demand enabling/disabling of the display power wells. Note that
5619 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5620 * only at specific points of the display modeset sequence, thus they are not
5621 * affected by the intel_power_domains_enable()/disable() calls. The purpose
5622 * of these functions is to keep the rest of the power wells enabled until the end
5623 * of display HW readout (which will acquire the power references reflecting
5624 * the current HW state).
5625 */
5626void intel_power_domains_enable(struct drm_i915_private *i915)
5627{
5628	intel_wakeref_t wakeref __maybe_unused =
5629		fetch_and_zero(&i915->power_domains.wakeref);
5630
5631	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5632	intel_power_domains_verify_state(i915);
5633}
5634
5635/**
5636 * intel_power_domains_disable - disable toggling of display power wells
5637 * @i915: i915 device instance
5638 *
5639 * Disable the on-demand enabling/disabling of the display power wells. See
5640 * intel_power_domains_enable() for which power wells this call controls.
5641 */
5642void intel_power_domains_disable(struct drm_i915_private *i915)
5643{
5644	struct i915_power_domains *power_domains = &i915->power_domains;
5645
5646	drm_WARN_ON(&i915->drm, power_domains->wakeref);
5647	power_domains->wakeref =
5648		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5649
5650	intel_power_domains_verify_state(i915);
5651}
5652
5653/**
5654 * intel_power_domains_suspend - suspend power domain state
5655 * @i915: i915 device instance
5656 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5657 *
5658 * This function prepares the hardware power domain state before entering
5659 * system suspend.
5660 *
5661 * It must be called with power domains already disabled (after a call to
5662 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5663 */
5664void intel_power_domains_suspend(struct drm_i915_private *i915,
5665				 enum i915_drm_suspend_mode suspend_mode)
5666{
5667	struct i915_power_domains *power_domains = &i915->power_domains;
5668	intel_wakeref_t wakeref __maybe_unused =
5669		fetch_and_zero(&power_domains->wakeref);
5670
5671	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5672
5673	/*
5674	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5675	 * support, don't manually deinit the power domains. This also means the
5676	 * CSR/DMC firmware will stay active; it will power down any HW
5677	 * resources as required and also enable deeper system power states
5678	 * that would be blocked if the firmware was inactive.
5679	 */
5680	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5681	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
5682	    i915->csr.dmc_payload) {
5683		intel_display_power_flush_work(i915);
5684		intel_power_domains_verify_state(i915);
5685		return;
5686	}
5687
5688	/*
5689	 * Even if power well support was disabled, we still want to disable
5690	 * power wells if power domains must be deinitialized for suspend.
5691	 */
5692	if (!i915->params.disable_power_well)
5693		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5694
5695	intel_display_power_flush_work(i915);
5696	intel_power_domains_verify_state(i915);
5697
5698	if (INTEL_GEN(i915) >= 11)
5699		icl_display_core_uninit(i915);
5700	else if (IS_CANNONLAKE(i915))
5701		cnl_display_core_uninit(i915);
5702	else if (IS_GEN9_BC(i915))
5703		skl_display_core_uninit(i915);
5704	else if (IS_GEN9_LP(i915))
5705		bxt_display_core_uninit(i915);
5706
5707	power_domains->display_core_suspended = true;
5708}
5709
5710/**
5711 * intel_power_domains_resume - resume power domain state
5712 * @i915: i915 device instance
5713 *
5714 * This function resumes the hardware power domain state during system resume.
5715 *
5716 * It will return with power domain support disabled (to be enabled later by
5717 * intel_power_domains_enable()) and must be paired with
5718 * intel_power_domains_suspend().
5719 */
5720void intel_power_domains_resume(struct drm_i915_private *i915)
5721{
5722	struct i915_power_domains *power_domains = &i915->power_domains;
5723
5724	if (power_domains->display_core_suspended) {
5725		intel_power_domains_init_hw(i915, true);
5726		power_domains->display_core_suspended = false;
5727	} else {
5728		drm_WARN_ON(&i915->drm, power_domains->wakeref);
5729		power_domains->wakeref =
5730			intel_display_power_get(i915, POWER_DOMAIN_INIT);
5731	}
5732
5733	intel_power_domains_verify_state(i915);
5734}
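
/*
 * Editor's illustrative sketch (not part of the driver): how the
 * suspend/resume pair above nests inside the enable/disable bracket across
 * a system suspend cycle. The function name is hypothetical.
 */
static void __maybe_unused
example_system_suspend_cycle(struct drm_i915_private *i915)
{
	intel_power_domains_disable(i915);
	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);

	/* ... system sleeps ... */

	intel_power_domains_resume(i915);
	intel_power_domains_enable(i915);
}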
5735
5736#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5737
5738static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5739{
5740	struct i915_power_domains *power_domains = &i915->power_domains;
5741	struct i915_power_well *power_well;
5742
5743	for_each_power_well(i915, power_well) {
5744		enum intel_display_power_domain domain;
5745
5746		drm_dbg(&i915->drm, "%-25s %d\n",
5747			power_well->desc->name, power_well->count);
5748
5749		for_each_power_domain(domain, power_well->desc->domains)
5750			drm_dbg(&i915->drm, "  %-23s %d\n",
5751				intel_display_power_domain_str(domain),
5752				power_domains->domain_use_count[domain]);
5753	}
5754}
5755
5756/**
5757 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5758 * @i915: i915 device instance
5759 *
5760 * Verify if the reference count of each power well matches its HW enabled
5761 * state and the total refcount of the domains it belongs to. This must be
5762 * called after modeset HW state sanitization, which is responsible for
5763 * acquiring reference counts for any power wells in use and disabling the
5764 * ones left on by BIOS but not required by any active output.
5765 */
5766static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5767{
5768	struct i915_power_domains *power_domains = &i915->power_domains;
5769	struct i915_power_well *power_well;
5770	bool dump_domain_info;
5771
5772	mutex_lock(&power_domains->lock);
5773
5774	verify_async_put_domains_state(power_domains);
5775
5776	dump_domain_info = false;
5777	for_each_power_well(i915, power_well) {
5778		enum intel_display_power_domain domain;
5779		int domains_count;
5780		bool enabled;
5781
5782		enabled = power_well->desc->ops->is_enabled(i915, power_well);
5783		if ((power_well->count || power_well->desc->always_on) !=
5784		    enabled)
5785			drm_err(&i915->drm,
5786				"power well %s state mismatch (refcount %d/enabled %d)\n",
5787				power_well->desc->name,
5788				power_well->count, enabled);
5789
5790		domains_count = 0;
5791		for_each_power_domain(domain, power_well->desc->domains)
5792			domains_count += power_domains->domain_use_count[domain];
5793
5794		if (power_well->count != domains_count) {
5795			drm_err(&i915->drm,
5796				"power well %s refcount/domain refcount mismatch "
5797				"(refcount %d/domains refcount %d)\n",
5798				power_well->desc->name, power_well->count,
5799				domains_count);
5800			dump_domain_info = true;
5801		}
5802	}
5803
5804	if (dump_domain_info) {
5805		static bool dumped;
5806
5807		if (!dumped) {
5808			intel_power_domains_dump_info(i915);
5809			dumped = true;
5810		}
5811	}
5812
5813	mutex_unlock(&power_domains->lock);
5814}
5815
5816#else
5817
5818static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5819{
5820}
5821
5822#endif
5823
5824void intel_display_power_suspend_late(struct drm_i915_private *i915)
5825{
5826	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5827		bxt_enable_dc9(i915);
5828	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5829		hsw_enable_pc8(i915);
5830}
5831
5832void intel_display_power_resume_early(struct drm_i915_private *i915)
5833{
5834	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
5835		gen9_sanitize_dc_state(i915);
5836		bxt_disable_dc9(i915);
5837	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5838		hsw_disable_pc8(i915);
5839	}
5840}
5841
5842void intel_display_power_suspend(struct drm_i915_private *i915)
5843{
5844	if (INTEL_GEN(i915) >= 11) {
5845		icl_display_core_uninit(i915);
5846		bxt_enable_dc9(i915);
5847	} else if (IS_GEN9_LP(i915)) {
5848		bxt_display_core_uninit(i915);
5849		bxt_enable_dc9(i915);
5850	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5851		hsw_enable_pc8(i915);
5852	}
5853}
5854
5855void intel_display_power_resume(struct drm_i915_private *i915)
5856{
5857	if (INTEL_GEN(i915) >= 11) {
5858		bxt_disable_dc9(i915);
5859		icl_display_core_init(i915, true);
5860		if (i915->csr.dmc_payload) {
5861			if (i915->csr.allowed_dc_mask &
5862			    DC_STATE_EN_UPTO_DC6)
5863				skl_enable_dc6(i915);
5864			else if (i915->csr.allowed_dc_mask &
5865				 DC_STATE_EN_UPTO_DC5)
5866				gen9_enable_dc5(i915);
5867		}
5868	} else if (IS_GEN9_LP(i915)) {
5869		bxt_disable_dc9(i915);
5870		bxt_display_core_init(i915, true);
5871		if (i915->csr.dmc_payload &&
5872		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5873			gen9_enable_dc5(i915);
5874	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5875		hsw_disable_pc8(i915);
5876	}
5877}
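
/*
 * Editor's illustrative sketch (not part of the driver): a full display
 * power suspend/resume cycle using the two helpers above; on gen11+ this
 * uninits the display core and enters DC9, then reverses both steps and
 * re-enables the allowed DC state. The function name is hypothetical.
 */
static void __maybe_unused
example_display_power_cycle(struct drm_i915_private *i915)
{
	intel_display_power_suspend(i915);

	/* ... device sits in its low power state ... */

	intel_display_power_resume(i915);
}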