Linux Audio

Check our new training course

Open-source upstreaming

Need help get the support for your hardware in upstream Linux?
Loading...
v5.14.15
   1/* SPDX-License-Identifier: MIT */
   2/*
   3 * Copyright © 2019 Intel Corporation
   4 */
   5
   6#include "display/intel_crt.h"
 
   7
   8#include "i915_drv.h"
   9#include "i915_irq.h"
  10#include "intel_cdclk.h"
  11#include "intel_combo_phy.h"
 
  12#include "intel_display_power.h"
  13#include "intel_de.h"
  14#include "intel_display_types.h"
  15#include "intel_dmc.h"
  16#include "intel_dpio_phy.h"
  17#include "intel_hotplug.h"
  18#include "intel_pm.h"
  19#include "intel_pps.h"
  20#include "intel_sideband.h"
  21#include "intel_tc.h"
  22#include "intel_vga.h"
  23
  24bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
  25					 enum i915_power_well_id power_well_id);
  26
  27const char *
  28intel_display_power_domain_str(enum intel_display_power_domain domain)
  29{
  30	switch (domain) {
  31	case POWER_DOMAIN_DISPLAY_CORE:
  32		return "DISPLAY_CORE";
  33	case POWER_DOMAIN_PIPE_A:
  34		return "PIPE_A";
  35	case POWER_DOMAIN_PIPE_B:
  36		return "PIPE_B";
  37	case POWER_DOMAIN_PIPE_C:
  38		return "PIPE_C";
  39	case POWER_DOMAIN_PIPE_D:
  40		return "PIPE_D";
  41	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
  42		return "PIPE_A_PANEL_FITTER";
  43	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
  44		return "PIPE_B_PANEL_FITTER";
  45	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
  46		return "PIPE_C_PANEL_FITTER";
  47	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
  48		return "PIPE_D_PANEL_FITTER";
  49	case POWER_DOMAIN_TRANSCODER_A:
  50		return "TRANSCODER_A";
  51	case POWER_DOMAIN_TRANSCODER_B:
  52		return "TRANSCODER_B";
  53	case POWER_DOMAIN_TRANSCODER_C:
  54		return "TRANSCODER_C";
  55	case POWER_DOMAIN_TRANSCODER_D:
  56		return "TRANSCODER_D";
  57	case POWER_DOMAIN_TRANSCODER_EDP:
  58		return "TRANSCODER_EDP";
  59	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
  60		return "TRANSCODER_VDSC_PW2";
  61	case POWER_DOMAIN_TRANSCODER_DSI_A:
  62		return "TRANSCODER_DSI_A";
  63	case POWER_DOMAIN_TRANSCODER_DSI_C:
  64		return "TRANSCODER_DSI_C";
  65	case POWER_DOMAIN_PORT_DDI_A_LANES:
  66		return "PORT_DDI_A_LANES";
  67	case POWER_DOMAIN_PORT_DDI_B_LANES:
  68		return "PORT_DDI_B_LANES";
  69	case POWER_DOMAIN_PORT_DDI_C_LANES:
  70		return "PORT_DDI_C_LANES";
  71	case POWER_DOMAIN_PORT_DDI_D_LANES:
  72		return "PORT_DDI_D_LANES";
  73	case POWER_DOMAIN_PORT_DDI_E_LANES:
  74		return "PORT_DDI_E_LANES";
  75	case POWER_DOMAIN_PORT_DDI_F_LANES:
  76		return "PORT_DDI_F_LANES";
  77	case POWER_DOMAIN_PORT_DDI_G_LANES:
  78		return "PORT_DDI_G_LANES";
  79	case POWER_DOMAIN_PORT_DDI_H_LANES:
  80		return "PORT_DDI_H_LANES";
  81	case POWER_DOMAIN_PORT_DDI_I_LANES:
  82		return "PORT_DDI_I_LANES";
  83	case POWER_DOMAIN_PORT_DDI_A_IO:
  84		return "PORT_DDI_A_IO";
  85	case POWER_DOMAIN_PORT_DDI_B_IO:
  86		return "PORT_DDI_B_IO";
  87	case POWER_DOMAIN_PORT_DDI_C_IO:
  88		return "PORT_DDI_C_IO";
  89	case POWER_DOMAIN_PORT_DDI_D_IO:
  90		return "PORT_DDI_D_IO";
  91	case POWER_DOMAIN_PORT_DDI_E_IO:
  92		return "PORT_DDI_E_IO";
  93	case POWER_DOMAIN_PORT_DDI_F_IO:
  94		return "PORT_DDI_F_IO";
  95	case POWER_DOMAIN_PORT_DDI_G_IO:
  96		return "PORT_DDI_G_IO";
  97	case POWER_DOMAIN_PORT_DDI_H_IO:
  98		return "PORT_DDI_H_IO";
  99	case POWER_DOMAIN_PORT_DDI_I_IO:
 100		return "PORT_DDI_I_IO";
 101	case POWER_DOMAIN_PORT_DSI:
 102		return "PORT_DSI";
 103	case POWER_DOMAIN_PORT_CRT:
 104		return "PORT_CRT";
 105	case POWER_DOMAIN_PORT_OTHER:
 106		return "PORT_OTHER";
 107	case POWER_DOMAIN_VGA:
 108		return "VGA";
 109	case POWER_DOMAIN_AUDIO:
 110		return "AUDIO";
 111	case POWER_DOMAIN_AUX_A:
 112		return "AUX_A";
 113	case POWER_DOMAIN_AUX_B:
 114		return "AUX_B";
 115	case POWER_DOMAIN_AUX_C:
 116		return "AUX_C";
 117	case POWER_DOMAIN_AUX_D:
 118		return "AUX_D";
 119	case POWER_DOMAIN_AUX_E:
 120		return "AUX_E";
 121	case POWER_DOMAIN_AUX_F:
 122		return "AUX_F";
 123	case POWER_DOMAIN_AUX_G:
 124		return "AUX_G";
 125	case POWER_DOMAIN_AUX_H:
 126		return "AUX_H";
 127	case POWER_DOMAIN_AUX_I:
 128		return "AUX_I";
 129	case POWER_DOMAIN_AUX_IO_A:
 130		return "AUX_IO_A";
 131	case POWER_DOMAIN_AUX_C_TBT:
 132		return "AUX_C_TBT";
 133	case POWER_DOMAIN_AUX_D_TBT:
 134		return "AUX_D_TBT";
 135	case POWER_DOMAIN_AUX_E_TBT:
 136		return "AUX_E_TBT";
 137	case POWER_DOMAIN_AUX_F_TBT:
 138		return "AUX_F_TBT";
 139	case POWER_DOMAIN_AUX_G_TBT:
 140		return "AUX_G_TBT";
 141	case POWER_DOMAIN_AUX_H_TBT:
 142		return "AUX_H_TBT";
 143	case POWER_DOMAIN_AUX_I_TBT:
 144		return "AUX_I_TBT";
 145	case POWER_DOMAIN_GMBUS:
 146		return "GMBUS";
 147	case POWER_DOMAIN_INIT:
 148		return "INIT";
 149	case POWER_DOMAIN_MODESET:
 150		return "MODESET";
 151	case POWER_DOMAIN_GT_IRQ:
 152		return "GT_IRQ";
 153	case POWER_DOMAIN_DPLL_DC_OFF:
 154		return "DPLL_DC_OFF";
 155	case POWER_DOMAIN_TC_COLD_OFF:
 156		return "TC_COLD_OFF";
 157	default:
 158		MISSING_CASE(domain);
 159		return "?";
 160	}
 161}
 162
 163static void intel_power_well_enable(struct drm_i915_private *dev_priv,
 164				    struct i915_power_well *power_well)
 165{
 166	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
 167	power_well->desc->ops->enable(dev_priv, power_well);
 168	power_well->hw_enabled = true;
 169}
 170
 171static void intel_power_well_disable(struct drm_i915_private *dev_priv,
 172				     struct i915_power_well *power_well)
 173{
 174	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
 175	power_well->hw_enabled = false;
 176	power_well->desc->ops->disable(dev_priv, power_well);
 177}
 178
 179static void intel_power_well_get(struct drm_i915_private *dev_priv,
 180				 struct i915_power_well *power_well)
 181{
 182	if (!power_well->count++)
 183		intel_power_well_enable(dev_priv, power_well);
 184}
 185
 186static void intel_power_well_put(struct drm_i915_private *dev_priv,
 187				 struct i915_power_well *power_well)
 188{
 189	drm_WARN(&dev_priv->drm, !power_well->count,
 190		 "Use count on power well %s is already zero",
 191		 power_well->desc->name);
 192
 193	if (!--power_well->count)
 194		intel_power_well_disable(dev_priv, power_well);
 195}
 196
 197/**
 198 * __intel_display_power_is_enabled - unlocked check for a power domain
 199 * @dev_priv: i915 device instance
 200 * @domain: power domain to check
 201 *
 202 * This is the unlocked version of intel_display_power_is_enabled() and should
 203 * only be used from error capture and recovery code where deadlocks are
 204 * possible.
 205 *
 206 * Returns:
 207 * True when the power domain is enabled, false otherwise.
 208 */
 209bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 210				      enum intel_display_power_domain domain)
 211{
 212	struct i915_power_well *power_well;
 213	bool is_enabled;
 214
 215	if (dev_priv->runtime_pm.suspended)
 216		return false;
 217
 218	is_enabled = true;
 219
 220	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
 221		if (power_well->desc->always_on)
 222			continue;
 223
 224		if (!power_well->hw_enabled) {
 225			is_enabled = false;
 226			break;
 227		}
 228	}
 229
 230	return is_enabled;
 231}
 232
 233/**
 234 * intel_display_power_is_enabled - check for a power domain
 235 * @dev_priv: i915 device instance
 236 * @domain: power domain to check
 237 *
 238 * This function can be used to check the hw power domain state. It is mostly
 239 * used in hardware state readout functions. Everywhere else code should rely
 240 * upon explicit power domain reference counting to ensure that the hardware
 241 * block is powered up before accessing it.
 242 *
 243 * Callers must hold the relevant modesetting locks to ensure that concurrent
 244 * threads can't disable the power well while the caller tries to read a few
 245 * registers.
 246 *
 247 * Returns:
 248 * True when the power domain is enabled, false otherwise.
 249 */
 250bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 251				    enum intel_display_power_domain domain)
 252{
 253	struct i915_power_domains *power_domains;
 254	bool ret;
 255
 256	power_domains = &dev_priv->power_domains;
 257
 258	mutex_lock(&power_domains->lock);
 259	ret = __intel_display_power_is_enabled(dev_priv, domain);
 260	mutex_unlock(&power_domains->lock);
 261
 262	return ret;
 263}
 264
 265/*
 266 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 267 * when not needed anymore. We have 4 registers that can request the power well
 268 * to be enabled, and it will only be disabled if none of the registers is
 269 * requesting it to be enabled.
 270 */
 271static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
 272				       u8 irq_pipe_mask, bool has_vga)
 273{
 274	if (has_vga)
 275		intel_vga_reset_io_mem(dev_priv);
 276
 277	if (irq_pipe_mask)
 278		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
 279}
 280
 281static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
 282				       u8 irq_pipe_mask)
 283{
 284	if (irq_pipe_mask)
 285		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
 286}
 287
 288#define ICL_AUX_PW_TO_CH(pw_idx)	\
 289	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
 290
 291#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
 292	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
 293
 294static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
 
 295{
 296	int pw_idx = power_well->desc->hsw.idx;
 297
 298	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
 299						 ICL_AUX_PW_TO_CH(pw_idx);
 300}
 301
 302static struct intel_digital_port *
 303aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
 304		       enum aux_ch aux_ch)
 305{
 306	struct intel_digital_port *dig_port = NULL;
 307	struct intel_encoder *encoder;
 308
 309	for_each_intel_encoder(&dev_priv->drm, encoder) {
 310		/* We'll check the MST primary port */
 311		if (encoder->type == INTEL_OUTPUT_DP_MST)
 312			continue;
 313
 314		dig_port = enc_to_dig_port(encoder);
 315		if (!dig_port)
 316			continue;
 317
 318		if (dig_port->aux_ch != aux_ch) {
 319			dig_port = NULL;
 320			continue;
 321		}
 322
 323		break;
 324	}
 325
 326	return dig_port;
 327}
 328
 329static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
 330				  const struct i915_power_well *power_well)
 331{
 332	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
 333	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
 334
 335	return intel_port_to_phy(i915, dig_port->base.port);
 336}
 337
 338static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
 339					   struct i915_power_well *power_well,
 340					   bool timeout_expected)
 341{
 342	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 343	int pw_idx = power_well->desc->hsw.idx;
 344
 345	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
 346	if (intel_de_wait_for_set(dev_priv, regs->driver,
 347				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
 348		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
 349			    power_well->desc->name);
 350
 351		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
 352
 353	}
 354}
 355
 356static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
 357				     const struct i915_power_well_regs *regs,
 358				     int pw_idx)
 359{
 360	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
 361	u32 ret;
 362
 363	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
 364	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
 365	if (regs->kvmr.reg)
 366		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
 367	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
 368
 369	return ret;
 370}
 371
 372static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
 373					    struct i915_power_well *power_well)
 374{
 375	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 376	int pw_idx = power_well->desc->hsw.idx;
 377	bool disabled;
 378	u32 reqs;
 379
 380	/*
 381	 * Bspec doesn't require waiting for PWs to get disabled, but still do
 382	 * this for paranoia. The known cases where a PW will be forced on:
 383	 * - a KVMR request on any power well via the KVMR request register
 384	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
 385	 *   DEBUG request registers
 386	 * Skip the wait in case any of the request bits are set and print a
 387	 * diagnostic message.
 388	 */
 389	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
 390			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
 391		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
 392	if (disabled)
 393		return;
 394
 395	drm_dbg_kms(&dev_priv->drm,
 396		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
 397		    power_well->desc->name,
 398		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
 399}
 400
 401static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
 402					   enum skl_power_gate pg)
 403{
 404	/* Timeout 5us for PG#0, for other PGs 1us */
 405	drm_WARN_ON(&dev_priv->drm,
 406		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
 407					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
 408}
 409
 410static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 411				  struct i915_power_well *power_well)
 412{
 413	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 414	int pw_idx = power_well->desc->hsw.idx;
 415	u32 val;
 416
 417	if (power_well->desc->hsw.has_fuses) {
 418		enum skl_power_gate pg;
 419
 420		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
 421						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
 422		/*
 423		 * For PW1 we have to wait both for the PW0/PG0 fuse state
 424		 * before enabling the power well and PW1/PG1's own fuse
 425		 * state after the enabling. For all other power wells with
 426		 * fuses we only have to wait for that PW/PG's fuse state
 427		 * after the enabling.
 428		 */
 429		if (pg == SKL_PG1)
 430			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
 431	}
 432
 433	val = intel_de_read(dev_priv, regs->driver);
 434	intel_de_write(dev_priv, regs->driver,
 435		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 436
 437	hsw_wait_for_power_well_enable(dev_priv, power_well, false);
 438
 439	/* Display WA #1178: cnl */
 440	if (IS_CANNONLAKE(dev_priv) &&
 441	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
 442	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
 443		u32 val;
 444
 445		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
 446		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
 447		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
 448	}
 449
 450	if (power_well->desc->hsw.has_fuses) {
 451		enum skl_power_gate pg;
 452
 453		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
 454						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
 455		gen9_wait_for_power_well_fuses(dev_priv, pg);
 456	}
 457
 458	hsw_power_well_post_enable(dev_priv,
 459				   power_well->desc->hsw.irq_pipe_mask,
 460				   power_well->desc->hsw.has_vga);
 461}
 462
 463static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
 464				   struct i915_power_well *power_well)
 465{
 466	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 467	int pw_idx = power_well->desc->hsw.idx;
 468	u32 val;
 469
 470	hsw_power_well_pre_disable(dev_priv,
 471				   power_well->desc->hsw.irq_pipe_mask);
 472
 473	val = intel_de_read(dev_priv, regs->driver);
 474	intel_de_write(dev_priv, regs->driver,
 475		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
 476	hsw_wait_for_power_well_disable(dev_priv, power_well);
 477}
 478
 
 
 479static void
 480icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 481				    struct i915_power_well *power_well)
 482{
 483	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 484	int pw_idx = power_well->desc->hsw.idx;
 485	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
 486	u32 val;
 487
 488	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 489
 490	val = intel_de_read(dev_priv, regs->driver);
 491	intel_de_write(dev_priv, regs->driver,
 492		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 493
 494	if (DISPLAY_VER(dev_priv) < 12) {
 495		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
 496		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
 497			       val | ICL_LANE_ENABLE_AUX);
 498	}
 499
 500	hsw_wait_for_power_well_enable(dev_priv, power_well, false);
 501
 502	/* Display WA #1178: icl */
 503	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
 504	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
 505		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
 506		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
 507		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
 508	}
 509}
 510
 511static void
 512icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 513				     struct i915_power_well *power_well)
 514{
 515	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 516	int pw_idx = power_well->desc->hsw.idx;
 517	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
 518	u32 val;
 519
 520	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 521
 522	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
 523	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
 524		       val & ~ICL_LANE_ENABLE_AUX);
 525
 526	val = intel_de_read(dev_priv, regs->driver);
 527	intel_de_write(dev_priv, regs->driver,
 528		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
 529
 530	hsw_wait_for_power_well_disable(dev_priv, power_well);
 531}
 532
 533#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 534
 535static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
 536
 537static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
 538				      struct i915_power_well *power_well)
 539{
 540	int refs = hweight64(power_well->desc->domains &
 541			     async_put_domains_mask(&dev_priv->power_domains));
 542
 543	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
 544
 545	return refs;
 546}
 547
 548static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
 549					struct i915_power_well *power_well,
 550					struct intel_digital_port *dig_port)
 551{
 552	/* Bypass the check if all references are released asynchronously */
 553	if (power_well_async_ref_count(dev_priv, power_well) ==
 554	    power_well->count)
 555		return;
 556
 557	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
 558		return;
 559
 560	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
 561		return;
 562
 563	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
 564}
 565
 566#else
 567
 568static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
 569					struct i915_power_well *power_well,
 570					struct intel_digital_port *dig_port)
 571{
 572}
 573
 574#endif
 575
 576#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
 577
 578static void icl_tc_cold_exit(struct drm_i915_private *i915)
 579{
 580	int ret, tries = 0;
 581
 582	while (1) {
 583		ret = sandybridge_pcode_write_timeout(i915,
 584						      ICL_PCODE_EXIT_TCCOLD,
 585						      0, 250, 1);
 586		if (ret != -EAGAIN || ++tries == 3)
 587			break;
 588		msleep(1);
 589	}
 590
 591	/* Spec states that TC cold exit can take up to 1ms to complete */
 592	if (!ret)
 593		msleep(1);
 594
 595	/* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
 596	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
 597		    "succeeded");
 598}
 599
 600static void
 601icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 602				 struct i915_power_well *power_well)
 603{
 604	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
 605	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
 606	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 607	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
 608	bool timeout_expected;
 609	u32 val;
 610
 611	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
 612
 613	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
 614	val &= ~DP_AUX_CH_CTL_TBT_IO;
 615	if (is_tbt)
 616		val |= DP_AUX_CH_CTL_TBT_IO;
 617	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
 618
 619	val = intel_de_read(dev_priv, regs->driver);
 620	intel_de_write(dev_priv, regs->driver,
 621		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
 622
 623	/*
 624	 * An AUX timeout is expected if the TBT DP tunnel is down,
 625	 * or need to enable AUX on a legacy TypeC port as part of the TC-cold
 626	 * exit sequence.
 627	 */
 628	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
 629	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
 630		icl_tc_cold_exit(dev_priv);
 
 
 631
 632	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
 633
 634	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
 635		enum tc_port tc_port;
 636
 637		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
 638		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
 639			       HIP_INDEX_VAL(tc_port, 0x2));
 640
 641		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
 642					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
 643			drm_warn(&dev_priv->drm,
 644				 "Timeout waiting TC uC health\n");
 645	}
 646}
 647
 648static void
 649icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 650				  struct i915_power_well *power_well)
 651{
 652	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
 653	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
 654
 655	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
 656
 657	hsw_power_well_disable(dev_priv, power_well);
 658}
 659
 660static void
 661icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
 662			  struct i915_power_well *power_well)
 663{
 664	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
 
 
 665
 666	if (intel_phy_is_tc(dev_priv, phy))
 667		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
 668	else if (IS_ICELAKE(dev_priv))
 669		return icl_combo_phy_aux_power_well_enable(dev_priv,
 670							   power_well);
 671	else
 672		return hsw_power_well_enable(dev_priv, power_well);
 673}
 674
 675static void
 676icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
 677			   struct i915_power_well *power_well)
 678{
 679	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
 
 
 680
 681	if (intel_phy_is_tc(dev_priv, phy))
 682		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
 683	else if (IS_ICELAKE(dev_priv))
 684		return icl_combo_phy_aux_power_well_disable(dev_priv,
 685							    power_well);
 686	else
 687		return hsw_power_well_disable(dev_priv, power_well);
 688}
 689
 690/*
 691 * We should only use the power well if we explicitly asked the hardware to
 692 * enable it, so check if it's enabled and also check if we've requested it to
 693 * be enabled.
 694 */
 695static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
 696				   struct i915_power_well *power_well)
 697{
 698	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 699	enum i915_power_well_id id = power_well->desc->id;
 700	int pw_idx = power_well->desc->hsw.idx;
 701	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
 702		   HSW_PWR_WELL_CTL_STATE(pw_idx);
 703	u32 val;
 704
 705	val = intel_de_read(dev_priv, regs->driver);
 706
 707	/*
 708	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
 709	 * and the MISC_IO PW will be not restored, so check instead for the
 710	 * BIOS's own request bits, which are forced-on for these power wells
 711	 * when exiting DC5/6.
 712	 */
 713	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
 714	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
 715		val |= intel_de_read(dev_priv, regs->bios);
 716
 717	return (val & mask) == mask;
 718}
 719
 720static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 721{
 722	drm_WARN_ONCE(&dev_priv->drm,
 723		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
 724		      "DC9 already programmed to be enabled.\n");
 725	drm_WARN_ONCE(&dev_priv->drm,
 726		      intel_de_read(dev_priv, DC_STATE_EN) &
 727		      DC_STATE_EN_UPTO_DC5,
 728		      "DC5 still not disabled to enable DC9.\n");
 729	drm_WARN_ONCE(&dev_priv->drm,
 730		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
 731		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
 732		      "Power well 2 on.\n");
 733	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
 734		      "Interrupts not disabled yet.\n");
 735
 736	 /*
 737	  * TODO: check for the following to verify the conditions to enter DC9
 738	  * state are satisfied:
 739	  * 1] Check relevant display engine registers to verify if mode set
 740	  * disable sequence was followed.
 741	  * 2] Check if display uninitialize sequence is initialized.
 742	  */
 743}
 744
 745static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 746{
 747	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
 748		      "Interrupts not disabled yet.\n");
 749	drm_WARN_ONCE(&dev_priv->drm,
 750		      intel_de_read(dev_priv, DC_STATE_EN) &
 751		      DC_STATE_EN_UPTO_DC5,
 752		      "DC5 still not disabled.\n");
 753
 754	 /*
 755	  * TODO: check for the following to verify DC9 state was indeed
 756	  * entered before programming to disable it:
 757	  * 1] Check relevant display engine registers to verify if mode
 758	  *  set disable sequence was followed.
 759	  * 2] Check if display uninitialize sequence is initialized.
 760	  */
 761}
 762
 763static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
 764				u32 state)
 765{
 766	int rewrites = 0;
 767	int rereads = 0;
 768	u32 v;
 769
 770	intel_de_write(dev_priv, DC_STATE_EN, state);
 771
 772	/* It has been observed that disabling the dc6 state sometimes
 773	 * doesn't stick and dmc keeps returning old value. Make sure
 774	 * the write really sticks enough times and also force rewrite until
 775	 * we are confident that state is exactly what we want.
 776	 */
 777	do  {
 778		v = intel_de_read(dev_priv, DC_STATE_EN);
 779
 780		if (v != state) {
 781			intel_de_write(dev_priv, DC_STATE_EN, state);
 782			rewrites++;
 783			rereads = 0;
 784		} else if (rereads++ > 5) {
 785			break;
 786		}
 787
 788	} while (rewrites < 100);
 789
 790	if (v != state)
 791		drm_err(&dev_priv->drm,
 792			"Writing dc state to 0x%x failed, now 0x%x\n",
 793			state, v);
 794
 795	/* Most of the times we need one retry, avoid spam */
 796	if (rewrites > 1)
 797		drm_dbg_kms(&dev_priv->drm,
 798			    "Rewrote dc state to 0x%x %d times\n",
 799			    state, rewrites);
 800}
 801
 802static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
 803{
 804	u32 mask;
 805
 806	mask = DC_STATE_EN_UPTO_DC5;
 807
 808	if (DISPLAY_VER(dev_priv) >= 12)
 809		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
 810					  | DC_STATE_EN_DC9;
 811	else if (DISPLAY_VER(dev_priv) == 11)
 812		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
 813	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 814		mask |= DC_STATE_EN_DC9;
 815	else
 816		mask |= DC_STATE_EN_UPTO_DC6;
 817
 818	return mask;
 819}
 820
 821static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
 822{
 823	u32 val;
 824
 825	if (!HAS_DISPLAY(dev_priv))
 826		return;
 827
 828	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
 829
 830	drm_dbg_kms(&dev_priv->drm,
 831		    "Resetting DC state tracking from %02x to %02x\n",
 832		    dev_priv->dmc.dc_state, val);
 833	dev_priv->dmc.dc_state = val;
 834}
 835
 836/**
 837 * gen9_set_dc_state - set target display C power state
 838 * @dev_priv: i915 device instance
 839 * @state: target DC power state
 840 * - DC_STATE_DISABLE
 841 * - DC_STATE_EN_UPTO_DC5
 842 * - DC_STATE_EN_UPTO_DC6
 843 * - DC_STATE_EN_DC9
 844 *
 845 * Signal to DMC firmware/HW the target DC power state passed in @state.
 846 * DMC/HW can turn off individual display clocks and power rails when entering
 847 * a deeper DC power state (higher in number) and turns these back when exiting
 848 * that state to a shallower power state (lower in number). The HW will decide
 849 * when to actually enter a given state on an on-demand basis, for instance
 850 * depending on the active state of display pipes. The state of display
 851 * registers backed by affected power rails are saved/restored as needed.
 852 *
 853 * Based on the above enabling a deeper DC power state is asynchronous wrt.
 854 * enabling it. Disabling a deeper power state is synchronous: for instance
 855 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 856 * back on and register state is restored. This is guaranteed by the MMIO write
 857 * to DC_STATE_EN blocking until the state is restored.
 858 */
 859static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
 860{
 861	u32 val;
 862	u32 mask;
 863
 864	if (!HAS_DISPLAY(dev_priv))
 865		return;
 866
 867	if (drm_WARN_ON_ONCE(&dev_priv->drm,
 868			     state & ~dev_priv->dmc.allowed_dc_mask))
 869		state &= dev_priv->dmc.allowed_dc_mask;
 870
 871	val = intel_de_read(dev_priv, DC_STATE_EN);
 872	mask = gen9_dc_mask(dev_priv);
 873	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
 874		    val & mask, state);
 875
 876	/* Check if DMC is ignoring our DC state requests */
 877	if ((val & mask) != dev_priv->dmc.dc_state)
 878		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
 879			dev_priv->dmc.dc_state, val & mask);
 880
 881	val &= ~mask;
 882	val |= state;
 883
 884	gen9_write_dc_state(dev_priv, val);
 885
 886	dev_priv->dmc.dc_state = val & mask;
 887}
 888
 889static u32
 890sanitize_target_dc_state(struct drm_i915_private *dev_priv,
 891			 u32 target_dc_state)
 892{
 893	u32 states[] = {
 894		DC_STATE_EN_UPTO_DC6,
 895		DC_STATE_EN_UPTO_DC5,
 896		DC_STATE_EN_DC3CO,
 897		DC_STATE_DISABLE,
 898	};
 899	int i;
 900
 901	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
 902		if (target_dc_state != states[i])
 903			continue;
 904
 905		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
 906			break;
 907
 908		target_dc_state = states[i + 1];
 909	}
 910
 911	return target_dc_state;
 912}
 913
 914static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
 915{
 916	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
 917	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 918}
 919
 920static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
 921{
 922	u32 val;
 923
 924	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
 925	val = intel_de_read(dev_priv, DC_STATE_EN);
 926	val &= ~DC_STATE_DC3CO_STATUS;
 927	intel_de_write(dev_priv, DC_STATE_EN, val);
 928	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 929	/*
 930	 * Delay of 200us DC3CO Exit time B.Spec 49196
 931	 */
 932	usleep_range(200, 210);
 933}
 934
 935static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 936{
 937	assert_can_enable_dc9(dev_priv);
 938
 939	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
 940	/*
 941	 * Power sequencer reset is not needed on
 942	 * platforms with South Display Engine on PCH,
 943	 * because PPS registers are always on.
 944	 */
 945	if (!HAS_PCH_SPLIT(dev_priv))
 946		intel_pps_reset_all(dev_priv);
 947	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 948}
 949
 950static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
 951{
 952	assert_can_disable_dc9(dev_priv);
 953
 954	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
 955
 956	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 957
 958	intel_pps_unlock_regs_wa(dev_priv);
 959}
 960
 961static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
 962{
 963	drm_WARN_ONCE(&dev_priv->drm,
 964		      !intel_de_read(dev_priv, DMC_PROGRAM(0)),
 965		      "DMC program storage start is NULL\n");
 966	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
 967		      "DMC SSP Base Not fine\n");
 968	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
 969		      "DMC HTP Not fine\n");
 970}
 971
 972static struct i915_power_well *
 973lookup_power_well(struct drm_i915_private *dev_priv,
 974		  enum i915_power_well_id power_well_id)
 975{
 976	struct i915_power_well *power_well;
 977
 978	for_each_power_well(dev_priv, power_well)
 979		if (power_well->desc->id == power_well_id)
 980			return power_well;
 981
 982	/*
 983	 * It's not feasible to add error checking code to the callers since
 984	 * this condition really shouldn't happen and it doesn't even make sense
 985	 * to abort things like display initialization sequences. Just return
 986	 * the first power well and hope the WARN gets reported so we can fix
 987	 * our driver.
 988	 */
 989	drm_WARN(&dev_priv->drm, 1,
 990		 "Power well %d not defined for this platform\n",
 991		 power_well_id);
 992	return &dev_priv->power_domains.power_wells[0];
 993}
 994
 995/**
 996 * intel_display_power_set_target_dc_state - Set target dc state.
 997 * @dev_priv: i915 device
 998 * @state: state which needs to be set as target_dc_state.
 999 *
1000 * This function set the "DC off" power well target_dc_state,
1001 * based upon this target_dc_stste, "DC off" power well will
1002 * enable desired DC state.
1003 */
1004void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
1005					     u32 state)
1006{
1007	struct i915_power_well *power_well;
1008	bool dc_off_enabled;
1009	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1010
1011	mutex_lock(&power_domains->lock);
1012	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
1013
1014	if (drm_WARN_ON(&dev_priv->drm, !power_well))
1015		goto unlock;
1016
1017	state = sanitize_target_dc_state(dev_priv, state);
1018
1019	if (state == dev_priv->dmc.target_dc_state)
1020		goto unlock;
1021
1022	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1023							   power_well);
1024	/*
1025	 * If DC off power well is disabled, need to enable and disable the
1026	 * DC off power well to effect target DC state.
1027	 */
1028	if (!dc_off_enabled)
1029		power_well->desc->ops->enable(dev_priv, power_well);
1030
1031	dev_priv->dmc.target_dc_state = state;
1032
1033	if (!dc_off_enabled)
1034		power_well->desc->ops->disable(dev_priv, power_well);
1035
1036unlock:
1037	mutex_unlock(&power_domains->lock);
1038}
1039
1040static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
1041{
1042	enum i915_power_well_id high_pg;
1043
1044	/* Power wells at this level and above must be disabled for DC5 entry */
1045	if (DISPLAY_VER(dev_priv) == 12)
1046		high_pg = ICL_DISP_PW_3;
1047	else
1048		high_pg = SKL_DISP_PW_2;
1049
1050	drm_WARN_ONCE(&dev_priv->drm,
1051		      intel_display_power_well_is_enabled(dev_priv, high_pg),
1052		      "Power wells above platform's DC5 limit still enabled.\n");
1053
1054	drm_WARN_ONCE(&dev_priv->drm,
1055		      (intel_de_read(dev_priv, DC_STATE_EN) &
1056		       DC_STATE_EN_UPTO_DC5),
1057		      "DC5 already programmed to be enabled.\n");
1058	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1059
1060	assert_dmc_loaded(dev_priv);
1061}
1062
1063static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1064{
1065	assert_can_enable_dc5(dev_priv);
1066
1067	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
1068
1069	/* Wa Display #1183: skl,kbl,cfl */
1070	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
1071		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1072			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1073
1074	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1075}
1076
1077static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1078{
1079	drm_WARN_ONCE(&dev_priv->drm,
1080		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1081		      "Backlight is not disabled.\n");
1082	drm_WARN_ONCE(&dev_priv->drm,
1083		      (intel_de_read(dev_priv, DC_STATE_EN) &
1084		       DC_STATE_EN_UPTO_DC6),
1085		      "DC6 already programmed to be enabled.\n");
1086
1087	assert_dmc_loaded(dev_priv);
1088}
1089
1090static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1091{
1092	assert_can_enable_dc6(dev_priv);
1093
1094	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1095
1096	/* Wa Display #1183: skl,kbl,cfl */
1097	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
1098		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1099			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1100
1101	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1102}
1103
1104static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1105				   struct i915_power_well *power_well)
1106{
1107	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1108	int pw_idx = power_well->desc->hsw.idx;
1109	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1110	u32 bios_req = intel_de_read(dev_priv, regs->bios);
1111
1112	/* Take over the request bit if set by BIOS. */
1113	if (bios_req & mask) {
1114		u32 drv_req = intel_de_read(dev_priv, regs->driver);
1115
1116		if (!(drv_req & mask))
1117			intel_de_write(dev_priv, regs->driver, drv_req | mask);
1118		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1119	}
1120}
1121
1122static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1123					   struct i915_power_well *power_well)
1124{
1125	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1126}
1127
1128static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1129					    struct i915_power_well *power_well)
1130{
1131	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1132}
1133
1134static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1135					    struct i915_power_well *power_well)
1136{
1137	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1138}
1139
1140static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1141{
1142	struct i915_power_well *power_well;
1143
1144	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1145	if (power_well->count > 0)
1146		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1147
1148	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1149	if (power_well->count > 0)
1150		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1151
1152	if (IS_GEMINILAKE(dev_priv)) {
1153		power_well = lookup_power_well(dev_priv,
1154					       GLK_DISP_PW_DPIO_CMN_C);
1155		if (power_well->count > 0)
1156			bxt_ddi_phy_verify_state(dev_priv,
1157						 power_well->desc->bxt.phy);
1158	}
1159}
1160
1161static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1162					   struct i915_power_well *power_well)
1163{
1164	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1165		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1166}
1167
1168static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1169{
1170	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1171	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
1172
1173	drm_WARN(&dev_priv->drm,
1174		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1175		 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1176		 hw_enabled_dbuf_slices,
1177		 enabled_dbuf_slices);
1178}
1179
1180static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1181{
1182	struct intel_cdclk_config cdclk_config = {};
1183
1184	if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
1185		tgl_disable_dc3co(dev_priv);
1186		return;
1187	}
1188
1189	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1190
1191	if (!HAS_DISPLAY(dev_priv))
1192		return;
1193
1194	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1195	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
1196	drm_WARN_ON(&dev_priv->drm,
1197		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1198					      &cdclk_config));
1199
1200	gen9_assert_dbuf_enabled(dev_priv);
1201
1202	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
1203		bxt_verify_ddi_phy_power_wells(dev_priv);
1204
1205	if (DISPLAY_VER(dev_priv) >= 11)
1206		/*
1207		 * DMC retains HW context only for port A, the other combo
1208		 * PHY's HW context for port B is lost after DC transitions,
1209		 * so we need to restore it manually.
1210		 */
1211		intel_combo_phy_init(dev_priv);
1212}
1213
1214static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1215					  struct i915_power_well *power_well)
1216{
1217	gen9_disable_dc_states(dev_priv);
1218}
1219
1220static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1221					   struct i915_power_well *power_well)
1222{
1223	if (!intel_dmc_has_payload(dev_priv))
1224		return;
1225
1226	switch (dev_priv->dmc.target_dc_state) {
1227	case DC_STATE_EN_DC3CO:
1228		tgl_enable_dc3co(dev_priv);
1229		break;
1230	case DC_STATE_EN_UPTO_DC6:
1231		skl_enable_dc6(dev_priv);
1232		break;
1233	case DC_STATE_EN_UPTO_DC5:
1234		gen9_enable_dc5(dev_priv);
1235		break;
1236	}
1237}
1238
1239static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1240					 struct i915_power_well *power_well)
1241{
1242}
1243
1244static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1245					   struct i915_power_well *power_well)
1246{
1247}
1248
1249static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1250					     struct i915_power_well *power_well)
1251{
1252	return true;
1253}
1254
1255static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1256					 struct i915_power_well *power_well)
1257{
1258	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1259		i830_enable_pipe(dev_priv, PIPE_A);
1260	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1261		i830_enable_pipe(dev_priv, PIPE_B);
1262}
1263
1264static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1265					  struct i915_power_well *power_well)
1266{
1267	i830_disable_pipe(dev_priv, PIPE_B);
1268	i830_disable_pipe(dev_priv, PIPE_A);
1269}
1270
1271static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1272					  struct i915_power_well *power_well)
1273{
1274	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1275		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1276}
1277
1278static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1279					  struct i915_power_well *power_well)
1280{
1281	if (power_well->count > 0)
1282		i830_pipes_power_well_enable(dev_priv, power_well);
1283	else
1284		i830_pipes_power_well_disable(dev_priv, power_well);
1285}
1286
1287static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1288			       struct i915_power_well *power_well, bool enable)
1289{
1290	int pw_idx = power_well->desc->vlv.idx;
1291	u32 mask;
1292	u32 state;
1293	u32 ctrl;
1294
1295	mask = PUNIT_PWRGT_MASK(pw_idx);
1296	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1297			 PUNIT_PWRGT_PWR_GATE(pw_idx);
1298
1299	vlv_punit_get(dev_priv);
1300
1301#define COND \
1302	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1303
1304	if (COND)
1305		goto out;
1306
1307	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1308	ctrl &= ~mask;
1309	ctrl |= state;
1310	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1311
1312	if (wait_for(COND, 100))
1313		drm_err(&dev_priv->drm,
1314			"timeout setting power well state %08x (%08x)\n",
1315			state,
1316			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1317
1318#undef COND
1319
1320out:
1321	vlv_punit_put(dev_priv);
1322}
1323
1324static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1325				  struct i915_power_well *power_well)
1326{
1327	vlv_set_power_well(dev_priv, power_well, true);
1328}
1329
1330static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1331				   struct i915_power_well *power_well)
1332{
1333	vlv_set_power_well(dev_priv, power_well, false);
1334}
1335
1336static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1337				   struct i915_power_well *power_well)
1338{
1339	int pw_idx = power_well->desc->vlv.idx;
1340	bool enabled = false;
1341	u32 mask;
1342	u32 state;
1343	u32 ctrl;
1344
1345	mask = PUNIT_PWRGT_MASK(pw_idx);
1346	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1347
1348	vlv_punit_get(dev_priv);
1349
1350	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1351	/*
1352	 * We only ever set the power-on and power-gate states, anything
1353	 * else is unexpected.
1354	 */
1355	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1356		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1357	if (state == ctrl)
1358		enabled = true;
1359
1360	/*
1361	 * A transient state at this point would mean some unexpected party
1362	 * is poking at the power controls too.
1363	 */
1364	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1365	drm_WARN_ON(&dev_priv->drm, ctrl != state);
1366
1367	vlv_punit_put(dev_priv);
1368
1369	return enabled;
1370}
1371
1372static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1373{
1374	u32 val;
1375
1376	/*
1377	 * On driver load, a pipe may be active and driving a DSI display.
1378	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1379	 * (and never recovering) in this case. intel_dsi_post_disable() will
1380	 * clear it when we turn off the display.
1381	 */
1382	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1383	val &= DPOUNIT_CLOCK_GATE_DISABLE;
1384	val |= VRHUNIT_CLOCK_GATE_DISABLE;
1385	intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1386
1387	/*
1388	 * Disable trickle feed and enable pnd deadline calculation
1389	 */
1390	intel_de_write(dev_priv, MI_ARB_VLV,
1391		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1392	intel_de_write(dev_priv, CBR1_VLV, 0);
1393
1394	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1395	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1396		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1397					 1000));
1398}
1399
1400static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1401{
1402	struct intel_encoder *encoder;
1403	enum pipe pipe;
1404
1405	/*
1406	 * Enable the CRI clock source so we can get at the
1407	 * display and the reference clock for VGA
1408	 * hotplug / manual detection. Supposedly DSI also
1409	 * needs the ref clock up and running.
1410	 *
1411	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1412	 */
1413	for_each_pipe(dev_priv, pipe) {
1414		u32 val = intel_de_read(dev_priv, DPLL(pipe));
1415
1416		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1417		if (pipe != PIPE_A)
1418			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1419
1420		intel_de_write(dev_priv, DPLL(pipe), val);
1421	}
1422
1423	vlv_init_display_clock_gating(dev_priv);
1424
1425	spin_lock_irq(&dev_priv->irq_lock);
1426	valleyview_enable_display_irqs(dev_priv);
1427	spin_unlock_irq(&dev_priv->irq_lock);
1428
1429	/*
1430	 * During driver initialization/resume we can avoid restoring the
1431	 * part of the HW/SW state that will be inited anyway explicitly.
1432	 */
1433	if (dev_priv->power_domains.initializing)
1434		return;
1435
1436	intel_hpd_init(dev_priv);
1437	intel_hpd_poll_disable(dev_priv);
1438
1439	/* Re-enable the ADPA, if we have one */
1440	for_each_intel_encoder(&dev_priv->drm, encoder) {
1441		if (encoder->type == INTEL_OUTPUT_ANALOG)
1442			intel_crt_reset(&encoder->base);
1443	}
1444
1445	intel_vga_redisable_power_on(dev_priv);
1446
1447	intel_pps_unlock_regs_wa(dev_priv);
1448}
1449
1450static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1451{
1452	spin_lock_irq(&dev_priv->irq_lock);
1453	valleyview_disable_display_irqs(dev_priv);
1454	spin_unlock_irq(&dev_priv->irq_lock);
1455
1456	/* make sure we're done processing display irqs */
1457	intel_synchronize_irq(dev_priv);
1458
1459	intel_pps_reset_all(dev_priv);
1460
1461	/* Prevent us from re-enabling polling on accident in late suspend */
1462	if (!dev_priv->drm.dev->power.is_suspended)
1463		intel_hpd_poll_enable(dev_priv);
1464}
1465
1466static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1467					  struct i915_power_well *power_well)
1468{
1469	vlv_set_power_well(dev_priv, power_well, true);
1470
1471	vlv_display_power_well_init(dev_priv);
1472}
1473
1474static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1475					   struct i915_power_well *power_well)
1476{
1477	vlv_display_power_well_deinit(dev_priv);
1478
1479	vlv_set_power_well(dev_priv, power_well, false);
1480}
1481
1482static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1483					   struct i915_power_well *power_well)
1484{
1485	/* since ref/cri clock was enabled */
1486	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1487
1488	vlv_set_power_well(dev_priv, power_well, true);
1489
1490	/*
1491	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1492	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1493	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1494	 *   b.	The other bits such as sfr settings / modesel may all
1495	 *	be set to 0.
1496	 *
1497	 * This should only be done on init and resume from S3 with
1498	 * both PLLs disabled, or we risk losing DPIO and PLL
1499	 * synchronization.
1500	 */
1501	intel_de_write(dev_priv, DPIO_CTL,
1502		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
1503}
1504
1505static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1506					    struct i915_power_well *power_well)
1507{
1508	enum pipe pipe;
1509
1510	for_each_pipe(dev_priv, pipe)
1511		assert_pll_disabled(dev_priv, pipe);
1512
1513	/* Assert common reset */
1514	intel_de_write(dev_priv, DPIO_CTL,
1515		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1516
1517	vlv_set_power_well(dev_priv, power_well, false);
1518}
1519
1520#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1521
1522#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1523
1524static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1525{
1526	struct i915_power_well *cmn_bc =
1527		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1528	struct i915_power_well *cmn_d =
1529		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1530	u32 phy_control = dev_priv->chv_phy_control;
1531	u32 phy_status = 0;
1532	u32 phy_status_mask = 0xffffffff;
1533
1534	/*
1535	 * The BIOS can leave the PHY is some weird state
1536	 * where it doesn't fully power down some parts.
1537	 * Disable the asserts until the PHY has been fully
1538	 * reset (ie. the power well has been disabled at
1539	 * least once).
1540	 */
1541	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1542		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1543				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1544				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1545				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1546				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1547				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1548
1549	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1550		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1551				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1552				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1553
1554	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1555		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1556
1557		/* this assumes override is only used to enable lanes */
1558		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1559			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1560
1561		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1562			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1563
1564		/* CL1 is on whenever anything is on in either channel */
1565		if (BITS_SET(phy_control,
1566			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1567			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1568			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1569
1570		/*
1571		 * The DPLLB check accounts for the pipe B + port A usage
1572		 * with CL2 powered up but all the lanes in the second channel
1573		 * powered down.
1574		 */
1575		if (BITS_SET(phy_control,
1576			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1577		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1578			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1579
1580		if (BITS_SET(phy_control,
1581			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1582			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1583		if (BITS_SET(phy_control,
1584			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1585			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1586
1587		if (BITS_SET(phy_control,
1588			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1589			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1590		if (BITS_SET(phy_control,
1591			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1592			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1593	}
1594
1595	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1596		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1597
1598		/* this assumes override is only used to enable lanes */
1599		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1600			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1601
1602		if (BITS_SET(phy_control,
1603			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1604			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1605
1606		if (BITS_SET(phy_control,
1607			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1608			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1609		if (BITS_SET(phy_control,
1610			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1611			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1612	}
1613
1614	phy_status &= phy_status_mask;
1615
1616	/*
1617	 * The PHY may be busy with some initial calibration and whatnot,
1618	 * so the power state can take a while to actually change.
1619	 */
1620	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1621				       phy_status_mask, phy_status, 10))
1622		drm_err(&dev_priv->drm,
1623			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1624			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1625			phy_status, dev_priv->chv_phy_control);
1626}
1627
1628#undef BITS_SET
1629
1630static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1631					   struct i915_power_well *power_well)
1632{
1633	enum dpio_phy phy;
1634	enum pipe pipe;
1635	u32 tmp;
1636
1637	drm_WARN_ON_ONCE(&dev_priv->drm,
1638			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1639			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1640
1641	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1642		pipe = PIPE_A;
1643		phy = DPIO_PHY0;
1644	} else {
1645		pipe = PIPE_C;
1646		phy = DPIO_PHY1;
1647	}
1648
1649	/* since ref/cri clock was enabled */
1650	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1651	vlv_set_power_well(dev_priv, power_well, true);
1652
1653	/* Poll for phypwrgood signal */
1654	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1655				  PHY_POWERGOOD(phy), 1))
1656		drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
1657			phy);
1658
1659	vlv_dpio_get(dev_priv);
1660
1661	/* Enable dynamic power down */
1662	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1663	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1664		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1665	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1666
1667	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1668		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1669		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1670		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1671	} else {
1672		/*
1673		 * Force the non-existing CL2 off. BXT does this
1674		 * too, so maybe it saves some power even though
1675		 * CL2 doesn't exist?
1676		 */
1677		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1678		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1679		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1680	}
1681
1682	vlv_dpio_put(dev_priv);
1683
1684	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1685	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1686		       dev_priv->chv_phy_control);
1687
1688	drm_dbg_kms(&dev_priv->drm,
1689		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1690		    phy, dev_priv->chv_phy_control);
1691
1692	assert_chv_phy_status(dev_priv);
1693}
1694
1695static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1696					    struct i915_power_well *power_well)
1697{
1698	enum dpio_phy phy;
1699
1700	drm_WARN_ON_ONCE(&dev_priv->drm,
1701			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1702			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1703
1704	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1705		phy = DPIO_PHY0;
1706		assert_pll_disabled(dev_priv, PIPE_A);
1707		assert_pll_disabled(dev_priv, PIPE_B);
1708	} else {
1709		phy = DPIO_PHY1;
1710		assert_pll_disabled(dev_priv, PIPE_C);
1711	}
1712
1713	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1714	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1715		       dev_priv->chv_phy_control);
1716
1717	vlv_set_power_well(dev_priv, power_well, false);
1718
1719	drm_dbg_kms(&dev_priv->drm,
1720		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1721		    phy, dev_priv->chv_phy_control);
1722
1723	/* PHY is fully reset now, so we can enable the PHY state asserts */
1724	dev_priv->chv_phy_assert[phy] = true;
1725
1726	assert_chv_phy_status(dev_priv);
1727}
1728
1729static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1730				     enum dpio_channel ch, bool override, unsigned int mask)
1731{
1732	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1733	u32 reg, val, expected, actual;
1734
1735	/*
1736	 * The BIOS can leave the PHY is some weird state
1737	 * where it doesn't fully power down some parts.
1738	 * Disable the asserts until the PHY has been fully
1739	 * reset (ie. the power well has been disabled at
1740	 * least once).
1741	 */
1742	if (!dev_priv->chv_phy_assert[phy])
1743		return;
1744
1745	if (ch == DPIO_CH0)
1746		reg = _CHV_CMN_DW0_CH0;
1747	else
1748		reg = _CHV_CMN_DW6_CH1;
1749
1750	vlv_dpio_get(dev_priv);
1751	val = vlv_dpio_read(dev_priv, pipe, reg);
1752	vlv_dpio_put(dev_priv);
1753
1754	/*
1755	 * This assumes !override is only used when the port is disabled.
1756	 * All lanes should power down even without the override when
1757	 * the port is disabled.
1758	 */
1759	if (!override || mask == 0xf) {
1760		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1761		/*
1762		 * If CH1 common lane is not active anymore
1763		 * (eg. for pipe B DPLL) the entire channel will
1764		 * shut down, which causes the common lane registers
1765		 * to read as 0. That means we can't actually check
1766		 * the lane power down status bits, but as the entire
1767		 * register reads as 0 it's a good indication that the
1768		 * channel is indeed entirely powered down.
1769		 */
1770		if (ch == DPIO_CH1 && val == 0)
1771			expected = 0;
1772	} else if (mask != 0x0) {
1773		expected = DPIO_ANYDL_POWERDOWN;
1774	} else {
1775		expected = 0;
1776	}
1777
1778	if (ch == DPIO_CH0)
1779		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1780	else
1781		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1782	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1783
1784	drm_WARN(&dev_priv->drm, actual != expected,
1785		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1786		 !!(actual & DPIO_ALLDL_POWERDOWN),
1787		 !!(actual & DPIO_ANYDL_POWERDOWN),
1788		 !!(expected & DPIO_ALLDL_POWERDOWN),
1789		 !!(expected & DPIO_ANYDL_POWERDOWN),
1790		 reg, val);
1791}
1792
1793bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1794			  enum dpio_channel ch, bool override)
1795{
1796	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1797	bool was_override;
1798
1799	mutex_lock(&power_domains->lock);
1800
1801	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1802
1803	if (override == was_override)
1804		goto out;
1805
1806	if (override)
1807		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1808	else
1809		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1810
1811	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1812		       dev_priv->chv_phy_control);
1813
1814	drm_dbg_kms(&dev_priv->drm,
1815		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1816		    phy, ch, dev_priv->chv_phy_control);
1817
1818	assert_chv_phy_status(dev_priv);
1819
1820out:
1821	mutex_unlock(&power_domains->lock);
1822
1823	return was_override;
1824}
1825
1826void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1827			     bool override, unsigned int mask)
1828{
1829	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1830	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1831	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1832	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
1833
1834	mutex_lock(&power_domains->lock);
1835
1836	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1837	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1838
1839	if (override)
1840		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1841	else
1842		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1843
1844	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1845		       dev_priv->chv_phy_control);
1846
1847	drm_dbg_kms(&dev_priv->drm,
1848		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1849		    phy, ch, mask, dev_priv->chv_phy_control);
1850
1851	assert_chv_phy_status(dev_priv);
1852
1853	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1854
1855	mutex_unlock(&power_domains->lock);
1856}
1857
1858static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1859					struct i915_power_well *power_well)
1860{
1861	enum pipe pipe = PIPE_A;
1862	bool enabled;
1863	u32 state, ctrl;
1864
1865	vlv_punit_get(dev_priv);
1866
1867	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1868	/*
1869	 * We only ever set the power-on and power-gate states, anything
1870	 * else is unexpected.
1871	 */
1872	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1873		    state != DP_SSS_PWR_GATE(pipe));
1874	enabled = state == DP_SSS_PWR_ON(pipe);
1875
1876	/*
1877	 * A transient state at this point would mean some unexpected party
1878	 * is poking at the power controls too.
1879	 */
1880	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1881	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1882
1883	vlv_punit_put(dev_priv);
1884
1885	return enabled;
1886}
1887
1888static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1889				    struct i915_power_well *power_well,
1890				    bool enable)
1891{
1892	enum pipe pipe = PIPE_A;
1893	u32 state;
1894	u32 ctrl;
1895
1896	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1897
1898	vlv_punit_get(dev_priv);
1899
1900#define COND \
1901	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1902
1903	if (COND)
1904		goto out;
1905
1906	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1907	ctrl &= ~DP_SSC_MASK(pipe);
1908	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1909	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1910
1911	if (wait_for(COND, 100))
1912		drm_err(&dev_priv->drm,
1913			"timeout setting power well state %08x (%08x)\n",
1914			state,
1915			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1916
1917#undef COND
1918
1919out:
1920	vlv_punit_put(dev_priv);
1921}
1922
1923static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1924					struct i915_power_well *power_well)
1925{
1926	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1927		       dev_priv->chv_phy_control);
1928}
1929
1930static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1931				       struct i915_power_well *power_well)
1932{
1933	chv_set_pipe_power_well(dev_priv, power_well, true);
1934
1935	vlv_display_power_well_init(dev_priv);
1936}
1937
1938static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1939					struct i915_power_well *power_well)
1940{
1941	vlv_display_power_well_deinit(dev_priv);
1942
1943	chv_set_pipe_power_well(dev_priv, power_well, false);
1944}
1945
1946static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1947{
1948	return power_domains->async_put_domains[0] |
1949	       power_domains->async_put_domains[1];
1950}
1951
1952#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1953
1954static bool
1955assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1956{
1957	struct drm_i915_private *i915 = container_of(power_domains,
1958						     struct drm_i915_private,
1959						     power_domains);
1960	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
1961			    power_domains->async_put_domains[1]);
1962}
1963
1964static bool
1965__async_put_domains_state_ok(struct i915_power_domains *power_domains)
1966{
1967	struct drm_i915_private *i915 = container_of(power_domains,
1968						     struct drm_i915_private,
1969						     power_domains);
1970	enum intel_display_power_domain domain;
1971	bool err = false;
1972
1973	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1974	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
1975			   !!__async_put_domains_mask(power_domains));
1976
1977	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1978		err |= drm_WARN_ON(&i915->drm,
1979				   power_domains->domain_use_count[domain] != 1);
1980
1981	return !err;
1982}
1983
1984static void print_power_domains(struct i915_power_domains *power_domains,
1985				const char *prefix, u64 mask)
1986{
1987	struct drm_i915_private *i915 = container_of(power_domains,
1988						     struct drm_i915_private,
1989						     power_domains);
1990	enum intel_display_power_domain domain;
1991
1992	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1993	for_each_power_domain(domain, mask)
1994		drm_dbg(&i915->drm, "%s use_count %d\n",
1995			intel_display_power_domain_str(domain),
1996			power_domains->domain_use_count[domain]);
1997}
1998
1999static void
2000print_async_put_domains_state(struct i915_power_domains *power_domains)
2001{
2002	struct drm_i915_private *i915 = container_of(power_domains,
2003						     struct drm_i915_private,
2004						     power_domains);
2005
2006	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
2007		power_domains->async_put_wakeref);
2008
2009	print_power_domains(power_domains, "async_put_domains[0]",
2010			    power_domains->async_put_domains[0]);
2011	print_power_domains(power_domains, "async_put_domains[1]",
2012			    power_domains->async_put_domains[1]);
2013}
2014
2015static void
2016verify_async_put_domains_state(struct i915_power_domains *power_domains)
2017{
2018	if (!__async_put_domains_state_ok(power_domains))
2019		print_async_put_domains_state(power_domains);
2020}
2021
2022#else
2023
2024static void
2025assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
2026{
2027}
2028
2029static void
2030verify_async_put_domains_state(struct i915_power_domains *power_domains)
2031{
2032}
2033
2034#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2035
2036static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2037{
2038	assert_async_put_domain_masks_disjoint(power_domains);
2039
2040	return __async_put_domains_mask(power_domains);
2041}
2042
2043static void
2044async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2045			       enum intel_display_power_domain domain)
2046{
2047	assert_async_put_domain_masks_disjoint(power_domains);
2048
2049	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2050	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2051}
2052
2053static bool
2054intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2055				       enum intel_display_power_domain domain)
2056{
2057	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2058	bool ret = false;
2059
2060	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2061		goto out_verify;
2062
2063	async_put_domains_clear_domain(power_domains, domain);
2064
2065	ret = true;
2066
2067	if (async_put_domains_mask(power_domains))
2068		goto out_verify;
2069
2070	cancel_delayed_work(&power_domains->async_put_work);
2071	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2072				 fetch_and_zero(&power_domains->async_put_wakeref));
2073out_verify:
2074	verify_async_put_domains_state(power_domains);
2075
2076	return ret;
2077}
2078
2079static void
2080__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2081				 enum intel_display_power_domain domain)
2082{
2083	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2084	struct i915_power_well *power_well;
2085
2086	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2087		return;
2088
2089	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2090		intel_power_well_get(dev_priv, power_well);
2091
2092	power_domains->domain_use_count[domain]++;
2093}
2094
2095/**
2096 * intel_display_power_get - grab a power domain reference
2097 * @dev_priv: i915 device instance
2098 * @domain: power domain to reference
2099 *
2100 * This function grabs a power domain reference for @domain and ensures that the
2101 * power domain and all its parents are powered up. Therefore users should only
2102 * grab a reference to the innermost power domain they need.
2103 *
2104 * Any power domain reference obtained by this function must have a symmetric
2105 * call to intel_display_power_put() to release the reference again.
2106 */
2107intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2108					enum intel_display_power_domain domain)
2109{
2110	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2111	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2112
2113	mutex_lock(&power_domains->lock);
2114	__intel_display_power_get_domain(dev_priv, domain);
2115	mutex_unlock(&power_domains->lock);
2116
2117	return wakeref;
2118}
2119
2120/**
2121 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2122 * @dev_priv: i915 device instance
2123 * @domain: power domain to reference
2124 *
2125 * This function grabs a power domain reference for @domain and ensures that the
2126 * power domain and all its parents are powered up. Therefore users should only
2127 * grab a reference to the innermost power domain they need.
2128 *
2129 * Any power domain reference obtained by this function must have a symmetric
2130 * call to intel_display_power_put() to release the reference again.
2131 */
2132intel_wakeref_t
2133intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2134				   enum intel_display_power_domain domain)
2135{
2136	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2137	intel_wakeref_t wakeref;
2138	bool is_enabled;
2139
2140	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2141	if (!wakeref)
2142		return false;
2143
2144	mutex_lock(&power_domains->lock);
2145
2146	if (__intel_display_power_is_enabled(dev_priv, domain)) {
2147		__intel_display_power_get_domain(dev_priv, domain);
2148		is_enabled = true;
2149	} else {
2150		is_enabled = false;
2151	}
2152
2153	mutex_unlock(&power_domains->lock);
2154
2155	if (!is_enabled) {
2156		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2157		wakeref = 0;
2158	}
2159
2160	return wakeref;
2161}
2162
2163static void
2164__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2165				 enum intel_display_power_domain domain)
2166{
2167	struct i915_power_domains *power_domains;
2168	struct i915_power_well *power_well;
2169	const char *name = intel_display_power_domain_str(domain);
2170
2171	power_domains = &dev_priv->power_domains;
2172
2173	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2174		 "Use count on domain %s is already zero\n",
2175		 name);
2176	drm_WARN(&dev_priv->drm,
2177		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2178		 "Async disabling of domain %s is pending\n",
2179		 name);
2180
2181	power_domains->domain_use_count[domain]--;
2182
2183	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2184		intel_power_well_put(dev_priv, power_well);
2185}
2186
2187static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2188				      enum intel_display_power_domain domain)
2189{
2190	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2191
2192	mutex_lock(&power_domains->lock);
2193	__intel_display_power_put_domain(dev_priv, domain);
2194	mutex_unlock(&power_domains->lock);
2195}
2196
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2197static void
2198queue_async_put_domains_work(struct i915_power_domains *power_domains,
2199			     intel_wakeref_t wakeref)
2200{
2201	struct drm_i915_private *i915 = container_of(power_domains,
2202						     struct drm_i915_private,
2203						     power_domains);
2204	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2205	power_domains->async_put_wakeref = wakeref;
2206	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
2207						    &power_domains->async_put_work,
2208						    msecs_to_jiffies(100)));
2209}
2210
2211static void
2212release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2213{
2214	struct drm_i915_private *dev_priv =
2215		container_of(power_domains, struct drm_i915_private,
2216			     power_domains);
2217	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2218	enum intel_display_power_domain domain;
2219	intel_wakeref_t wakeref;
2220
2221	/*
2222	 * The caller must hold already raw wakeref, upgrade that to a proper
2223	 * wakeref to make the state checker happy about the HW access during
2224	 * power well disabling.
2225	 */
2226	assert_rpm_raw_wakeref_held(rpm);
2227	wakeref = intel_runtime_pm_get(rpm);
2228
2229	for_each_power_domain(domain, mask) {
2230		/* Clear before put, so put's sanity check is happy. */
2231		async_put_domains_clear_domain(power_domains, domain);
2232		__intel_display_power_put_domain(dev_priv, domain);
2233	}
2234
2235	intel_runtime_pm_put(rpm, wakeref);
2236}
2237
2238static void
2239intel_display_power_put_async_work(struct work_struct *work)
2240{
2241	struct drm_i915_private *dev_priv =
2242		container_of(work, struct drm_i915_private,
2243			     power_domains.async_put_work.work);
2244	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2245	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2246	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2247	intel_wakeref_t old_work_wakeref = 0;
2248
2249	mutex_lock(&power_domains->lock);
2250
2251	/*
2252	 * Bail out if all the domain refs pending to be released were grabbed
2253	 * by subsequent gets or a flush_work.
2254	 */
2255	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2256	if (!old_work_wakeref)
2257		goto out_verify;
2258
2259	release_async_put_domains(power_domains,
2260				  power_domains->async_put_domains[0]);
2261
2262	/* Requeue the work if more domains were async put meanwhile. */
2263	if (power_domains->async_put_domains[1]) {
2264		power_domains->async_put_domains[0] =
2265			fetch_and_zero(&power_domains->async_put_domains[1]);
2266		queue_async_put_domains_work(power_domains,
2267					     fetch_and_zero(&new_work_wakeref));
2268	} else {
2269		/*
2270		 * Cancel the work that got queued after this one got dequeued,
2271		 * since here we released the corresponding async-put reference.
2272		 */
2273		cancel_delayed_work(&power_domains->async_put_work);
2274	}
2275
2276out_verify:
2277	verify_async_put_domains_state(power_domains);
2278
2279	mutex_unlock(&power_domains->lock);
2280
2281	if (old_work_wakeref)
2282		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2283	if (new_work_wakeref)
2284		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2285}
2286
2287/**
2288 * intel_display_power_put_async - release a power domain reference asynchronously
2289 * @i915: i915 device instance
2290 * @domain: power domain to reference
2291 * @wakeref: wakeref acquired for the reference that is being released
2292 *
2293 * This function drops the power domain reference obtained by
2294 * intel_display_power_get*() and schedules a work to power down the
2295 * corresponding hardware block if this is the last reference.
2296 */
2297void __intel_display_power_put_async(struct drm_i915_private *i915,
2298				     enum intel_display_power_domain domain,
2299				     intel_wakeref_t wakeref)
2300{
2301	struct i915_power_domains *power_domains = &i915->power_domains;
2302	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2303	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2304
2305	mutex_lock(&power_domains->lock);
2306
2307	if (power_domains->domain_use_count[domain] > 1) {
2308		__intel_display_power_put_domain(i915, domain);
2309
2310		goto out_verify;
2311	}
2312
2313	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2314
2315	/* Let a pending work requeue itself or queue a new one. */
2316	if (power_domains->async_put_wakeref) {
2317		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2318	} else {
2319		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2320		queue_async_put_domains_work(power_domains,
2321					     fetch_and_zero(&work_wakeref));
2322	}
2323
2324out_verify:
2325	verify_async_put_domains_state(power_domains);
2326
2327	mutex_unlock(&power_domains->lock);
2328
2329	if (work_wakeref)
2330		intel_runtime_pm_put_raw(rpm, work_wakeref);
2331
2332	intel_runtime_pm_put(rpm, wakeref);
2333}
2334
2335/**
2336 * intel_display_power_flush_work - flushes the async display power disabling work
2337 * @i915: i915 device instance
2338 *
2339 * Flushes any pending work that was scheduled by a preceding
2340 * intel_display_power_put_async() call, completing the disabling of the
2341 * corresponding power domains.
2342 *
2343 * Note that the work handler function may still be running after this
2344 * function returns; to ensure that the work handler isn't running use
2345 * intel_display_power_flush_work_sync() instead.
2346 */
2347void intel_display_power_flush_work(struct drm_i915_private *i915)
2348{
2349	struct i915_power_domains *power_domains = &i915->power_domains;
2350	intel_wakeref_t work_wakeref;
2351
2352	mutex_lock(&power_domains->lock);
2353
2354	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2355	if (!work_wakeref)
2356		goto out_verify;
2357
2358	release_async_put_domains(power_domains,
2359				  async_put_domains_mask(power_domains));
2360	cancel_delayed_work(&power_domains->async_put_work);
2361
2362out_verify:
2363	verify_async_put_domains_state(power_domains);
2364
2365	mutex_unlock(&power_domains->lock);
2366
2367	if (work_wakeref)
2368		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2369}
2370
2371/**
2372 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2373 * @i915: i915 device instance
2374 *
2375 * Like intel_display_power_flush_work(), but also ensure that the work
2376 * handler function is not running any more when this function returns.
2377 */
2378static void
2379intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2380{
2381	struct i915_power_domains *power_domains = &i915->power_domains;
2382
2383	intel_display_power_flush_work(i915);
2384	cancel_delayed_work_sync(&power_domains->async_put_work);
2385
2386	verify_async_put_domains_state(power_domains);
2387
2388	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2389}
2390
2391#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2392/**
2393 * intel_display_power_put - release a power domain reference
2394 * @dev_priv: i915 device instance
2395 * @domain: power domain to reference
2396 * @wakeref: wakeref acquired for the reference that is being released
2397 *
2398 * This function drops the power domain reference obtained by
2399 * intel_display_power_get() and might power down the corresponding hardware
2400 * block right away if this is the last reference.
2401 */
2402void intel_display_power_put(struct drm_i915_private *dev_priv,
2403			     enum intel_display_power_domain domain,
2404			     intel_wakeref_t wakeref)
2405{
2406	__intel_display_power_put(dev_priv, domain);
2407	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2408}
2409#else
2410/**
2411 * intel_display_power_put_unchecked - release an unchecked power domain reference
2412 * @dev_priv: i915 device instance
2413 * @domain: power domain to reference
2414 *
2415 * This function drops the power domain reference obtained by
2416 * intel_display_power_get() and might power down the corresponding hardware
2417 * block right away if this is the last reference.
2418 *
2419 * This function is only for the power domain code's internal use to suppress wakeref
2420 * tracking when the correspondig debug kconfig option is disabled, should not
2421 * be used otherwise.
2422 */
2423void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2424				       enum intel_display_power_domain domain)
2425{
2426	__intel_display_power_put(dev_priv, domain);
2427	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2428}
2429#endif
2430
2431void
2432intel_display_power_get_in_set(struct drm_i915_private *i915,
2433			       struct intel_display_power_domain_set *power_domain_set,
2434			       enum intel_display_power_domain domain)
2435{
2436	intel_wakeref_t __maybe_unused wf;
2437
2438	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2439
2440	wf = intel_display_power_get(i915, domain);
2441#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2442	power_domain_set->wakerefs[domain] = wf;
2443#endif
2444	power_domain_set->mask |= BIT_ULL(domain);
2445}
2446
2447bool
2448intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
2449					  struct intel_display_power_domain_set *power_domain_set,
2450					  enum intel_display_power_domain domain)
2451{
2452	intel_wakeref_t wf;
2453
2454	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2455
2456	wf = intel_display_power_get_if_enabled(i915, domain);
2457	if (!wf)
2458		return false;
2459
2460#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2461	power_domain_set->wakerefs[domain] = wf;
2462#endif
2463	power_domain_set->mask |= BIT_ULL(domain);
2464
2465	return true;
2466}
2467
2468void
2469intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
2470				    struct intel_display_power_domain_set *power_domain_set,
2471				    u64 mask)
2472{
2473	enum intel_display_power_domain domain;
2474
2475	drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
2476
2477	for_each_power_domain(domain, mask) {
2478		intel_wakeref_t __maybe_unused wf = -1;
2479
2480#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2481		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
2482#endif
2483		intel_display_power_put(i915, domain, wf);
2484		power_domain_set->mask &= ~BIT_ULL(domain);
2485	}
2486}
2487
2488#define I830_PIPES_POWER_DOMAINS (		\
2489	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2490	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2491	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2492	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2493	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2494	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2495	BIT_ULL(POWER_DOMAIN_INIT))
2496
2497#define VLV_DISPLAY_POWER_DOMAINS (		\
2498	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2499	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2500	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2501	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2502	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2503	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2504	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2505	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2506	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2507	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2508	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2509	BIT_ULL(POWER_DOMAIN_VGA) |			\
2510	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2511	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2512	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2513	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2514	BIT_ULL(POWER_DOMAIN_INIT))
2515
2516#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2517	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2518	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2519	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2520	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2521	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2522	BIT_ULL(POWER_DOMAIN_INIT))
2523
2524#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2525	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2526	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2527	BIT_ULL(POWER_DOMAIN_INIT))
2528
2529#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2530	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2531	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2532	BIT_ULL(POWER_DOMAIN_INIT))
2533
2534#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2535	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2536	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2537	BIT_ULL(POWER_DOMAIN_INIT))
2538
2539#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2540	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2541	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2542	BIT_ULL(POWER_DOMAIN_INIT))
2543
2544#define CHV_DISPLAY_POWER_DOMAINS (		\
2545	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2546	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2547	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2548	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2549	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2550	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2551	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2552	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2553	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2554	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2555	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2556	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2557	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2558	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2559	BIT_ULL(POWER_DOMAIN_VGA) |			\
2560	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2561	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2562	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2563	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2564	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2565	BIT_ULL(POWER_DOMAIN_INIT))
2566
2567#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2568	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2569	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2570	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2571	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2572	BIT_ULL(POWER_DOMAIN_INIT))
2573
2574#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2575	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2576	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2577	BIT_ULL(POWER_DOMAIN_INIT))
2578
2579#define HSW_DISPLAY_POWER_DOMAINS (			\
2580	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2581	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2582	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2583	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2584	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2585	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2586	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2587	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2588	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2589	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2590	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2591	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2592	BIT_ULL(POWER_DOMAIN_VGA) |				\
2593	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2594	BIT_ULL(POWER_DOMAIN_INIT))
2595
2596#define BDW_DISPLAY_POWER_DOMAINS (			\
2597	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2598	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2599	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2600	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2601	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2602	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2603	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2604	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2605	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2606	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2607	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2608	BIT_ULL(POWER_DOMAIN_VGA) |				\
2609	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2610	BIT_ULL(POWER_DOMAIN_INIT))
2611
2612#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2613	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2614	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2615	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2616	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2617	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2618	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2619	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2620	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2621	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2622	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2623	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2624	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2625	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2626	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2627	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2628	BIT_ULL(POWER_DOMAIN_VGA) |				\
2629	BIT_ULL(POWER_DOMAIN_INIT))
2630#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2631	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2632	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2633	BIT_ULL(POWER_DOMAIN_INIT))
2634#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2635	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2636	BIT_ULL(POWER_DOMAIN_INIT))
2637#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2638	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2639	BIT_ULL(POWER_DOMAIN_INIT))
2640#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2641	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2642	BIT_ULL(POWER_DOMAIN_INIT))
2643#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2644	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2645	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2646	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2647	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2648	BIT_ULL(POWER_DOMAIN_INIT))
2649
2650#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2651	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2652	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2653	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2654	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2655	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2656	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2657	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2658	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2659	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2660	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2661	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2662	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2663	BIT_ULL(POWER_DOMAIN_VGA) |				\
2664	BIT_ULL(POWER_DOMAIN_INIT))
2665#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2666	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2667	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2668	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2669	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2670	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2671	BIT_ULL(POWER_DOMAIN_INIT))
2672#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2673	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2674	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2675	BIT_ULL(POWER_DOMAIN_INIT))
2676#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2677	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2678	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2679	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2680	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2681	BIT_ULL(POWER_DOMAIN_INIT))
2682
2683#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2684	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2685	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2686	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2687	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2688	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2689	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2690	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2691	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2692	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2693	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2694	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2695	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2696	BIT_ULL(POWER_DOMAIN_VGA) |				\
2697	BIT_ULL(POWER_DOMAIN_INIT))
2698#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2699	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2700#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2701	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2702#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2703	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2704#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2705	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2706	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2707	BIT_ULL(POWER_DOMAIN_INIT))
2708#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2709	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2710	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2711	BIT_ULL(POWER_DOMAIN_INIT))
2712#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2713	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2714	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2715	BIT_ULL(POWER_DOMAIN_INIT))
2716#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2717	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2718	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2719	BIT_ULL(POWER_DOMAIN_INIT))
2720#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2721	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2722	BIT_ULL(POWER_DOMAIN_INIT))
2723#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2724	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2725	BIT_ULL(POWER_DOMAIN_INIT))
2726#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2727	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2728	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2729	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2730	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2731	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2732	BIT_ULL(POWER_DOMAIN_INIT))
2733
2734#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2735	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2736	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2737	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2738	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2739	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2740	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2741	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2742	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2743	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2744	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2745	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2746	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2747	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2748	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2749	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2750	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2751	BIT_ULL(POWER_DOMAIN_VGA) |				\
2752	BIT_ULL(POWER_DOMAIN_INIT))
2753#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2754	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2755	BIT_ULL(POWER_DOMAIN_INIT))
2756#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2757	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2758	BIT_ULL(POWER_DOMAIN_INIT))
2759#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2760	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2761	BIT_ULL(POWER_DOMAIN_INIT))
2762#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2763	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2764	BIT_ULL(POWER_DOMAIN_INIT))
2765#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2766	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2767	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2768	BIT_ULL(POWER_DOMAIN_INIT))
2769#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2770	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2771	BIT_ULL(POWER_DOMAIN_INIT))
2772#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2773	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2774	BIT_ULL(POWER_DOMAIN_INIT))
2775#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2776	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2777	BIT_ULL(POWER_DOMAIN_INIT))
2778#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2779	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2780	BIT_ULL(POWER_DOMAIN_INIT))
2781#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2782	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2783	BIT_ULL(POWER_DOMAIN_INIT))
2784#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2785	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2786	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2787	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2788	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2789	BIT_ULL(POWER_DOMAIN_INIT))
2790
2791/*
2792 * ICL PW_0/PG_0 domains (HW/DMC control):
2793 * - PCI
2794 * - clocks except port PLL
2795 * - central power except FBC
2796 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2797 * ICL PW_1/PG_1 domains (HW/DMC control):
2798 * - DBUF function
2799 * - PIPE_A and its planes, except VGA
2800 * - transcoder EDP + PSR
2801 * - transcoder DSI
2802 * - DDI_A
2803 * - FBC
2804 */
2805#define ICL_PW_4_POWER_DOMAINS (			\
2806	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2807	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2808	BIT_ULL(POWER_DOMAIN_INIT))
2809	/* VDSC/joining */
2810#define ICL_PW_3_POWER_DOMAINS (			\
2811	ICL_PW_4_POWER_DOMAINS |			\
2812	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2813	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2814	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2815	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2816	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2817	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2818	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2819	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2820	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2821	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2822	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2823	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2824	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2825	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2826	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2827	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2828	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2829	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2830	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2831	BIT_ULL(POWER_DOMAIN_VGA) |			\
2832	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2833	BIT_ULL(POWER_DOMAIN_INIT))
2834	/*
2835	 * - transcoder WD
2836	 * - KVMR (HW control)
2837	 */
2838#define ICL_PW_2_POWER_DOMAINS (			\
2839	ICL_PW_3_POWER_DOMAINS |			\
2840	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2841	BIT_ULL(POWER_DOMAIN_INIT))
2842	/*
2843	 * - KVMR (HW control)
2844	 */
2845#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2846	ICL_PW_2_POWER_DOMAINS |			\
2847	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2848	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2849	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2850	BIT_ULL(POWER_DOMAIN_INIT))
2851
2852#define ICL_DDI_IO_A_POWER_DOMAINS (			\
2853	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2854#define ICL_DDI_IO_B_POWER_DOMAINS (			\
2855	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2856#define ICL_DDI_IO_C_POWER_DOMAINS (			\
2857	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2858#define ICL_DDI_IO_D_POWER_DOMAINS (			\
2859	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2860#define ICL_DDI_IO_E_POWER_DOMAINS (			\
2861	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2862#define ICL_DDI_IO_F_POWER_DOMAINS (			\
2863	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2864
2865#define ICL_AUX_A_IO_POWER_DOMAINS (			\
2866	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2867	BIT_ULL(POWER_DOMAIN_AUX_A))
2868#define ICL_AUX_B_IO_POWER_DOMAINS (			\
2869	BIT_ULL(POWER_DOMAIN_AUX_B))
2870#define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2871	BIT_ULL(POWER_DOMAIN_AUX_C))
2872#define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2873	BIT_ULL(POWER_DOMAIN_AUX_D))
2874#define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2875	BIT_ULL(POWER_DOMAIN_AUX_E))
2876#define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2877	BIT_ULL(POWER_DOMAIN_AUX_F))
2878#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2879	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2880#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2881	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2882#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2883	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2884#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2885	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2886
2887#define TGL_PW_5_POWER_DOMAINS (			\
2888	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2889	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2890	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2891	BIT_ULL(POWER_DOMAIN_INIT))
2892
2893#define TGL_PW_4_POWER_DOMAINS (			\
2894	TGL_PW_5_POWER_DOMAINS |			\
2895	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2896	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2897	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2898	BIT_ULL(POWER_DOMAIN_INIT))
2899
2900#define TGL_PW_3_POWER_DOMAINS (			\
2901	TGL_PW_4_POWER_DOMAINS |			\
2902	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2903	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2904	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2905	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
2906	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
2907	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) |	\
2908	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) |	\
2909	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) |	\
2910	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) |	\
2911	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
2912	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
2913	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |		\
2914	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |		\
2915	BIT_ULL(POWER_DOMAIN_AUX_USBC5) |		\
2916	BIT_ULL(POWER_DOMAIN_AUX_USBC6) |		\
2917	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
2918	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
2919	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
2920	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
2921	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |		\
2922	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |		\
2923	BIT_ULL(POWER_DOMAIN_VGA) |			\
2924	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2925	BIT_ULL(POWER_DOMAIN_INIT))
2926
2927#define TGL_PW_2_POWER_DOMAINS (			\
2928	TGL_PW_3_POWER_DOMAINS |			\
2929	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2930	BIT_ULL(POWER_DOMAIN_INIT))
2931
2932#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2933	TGL_PW_3_POWER_DOMAINS |			\
2934	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2935	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2936	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2937	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2938	BIT_ULL(POWER_DOMAIN_INIT))
2939
2940#define TGL_DDI_IO_TC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
2941#define TGL_DDI_IO_TC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
2942#define TGL_DDI_IO_TC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
2943#define TGL_DDI_IO_TC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
2944#define TGL_DDI_IO_TC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
2945#define TGL_DDI_IO_TC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
 
 
 
 
 
 
2946
2947#define TGL_AUX_A_IO_POWER_DOMAINS (		\
2948	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2949	BIT_ULL(POWER_DOMAIN_AUX_A))
2950#define TGL_AUX_B_IO_POWER_DOMAINS (		\
2951	BIT_ULL(POWER_DOMAIN_AUX_B))
2952#define TGL_AUX_C_IO_POWER_DOMAINS (		\
2953	BIT_ULL(POWER_DOMAIN_AUX_C))
2954
2955#define TGL_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
2956#define TGL_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
2957#define TGL_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
2958#define TGL_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)
2959#define TGL_AUX_IO_USBC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC5)
2960#define TGL_AUX_IO_USBC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC6)
2961
2962#define TGL_AUX_IO_TBT1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT1)
2963#define TGL_AUX_IO_TBT2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT2)
2964#define TGL_AUX_IO_TBT3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT3)
2965#define TGL_AUX_IO_TBT4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT4)
2966#define TGL_AUX_IO_TBT5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT5)
2967#define TGL_AUX_IO_TBT6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT6)
 
 
 
 
 
 
 
 
 
 
2968
2969#define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
2970	BIT_ULL(POWER_DOMAIN_AUX_USBC1)	|	\
2971	BIT_ULL(POWER_DOMAIN_AUX_USBC2)	|	\
2972	BIT_ULL(POWER_DOMAIN_AUX_USBC3)	|	\
2973	BIT_ULL(POWER_DOMAIN_AUX_USBC4)	|	\
2974	BIT_ULL(POWER_DOMAIN_AUX_USBC5)	|	\
2975	BIT_ULL(POWER_DOMAIN_AUX_USBC6)	|	\
2976	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |	\
2977	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |	\
2978	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |	\
2979	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |	\
2980	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |	\
2981	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |	\
2982	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
2983
2984#define RKL_PW_4_POWER_DOMAINS (			\
2985	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2986	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2987	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2988	BIT_ULL(POWER_DOMAIN_INIT))
2989
2990#define RKL_PW_3_POWER_DOMAINS (			\
2991	RKL_PW_4_POWER_DOMAINS |			\
2992	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2993	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2994	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2995	BIT_ULL(POWER_DOMAIN_VGA) |			\
2996	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2997	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
2998	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
2999	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
3000	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
3001	BIT_ULL(POWER_DOMAIN_INIT))
3002
3003/*
3004 * There is no PW_2/PG_2 on RKL.
3005 *
3006 * RKL PW_1/PG_1 domains (under HW/DMC control):
3007 * - DBUF function (note: registers are in PW0)
3008 * - PIPE_A and its planes and VDSC/joining, except VGA
3009 * - transcoder A
3010 * - DDI_A and DDI_B
3011 * - FBC
3012 *
3013 * RKL PW_0/PG_0 domains (under HW/DMC control):
3014 * - PCI
3015 * - clocks except port PLL
3016 * - shared functions:
3017 *     * interrupts except pipe interrupts
3018 *     * MBus except PIPE_MBUS_DBOX_CTL
3019 *     * DBUF registers
3020 * - central power except FBC
3021 * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
3022 */
3023
3024#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
3025	RKL_PW_3_POWER_DOMAINS |			\
3026	BIT_ULL(POWER_DOMAIN_MODESET) |			\
3027	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
3028	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
3029	BIT_ULL(POWER_DOMAIN_INIT))
3030
3031/*
3032 * XE_LPD Power Domains
3033 *
3034 * Previous platforms required that PG(n-1) be enabled before PG(n).  That
3035 * dependency chain turns into a dependency tree on XE_LPD:
3036 *
3037 *       PG0
3038 *        |
3039 *     --PG1--
3040 *    /       \
3041 *  PGA     --PG2--
3042 *         /   |   \
3043 *       PGB  PGC  PGD
3044 *
3045 * Power wells must be enabled from top to bottom and disabled from bottom
3046 * to top.  This allows pipes to be power gated independently.
3047 */
3048
3049#define XELPD_PW_D_POWER_DOMAINS (			\
3050	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
3051	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
3052	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
3053	BIT_ULL(POWER_DOMAIN_INIT))
3054
3055#define XELPD_PW_C_POWER_DOMAINS (			\
3056	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
3057	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
3058	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
3059	BIT_ULL(POWER_DOMAIN_INIT))
3060
3061#define XELPD_PW_B_POWER_DOMAINS (			\
3062	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
3063	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
3064	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
3065	BIT_ULL(POWER_DOMAIN_INIT))
3066
3067#define XELPD_PW_A_POWER_DOMAINS (			\
3068	BIT_ULL(POWER_DOMAIN_PIPE_A) |			\
3069	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
3070	BIT_ULL(POWER_DOMAIN_INIT))
3071
3072#define XELPD_PW_2_POWER_DOMAINS (			\
3073	XELPD_PW_B_POWER_DOMAINS |			\
3074	XELPD_PW_C_POWER_DOMAINS |			\
3075	XELPD_PW_D_POWER_DOMAINS |			\
3076	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
3077	BIT_ULL(POWER_DOMAIN_VGA) |			\
3078	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
3079	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) |	\
3080	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) |	\
3081	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
3082	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
3083	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) |	\
3084	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) |	\
3085	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
3086	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) |		\
3087	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) |		\
3088	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |			\
3089	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |			\
3090	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |			\
3091	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |			\
3092	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |			\
3093	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |			\
3094	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |			\
3095	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |			\
3096	BIT_ULL(POWER_DOMAIN_INIT))
3097
3098/*
3099 * XELPD PW_1/PG_1 domains (under HW/DMC control):
3100 *  - DBUF function (registers are in PW0)
3101 *  - Transcoder A
3102 *  - DDI_A and DDI_B
3103 *
3104 * XELPD PW_0/PW_1 domains (under HW/DMC control):
3105 *  - PCI
3106 *  - Clocks except port PLL
3107 *  - Shared functions:
3108 *     * interrupts except pipe interrupts
3109 *     * MBus except PIPE_MBUS_DBOX_CTL
3110 *     * DBUF registers
3111 *  - Central power except FBC
3112 *  - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
3113 */
3114
3115#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS (		\
3116	XELPD_PW_2_POWER_DOMAINS |			\
3117	BIT_ULL(POWER_DOMAIN_MODESET) |			\
3118	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
3119	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
3120	BIT_ULL(POWER_DOMAIN_INIT))
3121
3122#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
3123#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
3124#define XELPD_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
3125#define XELPD_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
3126#define XELPD_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
3127#define XELPD_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)
3128
3129#define XELPD_AUX_IO_TBT1_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT1)
3130#define XELPD_AUX_IO_TBT2_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT2)
3131#define XELPD_AUX_IO_TBT3_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT3)
3132#define XELPD_AUX_IO_TBT4_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT4)
3133
3134#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD)
3135#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD)
3136#define XELPD_DDI_IO_TC1_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
3137#define XELPD_DDI_IO_TC2_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
3138#define XELPD_DDI_IO_TC3_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
3139#define XELPD_DDI_IO_TC4_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
3140
3141static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
3142	.sync_hw = i9xx_power_well_sync_hw_noop,
3143	.enable = i9xx_always_on_power_well_noop,
3144	.disable = i9xx_always_on_power_well_noop,
3145	.is_enabled = i9xx_always_on_power_well_enabled,
3146};
3147
3148static const struct i915_power_well_ops chv_pipe_power_well_ops = {
3149	.sync_hw = chv_pipe_power_well_sync_hw,
3150	.enable = chv_pipe_power_well_enable,
3151	.disable = chv_pipe_power_well_disable,
3152	.is_enabled = chv_pipe_power_well_enabled,
3153};
3154
3155static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
3156	.sync_hw = i9xx_power_well_sync_hw_noop,
3157	.enable = chv_dpio_cmn_power_well_enable,
3158	.disable = chv_dpio_cmn_power_well_disable,
3159	.is_enabled = vlv_power_well_enabled,
3160};
3161
3162static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
3163	{
3164		.name = "always-on",
3165		.always_on = true,
3166		.domains = POWER_DOMAIN_MASK,
3167		.ops = &i9xx_always_on_power_well_ops,
3168		.id = DISP_PW_ID_NONE,
3169	},
3170};
3171
3172static const struct i915_power_well_ops i830_pipes_power_well_ops = {
3173	.sync_hw = i830_pipes_power_well_sync_hw,
3174	.enable = i830_pipes_power_well_enable,
3175	.disable = i830_pipes_power_well_disable,
3176	.is_enabled = i830_pipes_power_well_enabled,
3177};
3178
3179static const struct i915_power_well_desc i830_power_wells[] = {
3180	{
3181		.name = "always-on",
3182		.always_on = true,
3183		.domains = POWER_DOMAIN_MASK,
3184		.ops = &i9xx_always_on_power_well_ops,
3185		.id = DISP_PW_ID_NONE,
3186	},
3187	{
3188		.name = "pipes",
3189		.domains = I830_PIPES_POWER_DOMAINS,
3190		.ops = &i830_pipes_power_well_ops,
3191		.id = DISP_PW_ID_NONE,
3192	},
3193};
3194
3195static const struct i915_power_well_ops hsw_power_well_ops = {
3196	.sync_hw = hsw_power_well_sync_hw,
3197	.enable = hsw_power_well_enable,
3198	.disable = hsw_power_well_disable,
3199	.is_enabled = hsw_power_well_enabled,
3200};
3201
3202static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
3203	.sync_hw = i9xx_power_well_sync_hw_noop,
3204	.enable = gen9_dc_off_power_well_enable,
3205	.disable = gen9_dc_off_power_well_disable,
3206	.is_enabled = gen9_dc_off_power_well_enabled,
3207};
3208
3209static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
3210	.sync_hw = i9xx_power_well_sync_hw_noop,
3211	.enable = bxt_dpio_cmn_power_well_enable,
3212	.disable = bxt_dpio_cmn_power_well_disable,
3213	.is_enabled = bxt_dpio_cmn_power_well_enabled,
3214};
3215
3216static const struct i915_power_well_regs hsw_power_well_regs = {
3217	.bios	= HSW_PWR_WELL_CTL1,
3218	.driver	= HSW_PWR_WELL_CTL2,
3219	.kvmr	= HSW_PWR_WELL_CTL3,
3220	.debug	= HSW_PWR_WELL_CTL4,
3221};
3222
3223static const struct i915_power_well_desc hsw_power_wells[] = {
3224	{
3225		.name = "always-on",
3226		.always_on = true,
3227		.domains = POWER_DOMAIN_MASK,
3228		.ops = &i9xx_always_on_power_well_ops,
3229		.id = DISP_PW_ID_NONE,
3230	},
3231	{
3232		.name = "display",
3233		.domains = HSW_DISPLAY_POWER_DOMAINS,
3234		.ops = &hsw_power_well_ops,
3235		.id = HSW_DISP_PW_GLOBAL,
3236		{
3237			.hsw.regs = &hsw_power_well_regs,
3238			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3239			.hsw.has_vga = true,
3240		},
3241	},
3242};
3243
3244static const struct i915_power_well_desc bdw_power_wells[] = {
3245	{
3246		.name = "always-on",
3247		.always_on = true,
3248		.domains = POWER_DOMAIN_MASK,
3249		.ops = &i9xx_always_on_power_well_ops,
3250		.id = DISP_PW_ID_NONE,
3251	},
3252	{
3253		.name = "display",
3254		.domains = BDW_DISPLAY_POWER_DOMAINS,
3255		.ops = &hsw_power_well_ops,
3256		.id = HSW_DISP_PW_GLOBAL,
3257		{
3258			.hsw.regs = &hsw_power_well_regs,
3259			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3260			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3261			.hsw.has_vga = true,
3262		},
3263	},
3264};
3265
3266static const struct i915_power_well_ops vlv_display_power_well_ops = {
3267	.sync_hw = i9xx_power_well_sync_hw_noop,
3268	.enable = vlv_display_power_well_enable,
3269	.disable = vlv_display_power_well_disable,
3270	.is_enabled = vlv_power_well_enabled,
3271};
3272
3273static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3274	.sync_hw = i9xx_power_well_sync_hw_noop,
3275	.enable = vlv_dpio_cmn_power_well_enable,
3276	.disable = vlv_dpio_cmn_power_well_disable,
3277	.is_enabled = vlv_power_well_enabled,
3278};
3279
3280static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3281	.sync_hw = i9xx_power_well_sync_hw_noop,
3282	.enable = vlv_power_well_enable,
3283	.disable = vlv_power_well_disable,
3284	.is_enabled = vlv_power_well_enabled,
3285};
3286
3287static const struct i915_power_well_desc vlv_power_wells[] = {
3288	{
3289		.name = "always-on",
3290		.always_on = true,
3291		.domains = POWER_DOMAIN_MASK,
3292		.ops = &i9xx_always_on_power_well_ops,
3293		.id = DISP_PW_ID_NONE,
3294	},
3295	{
3296		.name = "display",
3297		.domains = VLV_DISPLAY_POWER_DOMAINS,
3298		.ops = &vlv_display_power_well_ops,
3299		.id = VLV_DISP_PW_DISP2D,
3300		{
3301			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3302		},
3303	},
3304	{
3305		.name = "dpio-tx-b-01",
3306		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3307			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3308			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3309			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3310		.ops = &vlv_dpio_power_well_ops,
3311		.id = DISP_PW_ID_NONE,
3312		{
3313			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3314		},
3315	},
3316	{
3317		.name = "dpio-tx-b-23",
3318		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3319			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3320			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3321			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3322		.ops = &vlv_dpio_power_well_ops,
3323		.id = DISP_PW_ID_NONE,
3324		{
3325			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3326		},
3327	},
3328	{
3329		.name = "dpio-tx-c-01",
3330		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3331			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3332			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3333			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3334		.ops = &vlv_dpio_power_well_ops,
3335		.id = DISP_PW_ID_NONE,
3336		{
3337			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3338		},
3339	},
3340	{
3341		.name = "dpio-tx-c-23",
3342		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3343			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3344			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3345			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3346		.ops = &vlv_dpio_power_well_ops,
3347		.id = DISP_PW_ID_NONE,
3348		{
3349			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3350		},
3351	},
3352	{
3353		.name = "dpio-common",
3354		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3355		.ops = &vlv_dpio_cmn_power_well_ops,
3356		.id = VLV_DISP_PW_DPIO_CMN_BC,
3357		{
3358			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3359		},
3360	},
3361};
3362
3363static const struct i915_power_well_desc chv_power_wells[] = {
3364	{
3365		.name = "always-on",
3366		.always_on = true,
3367		.domains = POWER_DOMAIN_MASK,
3368		.ops = &i9xx_always_on_power_well_ops,
3369		.id = DISP_PW_ID_NONE,
3370	},
3371	{
3372		.name = "display",
3373		/*
3374		 * Pipe A power well is the new disp2d well. Pipe B and C
3375		 * power wells don't actually exist. Pipe A power well is
3376		 * required for any pipe to work.
3377		 */
3378		.domains = CHV_DISPLAY_POWER_DOMAINS,
3379		.ops = &chv_pipe_power_well_ops,
3380		.id = DISP_PW_ID_NONE,
3381	},
3382	{
3383		.name = "dpio-common-bc",
3384		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3385		.ops = &chv_dpio_cmn_power_well_ops,
3386		.id = VLV_DISP_PW_DPIO_CMN_BC,
3387		{
3388			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3389		},
3390	},
3391	{
3392		.name = "dpio-common-d",
3393		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3394		.ops = &chv_dpio_cmn_power_well_ops,
3395		.id = CHV_DISP_PW_DPIO_CMN_D,
3396		{
3397			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3398		},
3399	},
3400};
3401
3402bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3403					 enum i915_power_well_id power_well_id)
3404{
3405	struct i915_power_well *power_well;
3406	bool ret;
3407
3408	power_well = lookup_power_well(dev_priv, power_well_id);
3409	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3410
3411	return ret;
3412}
3413
3414static const struct i915_power_well_desc skl_power_wells[] = {
3415	{
3416		.name = "always-on",
3417		.always_on = true,
3418		.domains = POWER_DOMAIN_MASK,
3419		.ops = &i9xx_always_on_power_well_ops,
3420		.id = DISP_PW_ID_NONE,
3421	},
3422	{
3423		.name = "power well 1",
3424		/* Handled by the DMC firmware */
3425		.always_on = true,
3426		.domains = 0,
3427		.ops = &hsw_power_well_ops,
3428		.id = SKL_DISP_PW_1,
3429		{
3430			.hsw.regs = &hsw_power_well_regs,
3431			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3432			.hsw.has_fuses = true,
3433		},
3434	},
3435	{
3436		.name = "MISC IO power well",
3437		/* Handled by the DMC firmware */
3438		.always_on = true,
3439		.domains = 0,
3440		.ops = &hsw_power_well_ops,
3441		.id = SKL_DISP_PW_MISC_IO,
3442		{
3443			.hsw.regs = &hsw_power_well_regs,
3444			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3445		},
3446	},
3447	{
3448		.name = "DC off",
3449		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3450		.ops = &gen9_dc_off_power_well_ops,
3451		.id = SKL_DISP_DC_OFF,
3452	},
3453	{
3454		.name = "power well 2",
3455		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3456		.ops = &hsw_power_well_ops,
3457		.id = SKL_DISP_PW_2,
3458		{
3459			.hsw.regs = &hsw_power_well_regs,
3460			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3461			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3462			.hsw.has_vga = true,
3463			.hsw.has_fuses = true,
3464		},
3465	},
3466	{
3467		.name = "DDI A/E IO power well",
3468		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3469		.ops = &hsw_power_well_ops,
3470		.id = DISP_PW_ID_NONE,
3471		{
3472			.hsw.regs = &hsw_power_well_regs,
3473			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3474		},
3475	},
3476	{
3477		.name = "DDI B IO power well",
3478		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3479		.ops = &hsw_power_well_ops,
3480		.id = DISP_PW_ID_NONE,
3481		{
3482			.hsw.regs = &hsw_power_well_regs,
3483			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3484		},
3485	},
3486	{
3487		.name = "DDI C IO power well",
3488		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3489		.ops = &hsw_power_well_ops,
3490		.id = DISP_PW_ID_NONE,
3491		{
3492			.hsw.regs = &hsw_power_well_regs,
3493			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3494		},
3495	},
3496	{
3497		.name = "DDI D IO power well",
3498		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3499		.ops = &hsw_power_well_ops,
3500		.id = DISP_PW_ID_NONE,
3501		{
3502			.hsw.regs = &hsw_power_well_regs,
3503			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3504		},
3505	},
3506};
3507
3508static const struct i915_power_well_desc bxt_power_wells[] = {
3509	{
3510		.name = "always-on",
3511		.always_on = true,
3512		.domains = POWER_DOMAIN_MASK,
3513		.ops = &i9xx_always_on_power_well_ops,
3514		.id = DISP_PW_ID_NONE,
3515	},
3516	{
3517		.name = "power well 1",
3518		/* Handled by the DMC firmware */
3519		.always_on = true,
3520		.domains = 0,
3521		.ops = &hsw_power_well_ops,
3522		.id = SKL_DISP_PW_1,
3523		{
3524			.hsw.regs = &hsw_power_well_regs,
3525			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3526			.hsw.has_fuses = true,
3527		},
3528	},
3529	{
3530		.name = "DC off",
3531		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3532		.ops = &gen9_dc_off_power_well_ops,
3533		.id = SKL_DISP_DC_OFF,
3534	},
3535	{
3536		.name = "power well 2",
3537		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3538		.ops = &hsw_power_well_ops,
3539		.id = SKL_DISP_PW_2,
3540		{
3541			.hsw.regs = &hsw_power_well_regs,
3542			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3543			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3544			.hsw.has_vga = true,
3545			.hsw.has_fuses = true,
3546		},
3547	},
3548	{
3549		.name = "dpio-common-a",
3550		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3551		.ops = &bxt_dpio_cmn_power_well_ops,
3552		.id = BXT_DISP_PW_DPIO_CMN_A,
3553		{
3554			.bxt.phy = DPIO_PHY1,
3555		},
3556	},
3557	{
3558		.name = "dpio-common-bc",
3559		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3560		.ops = &bxt_dpio_cmn_power_well_ops,
3561		.id = VLV_DISP_PW_DPIO_CMN_BC,
3562		{
3563			.bxt.phy = DPIO_PHY0,
3564		},
3565	},
3566};
3567
3568static const struct i915_power_well_desc glk_power_wells[] = {
3569	{
3570		.name = "always-on",
3571		.always_on = true,
3572		.domains = POWER_DOMAIN_MASK,
3573		.ops = &i9xx_always_on_power_well_ops,
3574		.id = DISP_PW_ID_NONE,
3575	},
3576	{
3577		.name = "power well 1",
3578		/* Handled by the DMC firmware */
3579		.always_on = true,
3580		.domains = 0,
3581		.ops = &hsw_power_well_ops,
3582		.id = SKL_DISP_PW_1,
3583		{
3584			.hsw.regs = &hsw_power_well_regs,
3585			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3586			.hsw.has_fuses = true,
3587		},
3588	},
3589	{
3590		.name = "DC off",
3591		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3592		.ops = &gen9_dc_off_power_well_ops,
3593		.id = SKL_DISP_DC_OFF,
3594	},
3595	{
3596		.name = "power well 2",
3597		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3598		.ops = &hsw_power_well_ops,
3599		.id = SKL_DISP_PW_2,
3600		{
3601			.hsw.regs = &hsw_power_well_regs,
3602			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3603			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3604			.hsw.has_vga = true,
3605			.hsw.has_fuses = true,
3606		},
3607	},
3608	{
3609		.name = "dpio-common-a",
3610		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3611		.ops = &bxt_dpio_cmn_power_well_ops,
3612		.id = BXT_DISP_PW_DPIO_CMN_A,
3613		{
3614			.bxt.phy = DPIO_PHY1,
3615		},
3616	},
3617	{
3618		.name = "dpio-common-b",
3619		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3620		.ops = &bxt_dpio_cmn_power_well_ops,
3621		.id = VLV_DISP_PW_DPIO_CMN_BC,
3622		{
3623			.bxt.phy = DPIO_PHY0,
3624		},
3625	},
3626	{
3627		.name = "dpio-common-c",
3628		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3629		.ops = &bxt_dpio_cmn_power_well_ops,
3630		.id = GLK_DISP_PW_DPIO_CMN_C,
3631		{
3632			.bxt.phy = DPIO_PHY2,
3633		},
3634	},
3635	{
3636		.name = "AUX A",
3637		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3638		.ops = &hsw_power_well_ops,
3639		.id = DISP_PW_ID_NONE,
3640		{
3641			.hsw.regs = &hsw_power_well_regs,
3642			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3643		},
3644	},
3645	{
3646		.name = "AUX B",
3647		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3648		.ops = &hsw_power_well_ops,
3649		.id = DISP_PW_ID_NONE,
3650		{
3651			.hsw.regs = &hsw_power_well_regs,
3652			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3653		},
3654	},
3655	{
3656		.name = "AUX C",
3657		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3658		.ops = &hsw_power_well_ops,
3659		.id = DISP_PW_ID_NONE,
3660		{
3661			.hsw.regs = &hsw_power_well_regs,
3662			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3663		},
3664	},
3665	{
3666		.name = "DDI A IO power well",
3667		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3668		.ops = &hsw_power_well_ops,
3669		.id = DISP_PW_ID_NONE,
3670		{
3671			.hsw.regs = &hsw_power_well_regs,
3672			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3673		},
3674	},
3675	{
3676		.name = "DDI B IO power well",
3677		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3678		.ops = &hsw_power_well_ops,
3679		.id = DISP_PW_ID_NONE,
3680		{
3681			.hsw.regs = &hsw_power_well_regs,
3682			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3683		},
3684	},
3685	{
3686		.name = "DDI C IO power well",
3687		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3688		.ops = &hsw_power_well_ops,
3689		.id = DISP_PW_ID_NONE,
3690		{
3691			.hsw.regs = &hsw_power_well_regs,
3692			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3693		},
3694	},
3695};
3696
3697static const struct i915_power_well_desc cnl_power_wells[] = {
3698	{
3699		.name = "always-on",
3700		.always_on = true,
3701		.domains = POWER_DOMAIN_MASK,
3702		.ops = &i9xx_always_on_power_well_ops,
3703		.id = DISP_PW_ID_NONE,
3704	},
3705	{
3706		.name = "power well 1",
3707		/* Handled by the DMC firmware */
3708		.always_on = true,
3709		.domains = 0,
3710		.ops = &hsw_power_well_ops,
3711		.id = SKL_DISP_PW_1,
3712		{
3713			.hsw.regs = &hsw_power_well_regs,
3714			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3715			.hsw.has_fuses = true,
3716		},
3717	},
3718	{
3719		.name = "AUX A",
3720		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3721		.ops = &hsw_power_well_ops,
3722		.id = DISP_PW_ID_NONE,
3723		{
3724			.hsw.regs = &hsw_power_well_regs,
3725			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3726		},
3727	},
3728	{
3729		.name = "AUX B",
3730		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3731		.ops = &hsw_power_well_ops,
3732		.id = DISP_PW_ID_NONE,
3733		{
3734			.hsw.regs = &hsw_power_well_regs,
3735			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3736		},
3737	},
3738	{
3739		.name = "AUX C",
3740		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3741		.ops = &hsw_power_well_ops,
3742		.id = DISP_PW_ID_NONE,
3743		{
3744			.hsw.regs = &hsw_power_well_regs,
3745			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3746		},
3747	},
3748	{
3749		.name = "AUX D",
3750		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3751		.ops = &hsw_power_well_ops,
3752		.id = DISP_PW_ID_NONE,
3753		{
3754			.hsw.regs = &hsw_power_well_regs,
3755			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3756		},
3757	},
3758	{
3759		.name = "DC off",
3760		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3761		.ops = &gen9_dc_off_power_well_ops,
3762		.id = SKL_DISP_DC_OFF,
3763	},
3764	{
3765		.name = "power well 2",
3766		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3767		.ops = &hsw_power_well_ops,
3768		.id = SKL_DISP_PW_2,
3769		{
3770			.hsw.regs = &hsw_power_well_regs,
3771			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3772			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3773			.hsw.has_vga = true,
3774			.hsw.has_fuses = true,
3775		},
3776	},
3777	{
3778		.name = "DDI A IO power well",
3779		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3780		.ops = &hsw_power_well_ops,
3781		.id = DISP_PW_ID_NONE,
3782		{
3783			.hsw.regs = &hsw_power_well_regs,
3784			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3785		},
3786	},
3787	{
3788		.name = "DDI B IO power well",
3789		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3790		.ops = &hsw_power_well_ops,
3791		.id = DISP_PW_ID_NONE,
3792		{
3793			.hsw.regs = &hsw_power_well_regs,
3794			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3795		},
3796	},
3797	{
3798		.name = "DDI C IO power well",
3799		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3800		.ops = &hsw_power_well_ops,
3801		.id = DISP_PW_ID_NONE,
3802		{
3803			.hsw.regs = &hsw_power_well_regs,
3804			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3805		},
3806	},
3807	{
3808		.name = "DDI D IO power well",
3809		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3810		.ops = &hsw_power_well_ops,
3811		.id = DISP_PW_ID_NONE,
3812		{
3813			.hsw.regs = &hsw_power_well_regs,
3814			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3815		},
3816	},
3817	{
3818		.name = "DDI F IO power well",
3819		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3820		.ops = &hsw_power_well_ops,
3821		.id = CNL_DISP_PW_DDI_F_IO,
3822		{
3823			.hsw.regs = &hsw_power_well_regs,
3824			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3825		},
3826	},
3827	{
3828		.name = "AUX F",
3829		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3830		.ops = &hsw_power_well_ops,
3831		.id = CNL_DISP_PW_DDI_F_AUX,
3832		{
3833			.hsw.regs = &hsw_power_well_regs,
3834			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3835		},
3836	},
3837};
3838
3839static const struct i915_power_well_ops icl_aux_power_well_ops = {
3840	.sync_hw = hsw_power_well_sync_hw,
3841	.enable = icl_aux_power_well_enable,
3842	.disable = icl_aux_power_well_disable,
3843	.is_enabled = hsw_power_well_enabled,
3844};
3845
3846static const struct i915_power_well_regs icl_aux_power_well_regs = {
3847	.bios	= ICL_PWR_WELL_CTL_AUX1,
3848	.driver	= ICL_PWR_WELL_CTL_AUX2,
3849	.debug	= ICL_PWR_WELL_CTL_AUX4,
3850};
3851
3852static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3853	.bios	= ICL_PWR_WELL_CTL_DDI1,
3854	.driver	= ICL_PWR_WELL_CTL_DDI2,
3855	.debug	= ICL_PWR_WELL_CTL_DDI4,
3856};
3857
3858static const struct i915_power_well_desc icl_power_wells[] = {
3859	{
3860		.name = "always-on",
3861		.always_on = true,
3862		.domains = POWER_DOMAIN_MASK,
3863		.ops = &i9xx_always_on_power_well_ops,
3864		.id = DISP_PW_ID_NONE,
3865	},
3866	{
3867		.name = "power well 1",
3868		/* Handled by the DMC firmware */
3869		.always_on = true,
3870		.domains = 0,
3871		.ops = &hsw_power_well_ops,
3872		.id = SKL_DISP_PW_1,
3873		{
3874			.hsw.regs = &hsw_power_well_regs,
3875			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3876			.hsw.has_fuses = true,
3877		},
3878	},
3879	{
3880		.name = "DC off",
3881		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3882		.ops = &gen9_dc_off_power_well_ops,
3883		.id = SKL_DISP_DC_OFF,
3884	},
3885	{
3886		.name = "power well 2",
3887		.domains = ICL_PW_2_POWER_DOMAINS,
3888		.ops = &hsw_power_well_ops,
3889		.id = SKL_DISP_PW_2,
3890		{
3891			.hsw.regs = &hsw_power_well_regs,
3892			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3893			.hsw.has_fuses = true,
3894		},
3895	},
3896	{
3897		.name = "power well 3",
3898		.domains = ICL_PW_3_POWER_DOMAINS,
3899		.ops = &hsw_power_well_ops,
3900		.id = ICL_DISP_PW_3,
3901		{
3902			.hsw.regs = &hsw_power_well_regs,
3903			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3904			.hsw.irq_pipe_mask = BIT(PIPE_B),
3905			.hsw.has_vga = true,
3906			.hsw.has_fuses = true,
3907		},
3908	},
3909	{
3910		.name = "DDI A IO",
3911		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3912		.ops = &hsw_power_well_ops,
3913		.id = DISP_PW_ID_NONE,
3914		{
3915			.hsw.regs = &icl_ddi_power_well_regs,
3916			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3917		},
3918	},
3919	{
3920		.name = "DDI B IO",
3921		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3922		.ops = &hsw_power_well_ops,
3923		.id = DISP_PW_ID_NONE,
3924		{
3925			.hsw.regs = &icl_ddi_power_well_regs,
3926			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3927		},
3928	},
3929	{
3930		.name = "DDI C IO",
3931		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3932		.ops = &hsw_power_well_ops,
3933		.id = DISP_PW_ID_NONE,
3934		{
3935			.hsw.regs = &icl_ddi_power_well_regs,
3936			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3937		},
3938	},
3939	{
3940		.name = "DDI D IO",
3941		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3942		.ops = &hsw_power_well_ops,
3943		.id = DISP_PW_ID_NONE,
3944		{
3945			.hsw.regs = &icl_ddi_power_well_regs,
3946			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3947		},
3948	},
3949	{
3950		.name = "DDI E IO",
3951		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3952		.ops = &hsw_power_well_ops,
3953		.id = DISP_PW_ID_NONE,
3954		{
3955			.hsw.regs = &icl_ddi_power_well_regs,
3956			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3957		},
3958	},
3959	{
3960		.name = "DDI F IO",
3961		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3962		.ops = &hsw_power_well_ops,
3963		.id = DISP_PW_ID_NONE,
3964		{
3965			.hsw.regs = &icl_ddi_power_well_regs,
3966			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3967		},
3968	},
3969	{
3970		.name = "AUX A",
3971		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3972		.ops = &icl_aux_power_well_ops,
3973		.id = DISP_PW_ID_NONE,
3974		{
3975			.hsw.regs = &icl_aux_power_well_regs,
3976			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3977		},
3978	},
3979	{
3980		.name = "AUX B",
3981		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3982		.ops = &icl_aux_power_well_ops,
3983		.id = DISP_PW_ID_NONE,
3984		{
3985			.hsw.regs = &icl_aux_power_well_regs,
3986			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3987		},
3988	},
3989	{
3990		.name = "AUX C TC1",
3991		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3992		.ops = &icl_aux_power_well_ops,
3993		.id = DISP_PW_ID_NONE,
3994		{
3995			.hsw.regs = &icl_aux_power_well_regs,
3996			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3997			.hsw.is_tc_tbt = false,
3998		},
3999	},
4000	{
4001		.name = "AUX D TC2",
4002		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
4003		.ops = &icl_aux_power_well_ops,
4004		.id = DISP_PW_ID_NONE,
4005		{
4006			.hsw.regs = &icl_aux_power_well_regs,
4007			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
4008			.hsw.is_tc_tbt = false,
4009		},
4010	},
4011	{
4012		.name = "AUX E TC3",
4013		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
4014		.ops = &icl_aux_power_well_ops,
4015		.id = DISP_PW_ID_NONE,
4016		{
4017			.hsw.regs = &icl_aux_power_well_regs,
4018			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
4019			.hsw.is_tc_tbt = false,
4020		},
4021	},
4022	{
4023		.name = "AUX F TC4",
4024		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
4025		.ops = &icl_aux_power_well_ops,
4026		.id = DISP_PW_ID_NONE,
4027		{
4028			.hsw.regs = &icl_aux_power_well_regs,
4029			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
4030			.hsw.is_tc_tbt = false,
4031		},
4032	},
4033	{
4034		.name = "AUX C TBT1",
4035		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
4036		.ops = &icl_aux_power_well_ops,
4037		.id = DISP_PW_ID_NONE,
4038		{
4039			.hsw.regs = &icl_aux_power_well_regs,
4040			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
4041			.hsw.is_tc_tbt = true,
4042		},
4043	},
4044	{
4045		.name = "AUX D TBT2",
4046		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
4047		.ops = &icl_aux_power_well_ops,
4048		.id = DISP_PW_ID_NONE,
4049		{
4050			.hsw.regs = &icl_aux_power_well_regs,
4051			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
4052			.hsw.is_tc_tbt = true,
4053		},
4054	},
4055	{
4056		.name = "AUX E TBT3",
4057		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
4058		.ops = &icl_aux_power_well_ops,
4059		.id = DISP_PW_ID_NONE,
4060		{
4061			.hsw.regs = &icl_aux_power_well_regs,
4062			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
4063			.hsw.is_tc_tbt = true,
4064		},
4065	},
4066	{
4067		.name = "AUX F TBT4",
4068		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
4069		.ops = &icl_aux_power_well_ops,
4070		.id = DISP_PW_ID_NONE,
4071		{
4072			.hsw.regs = &icl_aux_power_well_regs,
4073			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
4074			.hsw.is_tc_tbt = true,
4075		},
4076	},
4077	{
4078		.name = "power well 4",
4079		.domains = ICL_PW_4_POWER_DOMAINS,
4080		.ops = &hsw_power_well_ops,
4081		.id = DISP_PW_ID_NONE,
4082		{
4083			.hsw.regs = &hsw_power_well_regs,
4084			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4085			.hsw.has_fuses = true,
4086			.hsw.irq_pipe_mask = BIT(PIPE_C),
4087		},
4088	},
4089};
4090
4091static void
4092tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
4093{
4094	u8 tries = 0;
4095	int ret;
4096
4097	while (1) {
4098		u32 low_val;
4099		u32 high_val = 0;
4100
4101		if (block)
4102			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
4103		else
4104			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
4105
4106		/*
4107		 * Spec states that we should timeout the request after 200us
4108		 * but the function below will timeout after 500us
4109		 */
4110		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
4111					     &high_val);
4112		if (ret == 0) {
4113			if (block &&
4114			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
4115				ret = -EIO;
4116			else
4117				break;
4118		}
4119
4120		if (++tries == 3)
4121			break;
4122
4123		msleep(1);
 
4124	}
4125
4126	if (ret)
4127		drm_err(&i915->drm, "TC cold %sblock failed\n",
4128			block ? "" : "un");
4129	else
4130		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
4131			    block ? "" : "un");
4132}
4133
4134static void
4135tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
4136				  struct i915_power_well *power_well)
4137{
4138	tgl_tc_cold_request(i915, true);
4139}
4140
4141static void
4142tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
4143				   struct i915_power_well *power_well)
4144{
4145	tgl_tc_cold_request(i915, false);
4146}
4147
4148static void
4149tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
4150				   struct i915_power_well *power_well)
4151{
4152	if (power_well->count > 0)
4153		tgl_tc_cold_off_power_well_enable(i915, power_well);
4154	else
4155		tgl_tc_cold_off_power_well_disable(i915, power_well);
4156}
4157
4158static bool
4159tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
4160				      struct i915_power_well *power_well)
4161{
4162	/*
4163	 * Not the correctly implementation but there is no way to just read it
4164	 * from PCODE, so returning count to avoid state mismatch errors
4165	 */
4166	return power_well->count;
4167}
4168
4169static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
4170	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
4171	.enable = tgl_tc_cold_off_power_well_enable,
4172	.disable = tgl_tc_cold_off_power_well_disable,
4173	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
4174};
4175
4176static const struct i915_power_well_desc tgl_power_wells[] = {
4177	{
4178		.name = "always-on",
4179		.always_on = true,
4180		.domains = POWER_DOMAIN_MASK,
4181		.ops = &i9xx_always_on_power_well_ops,
4182		.id = DISP_PW_ID_NONE,
4183	},
4184	{
4185		.name = "power well 1",
4186		/* Handled by the DMC firmware */
4187		.always_on = true,
4188		.domains = 0,
4189		.ops = &hsw_power_well_ops,
4190		.id = SKL_DISP_PW_1,
4191		{
4192			.hsw.regs = &hsw_power_well_regs,
4193			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4194			.hsw.has_fuses = true,
4195		},
4196	},
4197	{
4198		.name = "DC off",
4199		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
4200		.ops = &gen9_dc_off_power_well_ops,
4201		.id = SKL_DISP_DC_OFF,
4202	},
4203	{
4204		.name = "power well 2",
4205		.domains = TGL_PW_2_POWER_DOMAINS,
4206		.ops = &hsw_power_well_ops,
4207		.id = SKL_DISP_PW_2,
4208		{
4209			.hsw.regs = &hsw_power_well_regs,
4210			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4211			.hsw.has_fuses = true,
4212		},
4213	},
4214	{
4215		.name = "power well 3",
4216		.domains = TGL_PW_3_POWER_DOMAINS,
4217		.ops = &hsw_power_well_ops,
4218		.id = ICL_DISP_PW_3,
4219		{
4220			.hsw.regs = &hsw_power_well_regs,
4221			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4222			.hsw.irq_pipe_mask = BIT(PIPE_B),
4223			.hsw.has_vga = true,
4224			.hsw.has_fuses = true,
4225		},
4226	},
4227	{
4228		.name = "DDI A IO",
4229		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4230		.ops = &hsw_power_well_ops,
4231		.id = DISP_PW_ID_NONE,
4232		{
4233			.hsw.regs = &icl_ddi_power_well_regs,
4234			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4235		}
4236	},
4237	{
4238		.name = "DDI B IO",
4239		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4240		.ops = &hsw_power_well_ops,
4241		.id = DISP_PW_ID_NONE,
4242		{
4243			.hsw.regs = &icl_ddi_power_well_regs,
4244			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4245		}
4246	},
4247	{
4248		.name = "DDI C IO",
4249		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
4250		.ops = &hsw_power_well_ops,
4251		.id = DISP_PW_ID_NONE,
4252		{
4253			.hsw.regs = &icl_ddi_power_well_regs,
4254			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4255		}
4256	},
4257	{
4258		.name = "DDI IO TC1",
4259		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4260		.ops = &hsw_power_well_ops,
4261		.id = DISP_PW_ID_NONE,
4262		{
4263			.hsw.regs = &icl_ddi_power_well_regs,
4264			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4265		},
4266	},
4267	{
4268		.name = "DDI IO TC2",
4269		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4270		.ops = &hsw_power_well_ops,
4271		.id = DISP_PW_ID_NONE,
4272		{
4273			.hsw.regs = &icl_ddi_power_well_regs,
4274			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4275		},
4276	},
4277	{
4278		.name = "DDI IO TC3",
4279		.domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
4280		.ops = &hsw_power_well_ops,
4281		.id = DISP_PW_ID_NONE,
4282		{
4283			.hsw.regs = &icl_ddi_power_well_regs,
4284			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4285		},
4286	},
4287	{
4288		.name = "DDI IO TC4",
4289		.domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
4290		.ops = &hsw_power_well_ops,
4291		.id = DISP_PW_ID_NONE,
4292		{
4293			.hsw.regs = &icl_ddi_power_well_regs,
4294			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4295		},
4296	},
4297	{
4298		.name = "DDI IO TC5",
4299		.domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
4300		.ops = &hsw_power_well_ops,
4301		.id = DISP_PW_ID_NONE,
4302		{
4303			.hsw.regs = &icl_ddi_power_well_regs,
4304			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4305		},
4306	},
4307	{
4308		.name = "DDI IO TC6",
4309		.domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
4310		.ops = &hsw_power_well_ops,
4311		.id = DISP_PW_ID_NONE,
4312		{
4313			.hsw.regs = &icl_ddi_power_well_regs,
4314			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4315		},
4316	},
4317	{
4318		.name = "TC cold off",
4319		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4320		.ops = &tgl_tc_cold_off_ops,
4321		.id = TGL_DISP_PW_TC_COLD_OFF,
4322	},
4323	{
4324		.name = "AUX A",
4325		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4326		.ops = &icl_aux_power_well_ops,
4327		.id = DISP_PW_ID_NONE,
4328		{
4329			.hsw.regs = &icl_aux_power_well_regs,
4330			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4331		},
4332	},
4333	{
4334		.name = "AUX B",
4335		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4336		.ops = &icl_aux_power_well_ops,
4337		.id = DISP_PW_ID_NONE,
4338		{
4339			.hsw.regs = &icl_aux_power_well_regs,
4340			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4341		},
4342	},
4343	{
4344		.name = "AUX C",
4345		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4346		.ops = &icl_aux_power_well_ops,
4347		.id = DISP_PW_ID_NONE,
4348		{
4349			.hsw.regs = &icl_aux_power_well_regs,
4350			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4351		},
4352	},
4353	{
4354		.name = "AUX USBC1",
4355		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4356		.ops = &icl_aux_power_well_ops,
4357		.id = DISP_PW_ID_NONE,
4358		{
4359			.hsw.regs = &icl_aux_power_well_regs,
4360			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4361			.hsw.is_tc_tbt = false,
4362		},
4363	},
4364	{
4365		.name = "AUX USBC2",
4366		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4367		.ops = &icl_aux_power_well_ops,
4368		.id = DISP_PW_ID_NONE,
4369		{
4370			.hsw.regs = &icl_aux_power_well_regs,
4371			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4372			.hsw.is_tc_tbt = false,
4373		},
4374	},
4375	{
4376		.name = "AUX USBC3",
4377		.domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
4378		.ops = &icl_aux_power_well_ops,
4379		.id = DISP_PW_ID_NONE,
4380		{
4381			.hsw.regs = &icl_aux_power_well_regs,
4382			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4383			.hsw.is_tc_tbt = false,
4384		},
4385	},
4386	{
4387		.name = "AUX USBC4",
4388		.domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
4389		.ops = &icl_aux_power_well_ops,
4390		.id = DISP_PW_ID_NONE,
4391		{
4392			.hsw.regs = &icl_aux_power_well_regs,
4393			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4394			.hsw.is_tc_tbt = false,
4395		},
4396	},
4397	{
4398		.name = "AUX USBC5",
4399		.domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
4400		.ops = &icl_aux_power_well_ops,
4401		.id = DISP_PW_ID_NONE,
4402		{
4403			.hsw.regs = &icl_aux_power_well_regs,
4404			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4405			.hsw.is_tc_tbt = false,
4406		},
4407	},
4408	{
4409		.name = "AUX USBC6",
4410		.domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
4411		.ops = &icl_aux_power_well_ops,
4412		.id = DISP_PW_ID_NONE,
4413		{
4414			.hsw.regs = &icl_aux_power_well_regs,
4415			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4416			.hsw.is_tc_tbt = false,
4417		},
4418	},
4419	{
4420		.name = "AUX TBT1",
4421		.domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
4422		.ops = &icl_aux_power_well_ops,
4423		.id = DISP_PW_ID_NONE,
4424		{
4425			.hsw.regs = &icl_aux_power_well_regs,
4426			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4427			.hsw.is_tc_tbt = true,
4428		},
4429	},
4430	{
4431		.name = "AUX TBT2",
4432		.domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
4433		.ops = &icl_aux_power_well_ops,
4434		.id = DISP_PW_ID_NONE,
4435		{
4436			.hsw.regs = &icl_aux_power_well_regs,
4437			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4438			.hsw.is_tc_tbt = true,
4439		},
4440	},
4441	{
4442		.name = "AUX TBT3",
4443		.domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
4444		.ops = &icl_aux_power_well_ops,
4445		.id = DISP_PW_ID_NONE,
4446		{
4447			.hsw.regs = &icl_aux_power_well_regs,
4448			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4449			.hsw.is_tc_tbt = true,
4450		},
4451	},
4452	{
4453		.name = "AUX TBT4",
4454		.domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
4455		.ops = &icl_aux_power_well_ops,
4456		.id = DISP_PW_ID_NONE,
4457		{
4458			.hsw.regs = &icl_aux_power_well_regs,
4459			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4460			.hsw.is_tc_tbt = true,
4461		},
4462	},
4463	{
4464		.name = "AUX TBT5",
4465		.domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
4466		.ops = &icl_aux_power_well_ops,
4467		.id = DISP_PW_ID_NONE,
4468		{
4469			.hsw.regs = &icl_aux_power_well_regs,
4470			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4471			.hsw.is_tc_tbt = true,
4472		},
4473	},
4474	{
4475		.name = "AUX TBT6",
4476		.domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
4477		.ops = &icl_aux_power_well_ops,
4478		.id = DISP_PW_ID_NONE,
4479		{
4480			.hsw.regs = &icl_aux_power_well_regs,
4481			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4482			.hsw.is_tc_tbt = true,
4483		},
4484	},
4485	{
4486		.name = "power well 4",
4487		.domains = TGL_PW_4_POWER_DOMAINS,
4488		.ops = &hsw_power_well_ops,
4489		.id = DISP_PW_ID_NONE,
4490		{
4491			.hsw.regs = &hsw_power_well_regs,
4492			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4493			.hsw.has_fuses = true,
4494			.hsw.irq_pipe_mask = BIT(PIPE_C),
4495		}
4496	},
4497	{
4498		.name = "power well 5",
4499		.domains = TGL_PW_5_POWER_DOMAINS,
4500		.ops = &hsw_power_well_ops,
4501		.id = DISP_PW_ID_NONE,
4502		{
4503			.hsw.regs = &hsw_power_well_regs,
4504			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4505			.hsw.has_fuses = true,
4506			.hsw.irq_pipe_mask = BIT(PIPE_D),
4507		},
4508	},
4509};
4510
4511static const struct i915_power_well_desc rkl_power_wells[] = {
4512	{
4513		.name = "always-on",
4514		.always_on = true,
4515		.domains = POWER_DOMAIN_MASK,
4516		.ops = &i9xx_always_on_power_well_ops,
4517		.id = DISP_PW_ID_NONE,
4518	},
4519	{
4520		.name = "power well 1",
4521		/* Handled by the DMC firmware */
4522		.always_on = true,
4523		.domains = 0,
4524		.ops = &hsw_power_well_ops,
4525		.id = SKL_DISP_PW_1,
4526		{
4527			.hsw.regs = &hsw_power_well_regs,
4528			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4529			.hsw.has_fuses = true,
4530		},
4531	},
4532	{
4533		.name = "DC off",
4534		.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
4535		.ops = &gen9_dc_off_power_well_ops,
4536		.id = SKL_DISP_DC_OFF,
4537	},
4538	{
4539		.name = "power well 3",
4540		.domains = RKL_PW_3_POWER_DOMAINS,
4541		.ops = &hsw_power_well_ops,
4542		.id = ICL_DISP_PW_3,
4543		{
4544			.hsw.regs = &hsw_power_well_regs,
4545			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4546			.hsw.irq_pipe_mask = BIT(PIPE_B),
4547			.hsw.has_vga = true,
4548			.hsw.has_fuses = true,
4549		},
4550	},
4551	{
4552		.name = "power well 4",
4553		.domains = RKL_PW_4_POWER_DOMAINS,
4554		.ops = &hsw_power_well_ops,
4555		.id = DISP_PW_ID_NONE,
4556		{
4557			.hsw.regs = &hsw_power_well_regs,
4558			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4559			.hsw.has_fuses = true,
4560			.hsw.irq_pipe_mask = BIT(PIPE_C),
4561		}
4562	},
4563	{
4564		.name = "DDI A IO",
4565		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4566		.ops = &hsw_power_well_ops,
4567		.id = DISP_PW_ID_NONE,
4568		{
4569			.hsw.regs = &icl_ddi_power_well_regs,
4570			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4571		}
4572	},
4573	{
4574		.name = "DDI B IO",
4575		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4576		.ops = &hsw_power_well_ops,
4577		.id = DISP_PW_ID_NONE,
4578		{
4579			.hsw.regs = &icl_ddi_power_well_regs,
4580			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4581		}
4582	},
4583	{
4584		.name = "DDI IO TC1",
4585		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4586		.ops = &hsw_power_well_ops,
4587		.id = DISP_PW_ID_NONE,
4588		{
4589			.hsw.regs = &icl_ddi_power_well_regs,
4590			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4591		},
4592	},
4593	{
4594		.name = "DDI IO TC2",
4595		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4596		.ops = &hsw_power_well_ops,
4597		.id = DISP_PW_ID_NONE,
4598		{
4599			.hsw.regs = &icl_ddi_power_well_regs,
4600			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4601		},
4602	},
4603	{
4604		.name = "AUX A",
4605		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
4606		.ops = &icl_aux_power_well_ops,
4607		.id = DISP_PW_ID_NONE,
4608		{
4609			.hsw.regs = &icl_aux_power_well_regs,
4610			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4611		},
4612	},
4613	{
4614		.name = "AUX B",
4615		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
4616		.ops = &icl_aux_power_well_ops,
4617		.id = DISP_PW_ID_NONE,
4618		{
4619			.hsw.regs = &icl_aux_power_well_regs,
4620			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4621		},
4622	},
4623	{
4624		.name = "AUX USBC1",
4625		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4626		.ops = &icl_aux_power_well_ops,
4627		.id = DISP_PW_ID_NONE,
4628		{
4629			.hsw.regs = &icl_aux_power_well_regs,
4630			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4631		},
4632	},
4633	{
4634		.name = "AUX USBC2",
4635		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4636		.ops = &icl_aux_power_well_ops,
4637		.id = DISP_PW_ID_NONE,
4638		{
4639			.hsw.regs = &icl_aux_power_well_regs,
4640			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4641		},
4642	},
4643};
4644
4645static const struct i915_power_well_desc xelpd_power_wells[] = {
4646	{
4647		.name = "always-on",
4648		.always_on = true,
4649		.domains = POWER_DOMAIN_MASK,
4650		.ops = &i9xx_always_on_power_well_ops,
4651		.id = DISP_PW_ID_NONE,
4652	},
4653	{
4654		.name = "power well 1",
4655		/* Handled by the DMC firmware */
4656		.always_on = true,
4657		.domains = 0,
4658		.ops = &hsw_power_well_ops,
4659		.id = SKL_DISP_PW_1,
4660		{
4661			.hsw.regs = &hsw_power_well_regs,
4662			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4663			.hsw.has_fuses = true,
4664		},
4665	},
4666	{
4667		.name = "DC off",
4668		.domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
4669		.ops = &gen9_dc_off_power_well_ops,
4670		.id = SKL_DISP_DC_OFF,
4671	},
4672	{
4673		.name = "power well 2",
4674		.domains = XELPD_PW_2_POWER_DOMAINS,
4675		.ops = &hsw_power_well_ops,
4676		.id = SKL_DISP_PW_2,
4677		{
4678			.hsw.regs = &hsw_power_well_regs,
4679			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4680			.hsw.has_vga = true,
4681			.hsw.has_fuses = true,
4682		},
4683	},
4684	{
4685		.name = "power well A",
4686		.domains = XELPD_PW_A_POWER_DOMAINS,
4687		.ops = &hsw_power_well_ops,
4688		.id = DISP_PW_ID_NONE,
4689		{
4690			.hsw.regs = &hsw_power_well_regs,
4691			.hsw.idx = XELPD_PW_CTL_IDX_PW_A,
4692			.hsw.irq_pipe_mask = BIT(PIPE_A),
4693			.hsw.has_fuses = true,
4694		},
4695	},
4696	{
4697		.name = "power well B",
4698		.domains = XELPD_PW_B_POWER_DOMAINS,
4699		.ops = &hsw_power_well_ops,
4700		.id = DISP_PW_ID_NONE,
4701		{
4702			.hsw.regs = &hsw_power_well_regs,
4703			.hsw.idx = XELPD_PW_CTL_IDX_PW_B,
4704			.hsw.irq_pipe_mask = BIT(PIPE_B),
4705			.hsw.has_fuses = true,
4706		},
4707	},
4708	{
4709		.name = "power well C",
4710		.domains = XELPD_PW_C_POWER_DOMAINS,
4711		.ops = &hsw_power_well_ops,
4712		.id = DISP_PW_ID_NONE,
4713		{
4714			.hsw.regs = &hsw_power_well_regs,
4715			.hsw.idx = XELPD_PW_CTL_IDX_PW_C,
4716			.hsw.irq_pipe_mask = BIT(PIPE_C),
4717			.hsw.has_fuses = true,
4718		},
4719	},
4720	{
4721		.name = "power well D",
4722		.domains = XELPD_PW_D_POWER_DOMAINS,
4723		.ops = &hsw_power_well_ops,
4724		.id = DISP_PW_ID_NONE,
4725		{
4726			.hsw.regs = &hsw_power_well_regs,
4727			.hsw.idx = XELPD_PW_CTL_IDX_PW_D,
4728			.hsw.irq_pipe_mask = BIT(PIPE_D),
4729			.hsw.has_fuses = true,
4730		},
4731	},
4732	{
4733		.name = "DDI A IO",
4734		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4735		.ops = &hsw_power_well_ops,
4736		.id = DISP_PW_ID_NONE,
4737		{
4738			.hsw.regs = &icl_ddi_power_well_regs,
4739			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4740		}
4741	},
4742	{
4743		.name = "DDI B IO",
4744		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4745		.ops = &hsw_power_well_ops,
4746		.id = DISP_PW_ID_NONE,
4747		{
4748			.hsw.regs = &icl_ddi_power_well_regs,
4749			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4750		}
4751	},
4752	{
4753		.name = "DDI C IO",
4754		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
4755		.ops = &hsw_power_well_ops,
4756		.id = DISP_PW_ID_NONE,
4757		{
4758			.hsw.regs = &icl_ddi_power_well_regs,
4759			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4760		}
4761	},
4762	{
4763		.name = "DDI IO D_XELPD",
4764		.domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
4765		.ops = &hsw_power_well_ops,
4766		.id = DISP_PW_ID_NONE,
4767		{
4768			.hsw.regs = &icl_ddi_power_well_regs,
4769			.hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
4770		}
4771	},
4772	{
4773		.name = "DDI IO E_XELPD",
4774		.domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
4775		.ops = &hsw_power_well_ops,
4776		.id = DISP_PW_ID_NONE,
4777		{
4778			.hsw.regs = &icl_ddi_power_well_regs,
4779			.hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
4780		}
4781	},
4782	{
4783		.name = "DDI IO TC1",
4784		.domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
4785		.ops = &hsw_power_well_ops,
4786		.id = DISP_PW_ID_NONE,
4787		{
4788			.hsw.regs = &icl_ddi_power_well_regs,
4789			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4790		}
4791	},
4792	{
4793		.name = "DDI IO TC2",
4794		.domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
4795		.ops = &hsw_power_well_ops,
4796		.id = DISP_PW_ID_NONE,
4797		{
4798			.hsw.regs = &icl_ddi_power_well_regs,
4799			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4800		}
4801	},
4802	{
4803		.name = "DDI IO TC3",
4804		.domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
4805		.ops = &hsw_power_well_ops,
4806		.id = DISP_PW_ID_NONE,
4807		{
4808			.hsw.regs = &icl_ddi_power_well_regs,
4809			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4810		}
4811	},
4812	{
4813		.name = "DDI IO TC4",
4814		.domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
4815		.ops = &hsw_power_well_ops,
4816		.id = DISP_PW_ID_NONE,
4817		{
4818			.hsw.regs = &icl_ddi_power_well_regs,
4819			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4820		}
4821	},
4822	{
4823		.name = "AUX A",
4824		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
4825		.ops = &icl_aux_power_well_ops,
4826		.id = DISP_PW_ID_NONE,
4827		{
4828			.hsw.regs = &icl_aux_power_well_regs,
4829			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4830		},
4831	},
4832	{
4833		.name = "AUX B",
4834		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
4835		.ops = &icl_aux_power_well_ops,
4836		.id = DISP_PW_ID_NONE,
4837		{
4838			.hsw.regs = &icl_aux_power_well_regs,
4839			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4840		},
4841	},
4842	{
4843		.name = "AUX C",
4844		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4845		.ops = &icl_aux_power_well_ops,
4846		.id = DISP_PW_ID_NONE,
4847		{
4848			.hsw.regs = &icl_aux_power_well_regs,
4849			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4850		},
4851	},
4852	{
4853		.name = "AUX D_XELPD",
4854		.domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
4855		.ops = &icl_aux_power_well_ops,
4856		.id = DISP_PW_ID_NONE,
4857		{
4858			.hsw.regs = &icl_aux_power_well_regs,
4859			.hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
4860		},
4861	},
4862	{
4863		.name = "AUX E_XELPD",
4864		.domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
4865		.ops = &icl_aux_power_well_ops,
4866		.id = DISP_PW_ID_NONE,
4867		{
4868			.hsw.regs = &icl_aux_power_well_regs,
4869			.hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
4870		},
4871	},
4872	{
4873		.name = "AUX USBC1",
4874		.domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
4875		.ops = &icl_aux_power_well_ops,
4876		.id = DISP_PW_ID_NONE,
4877		{
4878			.hsw.regs = &icl_aux_power_well_regs,
4879			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4880		},
4881	},
4882	{
4883		.name = "AUX USBC2",
4884		.domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
4885		.ops = &icl_aux_power_well_ops,
4886		.id = DISP_PW_ID_NONE,
4887		{
4888			.hsw.regs = &icl_aux_power_well_regs,
4889			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4890		},
4891	},
4892	{
4893		.name = "AUX USBC3",
4894		.domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
4895		.ops = &icl_aux_power_well_ops,
4896		.id = DISP_PW_ID_NONE,
4897		{
4898			.hsw.regs = &icl_aux_power_well_regs,
4899			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4900		},
4901	},
4902	{
4903		.name = "AUX USBC4",
4904		.domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
4905		.ops = &icl_aux_power_well_ops,
4906		.id = DISP_PW_ID_NONE,
4907		{
4908			.hsw.regs = &icl_aux_power_well_regs,
4909			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4910		},
4911	},
4912	{
4913		.name = "AUX TBT1",
4914		.domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
4915		.ops = &icl_aux_power_well_ops,
4916		.id = DISP_PW_ID_NONE,
4917		{
4918			.hsw.regs = &icl_aux_power_well_regs,
4919			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4920			.hsw.is_tc_tbt = true,
4921		},
4922	},
4923	{
4924		.name = "AUX TBT2",
4925		.domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
4926		.ops = &icl_aux_power_well_ops,
4927		.id = DISP_PW_ID_NONE,
4928		{
4929			.hsw.regs = &icl_aux_power_well_regs,
4930			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4931			.hsw.is_tc_tbt = true,
4932		},
4933	},
4934	{
4935		.name = "AUX TBT3",
4936		.domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
4937		.ops = &icl_aux_power_well_ops,
4938		.id = DISP_PW_ID_NONE,
4939		{
4940			.hsw.regs = &icl_aux_power_well_regs,
4941			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4942			.hsw.is_tc_tbt = true,
4943		},
4944	},
4945	{
4946		.name = "AUX TBT4",
4947		.domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
4948		.ops = &icl_aux_power_well_ops,
4949		.id = DISP_PW_ID_NONE,
4950		{
4951			.hsw.regs = &icl_aux_power_well_regs,
4952			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4953			.hsw.is_tc_tbt = true,
4954		},
4955	},
4956};
4957
4958static int
4959sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4960				   int disable_power_well)
4961{
4962	if (disable_power_well >= 0)
4963		return !!disable_power_well;
4964
4965	return 1;
4966}
4967
4968static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4969			       int enable_dc)
4970{
4971	u32 mask;
4972	int requested_dc;
4973	int max_dc;
4974
4975	if (!HAS_DISPLAY(dev_priv))
4976		return 0;
4977
4978	if (IS_DG1(dev_priv))
4979		max_dc = 3;
4980	else if (DISPLAY_VER(dev_priv) >= 12)
4981		max_dc = 4;
4982	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4983		max_dc = 1;
4984	else if (DISPLAY_VER(dev_priv) >= 9)
 
 
 
 
4985		max_dc = 2;
4986	else
 
 
 
 
 
 
 
4987		max_dc = 0;
4988
4989	/*
4990	 * DC9 has a separate HW flow from the rest of the DC states,
4991	 * not depending on the DMC firmware. It's needed by system
4992	 * suspend/resume, so allow it unconditionally.
4993	 */
4994	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
4995		DISPLAY_VER(dev_priv) >= 11 ?
4996	       DC_STATE_EN_DC9 : 0;
4997
4998	if (!dev_priv->params.disable_power_well)
4999		max_dc = 0;
5000
5001	if (enable_dc >= 0 && enable_dc <= max_dc) {
5002		requested_dc = enable_dc;
5003	} else if (enable_dc == -1) {
5004		requested_dc = max_dc;
5005	} else if (enable_dc > max_dc && enable_dc <= 4) {
5006		drm_dbg_kms(&dev_priv->drm,
5007			    "Adjusting requested max DC state (%d->%d)\n",
5008			    enable_dc, max_dc);
5009		requested_dc = max_dc;
5010	} else {
5011		drm_err(&dev_priv->drm,
5012			"Unexpected value for enable_dc (%d)\n", enable_dc);
5013		requested_dc = max_dc;
5014	}
5015
5016	switch (requested_dc) {
5017	case 4:
5018		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
5019		break;
5020	case 3:
5021		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
5022		break;
5023	case 2:
5024		mask |= DC_STATE_EN_UPTO_DC6;
5025		break;
5026	case 1:
5027		mask |= DC_STATE_EN_UPTO_DC5;
5028		break;
5029	}
5030
5031	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
5032
5033	return mask;
5034}
5035
5036static int
5037__set_power_wells(struct i915_power_domains *power_domains,
5038		  const struct i915_power_well_desc *power_well_descs,
5039		  int power_well_descs_sz, u64 skip_mask)
5040{
5041	struct drm_i915_private *i915 = container_of(power_domains,
5042						     struct drm_i915_private,
5043						     power_domains);
5044	u64 power_well_ids = 0;
5045	int power_well_count = 0;
5046	int i, plt_idx = 0;
5047
5048	for (i = 0; i < power_well_descs_sz; i++)
5049		if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
5050			power_well_count++;
5051
5052	power_domains->power_well_count = power_well_count;
5053	power_domains->power_wells =
5054				kcalloc(power_well_count,
5055					sizeof(*power_domains->power_wells),
5056					GFP_KERNEL);
5057	if (!power_domains->power_wells)
5058		return -ENOMEM;
5059
5060	for (i = 0; i < power_well_descs_sz; i++) {
5061		enum i915_power_well_id id = power_well_descs[i].id;
5062
5063		if (BIT_ULL(id) & skip_mask)
5064			continue;
5065
5066		power_domains->power_wells[plt_idx++].desc =
5067			&power_well_descs[i];
5068
5069		if (id == DISP_PW_ID_NONE)
5070			continue;
5071
5072		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
5073		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
5074		power_well_ids |= BIT_ULL(id);
5075	}
5076
5077	return 0;
5078}
5079
5080#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
5081	__set_power_wells(power_domains, __power_well_descs, \
5082			  ARRAY_SIZE(__power_well_descs), skip_mask)
5083
5084#define set_power_wells(power_domains, __power_well_descs) \
5085	set_power_wells_mask(power_domains, __power_well_descs, 0)
 
5086
5087/**
5088 * intel_power_domains_init - initializes the power domain structures
5089 * @dev_priv: i915 device instance
5090 *
5091 * Initializes the power domain structures for @dev_priv depending upon the
5092 * supported platform.
5093 */
5094int intel_power_domains_init(struct drm_i915_private *dev_priv)
5095{
5096	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5097	int err;
5098
5099	dev_priv->params.disable_power_well =
5100		sanitize_disable_power_well_option(dev_priv,
5101						   dev_priv->params.disable_power_well);
5102	dev_priv->dmc.allowed_dc_mask =
5103		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
5104
5105	dev_priv->dmc.target_dc_state =
5106		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
5107
5108	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
5109
5110	mutex_init(&power_domains->lock);
5111
5112	INIT_DELAYED_WORK(&power_domains->async_put_work,
5113			  intel_display_power_put_async_work);
5114
5115	/*
5116	 * The enabling order will be from lower to higher indexed wells,
5117	 * the disabling order is reversed.
5118	 */
5119	if (!HAS_DISPLAY(dev_priv)) {
5120		power_domains->power_well_count = 0;
5121		err = 0;
5122	} else if (DISPLAY_VER(dev_priv) >= 13) {
5123		err = set_power_wells(power_domains, xelpd_power_wells);
5124	} else if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
5125		err = set_power_wells_mask(power_domains, tgl_power_wells,
5126					   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
5127	} else if (IS_ROCKETLAKE(dev_priv)) {
5128		err = set_power_wells(power_domains, rkl_power_wells);
5129	} else if (DISPLAY_VER(dev_priv) == 12) {
5130		err = set_power_wells(power_domains, tgl_power_wells);
5131	} else if (DISPLAY_VER(dev_priv) == 11) {
5132		err = set_power_wells(power_domains, icl_power_wells);
5133	} else if (IS_CNL_WITH_PORT_F(dev_priv)) {
5134		err = set_power_wells(power_domains, cnl_power_wells);
5135	} else if (IS_CANNONLAKE(dev_priv)) {
5136		err = set_power_wells_mask(power_domains, cnl_power_wells,
5137					   BIT_ULL(CNL_DISP_PW_DDI_F_IO) |
5138					   BIT_ULL(CNL_DISP_PW_DDI_F_AUX));
 
 
 
 
 
 
 
5139	} else if (IS_GEMINILAKE(dev_priv)) {
5140		err = set_power_wells(power_domains, glk_power_wells);
5141	} else if (IS_BROXTON(dev_priv)) {
5142		err = set_power_wells(power_domains, bxt_power_wells);
5143	} else if (DISPLAY_VER(dev_priv) == 9) {
5144		err = set_power_wells(power_domains, skl_power_wells);
5145	} else if (IS_CHERRYVIEW(dev_priv)) {
5146		err = set_power_wells(power_domains, chv_power_wells);
5147	} else if (IS_BROADWELL(dev_priv)) {
5148		err = set_power_wells(power_domains, bdw_power_wells);
5149	} else if (IS_HASWELL(dev_priv)) {
5150		err = set_power_wells(power_domains, hsw_power_wells);
5151	} else if (IS_VALLEYVIEW(dev_priv)) {
5152		err = set_power_wells(power_domains, vlv_power_wells);
5153	} else if (IS_I830(dev_priv)) {
5154		err = set_power_wells(power_domains, i830_power_wells);
5155	} else {
5156		err = set_power_wells(power_domains, i9xx_always_on_power_well);
5157	}
5158
5159	return err;
5160}
5161
5162/**
5163 * intel_power_domains_cleanup - clean up power domains resources
5164 * @dev_priv: i915 device instance
5165 *
5166 * Release any resources acquired by intel_power_domains_init()
5167 */
5168void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
5169{
5170	kfree(dev_priv->power_domains.power_wells);
5171}
5172
5173static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
5174{
5175	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5176	struct i915_power_well *power_well;
5177
5178	mutex_lock(&power_domains->lock);
5179	for_each_power_well(dev_priv, power_well) {
5180		power_well->desc->ops->sync_hw(dev_priv, power_well);
5181		power_well->hw_enabled =
5182			power_well->desc->ops->is_enabled(dev_priv, power_well);
5183	}
5184	mutex_unlock(&power_domains->lock);
5185}
5186
5187static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
5188				enum dbuf_slice slice, bool enable)
5189{
5190	i915_reg_t reg = DBUF_CTL_S(slice);
5191	bool state;
 
5192
5193	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
5194		     enable ? DBUF_POWER_REQUEST : 0);
 
 
 
 
5195	intel_de_posting_read(dev_priv, reg);
5196	udelay(10);
5197
5198	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
5199	drm_WARN(&dev_priv->drm, enable != state,
5200		 "DBuf slice %d power %s timeout!\n",
5201		 slice, enabledisable(enable));
5202}
5203
5204void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
5205			     u8 req_slices)
5206{
 
5207	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5208	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
5209	enum dbuf_slice slice;
5210
5211	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
5212		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
5213		 req_slices, slice_mask);
5214
5215	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
5216		    req_slices);
5217
5218	/*
5219	 * Might be running this in parallel to gen9_dc_off_power_well_enable
5220	 * being called from intel_dp_detect for instance,
5221	 * which causes assertion triggered by race condition,
5222	 * as gen9_assert_dbuf_enabled might preempt this when registers
5223	 * were already updated, while dev_priv was not.
5224	 */
5225	mutex_lock(&power_domains->lock);
5226
5227	for_each_dbuf_slice(dev_priv, slice)
5228		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
5229
5230	dev_priv->dbuf.enabled_slices = req_slices;
5231
5232	mutex_unlock(&power_domains->lock);
5233}
5234
5235static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
5236{
5237	dev_priv->dbuf.enabled_slices =
5238		intel_enabled_dbuf_slices_mask(dev_priv);
5239
5240	/*
5241	 * Just power up at least 1 slice, we will
5242	 * figure out later which slices we have and what we need.
5243	 */
5244	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
5245				dev_priv->dbuf.enabled_slices);
5246}
5247
5248static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
5249{
5250	gen9_dbuf_slices_update(dev_priv, 0);
5251}
5252
5253static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
5254{
5255	enum dbuf_slice slice;
5256
5257	if (IS_ALDERLAKE_P(dev_priv))
5258		return;
5259
5260	for_each_dbuf_slice(dev_priv, slice)
5261		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
5262			     DBUF_TRACKER_STATE_SERVICE_MASK,
5263			     DBUF_TRACKER_STATE_SERVICE(8));
5264}
5265
5266static void icl_mbus_init(struct drm_i915_private *dev_priv)
5267{
5268	unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
5269	u32 mask, val, i;
5270
5271	if (IS_ALDERLAKE_P(dev_priv))
5272		return;
5273
5274	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
5275		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
5276		MBUS_ABOX_B_CREDIT_MASK |
5277		MBUS_ABOX_BW_CREDIT_MASK;
5278	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
5279		MBUS_ABOX_BT_CREDIT_POOL2(16) |
5280		MBUS_ABOX_B_CREDIT(1) |
5281		MBUS_ABOX_BW_CREDIT(1);
5282
5283	/*
5284	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
5285	 * expect us to program the abox_ctl0 register as well, even though
5286	 * we don't have to program other instance-0 registers like BW_BUDDY.
5287	 */
5288	if (DISPLAY_VER(dev_priv) == 12)
5289		abox_regs |= BIT(0);
5290
5291	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
5292		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
5293}
5294
5295static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
5296{
5297	u32 val = intel_de_read(dev_priv, LCPLL_CTL);
5298
5299	/*
5300	 * The LCPLL register should be turned on by the BIOS. For now
5301	 * let's just check its state and print errors in case
5302	 * something is wrong.  Don't even try to turn it on.
5303	 */
5304
5305	if (val & LCPLL_CD_SOURCE_FCLK)
5306		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
5307
5308	if (val & LCPLL_PLL_DISABLE)
5309		drm_err(&dev_priv->drm, "LCPLL is disabled\n");
5310
5311	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
5312		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
5313}
5314
5315static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
5316{
5317	struct drm_device *dev = &dev_priv->drm;
5318	struct intel_crtc *crtc;
5319
5320	for_each_intel_crtc(dev, crtc)
5321		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
5322				pipe_name(crtc->pipe));
5323
5324	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
5325			"Display power well on\n");
5326	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
5327			"SPLL enabled\n");
5328	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
5329			"WRPLL1 enabled\n");
5330	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
5331			"WRPLL2 enabled\n");
5332	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
5333			"Panel power on\n");
5334	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
5335			"CPU PWM1 enabled\n");
5336	if (IS_HASWELL(dev_priv))
5337		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
5338				"CPU PWM2 enabled\n");
5339	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
5340			"PCH PWM1 enabled\n");
5341	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
5342			"Utility pin enabled\n");
5343	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
5344			"PCH GTC enabled\n");
5345
5346	/*
5347	 * In theory we can still leave IRQs enabled, as long as only the HPD
5348	 * interrupts remain enabled. We used to check for that, but since it's
5349	 * gen-specific and since we only disable LCPLL after we fully disable
5350	 * the interrupts, the check below should be enough.
5351	 */
5352	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
5353}
5354
5355static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
5356{
5357	if (IS_HASWELL(dev_priv))
5358		return intel_de_read(dev_priv, D_COMP_HSW);
5359	else
5360		return intel_de_read(dev_priv, D_COMP_BDW);
5361}
5362
5363static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
5364{
5365	if (IS_HASWELL(dev_priv)) {
5366		if (sandybridge_pcode_write(dev_priv,
5367					    GEN6_PCODE_WRITE_D_COMP, val))
5368			drm_dbg_kms(&dev_priv->drm,
5369				    "Failed to write to D_COMP\n");
5370	} else {
5371		intel_de_write(dev_priv, D_COMP_BDW, val);
5372		intel_de_posting_read(dev_priv, D_COMP_BDW);
5373	}
5374}
5375
5376/*
5377 * This function implements pieces of two sequences from BSpec:
5378 * - Sequence for display software to disable LCPLL
5379 * - Sequence for display software to allow package C8+
5380 * The steps implemented here are just the steps that actually touch the LCPLL
5381 * register. Callers should take care of disabling all the display engine
5382 * functions, doing the mode unset, fixing interrupts, etc.
5383 */
5384static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
5385			      bool switch_to_fclk, bool allow_power_down)
5386{
5387	u32 val;
5388
5389	assert_can_disable_lcpll(dev_priv);
5390
5391	val = intel_de_read(dev_priv, LCPLL_CTL);
5392
5393	if (switch_to_fclk) {
5394		val |= LCPLL_CD_SOURCE_FCLK;
5395		intel_de_write(dev_priv, LCPLL_CTL, val);
5396
5397		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
5398				LCPLL_CD_SOURCE_FCLK_DONE, 1))
5399			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
5400
5401		val = intel_de_read(dev_priv, LCPLL_CTL);
5402	}
5403
5404	val |= LCPLL_PLL_DISABLE;
5405	intel_de_write(dev_priv, LCPLL_CTL, val);
5406	intel_de_posting_read(dev_priv, LCPLL_CTL);
5407
5408	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
5409		drm_err(&dev_priv->drm, "LCPLL still locked\n");
5410
5411	val = hsw_read_dcomp(dev_priv);
5412	val |= D_COMP_COMP_DISABLE;
5413	hsw_write_dcomp(dev_priv, val);
5414	ndelay(100);
5415
5416	if (wait_for((hsw_read_dcomp(dev_priv) &
5417		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
5418		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
5419
5420	if (allow_power_down) {
5421		val = intel_de_read(dev_priv, LCPLL_CTL);
5422		val |= LCPLL_POWER_DOWN_ALLOW;
5423		intel_de_write(dev_priv, LCPLL_CTL, val);
5424		intel_de_posting_read(dev_priv, LCPLL_CTL);
5425	}
5426}
5427
5428/*
5429 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
5430 * source.
5431 */
5432static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
5433{
5434	u32 val;
5435
5436	val = intel_de_read(dev_priv, LCPLL_CTL);
5437
5438	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
5439		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
5440		return;
5441
5442	/*
5443	 * Make sure we're not on PC8 state before disabling PC8, otherwise
5444	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
5445	 */
5446	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
5447
5448	if (val & LCPLL_POWER_DOWN_ALLOW) {
5449		val &= ~LCPLL_POWER_DOWN_ALLOW;
5450		intel_de_write(dev_priv, LCPLL_CTL, val);
5451		intel_de_posting_read(dev_priv, LCPLL_CTL);
5452	}
5453
5454	val = hsw_read_dcomp(dev_priv);
5455	val |= D_COMP_COMP_FORCE;
5456	val &= ~D_COMP_COMP_DISABLE;
5457	hsw_write_dcomp(dev_priv, val);
5458
5459	val = intel_de_read(dev_priv, LCPLL_CTL);
5460	val &= ~LCPLL_PLL_DISABLE;
5461	intel_de_write(dev_priv, LCPLL_CTL, val);
5462
5463	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
5464		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
5465
5466	if (val & LCPLL_CD_SOURCE_FCLK) {
5467		val = intel_de_read(dev_priv, LCPLL_CTL);
5468		val &= ~LCPLL_CD_SOURCE_FCLK;
5469		intel_de_write(dev_priv, LCPLL_CTL, val);
5470
5471		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
5472				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
5473			drm_err(&dev_priv->drm,
5474				"Switching back to LCPLL failed\n");
5475	}
5476
5477	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
5478
5479	intel_update_cdclk(dev_priv);
5480	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
5481}
5482
5483/*
5484 * Package states C8 and deeper are really deep PC states that can only be
5485 * reached when all the devices on the system allow it, so even if the graphics
5486 * device allows PC8+, it doesn't mean the system will actually get to these
5487 * states. Our driver only allows PC8+ when going into runtime PM.
5488 *
5489 * The requirements for PC8+ are that all the outputs are disabled, the power
5490 * well is disabled and most interrupts are disabled, and these are also
5491 * requirements for runtime PM. When these conditions are met, we manually do
5492 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
5493 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
5494 * hang the machine.
5495 *
5496 * When we really reach PC8 or deeper states (not just when we allow it) we lose
5497 * the state of some registers, so when we come back from PC8+ we need to
5498 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
5499 * need to take care of the registers kept by RC6. Notice that this happens even
5500 * if we don't put the device in PCI D3 state (which is what currently happens
5501 * because of the runtime PM support).
5502 *
5503 * For more, read "Display Sequences for Package C8" on the hardware
5504 * documentation.
5505 */
5506static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
5507{
5508	u32 val;
5509
5510	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
5511
5512	if (HAS_PCH_LPT_LP(dev_priv)) {
5513		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5514		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5515		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5516	}
5517
5518	lpt_disable_clkout_dp(dev_priv);
5519	hsw_disable_lcpll(dev_priv, true, true);
5520}
5521
5522static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
5523{
5524	u32 val;
5525
5526	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
5527
5528	hsw_restore_lcpll(dev_priv);
5529	intel_init_pch_refclk(dev_priv);
5530
5531	if (HAS_PCH_LPT_LP(dev_priv)) {
5532		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5533		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
5534		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5535	}
5536}
5537
5538static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
5539				      bool enable)
5540{
5541	i915_reg_t reg;
5542	u32 reset_bits, val;
5543
5544	if (IS_IVYBRIDGE(dev_priv)) {
5545		reg = GEN7_MSG_CTL;
5546		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
5547	} else {
5548		reg = HSW_NDE_RSTWRN_OPT;
5549		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
5550	}
5551
5552	val = intel_de_read(dev_priv, reg);
5553
5554	if (enable)
5555		val |= reset_bits;
5556	else
5557		val &= ~reset_bits;
5558
5559	intel_de_write(dev_priv, reg, val);
5560}
5561
5562static void skl_display_core_init(struct drm_i915_private *dev_priv,
5563				  bool resume)
5564{
5565	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5566	struct i915_power_well *well;
5567
5568	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5569
5570	/* enable PCH reset handshake */
5571	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5572
5573	if (!HAS_DISPLAY(dev_priv))
5574		return;
5575
5576	/* enable PG1 and Misc I/O */
5577	mutex_lock(&power_domains->lock);
5578
5579	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5580	intel_power_well_enable(dev_priv, well);
5581
5582	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
5583	intel_power_well_enable(dev_priv, well);
5584
5585	mutex_unlock(&power_domains->lock);
5586
5587	intel_cdclk_init_hw(dev_priv);
5588
5589	gen9_dbuf_enable(dev_priv);
5590
5591	if (resume && intel_dmc_has_payload(dev_priv))
5592		intel_dmc_load_program(dev_priv);
5593}
5594
5595static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
5596{
5597	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5598	struct i915_power_well *well;
5599
5600	if (!HAS_DISPLAY(dev_priv))
5601		return;
5602
5603	gen9_disable_dc_states(dev_priv);
5604
5605	gen9_dbuf_disable(dev_priv);
5606
5607	intel_cdclk_uninit_hw(dev_priv);
5608
5609	/* The spec doesn't call for removing the reset handshake flag */
5610	/* disable PG1 and Misc I/O */
5611
5612	mutex_lock(&power_domains->lock);
5613
5614	/*
5615	 * BSpec says to keep the MISC IO power well enabled here, only
5616	 * remove our request for power well 1.
5617	 * Note that even though the driver's request is removed power well 1
5618	 * may stay enabled after this due to DMC's own request on it.
5619	 */
5620	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5621	intel_power_well_disable(dev_priv, well);
5622
5623	mutex_unlock(&power_domains->lock);
5624
5625	usleep_range(10, 30);		/* 10 us delay per Bspec */
5626}
5627
5628static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5629{
5630	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5631	struct i915_power_well *well;
5632
5633	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5634
5635	/*
5636	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5637	 * or else the reset will hang because there is no PCH to respond.
5638	 * Move the handshake programming to initialization sequence.
5639	 * Previously was left up to BIOS.
5640	 */
5641	intel_pch_reset_handshake(dev_priv, false);
5642
5643	if (!HAS_DISPLAY(dev_priv))
5644		return;
5645
5646	/* Enable PG1 */
5647	mutex_lock(&power_domains->lock);
5648
5649	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5650	intel_power_well_enable(dev_priv, well);
5651
5652	mutex_unlock(&power_domains->lock);
5653
5654	intel_cdclk_init_hw(dev_priv);
5655
5656	gen9_dbuf_enable(dev_priv);
5657
5658	if (resume && intel_dmc_has_payload(dev_priv))
5659		intel_dmc_load_program(dev_priv);
5660}
5661
5662static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
5663{
5664	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5665	struct i915_power_well *well;
5666
5667	if (!HAS_DISPLAY(dev_priv))
5668		return;
5669
5670	gen9_disable_dc_states(dev_priv);
5671
5672	gen9_dbuf_disable(dev_priv);
5673
5674	intel_cdclk_uninit_hw(dev_priv);
5675
5676	/* The spec doesn't call for removing the reset handshake flag */
5677
5678	/*
5679	 * Disable PW1 (PG1).
5680	 * Note that even though the driver's request is removed power well 1
5681	 * may stay enabled after this due to DMC's own request on it.
5682	 */
5683	mutex_lock(&power_domains->lock);
5684
5685	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5686	intel_power_well_disable(dev_priv, well);
5687
5688	mutex_unlock(&power_domains->lock);
5689
5690	usleep_range(10, 30);		/* 10 us delay per Bspec */
5691}
5692
5693static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5694{
5695	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5696	struct i915_power_well *well;
5697
5698	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5699
5700	/* 1. Enable PCH Reset Handshake */
5701	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5702
5703	if (!HAS_DISPLAY(dev_priv))
5704		return;
5705
5706	/* 2-3. */
5707	intel_combo_phy_init(dev_priv);
5708
5709	/*
5710	 * 4. Enable Power Well 1 (PG1).
5711	 *    The AUX IO power wells will be enabled on demand.
5712	 */
5713	mutex_lock(&power_domains->lock);
5714	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5715	intel_power_well_enable(dev_priv, well);
5716	mutex_unlock(&power_domains->lock);
5717
5718	/* 5. Enable CD clock */
5719	intel_cdclk_init_hw(dev_priv);
5720
5721	/* 6. Enable DBUF */
5722	gen9_dbuf_enable(dev_priv);
5723
5724	if (resume && intel_dmc_has_payload(dev_priv))
5725		intel_dmc_load_program(dev_priv);
5726}
5727
5728static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
5729{
5730	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5731	struct i915_power_well *well;
5732
5733	if (!HAS_DISPLAY(dev_priv))
5734		return;
5735
5736	gen9_disable_dc_states(dev_priv);
5737
5738	/* 1. Disable all display engine functions -> aready done */
5739
5740	/* 2. Disable DBUF */
5741	gen9_dbuf_disable(dev_priv);
5742
5743	/* 3. Disable CD clock */
5744	intel_cdclk_uninit_hw(dev_priv);
5745
5746	/*
5747	 * 4. Disable Power Well 1 (PG1).
5748	 *    The AUX IO power wells are toggled on demand, so they are already
5749	 *    disabled at this point.
5750	 */
5751	mutex_lock(&power_domains->lock);
5752	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5753	intel_power_well_disable(dev_priv, well);
5754	mutex_unlock(&power_domains->lock);
5755
5756	usleep_range(10, 30);		/* 10 us delay per Bspec */
5757
5758	/* 5. */
5759	intel_combo_phy_uninit(dev_priv);
5760}
5761
5762struct buddy_page_mask {
5763	u32 page_mask;
5764	u8 type;
5765	u8 num_channels;
5766};
5767
5768static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5769	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
5770	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,	.page_mask = 0xF },
5771	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5772	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
5773	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
5774	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
5775	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
5776	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
5777	{}
5778};
5779
5780static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5781	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5782	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
5783	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
5784	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
5785	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5786	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
5787	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
5788	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
5789	{}
5790};
5791
5792static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5793{
5794	enum intel_dram_type type = dev_priv->dram_info.type;
5795	u8 num_channels = dev_priv->dram_info.num_channels;
5796	const struct buddy_page_mask *table;
5797	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
5798	int config, i;
5799
5800	if (IS_ALDERLAKE_S(dev_priv) ||
5801	    IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
5802	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
5803		/* Wa_1409767108:tgl,dg1,adl-s */
5804		table = wa_1409767108_buddy_page_masks;
5805	else
5806		table = tgl_buddy_page_masks;
5807
5808	for (config = 0; table[config].page_mask != 0; config++)
5809		if (table[config].num_channels == num_channels &&
5810		    table[config].type == type)
5811			break;
5812
5813	if (table[config].page_mask == 0) {
5814		drm_dbg(&dev_priv->drm,
5815			"Unknown memory configuration; disabling address buddy logic.\n");
5816		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
5817			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
5818				       BW_BUDDY_DISABLE);
5819	} else {
5820		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
5821			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
5822				       table[config].page_mask);
5823
5824			/* Wa_22010178259:tgl,rkl */
5825			intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
5826				     BW_BUDDY_TLB_REQ_TIMER_MASK,
5827				     BW_BUDDY_TLB_REQ_TIMER(0x8));
5828		}
5829	}
5830}
5831
5832static void icl_display_core_init(struct drm_i915_private *dev_priv,
5833				  bool resume)
5834{
5835	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5836	struct i915_power_well *well;
5837	u32 val;
5838
5839	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5840
5841	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
5842	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
5843	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
5844		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
5845			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
5846
5847	/* 1. Enable PCH reset handshake. */
5848	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5849
5850	if (!HAS_DISPLAY(dev_priv))
5851		return;
5852
5853	/* 2. Initialize all combo phys */
5854	intel_combo_phy_init(dev_priv);
5855
5856	/*
5857	 * 3. Enable Power Well 1 (PG1).
5858	 *    The AUX IO power wells will be enabled on demand.
5859	 */
5860	mutex_lock(&power_domains->lock);
5861	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5862	intel_power_well_enable(dev_priv, well);
5863	mutex_unlock(&power_domains->lock);
5864
5865	/* 4. Enable CDCLK. */
5866	intel_cdclk_init_hw(dev_priv);
5867
5868	if (DISPLAY_VER(dev_priv) >= 12)
5869		gen12_dbuf_slices_config(dev_priv);
5870
5871	/* 5. Enable DBUF. */
5872	gen9_dbuf_enable(dev_priv);
5873
5874	/* 6. Setup MBUS. */
5875	icl_mbus_init(dev_priv);
5876
5877	/* 7. Program arbiter BW_BUDDY registers */
5878	if (DISPLAY_VER(dev_priv) >= 12)
5879		tgl_bw_buddy_init(dev_priv);
5880
5881	if (resume && intel_dmc_has_payload(dev_priv))
5882		intel_dmc_load_program(dev_priv);
5883
5884	/* Wa_14011508470 */
5885	if (DISPLAY_VER(dev_priv) == 12) {
5886		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
5887		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
5888		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
5889	}
5890
5891	/* Wa_14011503030:xelpd */
5892	if (DISPLAY_VER(dev_priv) >= 13)
5893		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
5894}
5895
5896static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5897{
5898	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5899	struct i915_power_well *well;
5900
5901	if (!HAS_DISPLAY(dev_priv))
5902		return;
5903
5904	gen9_disable_dc_states(dev_priv);
5905
5906	/* 1. Disable all display engine functions -> aready done */
5907
5908	/* 2. Disable DBUF */
5909	gen9_dbuf_disable(dev_priv);
5910
5911	/* 3. Disable CD clock */
5912	intel_cdclk_uninit_hw(dev_priv);
5913
5914	/*
5915	 * 4. Disable Power Well 1 (PG1).
5916	 *    The AUX IO power wells are toggled on demand, so they are already
5917	 *    disabled at this point.
5918	 */
5919	mutex_lock(&power_domains->lock);
5920	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5921	intel_power_well_disable(dev_priv, well);
5922	mutex_unlock(&power_domains->lock);
5923
5924	/* 5. */
5925	intel_combo_phy_uninit(dev_priv);
5926}
5927
5928static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5929{
5930	struct i915_power_well *cmn_bc =
5931		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5932	struct i915_power_well *cmn_d =
5933		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5934
5935	/*
5936	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5937	 * workaround never ever read DISPLAY_PHY_CONTROL, and
5938	 * instead maintain a shadow copy ourselves. Use the actual
5939	 * power well state and lane status to reconstruct the
5940	 * expected initial value.
5941	 */
5942	dev_priv->chv_phy_control =
5943		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5944		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5945		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5946		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5947		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5948
5949	/*
5950	 * If all lanes are disabled we leave the override disabled
5951	 * with all power down bits cleared to match the state we
5952	 * would use after disabling the port. Otherwise enable the
5953	 * override and set the lane powerdown bits accding to the
5954	 * current lane status.
5955	 */
5956	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5957		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5958		unsigned int mask;
5959
5960		mask = status & DPLL_PORTB_READY_MASK;
5961		if (mask == 0xf)
5962			mask = 0x0;
5963		else
5964			dev_priv->chv_phy_control |=
5965				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5966
5967		dev_priv->chv_phy_control |=
5968			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5969
5970		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5971		if (mask == 0xf)
5972			mask = 0x0;
5973		else
5974			dev_priv->chv_phy_control |=
5975				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5976
5977		dev_priv->chv_phy_control |=
5978			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5979
5980		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5981
5982		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5983	} else {
5984		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5985	}
5986
5987	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5988		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5989		unsigned int mask;
5990
5991		mask = status & DPLL_PORTD_READY_MASK;
5992
5993		if (mask == 0xf)
5994			mask = 0x0;
5995		else
5996			dev_priv->chv_phy_control |=
5997				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5998
5999		dev_priv->chv_phy_control |=
6000			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
6001
6002		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
6003
6004		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
6005	} else {
6006		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
6007	}
6008
6009	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
6010		    dev_priv->chv_phy_control);
6011
6012	/* Defer application of initial phy_control to enabling the powerwell */
6013}
6014
6015static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
6016{
6017	struct i915_power_well *cmn =
6018		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
6019	struct i915_power_well *disp2d =
6020		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
6021
6022	/* If the display might be already active skip this */
6023	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
6024	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
6025	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
6026		return;
6027
6028	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
6029
6030	/* cmnlane needs DPLL registers */
6031	disp2d->desc->ops->enable(dev_priv, disp2d);
6032
6033	/*
6034	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
6035	 * Need to assert and de-assert PHY SB reset by gating the
6036	 * common lane power, then un-gating it.
6037	 * Simply ungating isn't enough to reset the PHY enough to get
6038	 * ports and lanes running.
6039	 */
6040	cmn->desc->ops->disable(dev_priv, cmn);
6041}
6042
6043static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
6044{
6045	bool ret;
6046
6047	vlv_punit_get(dev_priv);
6048	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
6049	vlv_punit_put(dev_priv);
6050
6051	return ret;
6052}
6053
6054static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
6055{
6056	drm_WARN(&dev_priv->drm,
6057		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
6058		 "VED not power gated\n");
6059}
6060
6061static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
6062{
6063	static const struct pci_device_id isp_ids[] = {
6064		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
6065		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
6066		{}
6067	};
6068
6069	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
6070		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
6071		 "ISP not power gated\n");
6072}
6073
6074static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
6075
6076/**
6077 * intel_power_domains_init_hw - initialize hardware power domain state
6078 * @i915: i915 device instance
6079 * @resume: Called from resume code paths or not
6080 *
6081 * This function initializes the hardware power domain state and enables all
6082 * power wells belonging to the INIT power domain. Power wells in other
6083 * domains (and not in the INIT domain) are referenced or disabled by
6084 * intel_modeset_readout_hw_state(). After that the reference count of each
6085 * power well must match its HW enabled state, see
6086 * intel_power_domains_verify_state().
6087 *
6088 * It will return with power domains disabled (to be enabled later by
6089 * intel_power_domains_enable()) and must be paired with
6090 * intel_power_domains_driver_remove().
6091 */
6092void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
6093{
6094	struct i915_power_domains *power_domains = &i915->power_domains;
6095
6096	power_domains->initializing = true;
6097
6098	if (DISPLAY_VER(i915) >= 11) {
6099		icl_display_core_init(i915, resume);
6100	} else if (IS_CANNONLAKE(i915)) {
6101		cnl_display_core_init(i915, resume);
6102	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6103		bxt_display_core_init(i915, resume);
6104	} else if (DISPLAY_VER(i915) == 9) {
6105		skl_display_core_init(i915, resume);
 
 
6106	} else if (IS_CHERRYVIEW(i915)) {
6107		mutex_lock(&power_domains->lock);
6108		chv_phy_control_init(i915);
6109		mutex_unlock(&power_domains->lock);
6110		assert_isp_power_gated(i915);
6111	} else if (IS_VALLEYVIEW(i915)) {
6112		mutex_lock(&power_domains->lock);
6113		vlv_cmnlane_wa(i915);
6114		mutex_unlock(&power_domains->lock);
6115		assert_ved_power_gated(i915);
6116		assert_isp_power_gated(i915);
6117	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
6118		hsw_assert_cdclk(i915);
6119		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
6120	} else if (IS_IVYBRIDGE(i915)) {
6121		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
6122	}
6123
6124	/*
6125	 * Keep all power wells enabled for any dependent HW access during
6126	 * initialization and to make sure we keep BIOS enabled display HW
6127	 * resources powered until display HW readout is complete. We drop
6128	 * this reference in intel_power_domains_enable().
6129	 */
6130	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6131	power_domains->init_wakeref =
6132		intel_display_power_get(i915, POWER_DOMAIN_INIT);
6133
6134	/* Disable power support if the user asked so. */
6135	if (!i915->params.disable_power_well) {
6136		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
6137		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
6138									      POWER_DOMAIN_INIT);
6139	}
6140	intel_power_domains_sync_hw(i915);
6141
6142	power_domains->initializing = false;
6143}
6144
6145/**
6146 * intel_power_domains_driver_remove - deinitialize hw power domain state
6147 * @i915: i915 device instance
6148 *
6149 * De-initializes the display power domain HW state. It also ensures that the
6150 * device stays powered up so that the driver can be reloaded.
6151 *
6152 * It must be called with power domains already disabled (after a call to
6153 * intel_power_domains_disable()) and must be paired with
6154 * intel_power_domains_init_hw().
6155 */
6156void intel_power_domains_driver_remove(struct drm_i915_private *i915)
6157{
6158	intel_wakeref_t wakeref __maybe_unused =
6159		fetch_and_zero(&i915->power_domains.init_wakeref);
6160
6161	/* Remove the refcount we took to keep power well support disabled. */
6162	if (!i915->params.disable_power_well)
6163		intel_display_power_put(i915, POWER_DOMAIN_INIT,
6164					fetch_and_zero(&i915->power_domains.disable_wakeref));
6165
6166	intel_display_power_flush_work_sync(i915);
6167
6168	intel_power_domains_verify_state(i915);
6169
6170	/* Keep the power well enabled, but cancel its rpm wakeref. */
6171	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
6172}
6173
6174/**
6175 * intel_power_domains_enable - enable toggling of display power wells
6176 * @i915: i915 device instance
6177 *
6178 * Enable the ondemand enabling/disabling of the display power wells. Note that
6179 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
6180 * only at specific points of the display modeset sequence, thus they are not
6181 * affected by the intel_power_domains_enable()/disable() calls. The purpose
6182 * of these function is to keep the rest of power wells enabled until the end
6183 * of display HW readout (which will acquire the power references reflecting
6184 * the current HW state).
6185 */
6186void intel_power_domains_enable(struct drm_i915_private *i915)
6187{
6188	intel_wakeref_t wakeref __maybe_unused =
6189		fetch_and_zero(&i915->power_domains.init_wakeref);
6190
6191	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
6192	intel_power_domains_verify_state(i915);
6193}
6194
6195/**
6196 * intel_power_domains_disable - disable toggling of display power wells
6197 * @i915: i915 device instance
6198 *
6199 * Disable the ondemand enabling/disabling of the display power wells. See
6200 * intel_power_domains_enable() for which power wells this call controls.
6201 */
6202void intel_power_domains_disable(struct drm_i915_private *i915)
6203{
6204	struct i915_power_domains *power_domains = &i915->power_domains;
6205
6206	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6207	power_domains->init_wakeref =
6208		intel_display_power_get(i915, POWER_DOMAIN_INIT);
6209
6210	intel_power_domains_verify_state(i915);
6211}
6212
6213/**
6214 * intel_power_domains_suspend - suspend power domain state
6215 * @i915: i915 device instance
6216 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
6217 *
6218 * This function prepares the hardware power domain state before entering
6219 * system suspend.
6220 *
6221 * It must be called with power domains already disabled (after a call to
6222 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
6223 */
6224void intel_power_domains_suspend(struct drm_i915_private *i915,
6225				 enum i915_drm_suspend_mode suspend_mode)
6226{
6227	struct i915_power_domains *power_domains = &i915->power_domains;
6228	intel_wakeref_t wakeref __maybe_unused =
6229		fetch_and_zero(&power_domains->init_wakeref);
6230
6231	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
6232
6233	/*
6234	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
6235	 * support don't manually deinit the power domains. This also means the
6236	 * DMC firmware will stay active, it will power down any HW
6237	 * resources as required and also enable deeper system power states
6238	 * that would be blocked if the firmware was inactive.
6239	 */
6240	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
6241	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
6242	    intel_dmc_has_payload(i915)) {
6243		intel_display_power_flush_work(i915);
6244		intel_power_domains_verify_state(i915);
6245		return;
6246	}
6247
6248	/*
6249	 * Even if power well support was disabled we still want to disable
6250	 * power wells if power domains must be deinitialized for suspend.
6251	 */
6252	if (!i915->params.disable_power_well)
6253		intel_display_power_put(i915, POWER_DOMAIN_INIT,
6254					fetch_and_zero(&i915->power_domains.disable_wakeref));
6255
6256	intel_display_power_flush_work(i915);
6257	intel_power_domains_verify_state(i915);
6258
6259	if (DISPLAY_VER(i915) >= 11)
6260		icl_display_core_uninit(i915);
6261	else if (IS_CANNONLAKE(i915))
6262		cnl_display_core_uninit(i915);
6263	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
6264		bxt_display_core_uninit(i915);
6265	else if (DISPLAY_VER(i915) == 9)
6266		skl_display_core_uninit(i915);
 
 
6267
6268	power_domains->display_core_suspended = true;
6269}
6270
6271/**
6272 * intel_power_domains_resume - resume power domain state
6273 * @i915: i915 device instance
6274 *
6275 * This function resume the hardware power domain state during system resume.
6276 *
6277 * It will return with power domain support disabled (to be enabled later by
6278 * intel_power_domains_enable()) and must be paired with
6279 * intel_power_domains_suspend().
6280 */
6281void intel_power_domains_resume(struct drm_i915_private *i915)
6282{
6283	struct i915_power_domains *power_domains = &i915->power_domains;
6284
6285	if (power_domains->display_core_suspended) {
6286		intel_power_domains_init_hw(i915, true);
6287		power_domains->display_core_suspended = false;
6288	} else {
6289		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6290		power_domains->init_wakeref =
6291			intel_display_power_get(i915, POWER_DOMAIN_INIT);
6292	}
6293
6294	intel_power_domains_verify_state(i915);
6295}
6296
6297#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
6298
6299static void intel_power_domains_dump_info(struct drm_i915_private *i915)
6300{
6301	struct i915_power_domains *power_domains = &i915->power_domains;
6302	struct i915_power_well *power_well;
6303
6304	for_each_power_well(i915, power_well) {
6305		enum intel_display_power_domain domain;
6306
6307		drm_dbg(&i915->drm, "%-25s %d\n",
6308			power_well->desc->name, power_well->count);
6309
6310		for_each_power_domain(domain, power_well->desc->domains)
6311			drm_dbg(&i915->drm, "  %-23s %d\n",
6312				intel_display_power_domain_str(domain),
6313				power_domains->domain_use_count[domain]);
6314	}
6315}
6316
6317/**
6318 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
6319 * @i915: i915 device instance
6320 *
6321 * Verify if the reference count of each power well matches its HW enabled
6322 * state and the total refcount of the domains it belongs to. This must be
6323 * called after modeset HW state sanitization, which is responsible for
6324 * acquiring reference counts for any power wells in use and disabling the
6325 * ones left on by BIOS but not required by any active output.
6326 */
6327static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6328{
6329	struct i915_power_domains *power_domains = &i915->power_domains;
6330	struct i915_power_well *power_well;
6331	bool dump_domain_info;
6332
6333	mutex_lock(&power_domains->lock);
6334
6335	verify_async_put_domains_state(power_domains);
6336
6337	dump_domain_info = false;
6338	for_each_power_well(i915, power_well) {
6339		enum intel_display_power_domain domain;
6340		int domains_count;
6341		bool enabled;
6342
6343		enabled = power_well->desc->ops->is_enabled(i915, power_well);
6344		if ((power_well->count || power_well->desc->always_on) !=
6345		    enabled)
6346			drm_err(&i915->drm,
6347				"power well %s state mismatch (refcount %d/enabled %d)",
6348				power_well->desc->name,
6349				power_well->count, enabled);
6350
6351		domains_count = 0;
6352		for_each_power_domain(domain, power_well->desc->domains)
6353			domains_count += power_domains->domain_use_count[domain];
6354
6355		if (power_well->count != domains_count) {
6356			drm_err(&i915->drm,
6357				"power well %s refcount/domain refcount mismatch "
6358				"(refcount %d/domains refcount %d)\n",
6359				power_well->desc->name, power_well->count,
6360				domains_count);
6361			dump_domain_info = true;
6362		}
6363	}
6364
6365	if (dump_domain_info) {
6366		static bool dumped;
6367
6368		if (!dumped) {
6369			intel_power_domains_dump_info(i915);
6370			dumped = true;
6371		}
6372	}
6373
6374	mutex_unlock(&power_domains->lock);
6375}
6376
6377#else
6378
6379static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6380{
6381}
6382
6383#endif
6384
6385void intel_display_power_suspend_late(struct drm_i915_private *i915)
6386{
6387	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
6388	    IS_BROXTON(i915)) {
6389		bxt_enable_dc9(i915);
6390	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6391		hsw_enable_pc8(i915);
6392	}
6393
6394	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
6395	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
6396		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
6397}
6398
6399void intel_display_power_resume_early(struct drm_i915_private *i915)
6400{
6401	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
6402	    IS_BROXTON(i915)) {
6403		gen9_sanitize_dc_state(i915);
6404		bxt_disable_dc9(i915);
6405	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6406		hsw_disable_pc8(i915);
6407	}
6408
6409	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
6410	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
6411		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
6412}
6413
6414void intel_display_power_suspend(struct drm_i915_private *i915)
6415{
6416	if (DISPLAY_VER(i915) >= 11) {
6417		icl_display_core_uninit(i915);
6418		bxt_enable_dc9(i915);
6419	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6420		bxt_display_core_uninit(i915);
6421		bxt_enable_dc9(i915);
6422	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6423		hsw_enable_pc8(i915);
6424	}
6425}
6426
6427void intel_display_power_resume(struct drm_i915_private *i915)
6428{
6429	if (DISPLAY_VER(i915) >= 11) {
6430		bxt_disable_dc9(i915);
6431		icl_display_core_init(i915, true);
6432		if (intel_dmc_has_payload(i915)) {
6433			if (i915->dmc.allowed_dc_mask &
6434			    DC_STATE_EN_UPTO_DC6)
6435				skl_enable_dc6(i915);
6436			else if (i915->dmc.allowed_dc_mask &
6437				 DC_STATE_EN_UPTO_DC5)
6438				gen9_enable_dc5(i915);
6439		}
6440	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6441		bxt_disable_dc9(i915);
6442		bxt_display_core_init(i915, true);
6443		if (intel_dmc_has_payload(i915) &&
6444		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
6445			gen9_enable_dc5(i915);
6446	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6447		hsw_disable_pc8(i915);
6448	}
6449}
v5.9
   1/* SPDX-License-Identifier: MIT */
   2/*
   3 * Copyright © 2019 Intel Corporation
   4 */
   5
   6#include "display/intel_crt.h"
   7#include "display/intel_dp.h"
   8
   9#include "i915_drv.h"
  10#include "i915_irq.h"
  11#include "intel_cdclk.h"
  12#include "intel_combo_phy.h"
  13#include "intel_csr.h"
  14#include "intel_display_power.h"
 
  15#include "intel_display_types.h"
 
  16#include "intel_dpio_phy.h"
  17#include "intel_hotplug.h"
  18#include "intel_pm.h"
 
  19#include "intel_sideband.h"
  20#include "intel_tc.h"
  21#include "intel_vga.h"
  22
  23bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
  24					 enum i915_power_well_id power_well_id);
  25
  26const char *
  27intel_display_power_domain_str(enum intel_display_power_domain domain)
  28{
  29	switch (domain) {
  30	case POWER_DOMAIN_DISPLAY_CORE:
  31		return "DISPLAY_CORE";
  32	case POWER_DOMAIN_PIPE_A:
  33		return "PIPE_A";
  34	case POWER_DOMAIN_PIPE_B:
  35		return "PIPE_B";
  36	case POWER_DOMAIN_PIPE_C:
  37		return "PIPE_C";
  38	case POWER_DOMAIN_PIPE_D:
  39		return "PIPE_D";
  40	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
  41		return "PIPE_A_PANEL_FITTER";
  42	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
  43		return "PIPE_B_PANEL_FITTER";
  44	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
  45		return "PIPE_C_PANEL_FITTER";
  46	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
  47		return "PIPE_D_PANEL_FITTER";
  48	case POWER_DOMAIN_TRANSCODER_A:
  49		return "TRANSCODER_A";
  50	case POWER_DOMAIN_TRANSCODER_B:
  51		return "TRANSCODER_B";
  52	case POWER_DOMAIN_TRANSCODER_C:
  53		return "TRANSCODER_C";
  54	case POWER_DOMAIN_TRANSCODER_D:
  55		return "TRANSCODER_D";
  56	case POWER_DOMAIN_TRANSCODER_EDP:
  57		return "TRANSCODER_EDP";
  58	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
  59		return "TRANSCODER_VDSC_PW2";
  60	case POWER_DOMAIN_TRANSCODER_DSI_A:
  61		return "TRANSCODER_DSI_A";
  62	case POWER_DOMAIN_TRANSCODER_DSI_C:
  63		return "TRANSCODER_DSI_C";
  64	case POWER_DOMAIN_PORT_DDI_A_LANES:
  65		return "PORT_DDI_A_LANES";
  66	case POWER_DOMAIN_PORT_DDI_B_LANES:
  67		return "PORT_DDI_B_LANES";
  68	case POWER_DOMAIN_PORT_DDI_C_LANES:
  69		return "PORT_DDI_C_LANES";
  70	case POWER_DOMAIN_PORT_DDI_D_LANES:
  71		return "PORT_DDI_D_LANES";
  72	case POWER_DOMAIN_PORT_DDI_E_LANES:
  73		return "PORT_DDI_E_LANES";
  74	case POWER_DOMAIN_PORT_DDI_F_LANES:
  75		return "PORT_DDI_F_LANES";
  76	case POWER_DOMAIN_PORT_DDI_G_LANES:
  77		return "PORT_DDI_G_LANES";
  78	case POWER_DOMAIN_PORT_DDI_H_LANES:
  79		return "PORT_DDI_H_LANES";
  80	case POWER_DOMAIN_PORT_DDI_I_LANES:
  81		return "PORT_DDI_I_LANES";
  82	case POWER_DOMAIN_PORT_DDI_A_IO:
  83		return "PORT_DDI_A_IO";
  84	case POWER_DOMAIN_PORT_DDI_B_IO:
  85		return "PORT_DDI_B_IO";
  86	case POWER_DOMAIN_PORT_DDI_C_IO:
  87		return "PORT_DDI_C_IO";
  88	case POWER_DOMAIN_PORT_DDI_D_IO:
  89		return "PORT_DDI_D_IO";
  90	case POWER_DOMAIN_PORT_DDI_E_IO:
  91		return "PORT_DDI_E_IO";
  92	case POWER_DOMAIN_PORT_DDI_F_IO:
  93		return "PORT_DDI_F_IO";
  94	case POWER_DOMAIN_PORT_DDI_G_IO:
  95		return "PORT_DDI_G_IO";
  96	case POWER_DOMAIN_PORT_DDI_H_IO:
  97		return "PORT_DDI_H_IO";
  98	case POWER_DOMAIN_PORT_DDI_I_IO:
  99		return "PORT_DDI_I_IO";
 100	case POWER_DOMAIN_PORT_DSI:
 101		return "PORT_DSI";
 102	case POWER_DOMAIN_PORT_CRT:
 103		return "PORT_CRT";
 104	case POWER_DOMAIN_PORT_OTHER:
 105		return "PORT_OTHER";
 106	case POWER_DOMAIN_VGA:
 107		return "VGA";
 108	case POWER_DOMAIN_AUDIO:
 109		return "AUDIO";
 110	case POWER_DOMAIN_AUX_A:
 111		return "AUX_A";
 112	case POWER_DOMAIN_AUX_B:
 113		return "AUX_B";
 114	case POWER_DOMAIN_AUX_C:
 115		return "AUX_C";
 116	case POWER_DOMAIN_AUX_D:
 117		return "AUX_D";
 118	case POWER_DOMAIN_AUX_E:
 119		return "AUX_E";
 120	case POWER_DOMAIN_AUX_F:
 121		return "AUX_F";
 122	case POWER_DOMAIN_AUX_G:
 123		return "AUX_G";
 124	case POWER_DOMAIN_AUX_H:
 125		return "AUX_H";
 126	case POWER_DOMAIN_AUX_I:
 127		return "AUX_I";
 128	case POWER_DOMAIN_AUX_IO_A:
 129		return "AUX_IO_A";
 130	case POWER_DOMAIN_AUX_C_TBT:
 131		return "AUX_C_TBT";
 132	case POWER_DOMAIN_AUX_D_TBT:
 133		return "AUX_D_TBT";
 134	case POWER_DOMAIN_AUX_E_TBT:
 135		return "AUX_E_TBT";
 136	case POWER_DOMAIN_AUX_F_TBT:
 137		return "AUX_F_TBT";
 138	case POWER_DOMAIN_AUX_G_TBT:
 139		return "AUX_G_TBT";
 140	case POWER_DOMAIN_AUX_H_TBT:
 141		return "AUX_H_TBT";
 142	case POWER_DOMAIN_AUX_I_TBT:
 143		return "AUX_I_TBT";
 144	case POWER_DOMAIN_GMBUS:
 145		return "GMBUS";
 146	case POWER_DOMAIN_INIT:
 147		return "INIT";
 148	case POWER_DOMAIN_MODESET:
 149		return "MODESET";
 150	case POWER_DOMAIN_GT_IRQ:
 151		return "GT_IRQ";
 152	case POWER_DOMAIN_DPLL_DC_OFF:
 153		return "DPLL_DC_OFF";
 154	case POWER_DOMAIN_TC_COLD_OFF:
 155		return "TC_COLD_OFF";
 156	default:
 157		MISSING_CASE(domain);
 158		return "?";
 159	}
 160}
 161
 162static void intel_power_well_enable(struct drm_i915_private *dev_priv,
 163				    struct i915_power_well *power_well)
 164{
 165	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
 166	power_well->desc->ops->enable(dev_priv, power_well);
 167	power_well->hw_enabled = true;
 168}
 169
 170static void intel_power_well_disable(struct drm_i915_private *dev_priv,
 171				     struct i915_power_well *power_well)
 172{
 173	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
 174	power_well->hw_enabled = false;
 175	power_well->desc->ops->disable(dev_priv, power_well);
 176}
 177
 178static void intel_power_well_get(struct drm_i915_private *dev_priv,
 179				 struct i915_power_well *power_well)
 180{
 181	if (!power_well->count++)
 182		intel_power_well_enable(dev_priv, power_well);
 183}
 184
 185static void intel_power_well_put(struct drm_i915_private *dev_priv,
 186				 struct i915_power_well *power_well)
 187{
 188	drm_WARN(&dev_priv->drm, !power_well->count,
 189		 "Use count on power well %s is already zero",
 190		 power_well->desc->name);
 191
 192	if (!--power_well->count)
 193		intel_power_well_disable(dev_priv, power_well);
 194}
 195
 196/**
 197 * __intel_display_power_is_enabled - unlocked check for a power domain
 198 * @dev_priv: i915 device instance
 199 * @domain: power domain to check
 200 *
 201 * This is the unlocked version of intel_display_power_is_enabled() and should
 202 * only be used from error capture and recovery code where deadlocks are
 203 * possible.
 204 *
 205 * Returns:
 206 * True when the power domain is enabled, false otherwise.
 207 */
 208bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 209				      enum intel_display_power_domain domain)
 210{
 211	struct i915_power_well *power_well;
 212	bool is_enabled;
 213
 214	if (dev_priv->runtime_pm.suspended)
 215		return false;
 216
 217	is_enabled = true;
 218
 219	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
 220		if (power_well->desc->always_on)
 221			continue;
 222
 223		if (!power_well->hw_enabled) {
 224			is_enabled = false;
 225			break;
 226		}
 227	}
 228
 229	return is_enabled;
 230}
 231
 232/**
 233 * intel_display_power_is_enabled - check for a power domain
 234 * @dev_priv: i915 device instance
 235 * @domain: power domain to check
 236 *
 237 * This function can be used to check the hw power domain state. It is mostly
 238 * used in hardware state readout functions. Everywhere else code should rely
 239 * upon explicit power domain reference counting to ensure that the hardware
 240 * block is powered up before accessing it.
 241 *
 242 * Callers must hold the relevant modesetting locks to ensure that concurrent
 243 * threads can't disable the power well while the caller tries to read a few
 244 * registers.
 245 *
 246 * Returns:
 247 * True when the power domain is enabled, false otherwise.
 248 */
 249bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 250				    enum intel_display_power_domain domain)
 251{
 252	struct i915_power_domains *power_domains;
 253	bool ret;
 254
 255	power_domains = &dev_priv->power_domains;
 256
 257	mutex_lock(&power_domains->lock);
 258	ret = __intel_display_power_is_enabled(dev_priv, domain);
 259	mutex_unlock(&power_domains->lock);
 260
 261	return ret;
 262}
 263
 264/*
 265 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 266 * when not needed anymore. We have 4 registers that can request the power well
 267 * to be enabled, and it will only be disabled if none of the registers is
 268 * requesting it to be enabled.
 269 */
 270static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
 271				       u8 irq_pipe_mask, bool has_vga)
 272{
 273	if (has_vga)
 274		intel_vga_reset_io_mem(dev_priv);
 275
 276	if (irq_pipe_mask)
 277		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
 278}
 279
 280static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
 281				       u8 irq_pipe_mask)
 282{
 283	if (irq_pipe_mask)
 284		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
 285}
 286
 287#define ICL_AUX_PW_TO_CH(pw_idx)	\
 288	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
 289
 290#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
 291	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
 292
 293static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
 294				     struct i915_power_well *power_well)
 295{
 296	int pw_idx = power_well->desc->hsw.idx;
 297
 298	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
 299						 ICL_AUX_PW_TO_CH(pw_idx);
 300}
 301
 302static struct intel_digital_port *
 303aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
 304		       enum aux_ch aux_ch)
 305{
 306	struct intel_digital_port *dig_port = NULL;
 307	struct intel_encoder *encoder;
 308
 309	for_each_intel_encoder(&dev_priv->drm, encoder) {
 310		/* We'll check the MST primary port */
 311		if (encoder->type == INTEL_OUTPUT_DP_MST)
 312			continue;
 313
 314		dig_port = enc_to_dig_port(encoder);
 315		if (!dig_port)
 316			continue;
 317
 318		if (dig_port->aux_ch != aux_ch) {
 319			dig_port = NULL;
 320			continue;
 321		}
 322
 323		break;
 324	}
 325
 326	return dig_port;
 327}
 328
 
 
 
 
 
 
 
 
 
 329static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
 330					   struct i915_power_well *power_well,
 331					   bool timeout_expected)
 332{
 333	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 334	int pw_idx = power_well->desc->hsw.idx;
 335
 336	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
 337	if (intel_de_wait_for_set(dev_priv, regs->driver,
 338				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
 339		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
 340			    power_well->desc->name);
 341
 342		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
 343
 344	}
 345}
 346
 347static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
 348				     const struct i915_power_well_regs *regs,
 349				     int pw_idx)
 350{
 351	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
 352	u32 ret;
 353
 354	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
 355	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
 356	if (regs->kvmr.reg)
 357		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
 358	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
 359
 360	return ret;
 361}
 362
 363static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
 364					    struct i915_power_well *power_well)
 365{
 366	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 367	int pw_idx = power_well->desc->hsw.idx;
 368	bool disabled;
 369	u32 reqs;
 370
 371	/*
 372	 * Bspec doesn't require waiting for PWs to get disabled, but still do
 373	 * this for paranoia. The known cases where a PW will be forced on:
 374	 * - a KVMR request on any power well via the KVMR request register
 375	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
 376	 *   DEBUG request registers
 377	 * Skip the wait in case any of the request bits are set and print a
 378	 * diagnostic message.
 379	 */
 380	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
 381			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
 382		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
 383	if (disabled)
 384		return;
 385
 386	drm_dbg_kms(&dev_priv->drm,
 387		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
 388		    power_well->desc->name,
 389		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
 390}
 391
 392static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
 393					   enum skl_power_gate pg)
 394{
 395	/* Timeout 5us for PG#0, for other PGs 1us */
 396	drm_WARN_ON(&dev_priv->drm,
 397		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
 398					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
 399}
 400
 401static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 402				  struct i915_power_well *power_well)
 403{
 404	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 405	int pw_idx = power_well->desc->hsw.idx;
 406	u32 val;
 407
 408	if (power_well->desc->hsw.has_fuses) {
 409		enum skl_power_gate pg;
 410
 411		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
 412						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
 413		/*
 414		 * For PW1 we have to wait both for the PW0/PG0 fuse state
 415		 * before enabling the power well and PW1/PG1's own fuse
 416		 * state after the enabling. For all other power wells with
 417		 * fuses we only have to wait for that PW/PG's fuse state
 418		 * after the enabling.
 419		 */
 420		if (pg == SKL_PG1)
 421			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
 422	}
 423
 424	val = intel_de_read(dev_priv, regs->driver);
 425	intel_de_write(dev_priv, regs->driver,
 426		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 427
 428	hsw_wait_for_power_well_enable(dev_priv, power_well, false);
 429
 430	/* Display WA #1178: cnl */
 431	if (IS_CANNONLAKE(dev_priv) &&
 432	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
 433	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
 434		u32 val;
 435
 436		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
 437		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
 438		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
 439	}
 440
 441	if (power_well->desc->hsw.has_fuses) {
 442		enum skl_power_gate pg;
 443
 444		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
 445						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
 446		gen9_wait_for_power_well_fuses(dev_priv, pg);
 447	}
 448
 449	hsw_power_well_post_enable(dev_priv,
 450				   power_well->desc->hsw.irq_pipe_mask,
 451				   power_well->desc->hsw.has_vga);
 452}
 453
 454static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
 455				   struct i915_power_well *power_well)
 456{
 457	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 458	int pw_idx = power_well->desc->hsw.idx;
 459	u32 val;
 460
 461	hsw_power_well_pre_disable(dev_priv,
 462				   power_well->desc->hsw.irq_pipe_mask);
 463
 464	val = intel_de_read(dev_priv, regs->driver);
 465	intel_de_write(dev_priv, regs->driver,
 466		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
 467	hsw_wait_for_power_well_disable(dev_priv, power_well);
 468}
 469
 470#define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
 471
 472static void
 473icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 474				    struct i915_power_well *power_well)
 475{
 476	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 477	int pw_idx = power_well->desc->hsw.idx;
 478	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
 479	u32 val;
 480
 481	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 482
 483	val = intel_de_read(dev_priv, regs->driver);
 484	intel_de_write(dev_priv, regs->driver,
 485		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 486
 487	if (INTEL_GEN(dev_priv) < 12) {
 488		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
 489		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
 490			       val | ICL_LANE_ENABLE_AUX);
 491	}
 492
 493	hsw_wait_for_power_well_enable(dev_priv, power_well, false);
 494
 495	/* Display WA #1178: icl */
 496	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
 497	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
 498		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
 499		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
 500		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
 501	}
 502}
 503
 504static void
 505icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 506				     struct i915_power_well *power_well)
 507{
 508	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 509	int pw_idx = power_well->desc->hsw.idx;
 510	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
 511	u32 val;
 512
 513	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 514
 515	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
 516	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
 517		       val & ~ICL_LANE_ENABLE_AUX);
 518
 519	val = intel_de_read(dev_priv, regs->driver);
 520	intel_de_write(dev_priv, regs->driver,
 521		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
 522
 523	hsw_wait_for_power_well_disable(dev_priv, power_well);
 524}
 525
 526#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 527
 528static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
 529
 530static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
 531				      struct i915_power_well *power_well)
 532{
 533	int refs = hweight64(power_well->desc->domains &
 534			     async_put_domains_mask(&dev_priv->power_domains));
 535
 536	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
 537
 538	return refs;
 539}
 540
 541static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
 542					struct i915_power_well *power_well,
 543					struct intel_digital_port *dig_port)
 544{
 545	/* Bypass the check if all references are released asynchronously */
 546	if (power_well_async_ref_count(dev_priv, power_well) ==
 547	    power_well->count)
 548		return;
 549
 550	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
 551		return;
 552
 553	if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
 554		return;
 555
 556	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
 557}
 558
 559#else
 560
 561static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
 562					struct i915_power_well *power_well,
 563					struct intel_digital_port *dig_port)
 564{
 565}
 566
 567#endif
 568
 569#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
 570
 571static void icl_tc_cold_exit(struct drm_i915_private *i915)
 572{
 573	int ret, tries = 0;
 574
 575	while (1) {
 576		ret = sandybridge_pcode_write_timeout(i915,
 577						      ICL_PCODE_EXIT_TCCOLD,
 578						      0, 250, 1);
 579		if (ret != -EAGAIN || ++tries == 3)
 580			break;
 581		msleep(1);
 582	}
 583
 584	/* Spec states that TC cold exit can take up to 1ms to complete */
 585	if (!ret)
 586		msleep(1);
 587
 588	/* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
 589	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
 590		    "succeeded");
 591}
 592
 593static void
 594icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 595				 struct i915_power_well *power_well)
 596{
 597	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
 598	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
 599	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 600	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
 601	bool timeout_expected;
 602	u32 val;
 603
 604	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
 605
 606	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
 607	val &= ~DP_AUX_CH_CTL_TBT_IO;
 608	if (is_tbt)
 609		val |= DP_AUX_CH_CTL_TBT_IO;
 610	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
 611
 612	val = intel_de_read(dev_priv, regs->driver);
 613	intel_de_write(dev_priv, regs->driver,
 614		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
 615
 616	/*
 617	 * An AUX timeout is expected if the TBT DP tunnel is down,
 618	 * or need to enable AUX on a legacy TypeC port as part of the TC-cold
 619	 * exit sequence.
 620	 */
 621	timeout_expected = is_tbt;
 622	if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
 623		icl_tc_cold_exit(dev_priv);
 624		timeout_expected = true;
 625	}
 626
 627	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
 628
 629	if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
 630		enum tc_port tc_port;
 631
 632		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
 633		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
 634			       HIP_INDEX_VAL(tc_port, 0x2));
 635
 636		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
 637					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
 638			drm_warn(&dev_priv->drm,
 639				 "Timeout waiting TC uC health\n");
 640	}
 641}
 642
 643static void
 644icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 645				  struct i915_power_well *power_well)
 646{
 647	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
 648	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
 649
 650	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
 651
 652	hsw_power_well_disable(dev_priv, power_well);
 653}
 654
 655static void
 656icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
 657			  struct i915_power_well *power_well)
 658{
 659	int pw_idx = power_well->desc->hsw.idx;
 660	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
 661	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
 662
 663	if (is_tbt || intel_phy_is_tc(dev_priv, phy))
 664		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
 665	else if (IS_ICELAKE(dev_priv))
 666		return icl_combo_phy_aux_power_well_enable(dev_priv,
 667							   power_well);
 668	else
 669		return hsw_power_well_enable(dev_priv, power_well);
 670}
 671
 672static void
 673icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
 674			   struct i915_power_well *power_well)
 675{
 676	int pw_idx = power_well->desc->hsw.idx;
 677	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
 678	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
 679
 680	if (is_tbt || intel_phy_is_tc(dev_priv, phy))
 681		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
 682	else if (IS_ICELAKE(dev_priv))
 683		return icl_combo_phy_aux_power_well_disable(dev_priv,
 684							    power_well);
 685	else
 686		return hsw_power_well_disable(dev_priv, power_well);
 687}
 688
 689/*
 690 * We should only use the power well if we explicitly asked the hardware to
 691 * enable it, so check if it's enabled and also check if we've requested it to
 692 * be enabled.
 693 */
 694static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
 695				   struct i915_power_well *power_well)
 696{
 697	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 698	enum i915_power_well_id id = power_well->desc->id;
 699	int pw_idx = power_well->desc->hsw.idx;
 700	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
 701		   HSW_PWR_WELL_CTL_STATE(pw_idx);
 702	u32 val;
 703
 704	val = intel_de_read(dev_priv, regs->driver);
 705
 706	/*
 707	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
 708	 * and the MISC_IO PW will be not restored, so check instead for the
 709	 * BIOS's own request bits, which are forced-on for these power wells
 710	 * when exiting DC5/6.
 711	 */
 712	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
 713	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
 714		val |= intel_de_read(dev_priv, regs->bios);
 715
 716	return (val & mask) == mask;
 717}
 718
 719static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 720{
 721	drm_WARN_ONCE(&dev_priv->drm,
 722		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
 723		      "DC9 already programmed to be enabled.\n");
 724	drm_WARN_ONCE(&dev_priv->drm,
 725		      intel_de_read(dev_priv, DC_STATE_EN) &
 726		      DC_STATE_EN_UPTO_DC5,
 727		      "DC5 still not disabled to enable DC9.\n");
 728	drm_WARN_ONCE(&dev_priv->drm,
 729		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
 730		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
 731		      "Power well 2 on.\n");
 732	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
 733		      "Interrupts not disabled yet.\n");
 734
 735	 /*
 736	  * TODO: check for the following to verify the conditions to enter DC9
 737	  * state are satisfied:
 738	  * 1] Check relevant display engine registers to verify if mode set
 739	  * disable sequence was followed.
 740	  * 2] Check if display uninitialize sequence is initialized.
 741	  */
 742}
 743
 744static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 745{
 746	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
 747		      "Interrupts not disabled yet.\n");
 748	drm_WARN_ONCE(&dev_priv->drm,
 749		      intel_de_read(dev_priv, DC_STATE_EN) &
 750		      DC_STATE_EN_UPTO_DC5,
 751		      "DC5 still not disabled.\n");
 752
 753	 /*
 754	  * TODO: check for the following to verify DC9 state was indeed
 755	  * entered before programming to disable it:
 756	  * 1] Check relevant display engine registers to verify if mode
 757	  *  set disable sequence was followed.
 758	  * 2] Check if display uninitialize sequence is initialized.
 759	  */
 760}
 761
 762static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
 763				u32 state)
 764{
 765	int rewrites = 0;
 766	int rereads = 0;
 767	u32 v;
 768
 769	intel_de_write(dev_priv, DC_STATE_EN, state);
 770
 771	/* It has been observed that disabling the dc6 state sometimes
 772	 * doesn't stick and dmc keeps returning old value. Make sure
 773	 * the write really sticks enough times and also force rewrite until
 774	 * we are confident that state is exactly what we want.
 775	 */
 776	do  {
 777		v = intel_de_read(dev_priv, DC_STATE_EN);
 778
 779		if (v != state) {
 780			intel_de_write(dev_priv, DC_STATE_EN, state);
 781			rewrites++;
 782			rereads = 0;
 783		} else if (rereads++ > 5) {
 784			break;
 785		}
 786
 787	} while (rewrites < 100);
 788
 789	if (v != state)
 790		drm_err(&dev_priv->drm,
 791			"Writing dc state to 0x%x failed, now 0x%x\n",
 792			state, v);
 793
 794	/* Most of the times we need one retry, avoid spam */
 795	if (rewrites > 1)
 796		drm_dbg_kms(&dev_priv->drm,
 797			    "Rewrote dc state to 0x%x %d times\n",
 798			    state, rewrites);
 799}
 800
 801static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
 802{
 803	u32 mask;
 804
 805	mask = DC_STATE_EN_UPTO_DC5;
 806
 807	if (INTEL_GEN(dev_priv) >= 12)
 808		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
 809					  | DC_STATE_EN_DC9;
 810	else if (IS_GEN(dev_priv, 11))
 811		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
 812	else if (IS_GEN9_LP(dev_priv))
 813		mask |= DC_STATE_EN_DC9;
 814	else
 815		mask |= DC_STATE_EN_UPTO_DC6;
 816
 817	return mask;
 818}
 819
 820static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
 821{
 822	u32 val;
 823
 
 
 
 824	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
 825
 826	drm_dbg_kms(&dev_priv->drm,
 827		    "Resetting DC state tracking from %02x to %02x\n",
 828		    dev_priv->csr.dc_state, val);
 829	dev_priv->csr.dc_state = val;
 830}
 831
 832/**
 833 * gen9_set_dc_state - set target display C power state
 834 * @dev_priv: i915 device instance
 835 * @state: target DC power state
 836 * - DC_STATE_DISABLE
 837 * - DC_STATE_EN_UPTO_DC5
 838 * - DC_STATE_EN_UPTO_DC6
 839 * - DC_STATE_EN_DC9
 840 *
 841 * Signal to DMC firmware/HW the target DC power state passed in @state.
 842 * DMC/HW can turn off individual display clocks and power rails when entering
 843 * a deeper DC power state (higher in number) and turns these back when exiting
 844 * that state to a shallower power state (lower in number). The HW will decide
 845 * when to actually enter a given state on an on-demand basis, for instance
 846 * depending on the active state of display pipes. The state of display
 847 * registers backed by affected power rails are saved/restored as needed.
 848 *
 849 * Based on the above enabling a deeper DC power state is asynchronous wrt.
 850 * enabling it. Disabling a deeper power state is synchronous: for instance
 851 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 852 * back on and register state is restored. This is guaranteed by the MMIO write
 853 * to DC_STATE_EN blocking until the state is restored.
 854 */
 855static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
 856{
 857	u32 val;
 858	u32 mask;
 859
 
 
 
 860	if (drm_WARN_ON_ONCE(&dev_priv->drm,
 861			     state & ~dev_priv->csr.allowed_dc_mask))
 862		state &= dev_priv->csr.allowed_dc_mask;
 863
 864	val = intel_de_read(dev_priv, DC_STATE_EN);
 865	mask = gen9_dc_mask(dev_priv);
 866	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
 867		    val & mask, state);
 868
 869	/* Check if DMC is ignoring our DC state requests */
 870	if ((val & mask) != dev_priv->csr.dc_state)
 871		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
 872			dev_priv->csr.dc_state, val & mask);
 873
 874	val &= ~mask;
 875	val |= state;
 876
 877	gen9_write_dc_state(dev_priv, val);
 878
 879	dev_priv->csr.dc_state = val & mask;
 880}
 881
 882static u32
 883sanitize_target_dc_state(struct drm_i915_private *dev_priv,
 884			 u32 target_dc_state)
 885{
 886	u32 states[] = {
 887		DC_STATE_EN_UPTO_DC6,
 888		DC_STATE_EN_UPTO_DC5,
 889		DC_STATE_EN_DC3CO,
 890		DC_STATE_DISABLE,
 891	};
 892	int i;
 893
 894	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
 895		if (target_dc_state != states[i])
 896			continue;
 897
 898		if (dev_priv->csr.allowed_dc_mask & target_dc_state)
 899			break;
 900
 901		target_dc_state = states[i + 1];
 902	}
 903
 904	return target_dc_state;
 905}
 906
 907static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
 908{
 909	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
 910	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 911}
 912
 913static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
 914{
 915	u32 val;
 916
 917	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
 918	val = intel_de_read(dev_priv, DC_STATE_EN);
 919	val &= ~DC_STATE_DC3CO_STATUS;
 920	intel_de_write(dev_priv, DC_STATE_EN, val);
 921	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 922	/*
 923	 * Delay of 200us DC3CO Exit time B.Spec 49196
 924	 */
 925	usleep_range(200, 210);
 926}
 927
 928static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 929{
 930	assert_can_enable_dc9(dev_priv);
 931
 932	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
 933	/*
 934	 * Power sequencer reset is not needed on
 935	 * platforms with South Display Engine on PCH,
 936	 * because PPS registers are always on.
 937	 */
 938	if (!HAS_PCH_SPLIT(dev_priv))
 939		intel_power_sequencer_reset(dev_priv);
 940	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 941}
 942
 943static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
 944{
 945	assert_can_disable_dc9(dev_priv);
 946
 947	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
 948
 949	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 950
 951	intel_pps_unlock_regs_wa(dev_priv);
 952}
 953
 954static void assert_csr_loaded(struct drm_i915_private *dev_priv)
 955{
 956	drm_WARN_ONCE(&dev_priv->drm,
 957		      !intel_de_read(dev_priv, CSR_PROGRAM(0)),
 958		      "CSR program storage start is NULL\n");
 959	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
 960		      "CSR SSP Base Not fine\n");
 961	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
 962		      "CSR HTP Not fine\n");
 963}
 964
 965static struct i915_power_well *
 966lookup_power_well(struct drm_i915_private *dev_priv,
 967		  enum i915_power_well_id power_well_id)
 968{
 969	struct i915_power_well *power_well;
 970
 971	for_each_power_well(dev_priv, power_well)
 972		if (power_well->desc->id == power_well_id)
 973			return power_well;
 974
 975	/*
 976	 * It's not feasible to add error checking code to the callers since
 977	 * this condition really shouldn't happen and it doesn't even make sense
 978	 * to abort things like display initialization sequences. Just return
 979	 * the first power well and hope the WARN gets reported so we can fix
 980	 * our driver.
 981	 */
 982	drm_WARN(&dev_priv->drm, 1,
 983		 "Power well %d not defined for this platform\n",
 984		 power_well_id);
 985	return &dev_priv->power_domains.power_wells[0];
 986}
 987
 988/**
 989 * intel_display_power_set_target_dc_state - Set target dc state.
 990 * @dev_priv: i915 device
 991 * @state: state which needs to be set as target_dc_state.
 992 *
 993 * This function set the "DC off" power well target_dc_state,
 994 * based upon this target_dc_stste, "DC off" power well will
 995 * enable desired DC state.
 996 */
 997void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
 998					     u32 state)
 999{
1000	struct i915_power_well *power_well;
1001	bool dc_off_enabled;
1002	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1003
1004	mutex_lock(&power_domains->lock);
1005	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
1006
1007	if (drm_WARN_ON(&dev_priv->drm, !power_well))
1008		goto unlock;
1009
1010	state = sanitize_target_dc_state(dev_priv, state);
1011
1012	if (state == dev_priv->csr.target_dc_state)
1013		goto unlock;
1014
1015	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1016							   power_well);
1017	/*
1018	 * If DC off power well is disabled, need to enable and disable the
1019	 * DC off power well to effect target DC state.
1020	 */
1021	if (!dc_off_enabled)
1022		power_well->desc->ops->enable(dev_priv, power_well);
1023
1024	dev_priv->csr.target_dc_state = state;
1025
1026	if (!dc_off_enabled)
1027		power_well->desc->ops->disable(dev_priv, power_well);
1028
1029unlock:
1030	mutex_unlock(&power_domains->lock);
1031}
1032
1033static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
1034{
1035	enum i915_power_well_id high_pg;
1036
1037	/* Power wells at this level and above must be disabled for DC5 entry */
1038	if (INTEL_GEN(dev_priv) >= 12)
1039		high_pg = ICL_DISP_PW_3;
1040	else
1041		high_pg = SKL_DISP_PW_2;
1042
1043	drm_WARN_ONCE(&dev_priv->drm,
1044		      intel_display_power_well_is_enabled(dev_priv, high_pg),
1045		      "Power wells above platform's DC5 limit still enabled.\n");
1046
1047	drm_WARN_ONCE(&dev_priv->drm,
1048		      (intel_de_read(dev_priv, DC_STATE_EN) &
1049		       DC_STATE_EN_UPTO_DC5),
1050		      "DC5 already programmed to be enabled.\n");
1051	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1052
1053	assert_csr_loaded(dev_priv);
1054}
1055
1056static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1057{
1058	assert_can_enable_dc5(dev_priv);
1059
1060	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
1061
1062	/* Wa Display #1183: skl,kbl,cfl */
1063	if (IS_GEN9_BC(dev_priv))
1064		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1065			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1066
1067	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1068}
1069
1070static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1071{
1072	drm_WARN_ONCE(&dev_priv->drm,
1073		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1074		      "Backlight is not disabled.\n");
1075	drm_WARN_ONCE(&dev_priv->drm,
1076		      (intel_de_read(dev_priv, DC_STATE_EN) &
1077		       DC_STATE_EN_UPTO_DC6),
1078		      "DC6 already programmed to be enabled.\n");
1079
1080	assert_csr_loaded(dev_priv);
1081}
1082
1083static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1084{
1085	assert_can_enable_dc6(dev_priv);
1086
1087	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1088
1089	/* Wa Display #1183: skl,kbl,cfl */
1090	if (IS_GEN9_BC(dev_priv))
1091		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1092			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1093
1094	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1095}
1096
1097static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1098				   struct i915_power_well *power_well)
1099{
1100	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1101	int pw_idx = power_well->desc->hsw.idx;
1102	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1103	u32 bios_req = intel_de_read(dev_priv, regs->bios);
1104
1105	/* Take over the request bit if set by BIOS. */
1106	if (bios_req & mask) {
1107		u32 drv_req = intel_de_read(dev_priv, regs->driver);
1108
1109		if (!(drv_req & mask))
1110			intel_de_write(dev_priv, regs->driver, drv_req | mask);
1111		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1112	}
1113}
1114
1115static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1116					   struct i915_power_well *power_well)
1117{
1118	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1119}
1120
1121static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1122					    struct i915_power_well *power_well)
1123{
1124	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1125}
1126
1127static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1128					    struct i915_power_well *power_well)
1129{
1130	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1131}
1132
1133static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1134{
1135	struct i915_power_well *power_well;
1136
1137	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1138	if (power_well->count > 0)
1139		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1140
1141	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1142	if (power_well->count > 0)
1143		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1144
1145	if (IS_GEMINILAKE(dev_priv)) {
1146		power_well = lookup_power_well(dev_priv,
1147					       GLK_DISP_PW_DPIO_CMN_C);
1148		if (power_well->count > 0)
1149			bxt_ddi_phy_verify_state(dev_priv,
1150						 power_well->desc->bxt.phy);
1151	}
1152}
1153
1154static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1155					   struct i915_power_well *power_well)
1156{
1157	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1158		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1159}
1160
1161static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1162{
1163	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1164	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
1165
1166	drm_WARN(&dev_priv->drm,
1167		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1168		 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1169		 hw_enabled_dbuf_slices,
1170		 enabled_dbuf_slices);
1171}
1172
1173static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1174{
1175	struct intel_cdclk_config cdclk_config = {};
1176
1177	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
1178		tgl_disable_dc3co(dev_priv);
1179		return;
1180	}
1181
1182	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1183
 
 
 
1184	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1185	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
1186	drm_WARN_ON(&dev_priv->drm,
1187		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1188					      &cdclk_config));
1189
1190	gen9_assert_dbuf_enabled(dev_priv);
1191
1192	if (IS_GEN9_LP(dev_priv))
1193		bxt_verify_ddi_phy_power_wells(dev_priv);
1194
1195	if (INTEL_GEN(dev_priv) >= 11)
1196		/*
1197		 * DMC retains HW context only for port A, the other combo
1198		 * PHY's HW context for port B is lost after DC transitions,
1199		 * so we need to restore it manually.
1200		 */
1201		intel_combo_phy_init(dev_priv);
1202}
1203
1204static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1205					  struct i915_power_well *power_well)
1206{
1207	gen9_disable_dc_states(dev_priv);
1208}
1209
1210static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1211					   struct i915_power_well *power_well)
1212{
1213	if (!dev_priv->csr.dmc_payload)
1214		return;
1215
1216	switch (dev_priv->csr.target_dc_state) {
1217	case DC_STATE_EN_DC3CO:
1218		tgl_enable_dc3co(dev_priv);
1219		break;
1220	case DC_STATE_EN_UPTO_DC6:
1221		skl_enable_dc6(dev_priv);
1222		break;
1223	case DC_STATE_EN_UPTO_DC5:
1224		gen9_enable_dc5(dev_priv);
1225		break;
1226	}
1227}
1228
1229static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1230					 struct i915_power_well *power_well)
1231{
1232}
1233
1234static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1235					   struct i915_power_well *power_well)
1236{
1237}
1238
1239static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1240					     struct i915_power_well *power_well)
1241{
1242	return true;
1243}
1244
1245static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1246					 struct i915_power_well *power_well)
1247{
1248	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1249		i830_enable_pipe(dev_priv, PIPE_A);
1250	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1251		i830_enable_pipe(dev_priv, PIPE_B);
1252}
1253
1254static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1255					  struct i915_power_well *power_well)
1256{
1257	i830_disable_pipe(dev_priv, PIPE_B);
1258	i830_disable_pipe(dev_priv, PIPE_A);
1259}
1260
1261static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1262					  struct i915_power_well *power_well)
1263{
1264	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1265		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1266}
1267
1268static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1269					  struct i915_power_well *power_well)
1270{
1271	if (power_well->count > 0)
1272		i830_pipes_power_well_enable(dev_priv, power_well);
1273	else
1274		i830_pipes_power_well_disable(dev_priv, power_well);
1275}
1276
1277static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1278			       struct i915_power_well *power_well, bool enable)
1279{
1280	int pw_idx = power_well->desc->vlv.idx;
1281	u32 mask;
1282	u32 state;
1283	u32 ctrl;
1284
1285	mask = PUNIT_PWRGT_MASK(pw_idx);
1286	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1287			 PUNIT_PWRGT_PWR_GATE(pw_idx);
1288
1289	vlv_punit_get(dev_priv);
1290
1291#define COND \
1292	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1293
1294	if (COND)
1295		goto out;
1296
1297	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1298	ctrl &= ~mask;
1299	ctrl |= state;
1300	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1301
1302	if (wait_for(COND, 100))
1303		drm_err(&dev_priv->drm,
1304			"timeout setting power well state %08x (%08x)\n",
1305			state,
1306			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1307
1308#undef COND
1309
1310out:
1311	vlv_punit_put(dev_priv);
1312}
1313
1314static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1315				  struct i915_power_well *power_well)
1316{
1317	vlv_set_power_well(dev_priv, power_well, true);
1318}
1319
1320static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1321				   struct i915_power_well *power_well)
1322{
1323	vlv_set_power_well(dev_priv, power_well, false);
1324}
1325
1326static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1327				   struct i915_power_well *power_well)
1328{
1329	int pw_idx = power_well->desc->vlv.idx;
1330	bool enabled = false;
1331	u32 mask;
1332	u32 state;
1333	u32 ctrl;
1334
1335	mask = PUNIT_PWRGT_MASK(pw_idx);
1336	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1337
1338	vlv_punit_get(dev_priv);
1339
1340	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1341	/*
1342	 * We only ever set the power-on and power-gate states, anything
1343	 * else is unexpected.
1344	 */
1345	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1346		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1347	if (state == ctrl)
1348		enabled = true;
1349
1350	/*
1351	 * A transient state at this point would mean some unexpected party
1352	 * is poking at the power controls too.
1353	 */
1354	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1355	drm_WARN_ON(&dev_priv->drm, ctrl != state);
1356
1357	vlv_punit_put(dev_priv);
1358
1359	return enabled;
1360}
1361
1362static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1363{
1364	u32 val;
1365
1366	/*
1367	 * On driver load, a pipe may be active and driving a DSI display.
1368	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1369	 * (and never recovering) in this case. intel_dsi_post_disable() will
1370	 * clear it when we turn off the display.
1371	 */
1372	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1373	val &= DPOUNIT_CLOCK_GATE_DISABLE;
1374	val |= VRHUNIT_CLOCK_GATE_DISABLE;
1375	intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1376
1377	/*
1378	 * Disable trickle feed and enable pnd deadline calculation
1379	 */
1380	intel_de_write(dev_priv, MI_ARB_VLV,
1381		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1382	intel_de_write(dev_priv, CBR1_VLV, 0);
1383
1384	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1385	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1386		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1387					 1000));
1388}
1389
1390static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1391{
1392	struct intel_encoder *encoder;
1393	enum pipe pipe;
1394
1395	/*
1396	 * Enable the CRI clock source so we can get at the
1397	 * display and the reference clock for VGA
1398	 * hotplug / manual detection. Supposedly DSI also
1399	 * needs the ref clock up and running.
1400	 *
1401	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1402	 */
1403	for_each_pipe(dev_priv, pipe) {
1404		u32 val = intel_de_read(dev_priv, DPLL(pipe));
1405
1406		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1407		if (pipe != PIPE_A)
1408			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1409
1410		intel_de_write(dev_priv, DPLL(pipe), val);
1411	}
1412
1413	vlv_init_display_clock_gating(dev_priv);
1414
1415	spin_lock_irq(&dev_priv->irq_lock);
1416	valleyview_enable_display_irqs(dev_priv);
1417	spin_unlock_irq(&dev_priv->irq_lock);
1418
1419	/*
1420	 * During driver initialization/resume we can avoid restoring the
1421	 * part of the HW/SW state that will be inited anyway explicitly.
1422	 */
1423	if (dev_priv->power_domains.initializing)
1424		return;
1425
1426	intel_hpd_init(dev_priv);
 
1427
1428	/* Re-enable the ADPA, if we have one */
1429	for_each_intel_encoder(&dev_priv->drm, encoder) {
1430		if (encoder->type == INTEL_OUTPUT_ANALOG)
1431			intel_crt_reset(&encoder->base);
1432	}
1433
1434	intel_vga_redisable_power_on(dev_priv);
1435
1436	intel_pps_unlock_regs_wa(dev_priv);
1437}
1438
1439static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1440{
1441	spin_lock_irq(&dev_priv->irq_lock);
1442	valleyview_disable_display_irqs(dev_priv);
1443	spin_unlock_irq(&dev_priv->irq_lock);
1444
1445	/* make sure we're done processing display irqs */
1446	intel_synchronize_irq(dev_priv);
1447
1448	intel_power_sequencer_reset(dev_priv);
1449
1450	/* Prevent us from re-enabling polling on accident in late suspend */
1451	if (!dev_priv->drm.dev->power.is_suspended)
1452		intel_hpd_poll_init(dev_priv);
1453}
1454
1455static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1456					  struct i915_power_well *power_well)
1457{
1458	vlv_set_power_well(dev_priv, power_well, true);
1459
1460	vlv_display_power_well_init(dev_priv);
1461}
1462
1463static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1464					   struct i915_power_well *power_well)
1465{
1466	vlv_display_power_well_deinit(dev_priv);
1467
1468	vlv_set_power_well(dev_priv, power_well, false);
1469}
1470
1471static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1472					   struct i915_power_well *power_well)
1473{
1474	/* since ref/cri clock was enabled */
1475	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1476
1477	vlv_set_power_well(dev_priv, power_well, true);
1478
1479	/*
1480	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1481	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1482	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1483	 *   b.	The other bits such as sfr settings / modesel may all
1484	 *	be set to 0.
1485	 *
1486	 * This should only be done on init and resume from S3 with
1487	 * both PLLs disabled, or we risk losing DPIO and PLL
1488	 * synchronization.
1489	 */
1490	intel_de_write(dev_priv, DPIO_CTL,
1491		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
1492}
1493
1494static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1495					    struct i915_power_well *power_well)
1496{
1497	enum pipe pipe;
1498
1499	for_each_pipe(dev_priv, pipe)
1500		assert_pll_disabled(dev_priv, pipe);
1501
1502	/* Assert common reset */
1503	intel_de_write(dev_priv, DPIO_CTL,
1504		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1505
1506	vlv_set_power_well(dev_priv, power_well, false);
1507}
1508
1509#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1510
1511#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1512
1513static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1514{
1515	struct i915_power_well *cmn_bc =
1516		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1517	struct i915_power_well *cmn_d =
1518		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1519	u32 phy_control = dev_priv->chv_phy_control;
1520	u32 phy_status = 0;
1521	u32 phy_status_mask = 0xffffffff;
1522
1523	/*
1524	 * The BIOS can leave the PHY is some weird state
1525	 * where it doesn't fully power down some parts.
1526	 * Disable the asserts until the PHY has been fully
1527	 * reset (ie. the power well has been disabled at
1528	 * least once).
1529	 */
1530	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1531		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1532				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1533				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1534				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1535				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1536				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1537
1538	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1539		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1540				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1541				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1542
1543	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1544		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1545
1546		/* this assumes override is only used to enable lanes */
1547		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1548			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1549
1550		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1551			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1552
1553		/* CL1 is on whenever anything is on in either channel */
1554		if (BITS_SET(phy_control,
1555			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1556			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1557			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1558
1559		/*
1560		 * The DPLLB check accounts for the pipe B + port A usage
1561		 * with CL2 powered up but all the lanes in the second channel
1562		 * powered down.
1563		 */
1564		if (BITS_SET(phy_control,
1565			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1566		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1567			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1568
1569		if (BITS_SET(phy_control,
1570			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1571			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1572		if (BITS_SET(phy_control,
1573			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1574			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1575
1576		if (BITS_SET(phy_control,
1577			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1578			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1579		if (BITS_SET(phy_control,
1580			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1581			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1582	}
1583
1584	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1585		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1586
1587		/* this assumes override is only used to enable lanes */
1588		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1589			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1590
1591		if (BITS_SET(phy_control,
1592			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1593			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1594
1595		if (BITS_SET(phy_control,
1596			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1597			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1598		if (BITS_SET(phy_control,
1599			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1600			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1601	}
1602
1603	phy_status &= phy_status_mask;
1604
1605	/*
1606	 * The PHY may be busy with some initial calibration and whatnot,
1607	 * so the power state can take a while to actually change.
1608	 */
1609	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1610				       phy_status_mask, phy_status, 10))
1611		drm_err(&dev_priv->drm,
1612			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1613			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1614			phy_status, dev_priv->chv_phy_control);
1615}
1616
1617#undef BITS_SET
1618
1619static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1620					   struct i915_power_well *power_well)
1621{
1622	enum dpio_phy phy;
1623	enum pipe pipe;
1624	u32 tmp;
1625
1626	drm_WARN_ON_ONCE(&dev_priv->drm,
1627			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1628			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1629
1630	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1631		pipe = PIPE_A;
1632		phy = DPIO_PHY0;
1633	} else {
1634		pipe = PIPE_C;
1635		phy = DPIO_PHY1;
1636	}
1637
1638	/* since ref/cri clock was enabled */
1639	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1640	vlv_set_power_well(dev_priv, power_well, true);
1641
1642	/* Poll for phypwrgood signal */
1643	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1644				  PHY_POWERGOOD(phy), 1))
1645		drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
1646			phy);
1647
1648	vlv_dpio_get(dev_priv);
1649
1650	/* Enable dynamic power down */
1651	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1652	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1653		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1654	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1655
1656	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1657		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1658		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1659		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1660	} else {
1661		/*
1662		 * Force the non-existing CL2 off. BXT does this
1663		 * too, so maybe it saves some power even though
1664		 * CL2 doesn't exist?
1665		 */
1666		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1667		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1668		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1669	}
1670
1671	vlv_dpio_put(dev_priv);
1672
1673	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1674	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1675		       dev_priv->chv_phy_control);
1676
1677	drm_dbg_kms(&dev_priv->drm,
1678		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1679		    phy, dev_priv->chv_phy_control);
1680
1681	assert_chv_phy_status(dev_priv);
1682}
1683
1684static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1685					    struct i915_power_well *power_well)
1686{
1687	enum dpio_phy phy;
1688
1689	drm_WARN_ON_ONCE(&dev_priv->drm,
1690			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1691			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1692
1693	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1694		phy = DPIO_PHY0;
1695		assert_pll_disabled(dev_priv, PIPE_A);
1696		assert_pll_disabled(dev_priv, PIPE_B);
1697	} else {
1698		phy = DPIO_PHY1;
1699		assert_pll_disabled(dev_priv, PIPE_C);
1700	}
1701
1702	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1703	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1704		       dev_priv->chv_phy_control);
1705
1706	vlv_set_power_well(dev_priv, power_well, false);
1707
1708	drm_dbg_kms(&dev_priv->drm,
1709		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1710		    phy, dev_priv->chv_phy_control);
1711
1712	/* PHY is fully reset now, so we can enable the PHY state asserts */
1713	dev_priv->chv_phy_assert[phy] = true;
1714
1715	assert_chv_phy_status(dev_priv);
1716}
1717
1718static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1719				     enum dpio_channel ch, bool override, unsigned int mask)
1720{
1721	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1722	u32 reg, val, expected, actual;
1723
1724	/*
1725	 * The BIOS can leave the PHY is some weird state
1726	 * where it doesn't fully power down some parts.
1727	 * Disable the asserts until the PHY has been fully
1728	 * reset (ie. the power well has been disabled at
1729	 * least once).
1730	 */
1731	if (!dev_priv->chv_phy_assert[phy])
1732		return;
1733
1734	if (ch == DPIO_CH0)
1735		reg = _CHV_CMN_DW0_CH0;
1736	else
1737		reg = _CHV_CMN_DW6_CH1;
1738
1739	vlv_dpio_get(dev_priv);
1740	val = vlv_dpio_read(dev_priv, pipe, reg);
1741	vlv_dpio_put(dev_priv);
1742
1743	/*
1744	 * This assumes !override is only used when the port is disabled.
1745	 * All lanes should power down even without the override when
1746	 * the port is disabled.
1747	 */
1748	if (!override || mask == 0xf) {
1749		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1750		/*
1751		 * If CH1 common lane is not active anymore
1752		 * (eg. for pipe B DPLL) the entire channel will
1753		 * shut down, which causes the common lane registers
1754		 * to read as 0. That means we can't actually check
1755		 * the lane power down status bits, but as the entire
1756		 * register reads as 0 it's a good indication that the
1757		 * channel is indeed entirely powered down.
1758		 */
1759		if (ch == DPIO_CH1 && val == 0)
1760			expected = 0;
1761	} else if (mask != 0x0) {
1762		expected = DPIO_ANYDL_POWERDOWN;
1763	} else {
1764		expected = 0;
1765	}
1766
1767	if (ch == DPIO_CH0)
1768		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1769	else
1770		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1771	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1772
1773	drm_WARN(&dev_priv->drm, actual != expected,
1774		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1775		 !!(actual & DPIO_ALLDL_POWERDOWN),
1776		 !!(actual & DPIO_ANYDL_POWERDOWN),
1777		 !!(expected & DPIO_ALLDL_POWERDOWN),
1778		 !!(expected & DPIO_ANYDL_POWERDOWN),
1779		 reg, val);
1780}
1781
1782bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1783			  enum dpio_channel ch, bool override)
1784{
1785	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1786	bool was_override;
1787
1788	mutex_lock(&power_domains->lock);
1789
1790	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1791
1792	if (override == was_override)
1793		goto out;
1794
1795	if (override)
1796		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1797	else
1798		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1799
1800	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1801		       dev_priv->chv_phy_control);
1802
1803	drm_dbg_kms(&dev_priv->drm,
1804		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1805		    phy, ch, dev_priv->chv_phy_control);
1806
1807	assert_chv_phy_status(dev_priv);
1808
1809out:
1810	mutex_unlock(&power_domains->lock);
1811
1812	return was_override;
1813}
1814
1815void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1816			     bool override, unsigned int mask)
1817{
1818	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1819	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1820	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1821	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
1822
1823	mutex_lock(&power_domains->lock);
1824
1825	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1826	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1827
1828	if (override)
1829		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1830	else
1831		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1832
1833	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1834		       dev_priv->chv_phy_control);
1835
1836	drm_dbg_kms(&dev_priv->drm,
1837		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1838		    phy, ch, mask, dev_priv->chv_phy_control);
1839
1840	assert_chv_phy_status(dev_priv);
1841
1842	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1843
1844	mutex_unlock(&power_domains->lock);
1845}
1846
1847static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1848					struct i915_power_well *power_well)
1849{
1850	enum pipe pipe = PIPE_A;
1851	bool enabled;
1852	u32 state, ctrl;
1853
1854	vlv_punit_get(dev_priv);
1855
1856	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1857	/*
1858	 * We only ever set the power-on and power-gate states, anything
1859	 * else is unexpected.
1860	 */
1861	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1862		    state != DP_SSS_PWR_GATE(pipe));
1863	enabled = state == DP_SSS_PWR_ON(pipe);
1864
1865	/*
1866	 * A transient state at this point would mean some unexpected party
1867	 * is poking at the power controls too.
1868	 */
1869	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1870	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1871
1872	vlv_punit_put(dev_priv);
1873
1874	return enabled;
1875}
1876
1877static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1878				    struct i915_power_well *power_well,
1879				    bool enable)
1880{
1881	enum pipe pipe = PIPE_A;
1882	u32 state;
1883	u32 ctrl;
1884
1885	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1886
1887	vlv_punit_get(dev_priv);
1888
1889#define COND \
1890	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1891
1892	if (COND)
1893		goto out;
1894
1895	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1896	ctrl &= ~DP_SSC_MASK(pipe);
1897	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1898	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1899
1900	if (wait_for(COND, 100))
1901		drm_err(&dev_priv->drm,
1902			"timeout setting power well state %08x (%08x)\n",
1903			state,
1904			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1905
1906#undef COND
1907
1908out:
1909	vlv_punit_put(dev_priv);
1910}
1911
1912static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1913					struct i915_power_well *power_well)
1914{
1915	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1916		       dev_priv->chv_phy_control);
1917}
1918
1919static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1920				       struct i915_power_well *power_well)
1921{
1922	chv_set_pipe_power_well(dev_priv, power_well, true);
1923
1924	vlv_display_power_well_init(dev_priv);
1925}
1926
1927static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1928					struct i915_power_well *power_well)
1929{
1930	vlv_display_power_well_deinit(dev_priv);
1931
1932	chv_set_pipe_power_well(dev_priv, power_well, false);
1933}
1934
1935static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1936{
1937	return power_domains->async_put_domains[0] |
1938	       power_domains->async_put_domains[1];
1939}
1940
1941#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1942
1943static bool
1944assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1945{
1946	struct drm_i915_private *i915 = container_of(power_domains,
1947						     struct drm_i915_private,
1948						     power_domains);
1949	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
1950			    power_domains->async_put_domains[1]);
1951}
1952
1953static bool
1954__async_put_domains_state_ok(struct i915_power_domains *power_domains)
1955{
1956	struct drm_i915_private *i915 = container_of(power_domains,
1957						     struct drm_i915_private,
1958						     power_domains);
1959	enum intel_display_power_domain domain;
1960	bool err = false;
1961
1962	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1963	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
1964			   !!__async_put_domains_mask(power_domains));
1965
1966	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1967		err |= drm_WARN_ON(&i915->drm,
1968				   power_domains->domain_use_count[domain] != 1);
1969
1970	return !err;
1971}
1972
1973static void print_power_domains(struct i915_power_domains *power_domains,
1974				const char *prefix, u64 mask)
1975{
1976	struct drm_i915_private *i915 = container_of(power_domains,
1977						     struct drm_i915_private,
1978						     power_domains);
1979	enum intel_display_power_domain domain;
1980
1981	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1982	for_each_power_domain(domain, mask)
1983		drm_dbg(&i915->drm, "%s use_count %d\n",
1984			intel_display_power_domain_str(domain),
1985			power_domains->domain_use_count[domain]);
1986}
1987
1988static void
1989print_async_put_domains_state(struct i915_power_domains *power_domains)
1990{
1991	struct drm_i915_private *i915 = container_of(power_domains,
1992						     struct drm_i915_private,
1993						     power_domains);
1994
1995	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
1996		power_domains->async_put_wakeref);
1997
1998	print_power_domains(power_domains, "async_put_domains[0]",
1999			    power_domains->async_put_domains[0]);
2000	print_power_domains(power_domains, "async_put_domains[1]",
2001			    power_domains->async_put_domains[1]);
2002}
2003
2004static void
2005verify_async_put_domains_state(struct i915_power_domains *power_domains)
2006{
2007	if (!__async_put_domains_state_ok(power_domains))
2008		print_async_put_domains_state(power_domains);
2009}
2010
2011#else
2012
2013static void
2014assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
2015{
2016}
2017
2018static void
2019verify_async_put_domains_state(struct i915_power_domains *power_domains)
2020{
2021}
2022
2023#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2024
2025static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2026{
2027	assert_async_put_domain_masks_disjoint(power_domains);
2028
2029	return __async_put_domains_mask(power_domains);
2030}
2031
2032static void
2033async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2034			       enum intel_display_power_domain domain)
2035{
2036	assert_async_put_domain_masks_disjoint(power_domains);
2037
2038	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2039	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2040}
2041
2042static bool
2043intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2044				       enum intel_display_power_domain domain)
2045{
2046	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2047	bool ret = false;
2048
2049	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2050		goto out_verify;
2051
2052	async_put_domains_clear_domain(power_domains, domain);
2053
2054	ret = true;
2055
2056	if (async_put_domains_mask(power_domains))
2057		goto out_verify;
2058
2059	cancel_delayed_work(&power_domains->async_put_work);
2060	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2061				 fetch_and_zero(&power_domains->async_put_wakeref));
2062out_verify:
2063	verify_async_put_domains_state(power_domains);
2064
2065	return ret;
2066}
2067
2068static void
2069__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2070				 enum intel_display_power_domain domain)
2071{
2072	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2073	struct i915_power_well *power_well;
2074
2075	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2076		return;
2077
2078	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2079		intel_power_well_get(dev_priv, power_well);
2080
2081	power_domains->domain_use_count[domain]++;
2082}
2083
2084/**
2085 * intel_display_power_get - grab a power domain reference
2086 * @dev_priv: i915 device instance
2087 * @domain: power domain to reference
2088 *
2089 * This function grabs a power domain reference for @domain and ensures that the
2090 * power domain and all its parents are powered up. Therefore users should only
2091 * grab a reference to the innermost power domain they need.
2092 *
2093 * Any power domain reference obtained by this function must have a symmetric
2094 * call to intel_display_power_put() to release the reference again.
2095 */
2096intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2097					enum intel_display_power_domain domain)
2098{
2099	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2100	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2101
2102	mutex_lock(&power_domains->lock);
2103	__intel_display_power_get_domain(dev_priv, domain);
2104	mutex_unlock(&power_domains->lock);
2105
2106	return wakeref;
2107}
2108
2109/**
2110 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2111 * @dev_priv: i915 device instance
2112 * @domain: power domain to reference
2113 *
2114 * This function grabs a power domain reference for @domain and ensures that the
2115 * power domain and all its parents are powered up. Therefore users should only
2116 * grab a reference to the innermost power domain they need.
2117 *
2118 * Any power domain reference obtained by this function must have a symmetric
2119 * call to intel_display_power_put() to release the reference again.
2120 */
2121intel_wakeref_t
2122intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2123				   enum intel_display_power_domain domain)
2124{
2125	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2126	intel_wakeref_t wakeref;
2127	bool is_enabled;
2128
2129	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2130	if (!wakeref)
2131		return false;
2132
2133	mutex_lock(&power_domains->lock);
2134
2135	if (__intel_display_power_is_enabled(dev_priv, domain)) {
2136		__intel_display_power_get_domain(dev_priv, domain);
2137		is_enabled = true;
2138	} else {
2139		is_enabled = false;
2140	}
2141
2142	mutex_unlock(&power_domains->lock);
2143
2144	if (!is_enabled) {
2145		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2146		wakeref = 0;
2147	}
2148
2149	return wakeref;
2150}
2151
2152static void
2153__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2154				 enum intel_display_power_domain domain)
2155{
2156	struct i915_power_domains *power_domains;
2157	struct i915_power_well *power_well;
2158	const char *name = intel_display_power_domain_str(domain);
2159
2160	power_domains = &dev_priv->power_domains;
2161
2162	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2163		 "Use count on domain %s is already zero\n",
2164		 name);
2165	drm_WARN(&dev_priv->drm,
2166		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2167		 "Async disabling of domain %s is pending\n",
2168		 name);
2169
2170	power_domains->domain_use_count[domain]--;
2171
2172	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2173		intel_power_well_put(dev_priv, power_well);
2174}
2175
2176static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2177				      enum intel_display_power_domain domain)
2178{
2179	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2180
2181	mutex_lock(&power_domains->lock);
2182	__intel_display_power_put_domain(dev_priv, domain);
2183	mutex_unlock(&power_domains->lock);
2184}
2185
2186/**
2187 * intel_display_power_put_unchecked - release an unchecked power domain reference
2188 * @dev_priv: i915 device instance
2189 * @domain: power domain to reference
2190 *
2191 * This function drops the power domain reference obtained by
2192 * intel_display_power_get() and might power down the corresponding hardware
2193 * block right away if this is the last reference.
2194 *
2195 * This function exists only for historical reasons and should be avoided in
2196 * new code, as the correctness of its use cannot be checked. Always use
2197 * intel_display_power_put() instead.
2198 */
2199void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2200				       enum intel_display_power_domain domain)
2201{
2202	__intel_display_power_put(dev_priv, domain);
2203	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2204}
2205
2206static void
2207queue_async_put_domains_work(struct i915_power_domains *power_domains,
2208			     intel_wakeref_t wakeref)
2209{
2210	struct drm_i915_private *i915 = container_of(power_domains,
2211						     struct drm_i915_private,
2212						     power_domains);
2213	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2214	power_domains->async_put_wakeref = wakeref;
2215	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
2216						    &power_domains->async_put_work,
2217						    msecs_to_jiffies(100)));
2218}
2219
2220static void
2221release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2222{
2223	struct drm_i915_private *dev_priv =
2224		container_of(power_domains, struct drm_i915_private,
2225			     power_domains);
2226	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2227	enum intel_display_power_domain domain;
2228	intel_wakeref_t wakeref;
2229
2230	/*
2231	 * The caller must hold already raw wakeref, upgrade that to a proper
2232	 * wakeref to make the state checker happy about the HW access during
2233	 * power well disabling.
2234	 */
2235	assert_rpm_raw_wakeref_held(rpm);
2236	wakeref = intel_runtime_pm_get(rpm);
2237
2238	for_each_power_domain(domain, mask) {
2239		/* Clear before put, so put's sanity check is happy. */
2240		async_put_domains_clear_domain(power_domains, domain);
2241		__intel_display_power_put_domain(dev_priv, domain);
2242	}
2243
2244	intel_runtime_pm_put(rpm, wakeref);
2245}
2246
2247static void
2248intel_display_power_put_async_work(struct work_struct *work)
2249{
2250	struct drm_i915_private *dev_priv =
2251		container_of(work, struct drm_i915_private,
2252			     power_domains.async_put_work.work);
2253	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2254	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2255	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2256	intel_wakeref_t old_work_wakeref = 0;
2257
2258	mutex_lock(&power_domains->lock);
2259
2260	/*
2261	 * Bail out if all the domain refs pending to be released were grabbed
2262	 * by subsequent gets or a flush_work.
2263	 */
2264	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2265	if (!old_work_wakeref)
2266		goto out_verify;
2267
2268	release_async_put_domains(power_domains,
2269				  power_domains->async_put_domains[0]);
2270
2271	/* Requeue the work if more domains were async put meanwhile. */
2272	if (power_domains->async_put_domains[1]) {
2273		power_domains->async_put_domains[0] =
2274			fetch_and_zero(&power_domains->async_put_domains[1]);
2275		queue_async_put_domains_work(power_domains,
2276					     fetch_and_zero(&new_work_wakeref));
 
 
 
 
 
 
2277	}
2278
2279out_verify:
2280	verify_async_put_domains_state(power_domains);
2281
2282	mutex_unlock(&power_domains->lock);
2283
2284	if (old_work_wakeref)
2285		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2286	if (new_work_wakeref)
2287		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2288}
2289
2290/**
2291 * intel_display_power_put_async - release a power domain reference asynchronously
2292 * @i915: i915 device instance
2293 * @domain: power domain to reference
2294 * @wakeref: wakeref acquired for the reference that is being released
2295 *
2296 * This function drops the power domain reference obtained by
2297 * intel_display_power_get*() and schedules a work to power down the
2298 * corresponding hardware block if this is the last reference.
2299 */
2300void __intel_display_power_put_async(struct drm_i915_private *i915,
2301				     enum intel_display_power_domain domain,
2302				     intel_wakeref_t wakeref)
2303{
2304	struct i915_power_domains *power_domains = &i915->power_domains;
2305	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2306	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2307
2308	mutex_lock(&power_domains->lock);
2309
2310	if (power_domains->domain_use_count[domain] > 1) {
2311		__intel_display_power_put_domain(i915, domain);
2312
2313		goto out_verify;
2314	}
2315
2316	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2317
2318	/* Let a pending work requeue itself or queue a new one. */
2319	if (power_domains->async_put_wakeref) {
2320		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2321	} else {
2322		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2323		queue_async_put_domains_work(power_domains,
2324					     fetch_and_zero(&work_wakeref));
2325	}
2326
2327out_verify:
2328	verify_async_put_domains_state(power_domains);
2329
2330	mutex_unlock(&power_domains->lock);
2331
2332	if (work_wakeref)
2333		intel_runtime_pm_put_raw(rpm, work_wakeref);
2334
2335	intel_runtime_pm_put(rpm, wakeref);
2336}
2337
2338/**
2339 * intel_display_power_flush_work - flushes the async display power disabling work
2340 * @i915: i915 device instance
2341 *
2342 * Flushes any pending work that was scheduled by a preceding
2343 * intel_display_power_put_async() call, completing the disabling of the
2344 * corresponding power domains.
2345 *
2346 * Note that the work handler function may still be running after this
2347 * function returns; to ensure that the work handler isn't running use
2348 * intel_display_power_flush_work_sync() instead.
2349 */
2350void intel_display_power_flush_work(struct drm_i915_private *i915)
2351{
2352	struct i915_power_domains *power_domains = &i915->power_domains;
2353	intel_wakeref_t work_wakeref;
2354
2355	mutex_lock(&power_domains->lock);
2356
2357	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2358	if (!work_wakeref)
2359		goto out_verify;
2360
2361	release_async_put_domains(power_domains,
2362				  async_put_domains_mask(power_domains));
2363	cancel_delayed_work(&power_domains->async_put_work);
2364
2365out_verify:
2366	verify_async_put_domains_state(power_domains);
2367
2368	mutex_unlock(&power_domains->lock);
2369
2370	if (work_wakeref)
2371		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2372}
2373
2374/**
2375 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2376 * @i915: i915 device instance
2377 *
2378 * Like intel_display_power_flush_work(), but also ensure that the work
2379 * handler function is not running any more when this function returns.
2380 */
2381static void
2382intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2383{
2384	struct i915_power_domains *power_domains = &i915->power_domains;
2385
2386	intel_display_power_flush_work(i915);
2387	cancel_delayed_work_sync(&power_domains->async_put_work);
2388
2389	verify_async_put_domains_state(power_domains);
2390
2391	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2392}
2393
2394#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2395/**
2396 * intel_display_power_put - release a power domain reference
2397 * @dev_priv: i915 device instance
2398 * @domain: power domain to reference
2399 * @wakeref: wakeref acquired for the reference that is being released
2400 *
2401 * This function drops the power domain reference obtained by
2402 * intel_display_power_get() and might power down the corresponding hardware
2403 * block right away if this is the last reference.
2404 */
2405void intel_display_power_put(struct drm_i915_private *dev_priv,
2406			     enum intel_display_power_domain domain,
2407			     intel_wakeref_t wakeref)
2408{
2409	__intel_display_power_put(dev_priv, domain);
2410	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2411}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2412#endif
2413
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2414#define I830_PIPES_POWER_DOMAINS (		\
2415	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2416	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2417	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2418	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2419	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2420	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2421	BIT_ULL(POWER_DOMAIN_INIT))
2422
2423#define VLV_DISPLAY_POWER_DOMAINS (		\
2424	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2425	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2426	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2427	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2428	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2429	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2430	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2431	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2432	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2433	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2434	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2435	BIT_ULL(POWER_DOMAIN_VGA) |			\
2436	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2437	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2438	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2439	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2440	BIT_ULL(POWER_DOMAIN_INIT))
2441
2442#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2443	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2444	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2445	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2446	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2447	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2448	BIT_ULL(POWER_DOMAIN_INIT))
2449
2450#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2451	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2452	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2453	BIT_ULL(POWER_DOMAIN_INIT))
2454
2455#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2456	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2457	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2458	BIT_ULL(POWER_DOMAIN_INIT))
2459
2460#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2461	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2462	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2463	BIT_ULL(POWER_DOMAIN_INIT))
2464
2465#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2466	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2467	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2468	BIT_ULL(POWER_DOMAIN_INIT))
2469
2470#define CHV_DISPLAY_POWER_DOMAINS (		\
2471	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2472	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2473	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2474	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2475	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2476	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2477	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2478	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2479	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2480	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2481	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2482	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2483	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2484	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2485	BIT_ULL(POWER_DOMAIN_VGA) |			\
2486	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2487	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2488	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2489	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2490	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2491	BIT_ULL(POWER_DOMAIN_INIT))
2492
2493#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2494	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2495	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2496	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2497	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2498	BIT_ULL(POWER_DOMAIN_INIT))
2499
2500#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2501	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2502	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2503	BIT_ULL(POWER_DOMAIN_INIT))
2504
2505#define HSW_DISPLAY_POWER_DOMAINS (			\
2506	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2507	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2508	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2509	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2510	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2511	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2512	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2513	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2514	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2515	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2516	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2517	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2518	BIT_ULL(POWER_DOMAIN_VGA) |				\
2519	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2520	BIT_ULL(POWER_DOMAIN_INIT))
2521
2522#define BDW_DISPLAY_POWER_DOMAINS (			\
2523	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2524	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2525	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2526	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2527	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2528	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2529	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2530	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2531	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2532	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2533	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2534	BIT_ULL(POWER_DOMAIN_VGA) |				\
2535	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2536	BIT_ULL(POWER_DOMAIN_INIT))
2537
2538#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2539	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2540	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2541	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2542	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2543	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2544	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2545	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2546	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2547	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2548	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2549	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2550	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2551	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2552	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2553	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2554	BIT_ULL(POWER_DOMAIN_VGA) |				\
2555	BIT_ULL(POWER_DOMAIN_INIT))
2556#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2557	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2558	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2559	BIT_ULL(POWER_DOMAIN_INIT))
2560#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2561	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2562	BIT_ULL(POWER_DOMAIN_INIT))
2563#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2564	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2565	BIT_ULL(POWER_DOMAIN_INIT))
2566#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2567	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2568	BIT_ULL(POWER_DOMAIN_INIT))
2569#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2570	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2571	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2572	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2573	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2574	BIT_ULL(POWER_DOMAIN_INIT))
2575
2576#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2577	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2578	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2579	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2580	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2581	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2582	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2583	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2584	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2585	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2586	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2587	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2588	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2589	BIT_ULL(POWER_DOMAIN_VGA) |				\
2590	BIT_ULL(POWER_DOMAIN_INIT))
2591#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2592	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2593	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2594	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2595	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2596	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2597	BIT_ULL(POWER_DOMAIN_INIT))
2598#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2599	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2600	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2601	BIT_ULL(POWER_DOMAIN_INIT))
2602#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2603	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2604	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2605	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2606	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2607	BIT_ULL(POWER_DOMAIN_INIT))
2608
2609#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2610	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2611	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2612	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2613	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2614	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2615	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2616	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2617	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2618	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2619	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2620	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2621	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2622	BIT_ULL(POWER_DOMAIN_VGA) |				\
2623	BIT_ULL(POWER_DOMAIN_INIT))
2624#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2625	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2626#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2627	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2628#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2629	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2630#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2631	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2632	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2633	BIT_ULL(POWER_DOMAIN_INIT))
2634#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2635	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2636	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2637	BIT_ULL(POWER_DOMAIN_INIT))
2638#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2639	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2640	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2641	BIT_ULL(POWER_DOMAIN_INIT))
2642#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2643	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2644	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2645	BIT_ULL(POWER_DOMAIN_INIT))
2646#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2647	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2648	BIT_ULL(POWER_DOMAIN_INIT))
2649#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2650	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2651	BIT_ULL(POWER_DOMAIN_INIT))
2652#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2653	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2654	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2655	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2656	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2657	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2658	BIT_ULL(POWER_DOMAIN_INIT))
2659
2660#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2661	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2662	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2663	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2664	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2665	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2666	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2667	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2668	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2669	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2670	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2671	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2672	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2673	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2674	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2675	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2676	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2677	BIT_ULL(POWER_DOMAIN_VGA) |				\
2678	BIT_ULL(POWER_DOMAIN_INIT))
2679#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2680	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2681	BIT_ULL(POWER_DOMAIN_INIT))
2682#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2683	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2684	BIT_ULL(POWER_DOMAIN_INIT))
2685#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2686	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2687	BIT_ULL(POWER_DOMAIN_INIT))
2688#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2689	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2690	BIT_ULL(POWER_DOMAIN_INIT))
2691#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2692	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2693	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2694	BIT_ULL(POWER_DOMAIN_INIT))
2695#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2696	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2697	BIT_ULL(POWER_DOMAIN_INIT))
2698#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2699	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2700	BIT_ULL(POWER_DOMAIN_INIT))
2701#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2702	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2703	BIT_ULL(POWER_DOMAIN_INIT))
2704#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2705	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2706	BIT_ULL(POWER_DOMAIN_INIT))
2707#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2708	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2709	BIT_ULL(POWER_DOMAIN_INIT))
2710#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2711	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2712	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2713	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2714	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2715	BIT_ULL(POWER_DOMAIN_INIT))
2716
2717/*
2718 * ICL PW_0/PG_0 domains (HW/DMC control):
2719 * - PCI
2720 * - clocks except port PLL
2721 * - central power except FBC
2722 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2723 * ICL PW_1/PG_1 domains (HW/DMC control):
2724 * - DBUF function
2725 * - PIPE_A and its planes, except VGA
2726 * - transcoder EDP + PSR
2727 * - transcoder DSI
2728 * - DDI_A
2729 * - FBC
2730 */
2731#define ICL_PW_4_POWER_DOMAINS (			\
2732	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2733	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2734	BIT_ULL(POWER_DOMAIN_INIT))
2735	/* VDSC/joining */
2736#define ICL_PW_3_POWER_DOMAINS (			\
2737	ICL_PW_4_POWER_DOMAINS |			\
2738	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2739	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2740	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2741	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2742	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2743	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2744	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2745	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2746	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2747	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2748	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2749	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2750	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2751	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2752	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2753	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2754	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2755	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2756	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2757	BIT_ULL(POWER_DOMAIN_VGA) |			\
2758	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2759	BIT_ULL(POWER_DOMAIN_INIT))
2760	/*
2761	 * - transcoder WD
2762	 * - KVMR (HW control)
2763	 */
2764#define ICL_PW_2_POWER_DOMAINS (			\
2765	ICL_PW_3_POWER_DOMAINS |			\
2766	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2767	BIT_ULL(POWER_DOMAIN_INIT))
2768	/*
2769	 * - KVMR (HW control)
2770	 */
2771#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2772	ICL_PW_2_POWER_DOMAINS |			\
2773	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2774	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2775	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2776	BIT_ULL(POWER_DOMAIN_INIT))
2777
2778#define ICL_DDI_IO_A_POWER_DOMAINS (			\
2779	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2780#define ICL_DDI_IO_B_POWER_DOMAINS (			\
2781	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2782#define ICL_DDI_IO_C_POWER_DOMAINS (			\
2783	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2784#define ICL_DDI_IO_D_POWER_DOMAINS (			\
2785	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2786#define ICL_DDI_IO_E_POWER_DOMAINS (			\
2787	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2788#define ICL_DDI_IO_F_POWER_DOMAINS (			\
2789	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2790
2791#define ICL_AUX_A_IO_POWER_DOMAINS (			\
2792	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2793	BIT_ULL(POWER_DOMAIN_AUX_A))
2794#define ICL_AUX_B_IO_POWER_DOMAINS (			\
2795	BIT_ULL(POWER_DOMAIN_AUX_B))
2796#define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2797	BIT_ULL(POWER_DOMAIN_AUX_C))
2798#define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2799	BIT_ULL(POWER_DOMAIN_AUX_D))
2800#define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2801	BIT_ULL(POWER_DOMAIN_AUX_E))
2802#define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2803	BIT_ULL(POWER_DOMAIN_AUX_F))
2804#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2805	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2806#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2807	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2808#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2809	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2810#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2811	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2812
2813#define TGL_PW_5_POWER_DOMAINS (			\
2814	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2815	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2816	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2817	BIT_ULL(POWER_DOMAIN_INIT))
2818
2819#define TGL_PW_4_POWER_DOMAINS (			\
2820	TGL_PW_5_POWER_DOMAINS |			\
2821	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2822	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2823	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2824	BIT_ULL(POWER_DOMAIN_INIT))
2825
2826#define TGL_PW_3_POWER_DOMAINS (			\
2827	TGL_PW_4_POWER_DOMAINS |			\
2828	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2829	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2830	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2831	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2832	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2833	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2834	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
2835	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
2836	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
2837	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2838	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2839	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2840	BIT_ULL(POWER_DOMAIN_AUX_G) |			\
2841	BIT_ULL(POWER_DOMAIN_AUX_H) |			\
2842	BIT_ULL(POWER_DOMAIN_AUX_I) |			\
2843	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2844	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2845	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2846	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |		\
2847	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |		\
2848	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |		\
2849	BIT_ULL(POWER_DOMAIN_VGA) |			\
2850	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2851	BIT_ULL(POWER_DOMAIN_INIT))
2852
2853#define TGL_PW_2_POWER_DOMAINS (			\
2854	TGL_PW_3_POWER_DOMAINS |			\
2855	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2856	BIT_ULL(POWER_DOMAIN_INIT))
2857
2858#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2859	TGL_PW_3_POWER_DOMAINS |			\
2860	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2861	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2862	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2863	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2864	BIT_ULL(POWER_DOMAIN_INIT))
2865
2866#define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
2867	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2868#define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
2869	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2870#define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
2871	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2872#define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
2873	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2874#define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
2875	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2876#define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
2877	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2878
2879#define TGL_AUX_A_IO_POWER_DOMAINS (		\
2880	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2881	BIT_ULL(POWER_DOMAIN_AUX_A))
2882#define TGL_AUX_B_IO_POWER_DOMAINS (		\
2883	BIT_ULL(POWER_DOMAIN_AUX_B))
2884#define TGL_AUX_C_IO_POWER_DOMAINS (		\
2885	BIT_ULL(POWER_DOMAIN_AUX_C))
2886#define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
2887	BIT_ULL(POWER_DOMAIN_AUX_D))
2888#define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
2889	BIT_ULL(POWER_DOMAIN_AUX_E))
2890#define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
2891	BIT_ULL(POWER_DOMAIN_AUX_F))
2892#define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
2893	BIT_ULL(POWER_DOMAIN_AUX_G))
2894#define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
2895	BIT_ULL(POWER_DOMAIN_AUX_H))
2896#define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
2897	BIT_ULL(POWER_DOMAIN_AUX_I))
2898#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
2899	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2900#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
2901	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2902#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
2903	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2904#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
2905	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2906#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
2907	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2908#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
2909	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2910
2911#define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
2912	BIT_ULL(POWER_DOMAIN_AUX_D)	|	\
2913	BIT_ULL(POWER_DOMAIN_AUX_E)	|	\
2914	BIT_ULL(POWER_DOMAIN_AUX_F)	|	\
2915	BIT_ULL(POWER_DOMAIN_AUX_G)	|	\
2916	BIT_ULL(POWER_DOMAIN_AUX_H)	|	\
2917	BIT_ULL(POWER_DOMAIN_AUX_I)	|	\
2918	BIT_ULL(POWER_DOMAIN_AUX_D_TBT)	|	\
2919	BIT_ULL(POWER_DOMAIN_AUX_E_TBT)	|	\
2920	BIT_ULL(POWER_DOMAIN_AUX_F_TBT)	|	\
2921	BIT_ULL(POWER_DOMAIN_AUX_G_TBT)	|	\
2922	BIT_ULL(POWER_DOMAIN_AUX_H_TBT)	|	\
2923	BIT_ULL(POWER_DOMAIN_AUX_I_TBT)	|	\
2924	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
2925
2926#define RKL_PW_4_POWER_DOMAINS (			\
2927	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2928	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2929	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2930	BIT_ULL(POWER_DOMAIN_INIT))
2931
2932#define RKL_PW_3_POWER_DOMAINS (			\
2933	RKL_PW_4_POWER_DOMAINS |			\
2934	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2935	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2936	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2937	BIT_ULL(POWER_DOMAIN_VGA) |			\
2938	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2939	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2940	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2941	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2942	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2943	BIT_ULL(POWER_DOMAIN_INIT))
2944
2945/*
2946 * There is no PW_2/PG_2 on RKL.
2947 *
2948 * RKL PW_1/PG_1 domains (under HW/DMC control):
2949 * - DBUF function (note: registers are in PW0)
2950 * - PIPE_A and its planes and VDSC/joining, except VGA
2951 * - transcoder A
2952 * - DDI_A and DDI_B
2953 * - FBC
2954 *
2955 * RKL PW_0/PG_0 domains (under HW/DMC control):
2956 * - PCI
2957 * - clocks except port PLL
2958 * - shared functions:
2959 *     * interrupts except pipe interrupts
2960 *     * MBus except PIPE_MBUS_DBOX_CTL
2961 *     * DBUF registers
2962 * - central power except FBC
2963 * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
2964 */
2965
2966#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2967	RKL_PW_3_POWER_DOMAINS |			\
2968	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2969	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2970	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2971	BIT_ULL(POWER_DOMAIN_INIT))
2972
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2973static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2974	.sync_hw = i9xx_power_well_sync_hw_noop,
2975	.enable = i9xx_always_on_power_well_noop,
2976	.disable = i9xx_always_on_power_well_noop,
2977	.is_enabled = i9xx_always_on_power_well_enabled,
2978};
2979
2980static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2981	.sync_hw = chv_pipe_power_well_sync_hw,
2982	.enable = chv_pipe_power_well_enable,
2983	.disable = chv_pipe_power_well_disable,
2984	.is_enabled = chv_pipe_power_well_enabled,
2985};
2986
2987static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2988	.sync_hw = i9xx_power_well_sync_hw_noop,
2989	.enable = chv_dpio_cmn_power_well_enable,
2990	.disable = chv_dpio_cmn_power_well_disable,
2991	.is_enabled = vlv_power_well_enabled,
2992};
2993
2994static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2995	{
2996		.name = "always-on",
2997		.always_on = true,
2998		.domains = POWER_DOMAIN_MASK,
2999		.ops = &i9xx_always_on_power_well_ops,
3000		.id = DISP_PW_ID_NONE,
3001	},
3002};
3003
3004static const struct i915_power_well_ops i830_pipes_power_well_ops = {
3005	.sync_hw = i830_pipes_power_well_sync_hw,
3006	.enable = i830_pipes_power_well_enable,
3007	.disable = i830_pipes_power_well_disable,
3008	.is_enabled = i830_pipes_power_well_enabled,
3009};
3010
3011static const struct i915_power_well_desc i830_power_wells[] = {
3012	{
3013		.name = "always-on",
3014		.always_on = true,
3015		.domains = POWER_DOMAIN_MASK,
3016		.ops = &i9xx_always_on_power_well_ops,
3017		.id = DISP_PW_ID_NONE,
3018	},
3019	{
3020		.name = "pipes",
3021		.domains = I830_PIPES_POWER_DOMAINS,
3022		.ops = &i830_pipes_power_well_ops,
3023		.id = DISP_PW_ID_NONE,
3024	},
3025};
3026
3027static const struct i915_power_well_ops hsw_power_well_ops = {
3028	.sync_hw = hsw_power_well_sync_hw,
3029	.enable = hsw_power_well_enable,
3030	.disable = hsw_power_well_disable,
3031	.is_enabled = hsw_power_well_enabled,
3032};
3033
3034static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
3035	.sync_hw = i9xx_power_well_sync_hw_noop,
3036	.enable = gen9_dc_off_power_well_enable,
3037	.disable = gen9_dc_off_power_well_disable,
3038	.is_enabled = gen9_dc_off_power_well_enabled,
3039};
3040
3041static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
3042	.sync_hw = i9xx_power_well_sync_hw_noop,
3043	.enable = bxt_dpio_cmn_power_well_enable,
3044	.disable = bxt_dpio_cmn_power_well_disable,
3045	.is_enabled = bxt_dpio_cmn_power_well_enabled,
3046};
3047
3048static const struct i915_power_well_regs hsw_power_well_regs = {
3049	.bios	= HSW_PWR_WELL_CTL1,
3050	.driver	= HSW_PWR_WELL_CTL2,
3051	.kvmr	= HSW_PWR_WELL_CTL3,
3052	.debug	= HSW_PWR_WELL_CTL4,
3053};
3054
3055static const struct i915_power_well_desc hsw_power_wells[] = {
3056	{
3057		.name = "always-on",
3058		.always_on = true,
3059		.domains = POWER_DOMAIN_MASK,
3060		.ops = &i9xx_always_on_power_well_ops,
3061		.id = DISP_PW_ID_NONE,
3062	},
3063	{
3064		.name = "display",
3065		.domains = HSW_DISPLAY_POWER_DOMAINS,
3066		.ops = &hsw_power_well_ops,
3067		.id = HSW_DISP_PW_GLOBAL,
3068		{
3069			.hsw.regs = &hsw_power_well_regs,
3070			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3071			.hsw.has_vga = true,
3072		},
3073	},
3074};
3075
3076static const struct i915_power_well_desc bdw_power_wells[] = {
3077	{
3078		.name = "always-on",
3079		.always_on = true,
3080		.domains = POWER_DOMAIN_MASK,
3081		.ops = &i9xx_always_on_power_well_ops,
3082		.id = DISP_PW_ID_NONE,
3083	},
3084	{
3085		.name = "display",
3086		.domains = BDW_DISPLAY_POWER_DOMAINS,
3087		.ops = &hsw_power_well_ops,
3088		.id = HSW_DISP_PW_GLOBAL,
3089		{
3090			.hsw.regs = &hsw_power_well_regs,
3091			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3092			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3093			.hsw.has_vga = true,
3094		},
3095	},
3096};
3097
3098static const struct i915_power_well_ops vlv_display_power_well_ops = {
3099	.sync_hw = i9xx_power_well_sync_hw_noop,
3100	.enable = vlv_display_power_well_enable,
3101	.disable = vlv_display_power_well_disable,
3102	.is_enabled = vlv_power_well_enabled,
3103};
3104
3105static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3106	.sync_hw = i9xx_power_well_sync_hw_noop,
3107	.enable = vlv_dpio_cmn_power_well_enable,
3108	.disable = vlv_dpio_cmn_power_well_disable,
3109	.is_enabled = vlv_power_well_enabled,
3110};
3111
3112static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3113	.sync_hw = i9xx_power_well_sync_hw_noop,
3114	.enable = vlv_power_well_enable,
3115	.disable = vlv_power_well_disable,
3116	.is_enabled = vlv_power_well_enabled,
3117};
3118
3119static const struct i915_power_well_desc vlv_power_wells[] = {
3120	{
3121		.name = "always-on",
3122		.always_on = true,
3123		.domains = POWER_DOMAIN_MASK,
3124		.ops = &i9xx_always_on_power_well_ops,
3125		.id = DISP_PW_ID_NONE,
3126	},
3127	{
3128		.name = "display",
3129		.domains = VLV_DISPLAY_POWER_DOMAINS,
3130		.ops = &vlv_display_power_well_ops,
3131		.id = VLV_DISP_PW_DISP2D,
3132		{
3133			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3134		},
3135	},
3136	{
3137		.name = "dpio-tx-b-01",
3138		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3139			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3140			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3141			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3142		.ops = &vlv_dpio_power_well_ops,
3143		.id = DISP_PW_ID_NONE,
3144		{
3145			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3146		},
3147	},
3148	{
3149		.name = "dpio-tx-b-23",
3150		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3151			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3152			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3153			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3154		.ops = &vlv_dpio_power_well_ops,
3155		.id = DISP_PW_ID_NONE,
3156		{
3157			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3158		},
3159	},
3160	{
3161		.name = "dpio-tx-c-01",
3162		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3163			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3164			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3165			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3166		.ops = &vlv_dpio_power_well_ops,
3167		.id = DISP_PW_ID_NONE,
3168		{
3169			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3170		},
3171	},
3172	{
3173		.name = "dpio-tx-c-23",
3174		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3175			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3176			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3177			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3178		.ops = &vlv_dpio_power_well_ops,
3179		.id = DISP_PW_ID_NONE,
3180		{
3181			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3182		},
3183	},
3184	{
3185		.name = "dpio-common",
3186		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3187		.ops = &vlv_dpio_cmn_power_well_ops,
3188		.id = VLV_DISP_PW_DPIO_CMN_BC,
3189		{
3190			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3191		},
3192	},
3193};
3194
3195static const struct i915_power_well_desc chv_power_wells[] = {
3196	{
3197		.name = "always-on",
3198		.always_on = true,
3199		.domains = POWER_DOMAIN_MASK,
3200		.ops = &i9xx_always_on_power_well_ops,
3201		.id = DISP_PW_ID_NONE,
3202	},
3203	{
3204		.name = "display",
3205		/*
3206		 * Pipe A power well is the new disp2d well. Pipe B and C
3207		 * power wells don't actually exist. Pipe A power well is
3208		 * required for any pipe to work.
3209		 */
3210		.domains = CHV_DISPLAY_POWER_DOMAINS,
3211		.ops = &chv_pipe_power_well_ops,
3212		.id = DISP_PW_ID_NONE,
3213	},
3214	{
3215		.name = "dpio-common-bc",
3216		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3217		.ops = &chv_dpio_cmn_power_well_ops,
3218		.id = VLV_DISP_PW_DPIO_CMN_BC,
3219		{
3220			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3221		},
3222	},
3223	{
3224		.name = "dpio-common-d",
3225		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3226		.ops = &chv_dpio_cmn_power_well_ops,
3227		.id = CHV_DISP_PW_DPIO_CMN_D,
3228		{
3229			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3230		},
3231	},
3232};
3233
3234bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3235					 enum i915_power_well_id power_well_id)
3236{
3237	struct i915_power_well *power_well;
3238	bool ret;
3239
3240	power_well = lookup_power_well(dev_priv, power_well_id);
3241	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3242
3243	return ret;
3244}
3245
3246static const struct i915_power_well_desc skl_power_wells[] = {
3247	{
3248		.name = "always-on",
3249		.always_on = true,
3250		.domains = POWER_DOMAIN_MASK,
3251		.ops = &i9xx_always_on_power_well_ops,
3252		.id = DISP_PW_ID_NONE,
3253	},
3254	{
3255		.name = "power well 1",
3256		/* Handled by the DMC firmware */
3257		.always_on = true,
3258		.domains = 0,
3259		.ops = &hsw_power_well_ops,
3260		.id = SKL_DISP_PW_1,
3261		{
3262			.hsw.regs = &hsw_power_well_regs,
3263			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3264			.hsw.has_fuses = true,
3265		},
3266	},
3267	{
3268		.name = "MISC IO power well",
3269		/* Handled by the DMC firmware */
3270		.always_on = true,
3271		.domains = 0,
3272		.ops = &hsw_power_well_ops,
3273		.id = SKL_DISP_PW_MISC_IO,
3274		{
3275			.hsw.regs = &hsw_power_well_regs,
3276			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3277		},
3278	},
3279	{
3280		.name = "DC off",
3281		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3282		.ops = &gen9_dc_off_power_well_ops,
3283		.id = SKL_DISP_DC_OFF,
3284	},
3285	{
3286		.name = "power well 2",
3287		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3288		.ops = &hsw_power_well_ops,
3289		.id = SKL_DISP_PW_2,
3290		{
3291			.hsw.regs = &hsw_power_well_regs,
3292			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3293			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3294			.hsw.has_vga = true,
3295			.hsw.has_fuses = true,
3296		},
3297	},
3298	{
3299		.name = "DDI A/E IO power well",
3300		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3301		.ops = &hsw_power_well_ops,
3302		.id = DISP_PW_ID_NONE,
3303		{
3304			.hsw.regs = &hsw_power_well_regs,
3305			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3306		},
3307	},
3308	{
3309		.name = "DDI B IO power well",
3310		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3311		.ops = &hsw_power_well_ops,
3312		.id = DISP_PW_ID_NONE,
3313		{
3314			.hsw.regs = &hsw_power_well_regs,
3315			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3316		},
3317	},
3318	{
3319		.name = "DDI C IO power well",
3320		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3321		.ops = &hsw_power_well_ops,
3322		.id = DISP_PW_ID_NONE,
3323		{
3324			.hsw.regs = &hsw_power_well_regs,
3325			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3326		},
3327	},
3328	{
3329		.name = "DDI D IO power well",
3330		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3331		.ops = &hsw_power_well_ops,
3332		.id = DISP_PW_ID_NONE,
3333		{
3334			.hsw.regs = &hsw_power_well_regs,
3335			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3336		},
3337	},
3338};
3339
3340static const struct i915_power_well_desc bxt_power_wells[] = {
3341	{
3342		.name = "always-on",
3343		.always_on = true,
3344		.domains = POWER_DOMAIN_MASK,
3345		.ops = &i9xx_always_on_power_well_ops,
3346		.id = DISP_PW_ID_NONE,
3347	},
3348	{
3349		.name = "power well 1",
3350		/* Handled by the DMC firmware */
3351		.always_on = true,
3352		.domains = 0,
3353		.ops = &hsw_power_well_ops,
3354		.id = SKL_DISP_PW_1,
3355		{
3356			.hsw.regs = &hsw_power_well_regs,
3357			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3358			.hsw.has_fuses = true,
3359		},
3360	},
3361	{
3362		.name = "DC off",
3363		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3364		.ops = &gen9_dc_off_power_well_ops,
3365		.id = SKL_DISP_DC_OFF,
3366	},
3367	{
3368		.name = "power well 2",
3369		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3370		.ops = &hsw_power_well_ops,
3371		.id = SKL_DISP_PW_2,
3372		{
3373			.hsw.regs = &hsw_power_well_regs,
3374			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3375			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3376			.hsw.has_vga = true,
3377			.hsw.has_fuses = true,
3378		},
3379	},
3380	{
3381		.name = "dpio-common-a",
3382		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3383		.ops = &bxt_dpio_cmn_power_well_ops,
3384		.id = BXT_DISP_PW_DPIO_CMN_A,
3385		{
3386			.bxt.phy = DPIO_PHY1,
3387		},
3388	},
3389	{
3390		.name = "dpio-common-bc",
3391		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3392		.ops = &bxt_dpio_cmn_power_well_ops,
3393		.id = VLV_DISP_PW_DPIO_CMN_BC,
3394		{
3395			.bxt.phy = DPIO_PHY0,
3396		},
3397	},
3398};
3399
3400static const struct i915_power_well_desc glk_power_wells[] = {
3401	{
3402		.name = "always-on",
3403		.always_on = true,
3404		.domains = POWER_DOMAIN_MASK,
3405		.ops = &i9xx_always_on_power_well_ops,
3406		.id = DISP_PW_ID_NONE,
3407	},
3408	{
3409		.name = "power well 1",
3410		/* Handled by the DMC firmware */
3411		.always_on = true,
3412		.domains = 0,
3413		.ops = &hsw_power_well_ops,
3414		.id = SKL_DISP_PW_1,
3415		{
3416			.hsw.regs = &hsw_power_well_regs,
3417			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3418			.hsw.has_fuses = true,
3419		},
3420	},
3421	{
3422		.name = "DC off",
3423		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3424		.ops = &gen9_dc_off_power_well_ops,
3425		.id = SKL_DISP_DC_OFF,
3426	},
3427	{
3428		.name = "power well 2",
3429		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3430		.ops = &hsw_power_well_ops,
3431		.id = SKL_DISP_PW_2,
3432		{
3433			.hsw.regs = &hsw_power_well_regs,
3434			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3435			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3436			.hsw.has_vga = true,
3437			.hsw.has_fuses = true,
3438		},
3439	},
3440	{
3441		.name = "dpio-common-a",
3442		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3443		.ops = &bxt_dpio_cmn_power_well_ops,
3444		.id = BXT_DISP_PW_DPIO_CMN_A,
3445		{
3446			.bxt.phy = DPIO_PHY1,
3447		},
3448	},
3449	{
3450		.name = "dpio-common-b",
3451		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3452		.ops = &bxt_dpio_cmn_power_well_ops,
3453		.id = VLV_DISP_PW_DPIO_CMN_BC,
3454		{
3455			.bxt.phy = DPIO_PHY0,
3456		},
3457	},
3458	{
3459		.name = "dpio-common-c",
3460		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3461		.ops = &bxt_dpio_cmn_power_well_ops,
3462		.id = GLK_DISP_PW_DPIO_CMN_C,
3463		{
3464			.bxt.phy = DPIO_PHY2,
3465		},
3466	},
3467	{
3468		.name = "AUX A",
3469		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3470		.ops = &hsw_power_well_ops,
3471		.id = DISP_PW_ID_NONE,
3472		{
3473			.hsw.regs = &hsw_power_well_regs,
3474			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3475		},
3476	},
3477	{
3478		.name = "AUX B",
3479		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3480		.ops = &hsw_power_well_ops,
3481		.id = DISP_PW_ID_NONE,
3482		{
3483			.hsw.regs = &hsw_power_well_regs,
3484			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3485		},
3486	},
3487	{
3488		.name = "AUX C",
3489		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3490		.ops = &hsw_power_well_ops,
3491		.id = DISP_PW_ID_NONE,
3492		{
3493			.hsw.regs = &hsw_power_well_regs,
3494			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3495		},
3496	},
3497	{
3498		.name = "DDI A IO power well",
3499		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3500		.ops = &hsw_power_well_ops,
3501		.id = DISP_PW_ID_NONE,
3502		{
3503			.hsw.regs = &hsw_power_well_regs,
3504			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3505		},
3506	},
3507	{
3508		.name = "DDI B IO power well",
3509		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3510		.ops = &hsw_power_well_ops,
3511		.id = DISP_PW_ID_NONE,
3512		{
3513			.hsw.regs = &hsw_power_well_regs,
3514			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3515		},
3516	},
3517	{
3518		.name = "DDI C IO power well",
3519		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3520		.ops = &hsw_power_well_ops,
3521		.id = DISP_PW_ID_NONE,
3522		{
3523			.hsw.regs = &hsw_power_well_regs,
3524			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3525		},
3526	},
3527};
3528
3529static const struct i915_power_well_desc cnl_power_wells[] = {
3530	{
3531		.name = "always-on",
3532		.always_on = true,
3533		.domains = POWER_DOMAIN_MASK,
3534		.ops = &i9xx_always_on_power_well_ops,
3535		.id = DISP_PW_ID_NONE,
3536	},
3537	{
3538		.name = "power well 1",
3539		/* Handled by the DMC firmware */
3540		.always_on = true,
3541		.domains = 0,
3542		.ops = &hsw_power_well_ops,
3543		.id = SKL_DISP_PW_1,
3544		{
3545			.hsw.regs = &hsw_power_well_regs,
3546			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3547			.hsw.has_fuses = true,
3548		},
3549	},
3550	{
3551		.name = "AUX A",
3552		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3553		.ops = &hsw_power_well_ops,
3554		.id = DISP_PW_ID_NONE,
3555		{
3556			.hsw.regs = &hsw_power_well_regs,
3557			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3558		},
3559	},
3560	{
3561		.name = "AUX B",
3562		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3563		.ops = &hsw_power_well_ops,
3564		.id = DISP_PW_ID_NONE,
3565		{
3566			.hsw.regs = &hsw_power_well_regs,
3567			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3568		},
3569	},
3570	{
3571		.name = "AUX C",
3572		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3573		.ops = &hsw_power_well_ops,
3574		.id = DISP_PW_ID_NONE,
3575		{
3576			.hsw.regs = &hsw_power_well_regs,
3577			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3578		},
3579	},
3580	{
3581		.name = "AUX D",
3582		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3583		.ops = &hsw_power_well_ops,
3584		.id = DISP_PW_ID_NONE,
3585		{
3586			.hsw.regs = &hsw_power_well_regs,
3587			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3588		},
3589	},
3590	{
3591		.name = "DC off",
3592		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3593		.ops = &gen9_dc_off_power_well_ops,
3594		.id = SKL_DISP_DC_OFF,
3595	},
3596	{
3597		.name = "power well 2",
3598		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3599		.ops = &hsw_power_well_ops,
3600		.id = SKL_DISP_PW_2,
3601		{
3602			.hsw.regs = &hsw_power_well_regs,
3603			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3604			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3605			.hsw.has_vga = true,
3606			.hsw.has_fuses = true,
3607		},
3608	},
3609	{
3610		.name = "DDI A IO power well",
3611		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3612		.ops = &hsw_power_well_ops,
3613		.id = DISP_PW_ID_NONE,
3614		{
3615			.hsw.regs = &hsw_power_well_regs,
3616			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3617		},
3618	},
3619	{
3620		.name = "DDI B IO power well",
3621		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3622		.ops = &hsw_power_well_ops,
3623		.id = DISP_PW_ID_NONE,
3624		{
3625			.hsw.regs = &hsw_power_well_regs,
3626			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3627		},
3628	},
3629	{
3630		.name = "DDI C IO power well",
3631		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3632		.ops = &hsw_power_well_ops,
3633		.id = DISP_PW_ID_NONE,
3634		{
3635			.hsw.regs = &hsw_power_well_regs,
3636			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3637		},
3638	},
3639	{
3640		.name = "DDI D IO power well",
3641		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3642		.ops = &hsw_power_well_ops,
3643		.id = DISP_PW_ID_NONE,
3644		{
3645			.hsw.regs = &hsw_power_well_regs,
3646			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3647		},
3648	},
3649	{
3650		.name = "DDI F IO power well",
3651		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3652		.ops = &hsw_power_well_ops,
3653		.id = DISP_PW_ID_NONE,
3654		{
3655			.hsw.regs = &hsw_power_well_regs,
3656			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3657		},
3658	},
3659	{
3660		.name = "AUX F",
3661		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3662		.ops = &hsw_power_well_ops,
3663		.id = DISP_PW_ID_NONE,
3664		{
3665			.hsw.regs = &hsw_power_well_regs,
3666			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3667		},
3668	},
3669};
3670
3671static const struct i915_power_well_ops icl_aux_power_well_ops = {
3672	.sync_hw = hsw_power_well_sync_hw,
3673	.enable = icl_aux_power_well_enable,
3674	.disable = icl_aux_power_well_disable,
3675	.is_enabled = hsw_power_well_enabled,
3676};
3677
3678static const struct i915_power_well_regs icl_aux_power_well_regs = {
3679	.bios	= ICL_PWR_WELL_CTL_AUX1,
3680	.driver	= ICL_PWR_WELL_CTL_AUX2,
3681	.debug	= ICL_PWR_WELL_CTL_AUX4,
3682};
3683
3684static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3685	.bios	= ICL_PWR_WELL_CTL_DDI1,
3686	.driver	= ICL_PWR_WELL_CTL_DDI2,
3687	.debug	= ICL_PWR_WELL_CTL_DDI4,
3688};
3689
3690static const struct i915_power_well_desc icl_power_wells[] = {
3691	{
3692		.name = "always-on",
3693		.always_on = true,
3694		.domains = POWER_DOMAIN_MASK,
3695		.ops = &i9xx_always_on_power_well_ops,
3696		.id = DISP_PW_ID_NONE,
3697	},
3698	{
3699		.name = "power well 1",
3700		/* Handled by the DMC firmware */
3701		.always_on = true,
3702		.domains = 0,
3703		.ops = &hsw_power_well_ops,
3704		.id = SKL_DISP_PW_1,
3705		{
3706			.hsw.regs = &hsw_power_well_regs,
3707			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3708			.hsw.has_fuses = true,
3709		},
3710	},
3711	{
3712		.name = "DC off",
3713		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3714		.ops = &gen9_dc_off_power_well_ops,
3715		.id = SKL_DISP_DC_OFF,
3716	},
3717	{
3718		.name = "power well 2",
3719		.domains = ICL_PW_2_POWER_DOMAINS,
3720		.ops = &hsw_power_well_ops,
3721		.id = SKL_DISP_PW_2,
3722		{
3723			.hsw.regs = &hsw_power_well_regs,
3724			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3725			.hsw.has_fuses = true,
3726		},
3727	},
3728	{
3729		.name = "power well 3",
3730		.domains = ICL_PW_3_POWER_DOMAINS,
3731		.ops = &hsw_power_well_ops,
3732		.id = ICL_DISP_PW_3,
3733		{
3734			.hsw.regs = &hsw_power_well_regs,
3735			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3736			.hsw.irq_pipe_mask = BIT(PIPE_B),
3737			.hsw.has_vga = true,
3738			.hsw.has_fuses = true,
3739		},
3740	},
3741	{
3742		.name = "DDI A IO",
3743		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3744		.ops = &hsw_power_well_ops,
3745		.id = DISP_PW_ID_NONE,
3746		{
3747			.hsw.regs = &icl_ddi_power_well_regs,
3748			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3749		},
3750	},
3751	{
3752		.name = "DDI B IO",
3753		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3754		.ops = &hsw_power_well_ops,
3755		.id = DISP_PW_ID_NONE,
3756		{
3757			.hsw.regs = &icl_ddi_power_well_regs,
3758			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3759		},
3760	},
3761	{
3762		.name = "DDI C IO",
3763		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3764		.ops = &hsw_power_well_ops,
3765		.id = DISP_PW_ID_NONE,
3766		{
3767			.hsw.regs = &icl_ddi_power_well_regs,
3768			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3769		},
3770	},
3771	{
3772		.name = "DDI D IO",
3773		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3774		.ops = &hsw_power_well_ops,
3775		.id = DISP_PW_ID_NONE,
3776		{
3777			.hsw.regs = &icl_ddi_power_well_regs,
3778			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3779		},
3780	},
3781	{
3782		.name = "DDI E IO",
3783		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3784		.ops = &hsw_power_well_ops,
3785		.id = DISP_PW_ID_NONE,
3786		{
3787			.hsw.regs = &icl_ddi_power_well_regs,
3788			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3789		},
3790	},
3791	{
3792		.name = "DDI F IO",
3793		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3794		.ops = &hsw_power_well_ops,
3795		.id = DISP_PW_ID_NONE,
3796		{
3797			.hsw.regs = &icl_ddi_power_well_regs,
3798			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3799		},
3800	},
3801	{
3802		.name = "AUX A",
3803		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3804		.ops = &icl_aux_power_well_ops,
3805		.id = DISP_PW_ID_NONE,
3806		{
3807			.hsw.regs = &icl_aux_power_well_regs,
3808			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3809		},
3810	},
3811	{
3812		.name = "AUX B",
3813		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3814		.ops = &icl_aux_power_well_ops,
3815		.id = DISP_PW_ID_NONE,
3816		{
3817			.hsw.regs = &icl_aux_power_well_regs,
3818			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3819		},
3820	},
3821	{
3822		.name = "AUX C TC1",
3823		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3824		.ops = &icl_aux_power_well_ops,
3825		.id = DISP_PW_ID_NONE,
3826		{
3827			.hsw.regs = &icl_aux_power_well_regs,
3828			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3829			.hsw.is_tc_tbt = false,
3830		},
3831	},
3832	{
3833		.name = "AUX D TC2",
3834		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3835		.ops = &icl_aux_power_well_ops,
3836		.id = DISP_PW_ID_NONE,
3837		{
3838			.hsw.regs = &icl_aux_power_well_regs,
3839			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3840			.hsw.is_tc_tbt = false,
3841		},
3842	},
3843	{
3844		.name = "AUX E TC3",
3845		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3846		.ops = &icl_aux_power_well_ops,
3847		.id = DISP_PW_ID_NONE,
3848		{
3849			.hsw.regs = &icl_aux_power_well_regs,
3850			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3851			.hsw.is_tc_tbt = false,
3852		},
3853	},
3854	{
3855		.name = "AUX F TC4",
3856		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3857		.ops = &icl_aux_power_well_ops,
3858		.id = DISP_PW_ID_NONE,
3859		{
3860			.hsw.regs = &icl_aux_power_well_regs,
3861			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3862			.hsw.is_tc_tbt = false,
3863		},
3864	},
3865	{
3866		.name = "AUX C TBT1",
3867		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3868		.ops = &icl_aux_power_well_ops,
3869		.id = DISP_PW_ID_NONE,
3870		{
3871			.hsw.regs = &icl_aux_power_well_regs,
3872			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3873			.hsw.is_tc_tbt = true,
3874		},
3875	},
3876	{
3877		.name = "AUX D TBT2",
3878		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3879		.ops = &icl_aux_power_well_ops,
3880		.id = DISP_PW_ID_NONE,
3881		{
3882			.hsw.regs = &icl_aux_power_well_regs,
3883			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3884			.hsw.is_tc_tbt = true,
3885		},
3886	},
3887	{
3888		.name = "AUX E TBT3",
3889		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3890		.ops = &icl_aux_power_well_ops,
3891		.id = DISP_PW_ID_NONE,
3892		{
3893			.hsw.regs = &icl_aux_power_well_regs,
3894			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3895			.hsw.is_tc_tbt = true,
3896		},
3897	},
3898	{
3899		.name = "AUX F TBT4",
3900		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3901		.ops = &icl_aux_power_well_ops,
3902		.id = DISP_PW_ID_NONE,
3903		{
3904			.hsw.regs = &icl_aux_power_well_regs,
3905			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3906			.hsw.is_tc_tbt = true,
3907		},
3908	},
3909	{
3910		.name = "power well 4",
3911		.domains = ICL_PW_4_POWER_DOMAINS,
3912		.ops = &hsw_power_well_ops,
3913		.id = DISP_PW_ID_NONE,
3914		{
3915			.hsw.regs = &hsw_power_well_regs,
3916			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3917			.hsw.has_fuses = true,
3918			.hsw.irq_pipe_mask = BIT(PIPE_C),
3919		},
3920	},
3921};
3922
3923static void
3924tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
3925{
3926	u8 tries = 0;
3927	int ret;
3928
3929	while (1) {
3930		u32 low_val = 0, high_val;
 
3931
3932		if (block)
3933			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
3934		else
3935			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
3936
3937		/*
3938		 * Spec states that we should timeout the request after 200us
3939		 * but the function below will timeout after 500us
3940		 */
3941		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
3942					     &high_val);
3943		if (ret == 0) {
3944			if (block &&
3945			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
3946				ret = -EIO;
3947			else
3948				break;
3949		}
3950
3951		if (++tries == 3)
3952			break;
3953
3954		if (ret == -EAGAIN)
3955			msleep(1);
3956	}
3957
3958	if (ret)
3959		drm_err(&i915->drm, "TC cold %sblock failed\n",
3960			block ? "" : "un");
3961	else
3962		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
3963			    block ? "" : "un");
3964}
3965
3966static void
3967tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
3968				  struct i915_power_well *power_well)
3969{
3970	tgl_tc_cold_request(i915, true);
3971}
3972
3973static void
3974tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
3975				   struct i915_power_well *power_well)
3976{
3977	tgl_tc_cold_request(i915, false);
3978}
3979
3980static void
3981tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
3982				   struct i915_power_well *power_well)
3983{
3984	if (power_well->count > 0)
3985		tgl_tc_cold_off_power_well_enable(i915, power_well);
3986	else
3987		tgl_tc_cold_off_power_well_disable(i915, power_well);
3988}
3989
3990static bool
3991tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
3992				      struct i915_power_well *power_well)
3993{
3994	/*
3995	 * Not the correctly implementation but there is no way to just read it
3996	 * from PCODE, so returning count to avoid state mismatch errors
3997	 */
3998	return power_well->count;
3999}
4000
4001static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
4002	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
4003	.enable = tgl_tc_cold_off_power_well_enable,
4004	.disable = tgl_tc_cold_off_power_well_disable,
4005	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
4006};
4007
4008static const struct i915_power_well_desc tgl_power_wells[] = {
4009	{
4010		.name = "always-on",
4011		.always_on = true,
4012		.domains = POWER_DOMAIN_MASK,
4013		.ops = &i9xx_always_on_power_well_ops,
4014		.id = DISP_PW_ID_NONE,
4015	},
4016	{
4017		.name = "power well 1",
4018		/* Handled by the DMC firmware */
4019		.always_on = true,
4020		.domains = 0,
4021		.ops = &hsw_power_well_ops,
4022		.id = SKL_DISP_PW_1,
4023		{
4024			.hsw.regs = &hsw_power_well_regs,
4025			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4026			.hsw.has_fuses = true,
4027		},
4028	},
4029	{
4030		.name = "DC off",
4031		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
4032		.ops = &gen9_dc_off_power_well_ops,
4033		.id = SKL_DISP_DC_OFF,
4034	},
4035	{
4036		.name = "power well 2",
4037		.domains = TGL_PW_2_POWER_DOMAINS,
4038		.ops = &hsw_power_well_ops,
4039		.id = SKL_DISP_PW_2,
4040		{
4041			.hsw.regs = &hsw_power_well_regs,
4042			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4043			.hsw.has_fuses = true,
4044		},
4045	},
4046	{
4047		.name = "power well 3",
4048		.domains = TGL_PW_3_POWER_DOMAINS,
4049		.ops = &hsw_power_well_ops,
4050		.id = ICL_DISP_PW_3,
4051		{
4052			.hsw.regs = &hsw_power_well_regs,
4053			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4054			.hsw.irq_pipe_mask = BIT(PIPE_B),
4055			.hsw.has_vga = true,
4056			.hsw.has_fuses = true,
4057		},
4058	},
4059	{
4060		.name = "DDI A IO",
4061		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4062		.ops = &hsw_power_well_ops,
4063		.id = DISP_PW_ID_NONE,
4064		{
4065			.hsw.regs = &icl_ddi_power_well_regs,
4066			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4067		}
4068	},
4069	{
4070		.name = "DDI B IO",
4071		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4072		.ops = &hsw_power_well_ops,
4073		.id = DISP_PW_ID_NONE,
4074		{
4075			.hsw.regs = &icl_ddi_power_well_regs,
4076			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4077		}
4078	},
4079	{
4080		.name = "DDI C IO",
4081		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
4082		.ops = &hsw_power_well_ops,
4083		.id = DISP_PW_ID_NONE,
4084		{
4085			.hsw.regs = &icl_ddi_power_well_regs,
4086			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4087		}
4088	},
4089	{
4090		.name = "DDI D TC1 IO",
4091		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
4092		.ops = &hsw_power_well_ops,
4093		.id = DISP_PW_ID_NONE,
4094		{
4095			.hsw.regs = &icl_ddi_power_well_regs,
4096			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4097		},
4098	},
4099	{
4100		.name = "DDI E TC2 IO",
4101		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
4102		.ops = &hsw_power_well_ops,
4103		.id = DISP_PW_ID_NONE,
4104		{
4105			.hsw.regs = &icl_ddi_power_well_regs,
4106			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4107		},
4108	},
4109	{
4110		.name = "DDI F TC3 IO",
4111		.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
4112		.ops = &hsw_power_well_ops,
4113		.id = DISP_PW_ID_NONE,
4114		{
4115			.hsw.regs = &icl_ddi_power_well_regs,
4116			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4117		},
4118	},
4119	{
4120		.name = "DDI G TC4 IO",
4121		.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
4122		.ops = &hsw_power_well_ops,
4123		.id = DISP_PW_ID_NONE,
4124		{
4125			.hsw.regs = &icl_ddi_power_well_regs,
4126			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4127		},
4128	},
4129	{
4130		.name = "DDI H TC5 IO",
4131		.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
4132		.ops = &hsw_power_well_ops,
4133		.id = DISP_PW_ID_NONE,
4134		{
4135			.hsw.regs = &icl_ddi_power_well_regs,
4136			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4137		},
4138	},
4139	{
4140		.name = "DDI I TC6 IO",
4141		.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
4142		.ops = &hsw_power_well_ops,
4143		.id = DISP_PW_ID_NONE,
4144		{
4145			.hsw.regs = &icl_ddi_power_well_regs,
4146			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4147		},
4148	},
4149	{
4150		.name = "TC cold off",
4151		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4152		.ops = &tgl_tc_cold_off_ops,
4153		.id = DISP_PW_ID_NONE,
4154	},
4155	{
4156		.name = "AUX A",
4157		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4158		.ops = &icl_aux_power_well_ops,
4159		.id = DISP_PW_ID_NONE,
4160		{
4161			.hsw.regs = &icl_aux_power_well_regs,
4162			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4163		},
4164	},
4165	{
4166		.name = "AUX B",
4167		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4168		.ops = &icl_aux_power_well_ops,
4169		.id = DISP_PW_ID_NONE,
4170		{
4171			.hsw.regs = &icl_aux_power_well_regs,
4172			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4173		},
4174	},
4175	{
4176		.name = "AUX C",
4177		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4178		.ops = &icl_aux_power_well_ops,
4179		.id = DISP_PW_ID_NONE,
4180		{
4181			.hsw.regs = &icl_aux_power_well_regs,
4182			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4183		},
4184	},
4185	{
4186		.name = "AUX D TC1",
4187		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4188		.ops = &icl_aux_power_well_ops,
4189		.id = DISP_PW_ID_NONE,
4190		{
4191			.hsw.regs = &icl_aux_power_well_regs,
4192			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4193			.hsw.is_tc_tbt = false,
4194		},
4195	},
4196	{
4197		.name = "AUX E TC2",
4198		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4199		.ops = &icl_aux_power_well_ops,
4200		.id = DISP_PW_ID_NONE,
4201		{
4202			.hsw.regs = &icl_aux_power_well_regs,
4203			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4204			.hsw.is_tc_tbt = false,
4205		},
4206	},
4207	{
4208		.name = "AUX F TC3",
4209		.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4210		.ops = &icl_aux_power_well_ops,
4211		.id = DISP_PW_ID_NONE,
4212		{
4213			.hsw.regs = &icl_aux_power_well_regs,
4214			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4215			.hsw.is_tc_tbt = false,
4216		},
4217	},
4218	{
4219		.name = "AUX G TC4",
4220		.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4221		.ops = &icl_aux_power_well_ops,
4222		.id = DISP_PW_ID_NONE,
4223		{
4224			.hsw.regs = &icl_aux_power_well_regs,
4225			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4226			.hsw.is_tc_tbt = false,
4227		},
4228	},
4229	{
4230		.name = "AUX H TC5",
4231		.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4232		.ops = &icl_aux_power_well_ops,
4233		.id = DISP_PW_ID_NONE,
4234		{
4235			.hsw.regs = &icl_aux_power_well_regs,
4236			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4237			.hsw.is_tc_tbt = false,
4238		},
4239	},
4240	{
4241		.name = "AUX I TC6",
4242		.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4243		.ops = &icl_aux_power_well_ops,
4244		.id = DISP_PW_ID_NONE,
4245		{
4246			.hsw.regs = &icl_aux_power_well_regs,
4247			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4248			.hsw.is_tc_tbt = false,
4249		},
4250	},
4251	{
4252		.name = "AUX D TBT1",
4253		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4254		.ops = &icl_aux_power_well_ops,
4255		.id = DISP_PW_ID_NONE,
4256		{
4257			.hsw.regs = &icl_aux_power_well_regs,
4258			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4259			.hsw.is_tc_tbt = true,
4260		},
4261	},
4262	{
4263		.name = "AUX E TBT2",
4264		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4265		.ops = &icl_aux_power_well_ops,
4266		.id = DISP_PW_ID_NONE,
4267		{
4268			.hsw.regs = &icl_aux_power_well_regs,
4269			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4270			.hsw.is_tc_tbt = true,
4271		},
4272	},
4273	{
4274		.name = "AUX F TBT3",
4275		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4276		.ops = &icl_aux_power_well_ops,
4277		.id = DISP_PW_ID_NONE,
4278		{
4279			.hsw.regs = &icl_aux_power_well_regs,
4280			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4281			.hsw.is_tc_tbt = true,
4282		},
4283	},
4284	{
4285		.name = "AUX G TBT4",
4286		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4287		.ops = &icl_aux_power_well_ops,
4288		.id = DISP_PW_ID_NONE,
4289		{
4290			.hsw.regs = &icl_aux_power_well_regs,
4291			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4292			.hsw.is_tc_tbt = true,
4293		},
4294	},
4295	{
4296		.name = "AUX H TBT5",
4297		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4298		.ops = &icl_aux_power_well_ops,
4299		.id = DISP_PW_ID_NONE,
4300		{
4301			.hsw.regs = &icl_aux_power_well_regs,
4302			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4303			.hsw.is_tc_tbt = true,
4304		},
4305	},
4306	{
4307		.name = "AUX I TBT6",
4308		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4309		.ops = &icl_aux_power_well_ops,
4310		.id = DISP_PW_ID_NONE,
4311		{
4312			.hsw.regs = &icl_aux_power_well_regs,
4313			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4314			.hsw.is_tc_tbt = true,
4315		},
4316	},
4317	{
4318		.name = "power well 4",
4319		.domains = TGL_PW_4_POWER_DOMAINS,
4320		.ops = &hsw_power_well_ops,
4321		.id = DISP_PW_ID_NONE,
4322		{
4323			.hsw.regs = &hsw_power_well_regs,
4324			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4325			.hsw.has_fuses = true,
4326			.hsw.irq_pipe_mask = BIT(PIPE_C),
4327		}
4328	},
4329	{
4330		.name = "power well 5",
4331		.domains = TGL_PW_5_POWER_DOMAINS,
4332		.ops = &hsw_power_well_ops,
4333		.id = DISP_PW_ID_NONE,
4334		{
4335			.hsw.regs = &hsw_power_well_regs,
4336			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4337			.hsw.has_fuses = true,
4338			.hsw.irq_pipe_mask = BIT(PIPE_D),
4339		},
4340	},
4341};
4342
4343static const struct i915_power_well_desc rkl_power_wells[] = {
4344	{
4345		.name = "always-on",
4346		.always_on = true,
4347		.domains = POWER_DOMAIN_MASK,
4348		.ops = &i9xx_always_on_power_well_ops,
4349		.id = DISP_PW_ID_NONE,
4350	},
4351	{
4352		.name = "power well 1",
4353		/* Handled by the DMC firmware */
4354		.always_on = true,
4355		.domains = 0,
4356		.ops = &hsw_power_well_ops,
4357		.id = SKL_DISP_PW_1,
4358		{
4359			.hsw.regs = &hsw_power_well_regs,
4360			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4361			.hsw.has_fuses = true,
4362		},
4363	},
4364	{
4365		.name = "DC off",
4366		.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
4367		.ops = &gen9_dc_off_power_well_ops,
4368		.id = SKL_DISP_DC_OFF,
4369	},
4370	{
4371		.name = "power well 3",
4372		.domains = RKL_PW_3_POWER_DOMAINS,
4373		.ops = &hsw_power_well_ops,
4374		.id = ICL_DISP_PW_3,
4375		{
4376			.hsw.regs = &hsw_power_well_regs,
4377			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4378			.hsw.irq_pipe_mask = BIT(PIPE_B),
4379			.hsw.has_vga = true,
4380			.hsw.has_fuses = true,
4381		},
4382	},
4383	{
4384		.name = "power well 4",
4385		.domains = RKL_PW_4_POWER_DOMAINS,
4386		.ops = &hsw_power_well_ops,
4387		.id = DISP_PW_ID_NONE,
4388		{
4389			.hsw.regs = &hsw_power_well_regs,
4390			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4391			.hsw.has_fuses = true,
4392			.hsw.irq_pipe_mask = BIT(PIPE_C),
4393		}
4394	},
4395	{
4396		.name = "DDI A IO",
4397		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4398		.ops = &hsw_power_well_ops,
4399		.id = DISP_PW_ID_NONE,
4400		{
4401			.hsw.regs = &icl_ddi_power_well_regs,
4402			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4403		}
4404	},
4405	{
4406		.name = "DDI B IO",
4407		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4408		.ops = &hsw_power_well_ops,
4409		.id = DISP_PW_ID_NONE,
4410		{
4411			.hsw.regs = &icl_ddi_power_well_regs,
4412			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4413		}
4414	},
4415	{
4416		.name = "DDI D TC1 IO",
4417		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
4418		.ops = &hsw_power_well_ops,
4419		.id = DISP_PW_ID_NONE,
4420		{
4421			.hsw.regs = &icl_ddi_power_well_regs,
4422			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4423		},
4424	},
4425	{
4426		.name = "DDI E TC2 IO",
4427		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
4428		.ops = &hsw_power_well_ops,
4429		.id = DISP_PW_ID_NONE,
4430		{
4431			.hsw.regs = &icl_ddi_power_well_regs,
4432			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4433		},
4434	},
4435	{
4436		.name = "AUX A",
4437		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
4438		.ops = &icl_aux_power_well_ops,
4439		.id = DISP_PW_ID_NONE,
4440		{
4441			.hsw.regs = &icl_aux_power_well_regs,
4442			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4443		},
4444	},
4445	{
4446		.name = "AUX B",
4447		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
4448		.ops = &icl_aux_power_well_ops,
4449		.id = DISP_PW_ID_NONE,
4450		{
4451			.hsw.regs = &icl_aux_power_well_regs,
4452			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4453		},
4454	},
4455	{
4456		.name = "AUX D TC1",
4457		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4458		.ops = &icl_aux_power_well_ops,
4459		.id = DISP_PW_ID_NONE,
4460		{
4461			.hsw.regs = &icl_aux_power_well_regs,
4462			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4463		},
4464	},
4465	{
4466		.name = "AUX E TC2",
4467		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4468		.ops = &icl_aux_power_well_ops,
4469		.id = DISP_PW_ID_NONE,
4470		{
4471			.hsw.regs = &icl_aux_power_well_regs,
4472			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4473		},
4474	},
4475};
4476
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4477static int
4478sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4479				   int disable_power_well)
4480{
4481	if (disable_power_well >= 0)
4482		return !!disable_power_well;
4483
4484	return 1;
4485}
4486
4487static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4488			       int enable_dc)
4489{
4490	u32 mask;
4491	int requested_dc;
4492	int max_dc;
4493
4494	if (INTEL_GEN(dev_priv) >= 12) {
 
 
 
 
 
4495		max_dc = 4;
4496		/*
4497		 * DC9 has a separate HW flow from the rest of the DC states,
4498		 * not depending on the DMC firmware. It's needed by system
4499		 * suspend/resume, so allow it unconditionally.
4500		 */
4501		mask = DC_STATE_EN_DC9;
4502	} else if (IS_GEN(dev_priv, 11)) {
4503		max_dc = 2;
4504		mask = DC_STATE_EN_DC9;
4505	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4506		max_dc = 2;
4507		mask = 0;
4508	} else if (IS_GEN9_LP(dev_priv)) {
4509		max_dc = 1;
4510		mask = DC_STATE_EN_DC9;
4511	} else {
4512		max_dc = 0;
4513		mask = 0;
4514	}
 
 
 
 
 
 
 
4515
4516	if (!dev_priv->params.disable_power_well)
4517		max_dc = 0;
4518
4519	if (enable_dc >= 0 && enable_dc <= max_dc) {
4520		requested_dc = enable_dc;
4521	} else if (enable_dc == -1) {
4522		requested_dc = max_dc;
4523	} else if (enable_dc > max_dc && enable_dc <= 4) {
4524		drm_dbg_kms(&dev_priv->drm,
4525			    "Adjusting requested max DC state (%d->%d)\n",
4526			    enable_dc, max_dc);
4527		requested_dc = max_dc;
4528	} else {
4529		drm_err(&dev_priv->drm,
4530			"Unexpected value for enable_dc (%d)\n", enable_dc);
4531		requested_dc = max_dc;
4532	}
4533
4534	switch (requested_dc) {
4535	case 4:
4536		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4537		break;
4538	case 3:
4539		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4540		break;
4541	case 2:
4542		mask |= DC_STATE_EN_UPTO_DC6;
4543		break;
4544	case 1:
4545		mask |= DC_STATE_EN_UPTO_DC5;
4546		break;
4547	}
4548
4549	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
4550
4551	return mask;
4552}
4553
4554static int
4555__set_power_wells(struct i915_power_domains *power_domains,
4556		  const struct i915_power_well_desc *power_well_descs,
4557		  int power_well_count)
4558{
4559	struct drm_i915_private *i915 = container_of(power_domains,
4560						     struct drm_i915_private,
4561						     power_domains);
4562	u64 power_well_ids = 0;
4563	int i;
 
 
 
 
 
4564
4565	power_domains->power_well_count = power_well_count;
4566	power_domains->power_wells =
4567				kcalloc(power_well_count,
4568					sizeof(*power_domains->power_wells),
4569					GFP_KERNEL);
4570	if (!power_domains->power_wells)
4571		return -ENOMEM;
4572
4573	for (i = 0; i < power_well_count; i++) {
4574		enum i915_power_well_id id = power_well_descs[i].id;
4575
4576		power_domains->power_wells[i].desc = &power_well_descs[i];
 
 
 
 
4577
4578		if (id == DISP_PW_ID_NONE)
4579			continue;
4580
4581		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
4582		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
4583		power_well_ids |= BIT_ULL(id);
4584	}
4585
4586	return 0;
4587}
4588
 
 
 
 
4589#define set_power_wells(power_domains, __power_well_descs) \
4590	__set_power_wells(power_domains, __power_well_descs, \
4591			  ARRAY_SIZE(__power_well_descs))
4592
4593/**
4594 * intel_power_domains_init - initializes the power domain structures
4595 * @dev_priv: i915 device instance
4596 *
4597 * Initializes the power domain structures for @dev_priv depending upon the
4598 * supported platform.
4599 */
4600int intel_power_domains_init(struct drm_i915_private *dev_priv)
4601{
4602	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4603	int err;
4604
4605	dev_priv->params.disable_power_well =
4606		sanitize_disable_power_well_option(dev_priv,
4607						   dev_priv->params.disable_power_well);
4608	dev_priv->csr.allowed_dc_mask =
4609		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
4610
4611	dev_priv->csr.target_dc_state =
4612		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4613
4614	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4615
4616	mutex_init(&power_domains->lock);
4617
4618	INIT_DELAYED_WORK(&power_domains->async_put_work,
4619			  intel_display_power_put_async_work);
4620
4621	/*
4622	 * The enabling order will be from lower to higher indexed wells,
4623	 * the disabling order is reversed.
4624	 */
4625	if (IS_ROCKETLAKE(dev_priv)) {
 
 
 
 
 
 
 
 
4626		err = set_power_wells(power_domains, rkl_power_wells);
4627	} else if (IS_GEN(dev_priv, 12)) {
4628		err = set_power_wells(power_domains, tgl_power_wells);
4629	} else if (IS_GEN(dev_priv, 11)) {
4630		err = set_power_wells(power_domains, icl_power_wells);
 
 
4631	} else if (IS_CANNONLAKE(dev_priv)) {
4632		err = set_power_wells(power_domains, cnl_power_wells);
4633
4634		/*
4635		 * DDI and Aux IO are getting enabled for all ports
4636		 * regardless the presence or use. So, in order to avoid
4637		 * timeouts, lets remove them from the list
4638		 * for the SKUs without port F.
4639		 */
4640		if (!IS_CNL_WITH_PORT_F(dev_priv))
4641			power_domains->power_well_count -= 2;
4642	} else if (IS_GEMINILAKE(dev_priv)) {
4643		err = set_power_wells(power_domains, glk_power_wells);
4644	} else if (IS_BROXTON(dev_priv)) {
4645		err = set_power_wells(power_domains, bxt_power_wells);
4646	} else if (IS_GEN9_BC(dev_priv)) {
4647		err = set_power_wells(power_domains, skl_power_wells);
4648	} else if (IS_CHERRYVIEW(dev_priv)) {
4649		err = set_power_wells(power_domains, chv_power_wells);
4650	} else if (IS_BROADWELL(dev_priv)) {
4651		err = set_power_wells(power_domains, bdw_power_wells);
4652	} else if (IS_HASWELL(dev_priv)) {
4653		err = set_power_wells(power_domains, hsw_power_wells);
4654	} else if (IS_VALLEYVIEW(dev_priv)) {
4655		err = set_power_wells(power_domains, vlv_power_wells);
4656	} else if (IS_I830(dev_priv)) {
4657		err = set_power_wells(power_domains, i830_power_wells);
4658	} else {
4659		err = set_power_wells(power_domains, i9xx_always_on_power_well);
4660	}
4661
4662	return err;
4663}
4664
4665/**
4666 * intel_power_domains_cleanup - clean up power domains resources
4667 * @dev_priv: i915 device instance
4668 *
4669 * Release any resources acquired by intel_power_domains_init()
4670 */
4671void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4672{
4673	kfree(dev_priv->power_domains.power_wells);
4674}
4675
4676static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4677{
4678	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4679	struct i915_power_well *power_well;
4680
4681	mutex_lock(&power_domains->lock);
4682	for_each_power_well(dev_priv, power_well) {
4683		power_well->desc->ops->sync_hw(dev_priv, power_well);
4684		power_well->hw_enabled =
4685			power_well->desc->ops->is_enabled(dev_priv, power_well);
4686	}
4687	mutex_unlock(&power_domains->lock);
4688}
4689
4690static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
4691				enum dbuf_slice slice, bool enable)
4692{
4693	i915_reg_t reg = DBUF_CTL_S(slice);
4694	bool state;
4695	u32 val;
4696
4697	val = intel_de_read(dev_priv, reg);
4698	if (enable)
4699		val |= DBUF_POWER_REQUEST;
4700	else
4701		val &= ~DBUF_POWER_REQUEST;
4702	intel_de_write(dev_priv, reg, val);
4703	intel_de_posting_read(dev_priv, reg);
4704	udelay(10);
4705
4706	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
4707	drm_WARN(&dev_priv->drm, enable != state,
4708		 "DBuf slice %d power %s timeout!\n",
4709		 slice, enable ? "enable" : "disable");
4710}
4711
4712void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
4713			     u8 req_slices)
4714{
4715	int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4716	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
4717	enum dbuf_slice slice;
4718
4719	drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
4720		 "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
4721		 req_slices, num_slices);
4722
4723	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
4724		    req_slices);
4725
4726	/*
4727	 * Might be running this in parallel to gen9_dc_off_power_well_enable
4728	 * being called from intel_dp_detect for instance,
4729	 * which causes assertion triggered by race condition,
4730	 * as gen9_assert_dbuf_enabled might preempt this when registers
4731	 * were already updated, while dev_priv was not.
4732	 */
4733	mutex_lock(&power_domains->lock);
4734
4735	for (slice = DBUF_S1; slice < num_slices; slice++)
4736		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
4737
4738	dev_priv->dbuf.enabled_slices = req_slices;
4739
4740	mutex_unlock(&power_domains->lock);
4741}
4742
4743static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4744{
4745	dev_priv->dbuf.enabled_slices =
4746		intel_enabled_dbuf_slices_mask(dev_priv);
4747
4748	/*
4749	 * Just power up at least 1 slice, we will
4750	 * figure out later which slices we have and what we need.
4751	 */
4752	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
4753				dev_priv->dbuf.enabled_slices);
4754}
4755
4756static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4757{
4758	gen9_dbuf_slices_update(dev_priv, 0);
4759}
4760
 
 
 
 
 
 
 
 
 
 
 
 
 
4761static void icl_mbus_init(struct drm_i915_private *dev_priv)
4762{
4763	unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
4764	u32 mask, val, i;
4765
 
 
 
4766	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
4767		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
4768		MBUS_ABOX_B_CREDIT_MASK |
4769		MBUS_ABOX_BW_CREDIT_MASK;
4770	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4771		MBUS_ABOX_BT_CREDIT_POOL2(16) |
4772		MBUS_ABOX_B_CREDIT(1) |
4773		MBUS_ABOX_BW_CREDIT(1);
4774
4775	/*
4776	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
4777	 * expect us to program the abox_ctl0 register as well, even though
4778	 * we don't have to program other instance-0 registers like BW_BUDDY.
4779	 */
4780	if (IS_GEN(dev_priv, 12))
4781		abox_regs |= BIT(0);
4782
4783	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
4784		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
4785}
4786
4787static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4788{
4789	u32 val = intel_de_read(dev_priv, LCPLL_CTL);
4790
4791	/*
4792	 * The LCPLL register should be turned on by the BIOS. For now
4793	 * let's just check its state and print errors in case
4794	 * something is wrong.  Don't even try to turn it on.
4795	 */
4796
4797	if (val & LCPLL_CD_SOURCE_FCLK)
4798		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
4799
4800	if (val & LCPLL_PLL_DISABLE)
4801		drm_err(&dev_priv->drm, "LCPLL is disabled\n");
4802
4803	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4804		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
4805}
4806
4807static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4808{
4809	struct drm_device *dev = &dev_priv->drm;
4810	struct intel_crtc *crtc;
4811
4812	for_each_intel_crtc(dev, crtc)
4813		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4814				pipe_name(crtc->pipe));
4815
4816	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
4817			"Display power well on\n");
4818	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
4819			"SPLL enabled\n");
4820	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4821			"WRPLL1 enabled\n");
4822	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4823			"WRPLL2 enabled\n");
4824	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
4825			"Panel power on\n");
4826	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4827			"CPU PWM1 enabled\n");
4828	if (IS_HASWELL(dev_priv))
4829		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4830				"CPU PWM2 enabled\n");
4831	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4832			"PCH PWM1 enabled\n");
4833	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4834			"Utility pin enabled\n");
4835	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
4836			"PCH GTC enabled\n");
4837
4838	/*
4839	 * In theory we can still leave IRQs enabled, as long as only the HPD
4840	 * interrupts remain enabled. We used to check for that, but since it's
4841	 * gen-specific and since we only disable LCPLL after we fully disable
4842	 * the interrupts, the check below should be enough.
4843	 */
4844	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4845}
4846
4847static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4848{
4849	if (IS_HASWELL(dev_priv))
4850		return intel_de_read(dev_priv, D_COMP_HSW);
4851	else
4852		return intel_de_read(dev_priv, D_COMP_BDW);
4853}
4854
4855static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4856{
4857	if (IS_HASWELL(dev_priv)) {
4858		if (sandybridge_pcode_write(dev_priv,
4859					    GEN6_PCODE_WRITE_D_COMP, val))
4860			drm_dbg_kms(&dev_priv->drm,
4861				    "Failed to write to D_COMP\n");
4862	} else {
4863		intel_de_write(dev_priv, D_COMP_BDW, val);
4864		intel_de_posting_read(dev_priv, D_COMP_BDW);
4865	}
4866}
4867
4868/*
4869 * This function implements pieces of two sequences from BSpec:
4870 * - Sequence for display software to disable LCPLL
4871 * - Sequence for display software to allow package C8+
4872 * The steps implemented here are just the steps that actually touch the LCPLL
4873 * register. Callers should take care of disabling all the display engine
4874 * functions, doing the mode unset, fixing interrupts, etc.
4875 */
4876static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4877			      bool switch_to_fclk, bool allow_power_down)
4878{
4879	u32 val;
4880
4881	assert_can_disable_lcpll(dev_priv);
4882
4883	val = intel_de_read(dev_priv, LCPLL_CTL);
4884
4885	if (switch_to_fclk) {
4886		val |= LCPLL_CD_SOURCE_FCLK;
4887		intel_de_write(dev_priv, LCPLL_CTL, val);
4888
4889		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4890				LCPLL_CD_SOURCE_FCLK_DONE, 1))
4891			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
4892
4893		val = intel_de_read(dev_priv, LCPLL_CTL);
4894	}
4895
4896	val |= LCPLL_PLL_DISABLE;
4897	intel_de_write(dev_priv, LCPLL_CTL, val);
4898	intel_de_posting_read(dev_priv, LCPLL_CTL);
4899
4900	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4901		drm_err(&dev_priv->drm, "LCPLL still locked\n");
4902
4903	val = hsw_read_dcomp(dev_priv);
4904	val |= D_COMP_COMP_DISABLE;
4905	hsw_write_dcomp(dev_priv, val);
4906	ndelay(100);
4907
4908	if (wait_for((hsw_read_dcomp(dev_priv) &
4909		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4910		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
4911
4912	if (allow_power_down) {
4913		val = intel_de_read(dev_priv, LCPLL_CTL);
4914		val |= LCPLL_POWER_DOWN_ALLOW;
4915		intel_de_write(dev_priv, LCPLL_CTL, val);
4916		intel_de_posting_read(dev_priv, LCPLL_CTL);
4917	}
4918}
4919
4920/*
4921 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4922 * source.
4923 */
4924static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4925{
4926	u32 val;
4927
4928	val = intel_de_read(dev_priv, LCPLL_CTL);
4929
4930	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4931		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4932		return;
4933
4934	/*
4935	 * Make sure we're not on PC8 state before disabling PC8, otherwise
4936	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4937	 */
4938	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4939
4940	if (val & LCPLL_POWER_DOWN_ALLOW) {
4941		val &= ~LCPLL_POWER_DOWN_ALLOW;
4942		intel_de_write(dev_priv, LCPLL_CTL, val);
4943		intel_de_posting_read(dev_priv, LCPLL_CTL);
4944	}
4945
4946	val = hsw_read_dcomp(dev_priv);
4947	val |= D_COMP_COMP_FORCE;
4948	val &= ~D_COMP_COMP_DISABLE;
4949	hsw_write_dcomp(dev_priv, val);
4950
4951	val = intel_de_read(dev_priv, LCPLL_CTL);
4952	val &= ~LCPLL_PLL_DISABLE;
4953	intel_de_write(dev_priv, LCPLL_CTL, val);
4954
4955	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4956		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
4957
4958	if (val & LCPLL_CD_SOURCE_FCLK) {
4959		val = intel_de_read(dev_priv, LCPLL_CTL);
4960		val &= ~LCPLL_CD_SOURCE_FCLK;
4961		intel_de_write(dev_priv, LCPLL_CTL, val);
4962
4963		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4964				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4965			drm_err(&dev_priv->drm,
4966				"Switching back to LCPLL failed\n");
4967	}
4968
4969	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4970
4971	intel_update_cdclk(dev_priv);
4972	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4973}
4974
4975/*
4976 * Package states C8 and deeper are really deep PC states that can only be
4977 * reached when all the devices on the system allow it, so even if the graphics
4978 * device allows PC8+, it doesn't mean the system will actually get to these
4979 * states. Our driver only allows PC8+ when going into runtime PM.
4980 *
4981 * The requirements for PC8+ are that all the outputs are disabled, the power
4982 * well is disabled and most interrupts are disabled, and these are also
4983 * requirements for runtime PM. When these conditions are met, we manually do
4984 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4985 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
4986 * hang the machine.
4987 *
4988 * When we really reach PC8 or deeper states (not just when we allow it) we lose
4989 * the state of some registers, so when we come back from PC8+ we need to
4990 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4991 * need to take care of the registers kept by RC6. Notice that this happens even
4992 * if we don't put the device in PCI D3 state (which is what currently happens
4993 * because of the runtime PM support).
4994 *
4995 * For more, read "Display Sequences for Package C8" on the hardware
4996 * documentation.
4997 */
4998static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4999{
5000	u32 val;
5001
5002	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
5003
5004	if (HAS_PCH_LPT_LP(dev_priv)) {
5005		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5006		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5007		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5008	}
5009
5010	lpt_disable_clkout_dp(dev_priv);
5011	hsw_disable_lcpll(dev_priv, true, true);
5012}
5013
5014static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
5015{
5016	u32 val;
5017
5018	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
5019
5020	hsw_restore_lcpll(dev_priv);
5021	intel_init_pch_refclk(dev_priv);
5022
5023	if (HAS_PCH_LPT_LP(dev_priv)) {
5024		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5025		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
5026		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5027	}
5028}
5029
5030static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
5031				      bool enable)
5032{
5033	i915_reg_t reg;
5034	u32 reset_bits, val;
5035
5036	if (IS_IVYBRIDGE(dev_priv)) {
5037		reg = GEN7_MSG_CTL;
5038		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
5039	} else {
5040		reg = HSW_NDE_RSTWRN_OPT;
5041		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
5042	}
5043
5044	val = intel_de_read(dev_priv, reg);
5045
5046	if (enable)
5047		val |= reset_bits;
5048	else
5049		val &= ~reset_bits;
5050
5051	intel_de_write(dev_priv, reg, val);
5052}
5053
5054static void skl_display_core_init(struct drm_i915_private *dev_priv,
5055				  bool resume)
5056{
5057	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5058	struct i915_power_well *well;
5059
5060	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5061
5062	/* enable PCH reset handshake */
5063	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5064
 
 
 
5065	/* enable PG1 and Misc I/O */
5066	mutex_lock(&power_domains->lock);
5067
5068	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5069	intel_power_well_enable(dev_priv, well);
5070
5071	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
5072	intel_power_well_enable(dev_priv, well);
5073
5074	mutex_unlock(&power_domains->lock);
5075
5076	intel_cdclk_init_hw(dev_priv);
5077
5078	gen9_dbuf_enable(dev_priv);
5079
5080	if (resume && dev_priv->csr.dmc_payload)
5081		intel_csr_load_program(dev_priv);
5082}
5083
5084static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
5085{
5086	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5087	struct i915_power_well *well;
5088
 
 
 
5089	gen9_disable_dc_states(dev_priv);
5090
5091	gen9_dbuf_disable(dev_priv);
5092
5093	intel_cdclk_uninit_hw(dev_priv);
5094
5095	/* The spec doesn't call for removing the reset handshake flag */
5096	/* disable PG1 and Misc I/O */
5097
5098	mutex_lock(&power_domains->lock);
5099
5100	/*
5101	 * BSpec says to keep the MISC IO power well enabled here, only
5102	 * remove our request for power well 1.
5103	 * Note that even though the driver's request is removed power well 1
5104	 * may stay enabled after this due to DMC's own request on it.
5105	 */
5106	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5107	intel_power_well_disable(dev_priv, well);
5108
5109	mutex_unlock(&power_domains->lock);
5110
5111	usleep_range(10, 30);		/* 10 us delay per Bspec */
5112}
5113
5114static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5115{
5116	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5117	struct i915_power_well *well;
5118
5119	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5120
5121	/*
5122	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5123	 * or else the reset will hang because there is no PCH to respond.
5124	 * Move the handshake programming to initialization sequence.
5125	 * Previously was left up to BIOS.
5126	 */
5127	intel_pch_reset_handshake(dev_priv, false);
5128
 
 
 
5129	/* Enable PG1 */
5130	mutex_lock(&power_domains->lock);
5131
5132	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5133	intel_power_well_enable(dev_priv, well);
5134
5135	mutex_unlock(&power_domains->lock);
5136
5137	intel_cdclk_init_hw(dev_priv);
5138
5139	gen9_dbuf_enable(dev_priv);
5140
5141	if (resume && dev_priv->csr.dmc_payload)
5142		intel_csr_load_program(dev_priv);
5143}
5144
5145static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
5146{
5147	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5148	struct i915_power_well *well;
5149
 
 
 
5150	gen9_disable_dc_states(dev_priv);
5151
5152	gen9_dbuf_disable(dev_priv);
5153
5154	intel_cdclk_uninit_hw(dev_priv);
5155
5156	/* The spec doesn't call for removing the reset handshake flag */
5157
5158	/*
5159	 * Disable PW1 (PG1).
5160	 * Note that even though the driver's request is removed power well 1
5161	 * may stay enabled after this due to DMC's own request on it.
5162	 */
5163	mutex_lock(&power_domains->lock);
5164
5165	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5166	intel_power_well_disable(dev_priv, well);
5167
5168	mutex_unlock(&power_domains->lock);
5169
5170	usleep_range(10, 30);		/* 10 us delay per Bspec */
5171}
5172
5173static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5174{
5175	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5176	struct i915_power_well *well;
5177
5178	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5179
5180	/* 1. Enable PCH Reset Handshake */
5181	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5182
 
 
 
5183	/* 2-3. */
5184	intel_combo_phy_init(dev_priv);
5185
5186	/*
5187	 * 4. Enable Power Well 1 (PG1).
5188	 *    The AUX IO power wells will be enabled on demand.
5189	 */
5190	mutex_lock(&power_domains->lock);
5191	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5192	intel_power_well_enable(dev_priv, well);
5193	mutex_unlock(&power_domains->lock);
5194
5195	/* 5. Enable CD clock */
5196	intel_cdclk_init_hw(dev_priv);
5197
5198	/* 6. Enable DBUF */
5199	gen9_dbuf_enable(dev_priv);
5200
5201	if (resume && dev_priv->csr.dmc_payload)
5202		intel_csr_load_program(dev_priv);
5203}
5204
5205static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
5206{
5207	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5208	struct i915_power_well *well;
5209
 
 
 
5210	gen9_disable_dc_states(dev_priv);
5211
5212	/* 1. Disable all display engine functions -> aready done */
5213
5214	/* 2. Disable DBUF */
5215	gen9_dbuf_disable(dev_priv);
5216
5217	/* 3. Disable CD clock */
5218	intel_cdclk_uninit_hw(dev_priv);
5219
5220	/*
5221	 * 4. Disable Power Well 1 (PG1).
5222	 *    The AUX IO power wells are toggled on demand, so they are already
5223	 *    disabled at this point.
5224	 */
5225	mutex_lock(&power_domains->lock);
5226	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5227	intel_power_well_disable(dev_priv, well);
5228	mutex_unlock(&power_domains->lock);
5229
5230	usleep_range(10, 30);		/* 10 us delay per Bspec */
5231
5232	/* 5. */
5233	intel_combo_phy_uninit(dev_priv);
5234}
5235
5236struct buddy_page_mask {
5237	u32 page_mask;
5238	u8 type;
5239	u8 num_channels;
5240};
5241
5242static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5243	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
 
5244	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
 
5245	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
 
5246	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
 
5247	{}
5248};
5249
5250static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5251	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5252	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
 
 
5253	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5254	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
 
 
5255	{}
5256};
5257
5258static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5259{
5260	enum intel_dram_type type = dev_priv->dram_info.type;
5261	u8 num_channels = dev_priv->dram_info.num_channels;
5262	const struct buddy_page_mask *table;
5263	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
5264	int config, i;
5265
5266	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
5267		/* Wa_1409767108: tgl */
 
 
5268		table = wa_1409767108_buddy_page_masks;
5269	else
5270		table = tgl_buddy_page_masks;
5271
5272	for (config = 0; table[config].page_mask != 0; config++)
5273		if (table[config].num_channels == num_channels &&
5274		    table[config].type == type)
5275			break;
5276
5277	if (table[config].page_mask == 0) {
5278		drm_dbg(&dev_priv->drm,
5279			"Unknown memory configuration; disabling address buddy logic.\n");
5280		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
5281			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
5282				       BW_BUDDY_DISABLE);
5283	} else {
5284		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
5285			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
5286				       table[config].page_mask);
5287
5288			/* Wa_22010178259:tgl,rkl */
5289			intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
5290				     BW_BUDDY_TLB_REQ_TIMER_MASK,
5291				     BW_BUDDY_TLB_REQ_TIMER(0x8));
5292		}
5293	}
5294}
5295
5296static void icl_display_core_init(struct drm_i915_private *dev_priv,
5297				  bool resume)
5298{
5299	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5300	struct i915_power_well *well;
5301	u32 val;
5302
5303	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5304
 
 
 
 
 
 
5305	/* 1. Enable PCH reset handshake. */
5306	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5307
 
 
 
5308	/* 2. Initialize all combo phys */
5309	intel_combo_phy_init(dev_priv);
5310
5311	/*
5312	 * 3. Enable Power Well 1 (PG1).
5313	 *    The AUX IO power wells will be enabled on demand.
5314	 */
5315	mutex_lock(&power_domains->lock);
5316	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5317	intel_power_well_enable(dev_priv, well);
5318	mutex_unlock(&power_domains->lock);
5319
5320	/* 4. Enable CDCLK. */
5321	intel_cdclk_init_hw(dev_priv);
5322
 
 
 
5323	/* 5. Enable DBUF. */
5324	gen9_dbuf_enable(dev_priv);
5325
5326	/* 6. Setup MBUS. */
5327	icl_mbus_init(dev_priv);
5328
5329	/* 7. Program arbiter BW_BUDDY registers */
5330	if (INTEL_GEN(dev_priv) >= 12)
5331		tgl_bw_buddy_init(dev_priv);
5332
5333	if (resume && dev_priv->csr.dmc_payload)
5334		intel_csr_load_program(dev_priv);
5335
5336	/* Wa_14011508470 */
5337	if (IS_GEN(dev_priv, 12)) {
5338		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
5339		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
5340		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
5341	}
 
 
 
 
5342}
5343
5344static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5345{
5346	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5347	struct i915_power_well *well;
5348
 
 
 
5349	gen9_disable_dc_states(dev_priv);
5350
5351	/* 1. Disable all display engine functions -> aready done */
5352
5353	/* 2. Disable DBUF */
5354	gen9_dbuf_disable(dev_priv);
5355
5356	/* 3. Disable CD clock */
5357	intel_cdclk_uninit_hw(dev_priv);
5358
5359	/*
5360	 * 4. Disable Power Well 1 (PG1).
5361	 *    The AUX IO power wells are toggled on demand, so they are already
5362	 *    disabled at this point.
5363	 */
5364	mutex_lock(&power_domains->lock);
5365	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5366	intel_power_well_disable(dev_priv, well);
5367	mutex_unlock(&power_domains->lock);
5368
5369	/* 5. */
5370	intel_combo_phy_uninit(dev_priv);
5371}
5372
5373static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5374{
5375	struct i915_power_well *cmn_bc =
5376		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5377	struct i915_power_well *cmn_d =
5378		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5379
5380	/*
5381	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5382	 * workaround never ever read DISPLAY_PHY_CONTROL, and
5383	 * instead maintain a shadow copy ourselves. Use the actual
5384	 * power well state and lane status to reconstruct the
5385	 * expected initial value.
5386	 */
5387	dev_priv->chv_phy_control =
5388		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5389		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5390		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5391		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5392		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5393
5394	/*
5395	 * If all lanes are disabled we leave the override disabled
5396	 * with all power down bits cleared to match the state we
5397	 * would use after disabling the port. Otherwise enable the
5398	 * override and set the lane powerdown bits accding to the
5399	 * current lane status.
5400	 */
5401	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5402		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5403		unsigned int mask;
5404
5405		mask = status & DPLL_PORTB_READY_MASK;
5406		if (mask == 0xf)
5407			mask = 0x0;
5408		else
5409			dev_priv->chv_phy_control |=
5410				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5411
5412		dev_priv->chv_phy_control |=
5413			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5414
5415		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5416		if (mask == 0xf)
5417			mask = 0x0;
5418		else
5419			dev_priv->chv_phy_control |=
5420				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5421
5422		dev_priv->chv_phy_control |=
5423			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5424
5425		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5426
5427		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5428	} else {
5429		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5430	}
5431
5432	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5433		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5434		unsigned int mask;
5435
5436		mask = status & DPLL_PORTD_READY_MASK;
5437
5438		if (mask == 0xf)
5439			mask = 0x0;
5440		else
5441			dev_priv->chv_phy_control |=
5442				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5443
5444		dev_priv->chv_phy_control |=
5445			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5446
5447		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5448
5449		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5450	} else {
5451		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5452	}
5453
5454	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5455		    dev_priv->chv_phy_control);
5456
5457	/* Defer application of initial phy_control to enabling the powerwell */
5458}
5459
5460static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5461{
5462	struct i915_power_well *cmn =
5463		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5464	struct i915_power_well *disp2d =
5465		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5466
5467	/* If the display might be already active skip this */
5468	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5469	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5470	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5471		return;
5472
5473	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5474
5475	/* cmnlane needs DPLL registers */
5476	disp2d->desc->ops->enable(dev_priv, disp2d);
5477
5478	/*
5479	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5480	 * Need to assert and de-assert PHY SB reset by gating the
5481	 * common lane power, then un-gating it.
5482	 * Simply ungating isn't enough to reset the PHY enough to get
5483	 * ports and lanes running.
5484	 */
5485	cmn->desc->ops->disable(dev_priv, cmn);
5486}
5487
5488static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5489{
5490	bool ret;
5491
5492	vlv_punit_get(dev_priv);
5493	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5494	vlv_punit_put(dev_priv);
5495
5496	return ret;
5497}
5498
5499static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5500{
5501	drm_WARN(&dev_priv->drm,
5502		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5503		 "VED not power gated\n");
5504}
5505
5506static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5507{
5508	static const struct pci_device_id isp_ids[] = {
5509		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5510		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5511		{}
5512	};
5513
5514	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
5515		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5516		 "ISP not power gated\n");
5517}
5518
5519static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5520
5521/**
5522 * intel_power_domains_init_hw - initialize hardware power domain state
5523 * @i915: i915 device instance
5524 * @resume: Called from resume code paths or not
5525 *
5526 * This function initializes the hardware power domain state and enables all
5527 * power wells belonging to the INIT power domain. Power wells in other
5528 * domains (and not in the INIT domain) are referenced or disabled by
5529 * intel_modeset_readout_hw_state(). After that the reference count of each
5530 * power well must match its HW enabled state, see
5531 * intel_power_domains_verify_state().
5532 *
5533 * It will return with power domains disabled (to be enabled later by
5534 * intel_power_domains_enable()) and must be paired with
5535 * intel_power_domains_driver_remove().
5536 */
5537void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5538{
5539	struct i915_power_domains *power_domains = &i915->power_domains;
5540
5541	power_domains->initializing = true;
5542
5543	if (INTEL_GEN(i915) >= 11) {
5544		icl_display_core_init(i915, resume);
5545	} else if (IS_CANNONLAKE(i915)) {
5546		cnl_display_core_init(i915, resume);
5547	} else if (IS_GEN9_BC(i915)) {
 
 
5548		skl_display_core_init(i915, resume);
5549	} else if (IS_GEN9_LP(i915)) {
5550		bxt_display_core_init(i915, resume);
5551	} else if (IS_CHERRYVIEW(i915)) {
5552		mutex_lock(&power_domains->lock);
5553		chv_phy_control_init(i915);
5554		mutex_unlock(&power_domains->lock);
5555		assert_isp_power_gated(i915);
5556	} else if (IS_VALLEYVIEW(i915)) {
5557		mutex_lock(&power_domains->lock);
5558		vlv_cmnlane_wa(i915);
5559		mutex_unlock(&power_domains->lock);
5560		assert_ved_power_gated(i915);
5561		assert_isp_power_gated(i915);
5562	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5563		hsw_assert_cdclk(i915);
5564		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5565	} else if (IS_IVYBRIDGE(i915)) {
5566		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5567	}
5568
5569	/*
5570	 * Keep all power wells enabled for any dependent HW access during
5571	 * initialization and to make sure we keep BIOS enabled display HW
5572	 * resources powered until display HW readout is complete. We drop
5573	 * this reference in intel_power_domains_enable().
5574	 */
5575	power_domains->wakeref =
 
5576		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5577
5578	/* Disable power support if the user asked so. */
5579	if (!i915->params.disable_power_well)
5580		intel_display_power_get(i915, POWER_DOMAIN_INIT);
 
 
 
5581	intel_power_domains_sync_hw(i915);
5582
5583	power_domains->initializing = false;
5584}
5585
5586/**
5587 * intel_power_domains_driver_remove - deinitialize hw power domain state
5588 * @i915: i915 device instance
5589 *
5590 * De-initializes the display power domain HW state. It also ensures that the
5591 * device stays powered up so that the driver can be reloaded.
5592 *
5593 * It must be called with power domains already disabled (after a call to
5594 * intel_power_domains_disable()) and must be paired with
5595 * intel_power_domains_init_hw().
5596 */
5597void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5598{
5599	intel_wakeref_t wakeref __maybe_unused =
5600		fetch_and_zero(&i915->power_domains.wakeref);
5601
5602	/* Remove the refcount we took to keep power well support disabled. */
5603	if (!i915->params.disable_power_well)
5604		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
 
5605
5606	intel_display_power_flush_work_sync(i915);
5607
5608	intel_power_domains_verify_state(i915);
5609
5610	/* Keep the power well enabled, but cancel its rpm wakeref. */
5611	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5612}
5613
5614/**
5615 * intel_power_domains_enable - enable toggling of display power wells
5616 * @i915: i915 device instance
5617 *
5618 * Enable the ondemand enabling/disabling of the display power wells. Note that
5619 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5620 * only at specific points of the display modeset sequence, thus they are not
5621 * affected by the intel_power_domains_enable()/disable() calls. The purpose
5622 * of these function is to keep the rest of power wells enabled until the end
5623 * of display HW readout (which will acquire the power references reflecting
5624 * the current HW state).
5625 */
5626void intel_power_domains_enable(struct drm_i915_private *i915)
5627{
5628	intel_wakeref_t wakeref __maybe_unused =
5629		fetch_and_zero(&i915->power_domains.wakeref);
5630
5631	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5632	intel_power_domains_verify_state(i915);
5633}
5634
5635/**
5636 * intel_power_domains_disable - disable toggling of display power wells
5637 * @i915: i915 device instance
5638 *
5639 * Disable the ondemand enabling/disabling of the display power wells. See
5640 * intel_power_domains_enable() for which power wells this call controls.
5641 */
5642void intel_power_domains_disable(struct drm_i915_private *i915)
5643{
5644	struct i915_power_domains *power_domains = &i915->power_domains;
5645
5646	drm_WARN_ON(&i915->drm, power_domains->wakeref);
5647	power_domains->wakeref =
5648		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5649
5650	intel_power_domains_verify_state(i915);
5651}
5652
5653/**
5654 * intel_power_domains_suspend - suspend power domain state
5655 * @i915: i915 device instance
5656 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5657 *
5658 * This function prepares the hardware power domain state before entering
5659 * system suspend.
5660 *
5661 * It must be called with power domains already disabled (after a call to
5662 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5663 */
5664void intel_power_domains_suspend(struct drm_i915_private *i915,
5665				 enum i915_drm_suspend_mode suspend_mode)
5666{
5667	struct i915_power_domains *power_domains = &i915->power_domains;
5668	intel_wakeref_t wakeref __maybe_unused =
5669		fetch_and_zero(&power_domains->wakeref);
5670
5671	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5672
5673	/*
5674	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5675	 * support don't manually deinit the power domains. This also means the
5676	 * CSR/DMC firmware will stay active, it will power down any HW
5677	 * resources as required and also enable deeper system power states
5678	 * that would be blocked if the firmware was inactive.
5679	 */
5680	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5681	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
5682	    i915->csr.dmc_payload) {
5683		intel_display_power_flush_work(i915);
5684		intel_power_domains_verify_state(i915);
5685		return;
5686	}
5687
5688	/*
5689	 * Even if power well support was disabled we still want to disable
5690	 * power wells if power domains must be deinitialized for suspend.
5691	 */
5692	if (!i915->params.disable_power_well)
5693		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
 
5694
5695	intel_display_power_flush_work(i915);
5696	intel_power_domains_verify_state(i915);
5697
5698	if (INTEL_GEN(i915) >= 11)
5699		icl_display_core_uninit(i915);
5700	else if (IS_CANNONLAKE(i915))
5701		cnl_display_core_uninit(i915);
5702	else if (IS_GEN9_BC(i915))
 
 
5703		skl_display_core_uninit(i915);
5704	else if (IS_GEN9_LP(i915))
5705		bxt_display_core_uninit(i915);
5706
5707	power_domains->display_core_suspended = true;
5708}
5709
5710/**
5711 * intel_power_domains_resume - resume power domain state
5712 * @i915: i915 device instance
5713 *
5714 * This function resume the hardware power domain state during system resume.
5715 *
5716 * It will return with power domain support disabled (to be enabled later by
5717 * intel_power_domains_enable()) and must be paired with
5718 * intel_power_domains_suspend().
5719 */
5720void intel_power_domains_resume(struct drm_i915_private *i915)
5721{
5722	struct i915_power_domains *power_domains = &i915->power_domains;
5723
5724	if (power_domains->display_core_suspended) {
5725		intel_power_domains_init_hw(i915, true);
5726		power_domains->display_core_suspended = false;
5727	} else {
5728		drm_WARN_ON(&i915->drm, power_domains->wakeref);
5729		power_domains->wakeref =
5730			intel_display_power_get(i915, POWER_DOMAIN_INIT);
5731	}
5732
5733	intel_power_domains_verify_state(i915);
5734}
5735
5736#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5737
5738static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5739{
5740	struct i915_power_domains *power_domains = &i915->power_domains;
5741	struct i915_power_well *power_well;
5742
5743	for_each_power_well(i915, power_well) {
5744		enum intel_display_power_domain domain;
5745
5746		drm_dbg(&i915->drm, "%-25s %d\n",
5747			power_well->desc->name, power_well->count);
5748
5749		for_each_power_domain(domain, power_well->desc->domains)
5750			drm_dbg(&i915->drm, "  %-23s %d\n",
5751				intel_display_power_domain_str(domain),
5752				power_domains->domain_use_count[domain]);
5753	}
5754}
5755
5756/**
5757 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5758 * @i915: i915 device instance
5759 *
5760 * Verify if the reference count of each power well matches its HW enabled
5761 * state and the total refcount of the domains it belongs to. This must be
5762 * called after modeset HW state sanitization, which is responsible for
5763 * acquiring reference counts for any power wells in use and disabling the
5764 * ones left on by BIOS but not required by any active output.
5765 */
5766static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5767{
5768	struct i915_power_domains *power_domains = &i915->power_domains;
5769	struct i915_power_well *power_well;
5770	bool dump_domain_info;
5771
5772	mutex_lock(&power_domains->lock);
5773
5774	verify_async_put_domains_state(power_domains);
5775
5776	dump_domain_info = false;
5777	for_each_power_well(i915, power_well) {
5778		enum intel_display_power_domain domain;
5779		int domains_count;
5780		bool enabled;
5781
5782		enabled = power_well->desc->ops->is_enabled(i915, power_well);
5783		if ((power_well->count || power_well->desc->always_on) !=
5784		    enabled)
5785			drm_err(&i915->drm,
5786				"power well %s state mismatch (refcount %d/enabled %d)",
5787				power_well->desc->name,
5788				power_well->count, enabled);
5789
5790		domains_count = 0;
5791		for_each_power_domain(domain, power_well->desc->domains)
5792			domains_count += power_domains->domain_use_count[domain];
5793
5794		if (power_well->count != domains_count) {
5795			drm_err(&i915->drm,
5796				"power well %s refcount/domain refcount mismatch "
5797				"(refcount %d/domains refcount %d)\n",
5798				power_well->desc->name, power_well->count,
5799				domains_count);
5800			dump_domain_info = true;
5801		}
5802	}
5803
5804	if (dump_domain_info) {
5805		static bool dumped;
5806
5807		if (!dumped) {
5808			intel_power_domains_dump_info(i915);
5809			dumped = true;
5810		}
5811	}
5812
5813	mutex_unlock(&power_domains->lock);
5814}
5815
5816#else
5817
5818static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5819{
5820}
5821
5822#endif
5823
5824void intel_display_power_suspend_late(struct drm_i915_private *i915)
5825{
5826	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
 
5827		bxt_enable_dc9(i915);
5828	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5829		hsw_enable_pc8(i915);
 
 
 
 
 
5830}
5831
5832void intel_display_power_resume_early(struct drm_i915_private *i915)
5833{
5834	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
 
5835		gen9_sanitize_dc_state(i915);
5836		bxt_disable_dc9(i915);
5837	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5838		hsw_disable_pc8(i915);
5839	}
 
 
 
 
5840}
5841
5842void intel_display_power_suspend(struct drm_i915_private *i915)
5843{
5844	if (INTEL_GEN(i915) >= 11) {
5845		icl_display_core_uninit(i915);
5846		bxt_enable_dc9(i915);
5847	} else if (IS_GEN9_LP(i915)) {
5848		bxt_display_core_uninit(i915);
5849		bxt_enable_dc9(i915);
5850	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5851		hsw_enable_pc8(i915);
5852	}
5853}
5854
5855void intel_display_power_resume(struct drm_i915_private *i915)
5856{
5857	if (INTEL_GEN(i915) >= 11) {
5858		bxt_disable_dc9(i915);
5859		icl_display_core_init(i915, true);
5860		if (i915->csr.dmc_payload) {
5861			if (i915->csr.allowed_dc_mask &
5862			    DC_STATE_EN_UPTO_DC6)
5863				skl_enable_dc6(i915);
5864			else if (i915->csr.allowed_dc_mask &
5865				 DC_STATE_EN_UPTO_DC5)
5866				gen9_enable_dc5(i915);
5867		}
5868	} else if (IS_GEN9_LP(i915)) {
5869		bxt_disable_dc9(i915);
5870		bxt_display_core_init(i915, true);
5871		if (i915->csr.dmc_payload &&
5872		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5873			gen9_enable_dc5(i915);
5874	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5875		hsw_disable_pc8(i915);
5876	}
5877}