/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_display_power.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	drm_WARN(&dev_priv->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
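
/*
 * Illustrative sketch (editor's note, not part of the original file): the
 * get/put pair above is plain reference counting, so the hardware
 * enable/disable hooks only run on the 0 -> 1 and 1 -> 0 transitions. A
 * hypothetical caller holding power_domains->lock keeps calls balanced:
 *
 *	intel_power_well_get(dev_priv, power_well);	// count 0 -> 1, HW enabled
 *	intel_power_well_get(dev_priv, power_well);	// count 1 -> 2, no-op
 *	intel_power_well_put(dev_priv, power_well);	// count 2 -> 1, no-op
 *	intel_power_well_put(dev_priv, power_well);	// count 1 -> 0, HW disabled
 */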

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
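
/*
 * Illustrative sketch (editor's note, not part of the original file): per
 * the kernel-doc above, this helper is intended for HW state readout, where
 * a hypothetical caller bails out instead of touching registers behind a
 * powered-down well:
 *
 *	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		return false;	// pipe A registers are not accessible
 */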

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}
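
/*
 * Illustrative worked example (editor's note, not part of the original
 * file): both macros above simply rebase a power well index onto the AUX
 * channel enum, relying on the enum values being consecutive. E.g. for a
 * combo PHY well, ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_B) evaluates to
 * (ICL_PW_CTL_IDX_AUX_B - ICL_PW_CTL_IDX_AUX_A) + AUX_CH_A == AUX_CH_B,
 * while a TBT well is rebased onto AUX_CH_C, the first TBT-capable channel.
 */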

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
		       enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (!dig_port)
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	return dig_port;
}

static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
				  const struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);

	return intel_port_to_phy(i915, dig_port->base.port);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    power_well->desc->name);

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}
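
/*
 * Illustrative note (editor's note, not part of the original file): the
 * return value packs one requester per bit -- bit 0 BIOS, bit 1 driver,
 * bit 2 KVMR, bit 3 DEBUG -- matching the four request registers described
 * above. E.g. a return of 0x6 means both the driver and KVMR are holding
 * the well on; a hypothetical check for foreign requesters could be:
 *
 *	u32 reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx);
 *	if (reqs & ~2)	// anyone besides the driver?
 *		...
 */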

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    power_well->desc->name,
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		u32 val;

		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (DISPLAY_VER(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
		       val & ~ICL_LANE_ENABLE_AUX);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
	int ret, tries = 0;

	while (1) {
		ret = sandybridge_pcode_write_timeout(i915,
						      ICL_PCODE_EXIT_TCCOLD,
						      0, 250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error once i915 CI updates the ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}

static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
	bool timeout_expected;
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (is_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or if we
	 * need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
		icl_tc_cold_exit(dev_priv);

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	hsw_power_well_disable(dev_priv, power_well);
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
			  struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_enable(dev_priv,
							   power_well);
	else
		return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_disable(dev_priv,
							    power_well);
	else
		return hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if the mode
	  *  set disable sequence was followed.
	  * 2] Check if the display uninitialize sequence was initiated.
	  */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if the mode
	  *  set disable sequence was followed.
	  * 2] Check if the display uninitialize sequence was initiated.
	  */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make sure
	 * the write sticks across enough consecutive re-reads, forcing a
	 * rewrite until we are confident the state is exactly what we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time we need at most one retry, so avoid log spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (DISPLAY_VER(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (DISPLAY_VER(dev_priv) == 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_DISPLAY(dev_priv))
		return;

	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

	drm_dbg_kms(&dev_priv->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    dev_priv->dmc.dc_state, val);
	dev_priv->dmc.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis,
 * for instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * the HW actually entering it. Disabling a deeper power state is synchronous:
 * for instance setting %DC_STATE_DISABLE won't complete until all HW
 * resources are turned back on and register state is restored. This is
 * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
 * restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~dev_priv->dmc.allowed_dc_mask))
		state &= dev_priv->dmc.allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->dmc.dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			dev_priv->dmc.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->dmc.dc_state = val & mask;
}
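
/*
 * Illustrative sketch (editor's note, not part of the original file): the
 * callers below pick one of the target states documented in the kernel-doc
 * above and let this helper do the masking and readback bookkeeping, e.g.
 * on the DC-off power well paths:
 *
 *	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);	// allow DC5/DC6
 *	...
 *	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);	// exit, synchronously
 */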

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}
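
/*
 * Illustrative note (editor's note, not part of the original file): the
 * states[] array is ordered deepest to shallowest, so a disallowed request
 * falls through one step at a time. E.g. if allowed_dc_mask permits neither
 * DC6 nor DC5 nor DC3CO, sanitize_target_dc_state(dev_priv,
 * DC_STATE_EN_UPTO_DC6) degrades first to DC_STATE_EN_UPTO_DC5, then to
 * DC_STATE_EN_DC3CO, and finally returns DC_STATE_DISABLE.
 */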

static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	val = intel_de_read(dev_priv, DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	intel_de_write(dev_priv, DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Wait for the 200us DC3CO exit time specified in Bspec 49196.
	 */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_pps_reset_all(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      !intel_de_read(dev_priv, DMC_PROGRAM(0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&dev_priv->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}
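
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * callers resolve a well-known ID to the platform's i915_power_well
 * instance, as intel_display_power_set_target_dc_state() below does:
 *
 *	struct i915_power_well *power_well =
 *		lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
 */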

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->dmc.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If the DC off power well is currently disabled, we need to enable
	 * and then disable it to apply the new target DC state.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->dmc.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
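
/*
 * Illustrative sketch (editor's note, not part of the original file): a
 * hypothetical feature needing a specific DC state would request it through
 * this setter and rely on sanitize_target_dc_state() to fall back when the
 * state isn't in allowed_dc_mask:
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 */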

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(dev_priv) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_dmc_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	if (!HAS_DISPLAY(dev_priv))
		return;

	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!intel_dmc_has_payload(dev_priv))
		return;

	switch (dev_priv->dmc.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
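
/*
 * Illustrative note (editor's note, not part of the original file): the
 * punit handshake above is request/acknowledge -- the desired on/gate state
 * is written to PUNIT_REG_PWRGT_CTRL and PUNIT_REG_PWRGT_STATUS is then
 * polled (via the local COND macro) until the punit reports the same
 * per-well state, with a 100ms ceiling before the timeout is logged.
 */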

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);
	intel_hpd_poll_disable(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_pps_reset_all(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_enable(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existent CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1694
1695static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1696					    struct i915_power_well *power_well)
1697{
1698	enum dpio_phy phy;
1699
1700	drm_WARN_ON_ONCE(&dev_priv->drm,
1701			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1702			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1703
1704	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1705		phy = DPIO_PHY0;
1706		assert_pll_disabled(dev_priv, PIPE_A);
1707		assert_pll_disabled(dev_priv, PIPE_B);
1708	} else {
1709		phy = DPIO_PHY1;
1710		assert_pll_disabled(dev_priv, PIPE_C);
1711	}
1712
1713	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1714	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1715		       dev_priv->chv_phy_control);
1716
1717	vlv_set_power_well(dev_priv, power_well, false);
1718
1719	drm_dbg_kms(&dev_priv->drm,
1720		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1721		    phy, dev_priv->chv_phy_control);
1722
1723	/* PHY is fully reset now, so we can enable the PHY state asserts */
1724	dev_priv->chv_phy_assert[phy] = true;
1725
1726	assert_chv_phy_status(dev_priv);
1727}
1728
1729static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1730				     enum dpio_channel ch, bool override, unsigned int mask)
1731{
1732	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1733	u32 reg, val, expected, actual;
1734
1735	/*
1736	 * The BIOS can leave the PHY in some weird state
1737	 * where it doesn't fully power down some parts.
1738	 * Disable the asserts until the PHY has been fully
1739	 * reset (i.e. the power well has been disabled at
1740	 * least once).
1741	 */
1742	if (!dev_priv->chv_phy_assert[phy])
1743		return;
1744
1745	if (ch == DPIO_CH0)
1746		reg = _CHV_CMN_DW0_CH0;
1747	else
1748		reg = _CHV_CMN_DW6_CH1;
1749
1750	vlv_dpio_get(dev_priv);
1751	val = vlv_dpio_read(dev_priv, pipe, reg);
1752	vlv_dpio_put(dev_priv);
1753
1754	/*
1755	 * This assumes !override is only used when the port is disabled.
1756	 * All lanes should power down even without the override when
1757	 * the port is disabled.
1758	 */
1759	if (!override || mask == 0xf) {
1760		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1761		/*
1762		 * If CH1 common lane is not active anymore
1763		 * (e.g. for the pipe B DPLL) the entire channel will
1764		 * shut down, which causes the common lane registers
1765		 * to read as 0. That means we can't actually check
1766		 * the lane power down status bits, but as the entire
1767		 * register reads as 0 it's a good indication that the
1768		 * channel is indeed entirely powered down.
1769		 */
1770		if (ch == DPIO_CH1 && val == 0)
1771			expected = 0;
1772	} else if (mask != 0x0) {
1773		expected = DPIO_ANYDL_POWERDOWN;
1774	} else {
1775		expected = 0;
1776	}
1777
1778	if (ch == DPIO_CH0)
1779		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1780	else
1781		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1782	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1783
1784	drm_WARN(&dev_priv->drm, actual != expected,
1785		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1786		 !!(actual & DPIO_ALLDL_POWERDOWN),
1787		 !!(actual & DPIO_ANYDL_POWERDOWN),
1788		 !!(expected & DPIO_ALLDL_POWERDOWN),
1789		 !!(expected & DPIO_ANYDL_POWERDOWN),
1790		 reg, val);
1791}
1792
1793bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1794			  enum dpio_channel ch, bool override)
1795{
1796	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1797	bool was_override;
1798
1799	mutex_lock(&power_domains->lock);
1800
1801	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1802
1803	if (override == was_override)
1804		goto out;
1805
1806	if (override)
1807		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1808	else
1809		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1810
1811	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1812		       dev_priv->chv_phy_control);
1813
1814	drm_dbg_kms(&dev_priv->drm,
1815		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1816		    phy, ch, dev_priv->chv_phy_control);
1817
1818	assert_chv_phy_status(dev_priv);
1819
1820out:
1821	mutex_unlock(&power_domains->lock);
1822
1823	return was_override;
1824}
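
/*
 * Illustrative usage sketch (editor's note, not part of the driver): the
 * returned previous override state lets a caller temporarily force the
 * override on and later restore whatever was there before, e.g.:
 *
 *	bool was_override;
 *
 *	was_override = chv_phy_powergate_ch(dev_priv, phy, ch, true);
 *	...program the PHY...
 *	chv_phy_powergate_ch(dev_priv, phy, ch, was_override);
 */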
1825
1826void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1827			     bool override, unsigned int mask)
1828{
1829	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1830	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1831	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1832	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
1833
1834	mutex_lock(&power_domains->lock);
1835
1836	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1837	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1838
1839	if (override)
1840		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1841	else
1842		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1843
1844	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1845		       dev_priv->chv_phy_control);
1846
1847	drm_dbg_kms(&dev_priv->drm,
1848		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1849		    phy, ch, mask, dev_priv->chv_phy_control);
1850
1851	assert_chv_phy_status(dev_priv);
1852
1853	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1854
1855	mutex_unlock(&power_domains->lock);
1856}
1857
1858static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1859					struct i915_power_well *power_well)
1860{
1861	enum pipe pipe = PIPE_A;
1862	bool enabled;
1863	u32 state, ctrl;
1864
1865	vlv_punit_get(dev_priv);
1866
1867	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1868	/*
1869	 * We only ever set the power-on and power-gate states, anything
1870	 * else is unexpected.
1871	 */
1872	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1873		    state != DP_SSS_PWR_GATE(pipe));
1874	enabled = state == DP_SSS_PWR_ON(pipe);
1875
1876	/*
1877	 * A transient state at this point would mean some unexpected party
1878	 * is poking at the power controls too.
1879	 */
1880	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1881	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1882
1883	vlv_punit_put(dev_priv);
1884
1885	return enabled;
1886}
1887
1888static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1889				    struct i915_power_well *power_well,
1890				    bool enable)
1891{
1892	enum pipe pipe = PIPE_A;
1893	u32 state;
1894	u32 ctrl;
1895
1896	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1897
1898	vlv_punit_get(dev_priv);
1899
1900#define COND \
1901	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1902
1903	if (COND)
1904		goto out;
1905
1906	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1907	ctrl &= ~DP_SSC_MASK(pipe);
1908	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1909	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1910
1911	if (wait_for(COND, 100))
1912		drm_err(&dev_priv->drm,
1913			"timeout setting power well state %08x (%08x)\n",
1914			state,
1915			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1916
1917#undef COND
1918
1919out:
1920	vlv_punit_put(dev_priv);
1921}
1922
1923static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1924					struct i915_power_well *power_well)
1925{
1926	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1927		       dev_priv->chv_phy_control);
1928}
1929
1930static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1931				       struct i915_power_well *power_well)
1932{
1933	chv_set_pipe_power_well(dev_priv, power_well, true);
1934
1935	vlv_display_power_well_init(dev_priv);
1936}
1937
1938static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1939					struct i915_power_well *power_well)
1940{
1941	vlv_display_power_well_deinit(dev_priv);
1942
1943	chv_set_pipe_power_well(dev_priv, power_well, false);
1944}
1945
1946static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1947{
1948	return power_domains->async_put_domains[0] |
1949	       power_domains->async_put_domains[1];
1950}
1951
1952#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1953
1954static bool
1955assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1956{
1957	struct drm_i915_private *i915 = container_of(power_domains,
1958						     struct drm_i915_private,
1959						     power_domains);
1960	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
1961			    power_domains->async_put_domains[1]);
1962}
1963
1964static bool
1965__async_put_domains_state_ok(struct i915_power_domains *power_domains)
1966{
1967	struct drm_i915_private *i915 = container_of(power_domains,
1968						     struct drm_i915_private,
1969						     power_domains);
1970	enum intel_display_power_domain domain;
1971	bool err = false;
1972
1973	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1974	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
1975			   !!__async_put_domains_mask(power_domains));
1976
1977	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1978		err |= drm_WARN_ON(&i915->drm,
1979				   power_domains->domain_use_count[domain] != 1);
1980
1981	return !err;
1982}
1983
1984static void print_power_domains(struct i915_power_domains *power_domains,
1985				const char *prefix, u64 mask)
1986{
1987	struct drm_i915_private *i915 = container_of(power_domains,
1988						     struct drm_i915_private,
1989						     power_domains);
1990	enum intel_display_power_domain domain;
1991
1992	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1993	for_each_power_domain(domain, mask)
1994		drm_dbg(&i915->drm, "%s use_count %d\n",
1995			intel_display_power_domain_str(domain),
1996			power_domains->domain_use_count[domain]);
1997}
1998
1999static void
2000print_async_put_domains_state(struct i915_power_domains *power_domains)
2001{
2002	struct drm_i915_private *i915 = container_of(power_domains,
2003						     struct drm_i915_private,
2004						     power_domains);
2005
2006	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
2007		power_domains->async_put_wakeref);
2008
2009	print_power_domains(power_domains, "async_put_domains[0]",
2010			    power_domains->async_put_domains[0]);
2011	print_power_domains(power_domains, "async_put_domains[1]",
2012			    power_domains->async_put_domains[1]);
2013}
2014
2015static void
2016verify_async_put_domains_state(struct i915_power_domains *power_domains)
2017{
2018	if (!__async_put_domains_state_ok(power_domains))
2019		print_async_put_domains_state(power_domains);
2020}
2021
2022#else
2023
2024static void
2025assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
2026{
2027}
2028
2029static void
2030verify_async_put_domains_state(struct i915_power_domains *power_domains)
2031{
2032}
2033
2034#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2035
2036static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2037{
2038	assert_async_put_domain_masks_disjoint(power_domains);
2039
2040	return __async_put_domains_mask(power_domains);
2041}
2042
2043static void
2044async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2045			       enum intel_display_power_domain domain)
2046{
2047	assert_async_put_domain_masks_disjoint(power_domains);
2048
2049	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2050	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2051}
2052
2053static bool
2054intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2055				       enum intel_display_power_domain domain)
2056{
2057	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2058	bool ret = false;
2059
2060	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2061		goto out_verify;
2062
2063	async_put_domains_clear_domain(power_domains, domain);
2064
2065	ret = true;
2066
2067	if (async_put_domains_mask(power_domains))
2068		goto out_verify;
2069
2070	cancel_delayed_work(&power_domains->async_put_work);
2071	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2072				 fetch_and_zero(&power_domains->async_put_wakeref));
2073out_verify:
2074	verify_async_put_domains_state(power_domains);
2075
2076	return ret;
2077}
2078
2079static void
2080__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2081				 enum intel_display_power_domain domain)
2082{
2083	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2084	struct i915_power_well *power_well;
2085
2086	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2087		return;
2088
2089	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2090		intel_power_well_get(dev_priv, power_well);
2091
2092	power_domains->domain_use_count[domain]++;
2093}
2094
2095/**
2096 * intel_display_power_get - grab a power domain reference
2097 * @dev_priv: i915 device instance
2098 * @domain: power domain to reference
2099 *
2100 * This function grabs a power domain reference for @domain and ensures that the
2101 * power domain and all its parents are powered up. Therefore users should only
2102 * grab a reference to the innermost power domain they need.
2103 *
2104 * Any power domain reference obtained by this function must have a symmetric
2105 * call to intel_display_power_put() to release the reference again.
2106 */
2107intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2108					enum intel_display_power_domain domain)
2109{
2110	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2111	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2112
2113	mutex_lock(&power_domains->lock);
2114	__intel_display_power_get_domain(dev_priv, domain);
2115	mutex_unlock(&power_domains->lock);
2116
2117	return wakeref;
2118}
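
/*
 * Illustrative usage sketch (editor's note, not part of the driver): the
 * typical pattern is a get/put pair bracketing the HW access that needs the
 * domain powered, e.g.:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	...access registers backed by the PIPE_A power well...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */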
2119
2120/**
2121 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2122 * @dev_priv: i915 device instance
2123 * @domain: power domain to reference
2124 *
2125 * This function grabs a power domain reference for @domain only if the power
2126 * domain is already enabled, and returns a wakeref on success or 0 otherwise.
2127 * Users should only grab a reference to the innermost power domain they need.
2128 *
2129 * Any power domain reference obtained by this function must have a symmetric
2130 * call to intel_display_power_put() to release the reference again.
2131 */
2132intel_wakeref_t
2133intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2134				   enum intel_display_power_domain domain)
2135{
2136	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2137	intel_wakeref_t wakeref;
2138	bool is_enabled;
2139
2140	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2141	if (!wakeref)
2142		return 0;
2143
2144	mutex_lock(&power_domains->lock);
2145
2146	if (__intel_display_power_is_enabled(dev_priv, domain)) {
2147		__intel_display_power_get_domain(dev_priv, domain);
2148		is_enabled = true;
2149	} else {
2150		is_enabled = false;
2151	}
2152
2153	mutex_unlock(&power_domains->lock);
2154
2155	if (!is_enabled) {
2156		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2157		wakeref = 0;
2158	}
2159
2160	return wakeref;
2161}
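
/*
 * Illustrative usage sketch (editor's note, not part of the driver): unlike
 * intel_display_power_get(), this variant can fail, so the returned wakeref
 * must be checked before touching the HW, e.g.:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv,
 *						     POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return;
 *	...read out HW state...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */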
2162
2163static void
2164__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2165				 enum intel_display_power_domain domain)
2166{
2167	struct i915_power_domains *power_domains;
2168	struct i915_power_well *power_well;
2169	const char *name = intel_display_power_domain_str(domain);
2170
2171	power_domains = &dev_priv->power_domains;
2172
2173	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2174		 "Use count on domain %s is already zero\n",
2175		 name);
2176	drm_WARN(&dev_priv->drm,
2177		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2178		 "Async disabling of domain %s is pending\n",
2179		 name);
2180
2181	power_domains->domain_use_count[domain]--;
2182
2183	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2184		intel_power_well_put(dev_priv, power_well);
2185}
2186
2187static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2188				      enum intel_display_power_domain domain)
2189{
2190	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2191
2192	mutex_lock(&power_domains->lock);
2193	__intel_display_power_put_domain(dev_priv, domain);
2194	mutex_unlock(&power_domains->lock);
2195}
2196
2197static void
2198queue_async_put_domains_work(struct i915_power_domains *power_domains,
2199			     intel_wakeref_t wakeref)
2200{
2201	struct drm_i915_private *i915 = container_of(power_domains,
2202						     struct drm_i915_private,
2203						     power_domains);
2204	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2205	power_domains->async_put_wakeref = wakeref;
2206	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
2207						    &power_domains->async_put_work,
2208						    msecs_to_jiffies(100)));
2209}
2210
2211static void
2212release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2213{
2214	struct drm_i915_private *dev_priv =
2215		container_of(power_domains, struct drm_i915_private,
2216			     power_domains);
2217	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2218	enum intel_display_power_domain domain;
2219	intel_wakeref_t wakeref;
2220
2221	/*
2222	 * The caller must already hold a raw wakeref; upgrade that to a proper
2223	 * wakeref to make the state checker happy about the HW access during
2224	 * power well disabling.
2225	 */
2226	assert_rpm_raw_wakeref_held(rpm);
2227	wakeref = intel_runtime_pm_get(rpm);
2228
2229	for_each_power_domain(domain, mask) {
2230		/* Clear before put, so put's sanity check is happy. */
2231		async_put_domains_clear_domain(power_domains, domain);
2232		__intel_display_power_put_domain(dev_priv, domain);
2233	}
2234
2235	intel_runtime_pm_put(rpm, wakeref);
2236}
2237
2238static void
2239intel_display_power_put_async_work(struct work_struct *work)
2240{
2241	struct drm_i915_private *dev_priv =
2242		container_of(work, struct drm_i915_private,
2243			     power_domains.async_put_work.work);
2244	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2245	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2246	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2247	intel_wakeref_t old_work_wakeref = 0;
2248
2249	mutex_lock(&power_domains->lock);
2250
2251	/*
2252	 * Bail out if all the domain refs pending to be released were grabbed
2253	 * by subsequent gets or a flush_work.
2254	 */
2255	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2256	if (!old_work_wakeref)
2257		goto out_verify;
2258
2259	release_async_put_domains(power_domains,
2260				  power_domains->async_put_domains[0]);
2261
2262	/* Requeue the work if more domains were async put meanwhile. */
2263	if (power_domains->async_put_domains[1]) {
2264		power_domains->async_put_domains[0] =
2265			fetch_and_zero(&power_domains->async_put_domains[1]);
2266		queue_async_put_domains_work(power_domains,
2267					     fetch_and_zero(&new_work_wakeref));
2268	} else {
2269		/*
2270		 * Cancel the work that got queued after this one got dequeued,
2271		 * since here we released the corresponding async-put reference.
2272		 */
2273		cancel_delayed_work(&power_domains->async_put_work);
2274	}
2275
2276out_verify:
2277	verify_async_put_domains_state(power_domains);
2278
2279	mutex_unlock(&power_domains->lock);
2280
2281	if (old_work_wakeref)
2282		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2283	if (new_work_wakeref)
2284		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2285}
2286
2287/**
2288 * intel_display_power_put_async - release a power domain reference asynchronously
2289 * @i915: i915 device instance
2290 * @domain: power domain to reference
2291 * @wakeref: wakeref acquired for the reference that is being released
2292 *
2293 * This function drops the power domain reference obtained by
2294 * intel_display_power_get*() and schedules work to power down the
2295 * corresponding hardware block if this is the last reference.
2296 */
2297void __intel_display_power_put_async(struct drm_i915_private *i915,
2298				     enum intel_display_power_domain domain,
2299				     intel_wakeref_t wakeref)
2300{
2301	struct i915_power_domains *power_domains = &i915->power_domains;
2302	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2303	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2304
2305	mutex_lock(&power_domains->lock);
2306
2307	if (power_domains->domain_use_count[domain] > 1) {
2308		__intel_display_power_put_domain(i915, domain);
2309
2310		goto out_verify;
2311	}
2312
2313	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2314
2315	/* Let a pending work requeue itself or queue a new one. */
2316	if (power_domains->async_put_wakeref) {
2317		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2318	} else {
2319		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2320		queue_async_put_domains_work(power_domains,
2321					     fetch_and_zero(&work_wakeref));
2322	}
2323
2324out_verify:
2325	verify_async_put_domains_state(power_domains);
2326
2327	mutex_unlock(&power_domains->lock);
2328
2329	if (work_wakeref)
2330		intel_runtime_pm_put_raw(rpm, work_wakeref);
2331
2332	intel_runtime_pm_put(rpm, wakeref);
2333}
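
/*
 * Illustrative usage sketch (editor's note, not part of the driver): callers
 * use the intel_display_power_put_async() wrapper from the header, which
 * resolves to the function above. The domain then stays enabled for the
 * 100 ms grace period of the delayed work, avoiding power well toggling on
 * frequent get/put cycles, e.g.:
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
 *	...short AUX transfer...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 */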
2334
2335/**
2336 * intel_display_power_flush_work - flushes the async display power disabling work
2337 * @i915: i915 device instance
2338 *
2339 * Flushes any pending work that was scheduled by a preceding
2340 * intel_display_power_put_async() call, completing the disabling of the
2341 * corresponding power domains.
2342 *
2343 * Note that the work handler function may still be running after this
2344 * function returns; to ensure that the work handler isn't running use
2345 * intel_display_power_flush_work_sync() instead.
2346 */
2347void intel_display_power_flush_work(struct drm_i915_private *i915)
2348{
2349	struct i915_power_domains *power_domains = &i915->power_domains;
2350	intel_wakeref_t work_wakeref;
2351
2352	mutex_lock(&power_domains->lock);
2353
2354	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2355	if (!work_wakeref)
2356		goto out_verify;
2357
2358	release_async_put_domains(power_domains,
2359				  async_put_domains_mask(power_domains));
2360	cancel_delayed_work(&power_domains->async_put_work);
2361
2362out_verify:
2363	verify_async_put_domains_state(power_domains);
2364
2365	mutex_unlock(&power_domains->lock);
2366
2367	if (work_wakeref)
2368		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2369}
2370
2371/**
2372 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2373 * @i915: i915 device instance
2374 *
2375 * Like intel_display_power_flush_work(), but also ensures that the work
2376 * handler function is not running any more when this function returns.
2377 */
2378static void
2379intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2380{
2381	struct i915_power_domains *power_domains = &i915->power_domains;
2382
2383	intel_display_power_flush_work(i915);
2384	cancel_delayed_work_sync(&power_domains->async_put_work);
2385
2386	verify_async_put_domains_state(power_domains);
2387
2388	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2389}
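
/*
 * Illustrative usage sketch (editor's note, not part of the driver): paths
 * that must end up with the domains actually powered down (suspend, driver
 * unload) flush the pending async work first, e.g.:
 *
 *	intel_display_power_put_async(i915, domain, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 */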
2390
2391#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2392/**
2393 * intel_display_power_put - release a power domain reference
2394 * @dev_priv: i915 device instance
2395 * @domain: power domain to reference
2396 * @wakeref: wakeref acquired for the reference that is being released
2397 *
2398 * This function drops the power domain reference obtained by
2399 * intel_display_power_get() and might power down the corresponding hardware
2400 * block right away if this is the last reference.
2401 */
2402void intel_display_power_put(struct drm_i915_private *dev_priv,
2403			     enum intel_display_power_domain domain,
2404			     intel_wakeref_t wakeref)
2405{
2406	__intel_display_power_put(dev_priv, domain);
2407	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2408}
2409#else
2410/**
2411 * intel_display_power_put_unchecked - release an unchecked power domain reference
2412 * @dev_priv: i915 device instance
2413 * @domain: power domain to reference
2414 *
2415 * This function drops the power domain reference obtained by
2416 * intel_display_power_get() and might power down the corresponding hardware
2417 * block right away if this is the last reference.
2418 *
2419 * This function is only for the power domain code's internal use to suppress wakeref
2420 * tracking when the corresponding debug kconfig option is disabled, and should
2421 * not be used otherwise.
2422 */
2423void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2424				       enum intel_display_power_domain domain)
2425{
2426	__intel_display_power_put(dev_priv, domain);
2427	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2428}
2429#endif
2430
2431void
2432intel_display_power_get_in_set(struct drm_i915_private *i915,
2433			       struct intel_display_power_domain_set *power_domain_set,
2434			       enum intel_display_power_domain domain)
2435{
2436	intel_wakeref_t __maybe_unused wf;
2437
2438	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2439
2440	wf = intel_display_power_get(i915, domain);
2441#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2442	power_domain_set->wakerefs[domain] = wf;
2443#endif
2444	power_domain_set->mask |= BIT_ULL(domain);
2445}
2446
2447bool
2448intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
2449					  struct intel_display_power_domain_set *power_domain_set,
2450					  enum intel_display_power_domain domain)
2451{
2452	intel_wakeref_t wf;
2453
2454	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2455
2456	wf = intel_display_power_get_if_enabled(i915, domain);
2457	if (!wf)
2458		return false;
2459
2460#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2461	power_domain_set->wakerefs[domain] = wf;
2462#endif
2463	power_domain_set->mask |= BIT_ULL(domain);
2464
2465	return true;
2466}
2467
2468void
2469intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
2470				    struct intel_display_power_domain_set *power_domain_set,
2471				    u64 mask)
2472{
2473	enum intel_display_power_domain domain;
2474
2475	drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
2476
2477	for_each_power_domain(domain, mask) {
2478		intel_wakeref_t __maybe_unused wf = -1;
2479
2480#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2481		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
2482#endif
2483		intel_display_power_put(i915, domain, wf);
2484		power_domain_set->mask &= ~BIT_ULL(domain);
2485	}
2486}
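
/*
 * Illustrative usage sketch (editor's note, not part of the driver): the
 * domain-set helpers above track the acquired wakerefs internally, so a
 * caller can accumulate domains and release them in bulk, e.g.:
 *
 *	struct intel_display_power_domain_set power_domain_set = {};
 *
 *	intel_display_power_get_in_set(i915, &power_domain_set,
 *				       POWER_DOMAIN_PIPE_A);
 *	intel_display_power_get_in_set(i915, &power_domain_set,
 *				       POWER_DOMAIN_TRANSCODER_A);
 *	...
 *	intel_display_power_put_mask_in_set(i915, &power_domain_set,
 *					    power_domain_set.mask);
 */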
2487
2488#define I830_PIPES_POWER_DOMAINS (		\
2489	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2490	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2491	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2492	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2493	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2494	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2495	BIT_ULL(POWER_DOMAIN_INIT))
2496
2497#define VLV_DISPLAY_POWER_DOMAINS (		\
2498	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2499	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2500	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2501	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2502	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2503	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2504	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2505	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2506	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2507	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2508	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2509	BIT_ULL(POWER_DOMAIN_VGA) |			\
2510	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2511	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2512	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2513	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2514	BIT_ULL(POWER_DOMAIN_INIT))
2515
2516#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2517	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2518	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2519	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2520	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2521	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2522	BIT_ULL(POWER_DOMAIN_INIT))
2523
2524#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2525	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2526	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2527	BIT_ULL(POWER_DOMAIN_INIT))
2528
2529#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2530	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2531	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2532	BIT_ULL(POWER_DOMAIN_INIT))
2533
2534#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2535	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2536	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2537	BIT_ULL(POWER_DOMAIN_INIT))
2538
2539#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2540	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2541	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2542	BIT_ULL(POWER_DOMAIN_INIT))
2543
2544#define CHV_DISPLAY_POWER_DOMAINS (		\
2545	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2546	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2547	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2548	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2549	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2550	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2551	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2552	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2553	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2554	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2555	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2556	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2557	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2558	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2559	BIT_ULL(POWER_DOMAIN_VGA) |			\
2560	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2561	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2562	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2563	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2564	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2565	BIT_ULL(POWER_DOMAIN_INIT))
2566
2567#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2568	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2569	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2570	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2571	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2572	BIT_ULL(POWER_DOMAIN_INIT))
2573
2574#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2575	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2576	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2577	BIT_ULL(POWER_DOMAIN_INIT))
2578
2579#define HSW_DISPLAY_POWER_DOMAINS (			\
2580	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2581	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2582	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2583	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2584	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2585	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2586	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2587	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2588	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2589	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2590	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2591	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2592	BIT_ULL(POWER_DOMAIN_VGA) |				\
2593	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2594	BIT_ULL(POWER_DOMAIN_INIT))
2595
2596#define BDW_DISPLAY_POWER_DOMAINS (			\
2597	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2598	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2599	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2600	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2601	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2602	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2603	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2604	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2605	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2606	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2607	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2608	BIT_ULL(POWER_DOMAIN_VGA) |				\
2609	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2610	BIT_ULL(POWER_DOMAIN_INIT))
2611
2612#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2613	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2614	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2615	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2616	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2617	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2618	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2619	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2620	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2621	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2622	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2623	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2624	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2625	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2626	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2627	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2628	BIT_ULL(POWER_DOMAIN_VGA) |				\
2629	BIT_ULL(POWER_DOMAIN_INIT))
2630#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2631	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2632	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2633	BIT_ULL(POWER_DOMAIN_INIT))
2634#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2635	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2636	BIT_ULL(POWER_DOMAIN_INIT))
2637#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2638	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2639	BIT_ULL(POWER_DOMAIN_INIT))
2640#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2641	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2642	BIT_ULL(POWER_DOMAIN_INIT))
2643#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2644	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2645	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2646	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2647	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2648	BIT_ULL(POWER_DOMAIN_INIT))
2649
2650#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2651	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2652	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2653	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2654	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2655	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2656	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2657	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2658	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2659	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2660	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2661	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2662	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2663	BIT_ULL(POWER_DOMAIN_VGA) |				\
2664	BIT_ULL(POWER_DOMAIN_INIT))
2665#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2666	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2667	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2668	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2669	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2670	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2671	BIT_ULL(POWER_DOMAIN_INIT))
2672#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2673	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2674	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2675	BIT_ULL(POWER_DOMAIN_INIT))
2676#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2677	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2678	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2679	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2680	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2681	BIT_ULL(POWER_DOMAIN_INIT))
2682
2683#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2684	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2685	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2686	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2687	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2688	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2689	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2690	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2691	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2692	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2693	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2694	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2695	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2696	BIT_ULL(POWER_DOMAIN_VGA) |				\
2697	BIT_ULL(POWER_DOMAIN_INIT))
2698#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2699	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2700#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2701	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2702#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2703	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2704#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2705	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2706	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2707	BIT_ULL(POWER_DOMAIN_INIT))
2708#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2709	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2710	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2711	BIT_ULL(POWER_DOMAIN_INIT))
2712#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2713	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2714	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2715	BIT_ULL(POWER_DOMAIN_INIT))
2716#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2717	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2718	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2719	BIT_ULL(POWER_DOMAIN_INIT))
2720#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2721	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2722	BIT_ULL(POWER_DOMAIN_INIT))
2723#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2724	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2725	BIT_ULL(POWER_DOMAIN_INIT))
2726#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2727	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2728	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2729	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2730	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2731	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2732	BIT_ULL(POWER_DOMAIN_INIT))
2733
2734#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2735	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2736	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2737	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2738	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2739	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2740	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2741	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2742	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2743	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2744	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2745	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2746	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2747	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2748	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2749	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2750	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2751	BIT_ULL(POWER_DOMAIN_VGA) |				\
2752	BIT_ULL(POWER_DOMAIN_INIT))
2753#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2754	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2755	BIT_ULL(POWER_DOMAIN_INIT))
2756#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2757	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2758	BIT_ULL(POWER_DOMAIN_INIT))
2759#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2760	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2761	BIT_ULL(POWER_DOMAIN_INIT))
2762#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2763	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2764	BIT_ULL(POWER_DOMAIN_INIT))
2765#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2766	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2767	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2768	BIT_ULL(POWER_DOMAIN_INIT))
2769#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2770	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2771	BIT_ULL(POWER_DOMAIN_INIT))
2772#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2773	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2774	BIT_ULL(POWER_DOMAIN_INIT))
2775#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2776	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2777	BIT_ULL(POWER_DOMAIN_INIT))
2778#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2779	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2780	BIT_ULL(POWER_DOMAIN_INIT))
2781#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2782	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2783	BIT_ULL(POWER_DOMAIN_INIT))
2784#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2785	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2786	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2787	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2788	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2789	BIT_ULL(POWER_DOMAIN_INIT))
2790
2791/*
2792 * ICL PW_0/PG_0 domains (HW/DMC control):
2793 * - PCI
2794 * - clocks except port PLL
2795 * - central power except FBC
2796 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2797 * ICL PW_1/PG_1 domains (HW/DMC control):
2798 * - DBUF function
2799 * - PIPE_A and its planes, except VGA
2800 * - transcoder EDP + PSR
2801 * - transcoder DSI
2802 * - DDI_A
2803 * - FBC
2804 */
2805#define ICL_PW_4_POWER_DOMAINS (			\
2806	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2807	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2808	BIT_ULL(POWER_DOMAIN_INIT))
2809	/* VDSC/joining */
2810#define ICL_PW_3_POWER_DOMAINS (			\
2811	ICL_PW_4_POWER_DOMAINS |			\
2812	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2813	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2814	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2815	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2816	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2817	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2818	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2819	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2820	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2821	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2822	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2823	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2824	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2825	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2826	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2827	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2828	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2829	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2830	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2831	BIT_ULL(POWER_DOMAIN_VGA) |			\
2832	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2833	BIT_ULL(POWER_DOMAIN_INIT))
2834	/*
2835	 * - transcoder WD
2836	 * - KVMR (HW control)
2837	 */
2838#define ICL_PW_2_POWER_DOMAINS (			\
2839	ICL_PW_3_POWER_DOMAINS |			\
2840	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2841	BIT_ULL(POWER_DOMAIN_INIT))
2842	/*
2843	 * - KVMR (HW control)
2844	 */
2845#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2846	ICL_PW_2_POWER_DOMAINS |			\
2847	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2848	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2849	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2850	BIT_ULL(POWER_DOMAIN_INIT))
2851
2852#define ICL_DDI_IO_A_POWER_DOMAINS (			\
2853	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2854#define ICL_DDI_IO_B_POWER_DOMAINS (			\
2855	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2856#define ICL_DDI_IO_C_POWER_DOMAINS (			\
2857	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2858#define ICL_DDI_IO_D_POWER_DOMAINS (			\
2859	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2860#define ICL_DDI_IO_E_POWER_DOMAINS (			\
2861	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2862#define ICL_DDI_IO_F_POWER_DOMAINS (			\
2863	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2864
2865#define ICL_AUX_A_IO_POWER_DOMAINS (			\
2866	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2867	BIT_ULL(POWER_DOMAIN_AUX_A))
2868#define ICL_AUX_B_IO_POWER_DOMAINS (			\
2869	BIT_ULL(POWER_DOMAIN_AUX_B))
2870#define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2871	BIT_ULL(POWER_DOMAIN_AUX_C))
2872#define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2873	BIT_ULL(POWER_DOMAIN_AUX_D))
2874#define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2875	BIT_ULL(POWER_DOMAIN_AUX_E))
2876#define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2877	BIT_ULL(POWER_DOMAIN_AUX_F))
2878#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2879	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2880#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2881	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2882#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2883	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2884#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2885	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2886
2887#define TGL_PW_5_POWER_DOMAINS (			\
2888	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2889	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2890	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2891	BIT_ULL(POWER_DOMAIN_INIT))
2892
2893#define TGL_PW_4_POWER_DOMAINS (			\
2894	TGL_PW_5_POWER_DOMAINS |			\
2895	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2896	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2897	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2898	BIT_ULL(POWER_DOMAIN_INIT))
2899
2900#define TGL_PW_3_POWER_DOMAINS (			\
2901	TGL_PW_4_POWER_DOMAINS |			\
2902	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2903	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2904	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2905	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
2906	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
2907	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) |	\
2908	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) |	\
2909	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) |	\
2910	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) |	\
2911	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
2912	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
2913	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |		\
2914	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |		\
2915	BIT_ULL(POWER_DOMAIN_AUX_USBC5) |		\
2916	BIT_ULL(POWER_DOMAIN_AUX_USBC6) |		\
2917	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
2918	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
2919	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
2920	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
2921	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |		\
2922	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |		\
2923	BIT_ULL(POWER_DOMAIN_VGA) |			\
2924	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2925	BIT_ULL(POWER_DOMAIN_INIT))
2926
2927#define TGL_PW_2_POWER_DOMAINS (			\
2928	TGL_PW_3_POWER_DOMAINS |			\
2929	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2930	BIT_ULL(POWER_DOMAIN_INIT))
2931
2932#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2933	TGL_PW_3_POWER_DOMAINS |			\
2934	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2935	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2936	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2937	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2938	BIT_ULL(POWER_DOMAIN_INIT))
2939
2940#define TGL_DDI_IO_TC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
2941#define TGL_DDI_IO_TC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
2942#define TGL_DDI_IO_TC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
2943#define TGL_DDI_IO_TC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
2944#define TGL_DDI_IO_TC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
2945#define TGL_DDI_IO_TC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
2946
2947#define TGL_AUX_A_IO_POWER_DOMAINS (		\
2948	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2949	BIT_ULL(POWER_DOMAIN_AUX_A))
2950#define TGL_AUX_B_IO_POWER_DOMAINS (		\
2951	BIT_ULL(POWER_DOMAIN_AUX_B))
2952#define TGL_AUX_C_IO_POWER_DOMAINS (		\
2953	BIT_ULL(POWER_DOMAIN_AUX_C))
2954
2955#define TGL_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
2956#define TGL_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
2957#define TGL_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
2958#define TGL_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)
2959#define TGL_AUX_IO_USBC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC5)
2960#define TGL_AUX_IO_USBC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC6)
2961
2962#define TGL_AUX_IO_TBT1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT1)
2963#define TGL_AUX_IO_TBT2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT2)
2964#define TGL_AUX_IO_TBT3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT3)
2965#define TGL_AUX_IO_TBT4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT4)
2966#define TGL_AUX_IO_TBT5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT5)
2967#define TGL_AUX_IO_TBT6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT6)
2968
2969#define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
2970	BIT_ULL(POWER_DOMAIN_AUX_USBC1)	|	\
2971	BIT_ULL(POWER_DOMAIN_AUX_USBC2)	|	\
2972	BIT_ULL(POWER_DOMAIN_AUX_USBC3)	|	\
2973	BIT_ULL(POWER_DOMAIN_AUX_USBC4)	|	\
2974	BIT_ULL(POWER_DOMAIN_AUX_USBC5)	|	\
2975	BIT_ULL(POWER_DOMAIN_AUX_USBC6)	|	\
2976	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |	\
2977	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |	\
2978	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |	\
2979	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |	\
2980	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |	\
2981	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |	\
2982	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
2983
2984#define RKL_PW_4_POWER_DOMAINS (			\
2985	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2986	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2987	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2988	BIT_ULL(POWER_DOMAIN_INIT))
2989
2990#define RKL_PW_3_POWER_DOMAINS (			\
2991	RKL_PW_4_POWER_DOMAINS |			\
2992	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2993	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2994	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2995	BIT_ULL(POWER_DOMAIN_VGA) |			\
2996	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2997	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
2998	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
2999	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
3000	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
3001	BIT_ULL(POWER_DOMAIN_INIT))
3002
3003/*
3004 * There is no PW_2/PG_2 on RKL.
3005 *
3006 * RKL PW_1/PG_1 domains (under HW/DMC control):
3007 * - DBUF function (note: registers are in PW0)
3008 * - PIPE_A and its planes and VDSC/joining, except VGA
3009 * - transcoder A
3010 * - DDI_A and DDI_B
3011 * - FBC
3012 *
3013 * RKL PW_0/PG_0 domains (under HW/DMC control):
3014 * - PCI
3015 * - clocks except port PLL
3016 * - shared functions:
3017 *     * interrupts except pipe interrupts
3018 *     * MBus except PIPE_MBUS_DBOX_CTL
3019 *     * DBUF registers
3020 * - central power except FBC
3021 * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
3022 */
3023
3024#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
3025	RKL_PW_3_POWER_DOMAINS |			\
3026	BIT_ULL(POWER_DOMAIN_MODESET) |			\
3027	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
3028	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
3029	BIT_ULL(POWER_DOMAIN_INIT))
3030
3031/*
3032 * XE_LPD Power Domains
3033 *
3034 * Previous platforms required that PG(n-1) be enabled before PG(n).  That
3035 * dependency chain turns into a dependency tree on XE_LPD:
3036 *
3037 *       PG0
3038 *        |
3039 *     --PG1--
3040 *    /       \
3041 *  PGA     --PG2--
3042 *         /   |   \
3043 *       PGB  PGC  PGD
3044 *
3045 * Power wells must be enabled from top to bottom and disabled from bottom
3046 * to top.  This allows pipes to be power gated independently.
3047 */
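
/*
 * Editor's note, illustrative only: this top-to-bottom enable and
 * bottom-to-top disable ordering is what the power well iterators used by
 * the get/put paths earlier in this file already encode:
 *
 *	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
 *		intel_power_well_get(dev_priv, power_well);
 *
 *	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
 *		intel_power_well_put(dev_priv, power_well);
 */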
3048
3049#define XELPD_PW_D_POWER_DOMAINS (			\
3050	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
3051	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
3052	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
3053	BIT_ULL(POWER_DOMAIN_INIT))
3054
3055#define XELPD_PW_C_POWER_DOMAINS (			\
3056	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
3057	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
3058	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
3059	BIT_ULL(POWER_DOMAIN_INIT))
3060
3061#define XELPD_PW_B_POWER_DOMAINS (			\
3062	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
3063	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
3064	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
3065	BIT_ULL(POWER_DOMAIN_INIT))
3066
3067#define XELPD_PW_A_POWER_DOMAINS (			\
3068	BIT_ULL(POWER_DOMAIN_PIPE_A) |			\
3069	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
3070	BIT_ULL(POWER_DOMAIN_INIT))
3071
3072#define XELPD_PW_2_POWER_DOMAINS (			\
3073	XELPD_PW_B_POWER_DOMAINS |			\
3074	XELPD_PW_C_POWER_DOMAINS |			\
3075	XELPD_PW_D_POWER_DOMAINS |			\
3076	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
3077	BIT_ULL(POWER_DOMAIN_VGA) |			\
3078	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
3079	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) |	\
3080	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) |	\
3081	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
3082	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
3083	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) |	\
3084	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) |	\
3085	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
3086	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) |		\
3087	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) |		\
3088	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |			\
3089	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |			\
3090	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |			\
3091	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |			\
3092	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |			\
3093	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |			\
3094	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |			\
3095	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |			\
3096	BIT_ULL(POWER_DOMAIN_INIT))
3097
3098/*
3099 * XELPD PW_1/PG_1 domains (under HW/DMC control):
3100 *  - DBUF function (registers are in PW0)
3101 *  - Transcoder A
3102 *  - DDI_A and DDI_B
3103 *
3104 * XELPD PW_0/PG_0 domains (under HW/DMC control):
3105 *  - PCI
3106 *  - Clocks except port PLL
3107 *  - Shared functions:
3108 *     * interrupts except pipe interrupts
3109 *     * MBus except PIPE_MBUS_DBOX_CTL
3110 *     * DBUF registers
3111 *  - Central power except FBC
3112 *  - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
3113 */
3114
3115#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS (		\
3116	XELPD_PW_2_POWER_DOMAINS |			\
3117	BIT_ULL(POWER_DOMAIN_MODESET) |			\
3118	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
3119	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
3120	BIT_ULL(POWER_DOMAIN_INIT))
3121
3122#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
3123#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
3124#define XELPD_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
3125#define XELPD_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
3126#define XELPD_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
3127#define XELPD_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)
3128
3129#define XELPD_AUX_IO_TBT1_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT1)
3130#define XELPD_AUX_IO_TBT2_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT2)
3131#define XELPD_AUX_IO_TBT3_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT3)
3132#define XELPD_AUX_IO_TBT4_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT4)
3133
3134#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD)
3135#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD)
3136#define XELPD_DDI_IO_TC1_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
3137#define XELPD_DDI_IO_TC2_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
3138#define XELPD_DDI_IO_TC3_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
3139#define XELPD_DDI_IO_TC4_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
3140
3141static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
3142	.sync_hw = i9xx_power_well_sync_hw_noop,
3143	.enable = i9xx_always_on_power_well_noop,
3144	.disable = i9xx_always_on_power_well_noop,
3145	.is_enabled = i9xx_always_on_power_well_enabled,
3146};
3147
3148static const struct i915_power_well_ops chv_pipe_power_well_ops = {
3149	.sync_hw = chv_pipe_power_well_sync_hw,
3150	.enable = chv_pipe_power_well_enable,
3151	.disable = chv_pipe_power_well_disable,
3152	.is_enabled = chv_pipe_power_well_enabled,
3153};
3154
3155static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
3156	.sync_hw = i9xx_power_well_sync_hw_noop,
3157	.enable = chv_dpio_cmn_power_well_enable,
3158	.disable = chv_dpio_cmn_power_well_disable,
3159	.is_enabled = vlv_power_well_enabled,
3160};
3161
3162static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
3163	{
3164		.name = "always-on",
3165		.always_on = true,
3166		.domains = POWER_DOMAIN_MASK,
3167		.ops = &i9xx_always_on_power_well_ops,
3168		.id = DISP_PW_ID_NONE,
3169	},
3170};
3171
3172static const struct i915_power_well_ops i830_pipes_power_well_ops = {
3173	.sync_hw = i830_pipes_power_well_sync_hw,
3174	.enable = i830_pipes_power_well_enable,
3175	.disable = i830_pipes_power_well_disable,
3176	.is_enabled = i830_pipes_power_well_enabled,
3177};
3178
3179static const struct i915_power_well_desc i830_power_wells[] = {
3180	{
3181		.name = "always-on",
3182		.always_on = true,
3183		.domains = POWER_DOMAIN_MASK,
3184		.ops = &i9xx_always_on_power_well_ops,
3185		.id = DISP_PW_ID_NONE,
3186	},
3187	{
3188		.name = "pipes",
3189		.domains = I830_PIPES_POWER_DOMAINS,
3190		.ops = &i830_pipes_power_well_ops,
3191		.id = DISP_PW_ID_NONE,
3192	},
3193};
3194
3195static const struct i915_power_well_ops hsw_power_well_ops = {
3196	.sync_hw = hsw_power_well_sync_hw,
3197	.enable = hsw_power_well_enable,
3198	.disable = hsw_power_well_disable,
3199	.is_enabled = hsw_power_well_enabled,
3200};
3201
3202static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
3203	.sync_hw = i9xx_power_well_sync_hw_noop,
3204	.enable = gen9_dc_off_power_well_enable,
3205	.disable = gen9_dc_off_power_well_disable,
3206	.is_enabled = gen9_dc_off_power_well_enabled,
3207};
3208
3209static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
3210	.sync_hw = i9xx_power_well_sync_hw_noop,
3211	.enable = bxt_dpio_cmn_power_well_enable,
3212	.disable = bxt_dpio_cmn_power_well_disable,
3213	.is_enabled = bxt_dpio_cmn_power_well_enabled,
3214};
3215
3216static const struct i915_power_well_regs hsw_power_well_regs = {
3217	.bios	= HSW_PWR_WELL_CTL1,
3218	.driver	= HSW_PWR_WELL_CTL2,
3219	.kvmr	= HSW_PWR_WELL_CTL3,
3220	.debug	= HSW_PWR_WELL_CTL4,
3221};
3222
3223static const struct i915_power_well_desc hsw_power_wells[] = {
3224	{
3225		.name = "always-on",
3226		.always_on = true,
3227		.domains = POWER_DOMAIN_MASK,
3228		.ops = &i9xx_always_on_power_well_ops,
3229		.id = DISP_PW_ID_NONE,
3230	},
3231	{
3232		.name = "display",
3233		.domains = HSW_DISPLAY_POWER_DOMAINS,
3234		.ops = &hsw_power_well_ops,
3235		.id = HSW_DISP_PW_GLOBAL,
3236		{
3237			.hsw.regs = &hsw_power_well_regs,
3238			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3239			.hsw.has_vga = true,
3240		},
3241	},
3242};
3243
3244static const struct i915_power_well_desc bdw_power_wells[] = {
3245	{
3246		.name = "always-on",
3247		.always_on = true,
3248		.domains = POWER_DOMAIN_MASK,
3249		.ops = &i9xx_always_on_power_well_ops,
3250		.id = DISP_PW_ID_NONE,
3251	},
3252	{
3253		.name = "display",
3254		.domains = BDW_DISPLAY_POWER_DOMAINS,
3255		.ops = &hsw_power_well_ops,
3256		.id = HSW_DISP_PW_GLOBAL,
3257		{
3258			.hsw.regs = &hsw_power_well_regs,
3259			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3260			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3261			.hsw.has_vga = true,
3262		},
3263	},
3264};
3265
3266static const struct i915_power_well_ops vlv_display_power_well_ops = {
3267	.sync_hw = i9xx_power_well_sync_hw_noop,
3268	.enable = vlv_display_power_well_enable,
3269	.disable = vlv_display_power_well_disable,
3270	.is_enabled = vlv_power_well_enabled,
3271};
3272
3273static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3274	.sync_hw = i9xx_power_well_sync_hw_noop,
3275	.enable = vlv_dpio_cmn_power_well_enable,
3276	.disable = vlv_dpio_cmn_power_well_disable,
3277	.is_enabled = vlv_power_well_enabled,
3278};
3279
3280static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3281	.sync_hw = i9xx_power_well_sync_hw_noop,
3282	.enable = vlv_power_well_enable,
3283	.disable = vlv_power_well_disable,
3284	.is_enabled = vlv_power_well_enabled,
3285};
3286
3287static const struct i915_power_well_desc vlv_power_wells[] = {
3288	{
3289		.name = "always-on",
3290		.always_on = true,
3291		.domains = POWER_DOMAIN_MASK,
3292		.ops = &i9xx_always_on_power_well_ops,
3293		.id = DISP_PW_ID_NONE,
3294	},
3295	{
3296		.name = "display",
3297		.domains = VLV_DISPLAY_POWER_DOMAINS,
3298		.ops = &vlv_display_power_well_ops,
3299		.id = VLV_DISP_PW_DISP2D,
3300		{
3301			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3302		},
3303	},
3304	{
3305		.name = "dpio-tx-b-01",
3306		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3307			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3308			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3309			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3310		.ops = &vlv_dpio_power_well_ops,
3311		.id = DISP_PW_ID_NONE,
3312		{
3313			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3314		},
3315	},
3316	{
3317		.name = "dpio-tx-b-23",
3318		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3319			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3320			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3321			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3322		.ops = &vlv_dpio_power_well_ops,
3323		.id = DISP_PW_ID_NONE,
3324		{
3325			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3326		},
3327	},
3328	{
3329		.name = "dpio-tx-c-01",
3330		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3331			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3332			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3333			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3334		.ops = &vlv_dpio_power_well_ops,
3335		.id = DISP_PW_ID_NONE,
3336		{
3337			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3338		},
3339	},
3340	{
3341		.name = "dpio-tx-c-23",
3342		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3343			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3344			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3345			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3346		.ops = &vlv_dpio_power_well_ops,
3347		.id = DISP_PW_ID_NONE,
3348		{
3349			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3350		},
3351	},
3352	{
3353		.name = "dpio-common",
3354		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3355		.ops = &vlv_dpio_cmn_power_well_ops,
3356		.id = VLV_DISP_PW_DPIO_CMN_BC,
3357		{
3358			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3359		},
3360	},
3361};
3362
3363static const struct i915_power_well_desc chv_power_wells[] = {
3364	{
3365		.name = "always-on",
3366		.always_on = true,
3367		.domains = POWER_DOMAIN_MASK,
3368		.ops = &i9xx_always_on_power_well_ops,
3369		.id = DISP_PW_ID_NONE,
3370	},
3371	{
3372		.name = "display",
3373		/*
3374		 * The Pipe A power well is the new disp2d well; Pipe B and C
3375		 * power wells don't actually exist. The Pipe A power well is
3376		 * required for any pipe to work.
3377		 */
3378		.domains = CHV_DISPLAY_POWER_DOMAINS,
3379		.ops = &chv_pipe_power_well_ops,
3380		.id = DISP_PW_ID_NONE,
3381	},
3382	{
3383		.name = "dpio-common-bc",
3384		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3385		.ops = &chv_dpio_cmn_power_well_ops,
3386		.id = VLV_DISP_PW_DPIO_CMN_BC,
3387		{
3388			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3389		},
3390	},
3391	{
3392		.name = "dpio-common-d",
3393		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3394		.ops = &chv_dpio_cmn_power_well_ops,
3395		.id = CHV_DISP_PW_DPIO_CMN_D,
3396		{
3397			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3398		},
3399	},
3400};
3401
3402bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3403					 enum i915_power_well_id power_well_id)
3404{
3405	struct i915_power_well *power_well;
3406	bool ret;
3407
3408	power_well = lookup_power_well(dev_priv, power_well_id);
3409	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3410
3411	return ret;
3412}
3413
3414static const struct i915_power_well_desc skl_power_wells[] = {
3415	{
3416		.name = "always-on",
3417		.always_on = true,
3418		.domains = POWER_DOMAIN_MASK,
3419		.ops = &i9xx_always_on_power_well_ops,
3420		.id = DISP_PW_ID_NONE,
3421	},
3422	{
3423		.name = "power well 1",
3424		/* Handled by the DMC firmware */
3425		.always_on = true,
3426		.domains = 0,
3427		.ops = &hsw_power_well_ops,
3428		.id = SKL_DISP_PW_1,
3429		{
3430			.hsw.regs = &hsw_power_well_regs,
3431			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3432			.hsw.has_fuses = true,
3433		},
3434	},
3435	{
3436		.name = "MISC IO power well",
3437		/* Handled by the DMC firmware */
3438		.always_on = true,
3439		.domains = 0,
3440		.ops = &hsw_power_well_ops,
3441		.id = SKL_DISP_PW_MISC_IO,
3442		{
3443			.hsw.regs = &hsw_power_well_regs,
3444			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3445		},
3446	},
3447	{
3448		.name = "DC off",
3449		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3450		.ops = &gen9_dc_off_power_well_ops,
3451		.id = SKL_DISP_DC_OFF,
3452	},
3453	{
3454		.name = "power well 2",
3455		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3456		.ops = &hsw_power_well_ops,
3457		.id = SKL_DISP_PW_2,
3458		{
3459			.hsw.regs = &hsw_power_well_regs,
3460			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3461			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3462			.hsw.has_vga = true,
3463			.hsw.has_fuses = true,
3464		},
3465	},
3466	{
3467		.name = "DDI A/E IO power well",
3468		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3469		.ops = &hsw_power_well_ops,
3470		.id = DISP_PW_ID_NONE,
3471		{
3472			.hsw.regs = &hsw_power_well_regs,
3473			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3474		},
3475	},
3476	{
3477		.name = "DDI B IO power well",
3478		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3479		.ops = &hsw_power_well_ops,
3480		.id = DISP_PW_ID_NONE,
3481		{
3482			.hsw.regs = &hsw_power_well_regs,
3483			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3484		},
3485	},
3486	{
3487		.name = "DDI C IO power well",
3488		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3489		.ops = &hsw_power_well_ops,
3490		.id = DISP_PW_ID_NONE,
3491		{
3492			.hsw.regs = &hsw_power_well_regs,
3493			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3494		},
3495	},
3496	{
3497		.name = "DDI D IO power well",
3498		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3499		.ops = &hsw_power_well_ops,
3500		.id = DISP_PW_ID_NONE,
3501		{
3502			.hsw.regs = &hsw_power_well_regs,
3503			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3504		},
3505	},
3506};
3507
3508static const struct i915_power_well_desc bxt_power_wells[] = {
3509	{
3510		.name = "always-on",
3511		.always_on = true,
3512		.domains = POWER_DOMAIN_MASK,
3513		.ops = &i9xx_always_on_power_well_ops,
3514		.id = DISP_PW_ID_NONE,
3515	},
3516	{
3517		.name = "power well 1",
3518		/* Handled by the DMC firmware */
3519		.always_on = true,
3520		.domains = 0,
3521		.ops = &hsw_power_well_ops,
3522		.id = SKL_DISP_PW_1,
3523		{
3524			.hsw.regs = &hsw_power_well_regs,
3525			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3526			.hsw.has_fuses = true,
3527		},
3528	},
3529	{
3530		.name = "DC off",
3531		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3532		.ops = &gen9_dc_off_power_well_ops,
3533		.id = SKL_DISP_DC_OFF,
3534	},
3535	{
3536		.name = "power well 2",
3537		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3538		.ops = &hsw_power_well_ops,
3539		.id = SKL_DISP_PW_2,
3540		{
3541			.hsw.regs = &hsw_power_well_regs,
3542			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3543			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3544			.hsw.has_vga = true,
3545			.hsw.has_fuses = true,
3546		},
3547	},
3548	{
3549		.name = "dpio-common-a",
3550		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3551		.ops = &bxt_dpio_cmn_power_well_ops,
3552		.id = BXT_DISP_PW_DPIO_CMN_A,
3553		{
3554			.bxt.phy = DPIO_PHY1,
3555		},
3556	},
3557	{
3558		.name = "dpio-common-bc",
3559		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3560		.ops = &bxt_dpio_cmn_power_well_ops,
3561		.id = VLV_DISP_PW_DPIO_CMN_BC,
3562		{
3563			.bxt.phy = DPIO_PHY0,
3564		},
3565	},
3566};
3567
3568static const struct i915_power_well_desc glk_power_wells[] = {
3569	{
3570		.name = "always-on",
3571		.always_on = true,
3572		.domains = POWER_DOMAIN_MASK,
3573		.ops = &i9xx_always_on_power_well_ops,
3574		.id = DISP_PW_ID_NONE,
3575	},
3576	{
3577		.name = "power well 1",
3578		/* Handled by the DMC firmware */
3579		.always_on = true,
3580		.domains = 0,
3581		.ops = &hsw_power_well_ops,
3582		.id = SKL_DISP_PW_1,
3583		{
3584			.hsw.regs = &hsw_power_well_regs,
3585			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3586			.hsw.has_fuses = true,
3587		},
3588	},
3589	{
3590		.name = "DC off",
3591		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3592		.ops = &gen9_dc_off_power_well_ops,
3593		.id = SKL_DISP_DC_OFF,
3594	},
3595	{
3596		.name = "power well 2",
3597		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3598		.ops = &hsw_power_well_ops,
3599		.id = SKL_DISP_PW_2,
3600		{
3601			.hsw.regs = &hsw_power_well_regs,
3602			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3603			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3604			.hsw.has_vga = true,
3605			.hsw.has_fuses = true,
3606		},
3607	},
3608	{
3609		.name = "dpio-common-a",
3610		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3611		.ops = &bxt_dpio_cmn_power_well_ops,
3612		.id = BXT_DISP_PW_DPIO_CMN_A,
3613		{
3614			.bxt.phy = DPIO_PHY1,
3615		},
3616	},
3617	{
3618		.name = "dpio-common-b",
3619		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3620		.ops = &bxt_dpio_cmn_power_well_ops,
3621		.id = VLV_DISP_PW_DPIO_CMN_BC,
3622		{
3623			.bxt.phy = DPIO_PHY0,
3624		},
3625	},
3626	{
3627		.name = "dpio-common-c",
3628		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3629		.ops = &bxt_dpio_cmn_power_well_ops,
3630		.id = GLK_DISP_PW_DPIO_CMN_C,
3631		{
3632			.bxt.phy = DPIO_PHY2,
3633		},
3634	},
3635	{
3636		.name = "AUX A",
3637		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3638		.ops = &hsw_power_well_ops,
3639		.id = DISP_PW_ID_NONE,
3640		{
3641			.hsw.regs = &hsw_power_well_regs,
3642			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3643		},
3644	},
3645	{
3646		.name = "AUX B",
3647		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3648		.ops = &hsw_power_well_ops,
3649		.id = DISP_PW_ID_NONE,
3650		{
3651			.hsw.regs = &hsw_power_well_regs,
3652			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3653		},
3654	},
3655	{
3656		.name = "AUX C",
3657		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3658		.ops = &hsw_power_well_ops,
3659		.id = DISP_PW_ID_NONE,
3660		{
3661			.hsw.regs = &hsw_power_well_regs,
3662			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3663		},
3664	},
3665	{
3666		.name = "DDI A IO power well",
3667		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3668		.ops = &hsw_power_well_ops,
3669		.id = DISP_PW_ID_NONE,
3670		{
3671			.hsw.regs = &hsw_power_well_regs,
3672			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3673		},
3674	},
3675	{
3676		.name = "DDI B IO power well",
3677		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3678		.ops = &hsw_power_well_ops,
3679		.id = DISP_PW_ID_NONE,
3680		{
3681			.hsw.regs = &hsw_power_well_regs,
3682			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3683		},
3684	},
3685	{
3686		.name = "DDI C IO power well",
3687		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3688		.ops = &hsw_power_well_ops,
3689		.id = DISP_PW_ID_NONE,
3690		{
3691			.hsw.regs = &hsw_power_well_regs,
3692			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3693		},
3694	},
3695};
3696
3697static const struct i915_power_well_desc cnl_power_wells[] = {
3698	{
3699		.name = "always-on",
3700		.always_on = true,
3701		.domains = POWER_DOMAIN_MASK,
3702		.ops = &i9xx_always_on_power_well_ops,
3703		.id = DISP_PW_ID_NONE,
3704	},
3705	{
3706		.name = "power well 1",
3707		/* Handled by the DMC firmware */
3708		.always_on = true,
3709		.domains = 0,
3710		.ops = &hsw_power_well_ops,
3711		.id = SKL_DISP_PW_1,
3712		{
3713			.hsw.regs = &hsw_power_well_regs,
3714			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3715			.hsw.has_fuses = true,
3716		},
3717	},
3718	{
3719		.name = "AUX A",
3720		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3721		.ops = &hsw_power_well_ops,
3722		.id = DISP_PW_ID_NONE,
3723		{
3724			.hsw.regs = &hsw_power_well_regs,
3725			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3726		},
3727	},
3728	{
3729		.name = "AUX B",
3730		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3731		.ops = &hsw_power_well_ops,
3732		.id = DISP_PW_ID_NONE,
3733		{
3734			.hsw.regs = &hsw_power_well_regs,
3735			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3736		},
3737	},
3738	{
3739		.name = "AUX C",
3740		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3741		.ops = &hsw_power_well_ops,
3742		.id = DISP_PW_ID_NONE,
3743		{
3744			.hsw.regs = &hsw_power_well_regs,
3745			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3746		},
3747	},
3748	{
3749		.name = "AUX D",
3750		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3751		.ops = &hsw_power_well_ops,
3752		.id = DISP_PW_ID_NONE,
3753		{
3754			.hsw.regs = &hsw_power_well_regs,
3755			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3756		},
3757	},
3758	{
3759		.name = "DC off",
3760		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3761		.ops = &gen9_dc_off_power_well_ops,
3762		.id = SKL_DISP_DC_OFF,
3763	},
3764	{
3765		.name = "power well 2",
3766		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3767		.ops = &hsw_power_well_ops,
3768		.id = SKL_DISP_PW_2,
3769		{
3770			.hsw.regs = &hsw_power_well_regs,
3771			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3772			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3773			.hsw.has_vga = true,
3774			.hsw.has_fuses = true,
3775		},
3776	},
3777	{
3778		.name = "DDI A IO power well",
3779		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3780		.ops = &hsw_power_well_ops,
3781		.id = DISP_PW_ID_NONE,
3782		{
3783			.hsw.regs = &hsw_power_well_regs,
3784			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3785		},
3786	},
3787	{
3788		.name = "DDI B IO power well",
3789		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3790		.ops = &hsw_power_well_ops,
3791		.id = DISP_PW_ID_NONE,
3792		{
3793			.hsw.regs = &hsw_power_well_regs,
3794			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3795		},
3796	},
3797	{
3798		.name = "DDI C IO power well",
3799		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3800		.ops = &hsw_power_well_ops,
3801		.id = DISP_PW_ID_NONE,
3802		{
3803			.hsw.regs = &hsw_power_well_regs,
3804			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3805		},
3806	},
3807	{
3808		.name = "DDI D IO power well",
3809		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3810		.ops = &hsw_power_well_ops,
3811		.id = DISP_PW_ID_NONE,
3812		{
3813			.hsw.regs = &hsw_power_well_regs,
3814			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3815		},
3816	},
3817	{
3818		.name = "DDI F IO power well",
3819		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3820		.ops = &hsw_power_well_ops,
3821		.id = CNL_DISP_PW_DDI_F_IO,
3822		{
3823			.hsw.regs = &hsw_power_well_regs,
3824			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3825		},
3826	},
3827	{
3828		.name = "AUX F",
3829		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3830		.ops = &hsw_power_well_ops,
3831		.id = CNL_DISP_PW_DDI_F_AUX,
3832		{
3833			.hsw.regs = &hsw_power_well_regs,
3834			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3835		},
3836	},
3837};
3838
3839static const struct i915_power_well_ops icl_aux_power_well_ops = {
3840	.sync_hw = hsw_power_well_sync_hw,
3841	.enable = icl_aux_power_well_enable,
3842	.disable = icl_aux_power_well_disable,
3843	.is_enabled = hsw_power_well_enabled,
3844};
3845
3846static const struct i915_power_well_regs icl_aux_power_well_regs = {
3847	.bios	= ICL_PWR_WELL_CTL_AUX1,
3848	.driver	= ICL_PWR_WELL_CTL_AUX2,
3849	.debug	= ICL_PWR_WELL_CTL_AUX4,
3850};
3851
3852static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3853	.bios	= ICL_PWR_WELL_CTL_DDI1,
3854	.driver	= ICL_PWR_WELL_CTL_DDI2,
3855	.debug	= ICL_PWR_WELL_CTL_DDI4,
3856};
3857
3858static const struct i915_power_well_desc icl_power_wells[] = {
3859	{
3860		.name = "always-on",
3861		.always_on = true,
3862		.domains = POWER_DOMAIN_MASK,
3863		.ops = &i9xx_always_on_power_well_ops,
3864		.id = DISP_PW_ID_NONE,
3865	},
3866	{
3867		.name = "power well 1",
3868		/* Handled by the DMC firmware */
3869		.always_on = true,
3870		.domains = 0,
3871		.ops = &hsw_power_well_ops,
3872		.id = SKL_DISP_PW_1,
3873		{
3874			.hsw.regs = &hsw_power_well_regs,
3875			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3876			.hsw.has_fuses = true,
3877		},
3878	},
3879	{
3880		.name = "DC off",
3881		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3882		.ops = &gen9_dc_off_power_well_ops,
3883		.id = SKL_DISP_DC_OFF,
3884	},
3885	{
3886		.name = "power well 2",
3887		.domains = ICL_PW_2_POWER_DOMAINS,
3888		.ops = &hsw_power_well_ops,
3889		.id = SKL_DISP_PW_2,
3890		{
3891			.hsw.regs = &hsw_power_well_regs,
3892			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3893			.hsw.has_fuses = true,
3894		},
3895	},
3896	{
3897		.name = "power well 3",
3898		.domains = ICL_PW_3_POWER_DOMAINS,
3899		.ops = &hsw_power_well_ops,
3900		.id = ICL_DISP_PW_3,
3901		{
3902			.hsw.regs = &hsw_power_well_regs,
3903			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3904			.hsw.irq_pipe_mask = BIT(PIPE_B),
3905			.hsw.has_vga = true,
3906			.hsw.has_fuses = true,
3907		},
3908	},
3909	{
3910		.name = "DDI A IO",
3911		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3912		.ops = &hsw_power_well_ops,
3913		.id = DISP_PW_ID_NONE,
3914		{
3915			.hsw.regs = &icl_ddi_power_well_regs,
3916			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3917		},
3918	},
3919	{
3920		.name = "DDI B IO",
3921		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3922		.ops = &hsw_power_well_ops,
3923		.id = DISP_PW_ID_NONE,
3924		{
3925			.hsw.regs = &icl_ddi_power_well_regs,
3926			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3927		},
3928	},
3929	{
3930		.name = "DDI C IO",
3931		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3932		.ops = &hsw_power_well_ops,
3933		.id = DISP_PW_ID_NONE,
3934		{
3935			.hsw.regs = &icl_ddi_power_well_regs,
3936			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3937		},
3938	},
3939	{
3940		.name = "DDI D IO",
3941		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3942		.ops = &hsw_power_well_ops,
3943		.id = DISP_PW_ID_NONE,
3944		{
3945			.hsw.regs = &icl_ddi_power_well_regs,
3946			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3947		},
3948	},
3949	{
3950		.name = "DDI E IO",
3951		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3952		.ops = &hsw_power_well_ops,
3953		.id = DISP_PW_ID_NONE,
3954		{
3955			.hsw.regs = &icl_ddi_power_well_regs,
3956			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3957		},
3958	},
3959	{
3960		.name = "DDI F IO",
3961		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3962		.ops = &hsw_power_well_ops,
3963		.id = DISP_PW_ID_NONE,
3964		{
3965			.hsw.regs = &icl_ddi_power_well_regs,
3966			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3967		},
3968	},
3969	{
3970		.name = "AUX A",
3971		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3972		.ops = &icl_aux_power_well_ops,
3973		.id = DISP_PW_ID_NONE,
3974		{
3975			.hsw.regs = &icl_aux_power_well_regs,
3976			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3977		},
3978	},
3979	{
3980		.name = "AUX B",
3981		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3982		.ops = &icl_aux_power_well_ops,
3983		.id = DISP_PW_ID_NONE,
3984		{
3985			.hsw.regs = &icl_aux_power_well_regs,
3986			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3987		},
3988	},
3989	{
3990		.name = "AUX C TC1",
3991		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3992		.ops = &icl_aux_power_well_ops,
3993		.id = DISP_PW_ID_NONE,
3994		{
3995			.hsw.regs = &icl_aux_power_well_regs,
3996			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3997			.hsw.is_tc_tbt = false,
3998		},
3999	},
4000	{
4001		.name = "AUX D TC2",
4002		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
4003		.ops = &icl_aux_power_well_ops,
4004		.id = DISP_PW_ID_NONE,
4005		{
4006			.hsw.regs = &icl_aux_power_well_regs,
4007			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
4008			.hsw.is_tc_tbt = false,
4009		},
4010	},
4011	{
4012		.name = "AUX E TC3",
4013		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
4014		.ops = &icl_aux_power_well_ops,
4015		.id = DISP_PW_ID_NONE,
4016		{
4017			.hsw.regs = &icl_aux_power_well_regs,
4018			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
4019			.hsw.is_tc_tbt = false,
4020		},
4021	},
4022	{
4023		.name = "AUX F TC4",
4024		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
4025		.ops = &icl_aux_power_well_ops,
4026		.id = DISP_PW_ID_NONE,
4027		{
4028			.hsw.regs = &icl_aux_power_well_regs,
4029			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
4030			.hsw.is_tc_tbt = false,
4031		},
4032	},
4033	{
4034		.name = "AUX C TBT1",
4035		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
4036		.ops = &icl_aux_power_well_ops,
4037		.id = DISP_PW_ID_NONE,
4038		{
4039			.hsw.regs = &icl_aux_power_well_regs,
4040			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
4041			.hsw.is_tc_tbt = true,
4042		},
4043	},
4044	{
4045		.name = "AUX D TBT2",
4046		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
4047		.ops = &icl_aux_power_well_ops,
4048		.id = DISP_PW_ID_NONE,
4049		{
4050			.hsw.regs = &icl_aux_power_well_regs,
4051			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
4052			.hsw.is_tc_tbt = true,
4053		},
4054	},
4055	{
4056		.name = "AUX E TBT3",
4057		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
4058		.ops = &icl_aux_power_well_ops,
4059		.id = DISP_PW_ID_NONE,
4060		{
4061			.hsw.regs = &icl_aux_power_well_regs,
4062			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
4063			.hsw.is_tc_tbt = true,
4064		},
4065	},
4066	{
4067		.name = "AUX F TBT4",
4068		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
4069		.ops = &icl_aux_power_well_ops,
4070		.id = DISP_PW_ID_NONE,
4071		{
4072			.hsw.regs = &icl_aux_power_well_regs,
4073			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
4074			.hsw.is_tc_tbt = true,
4075		},
4076	},
4077	{
4078		.name = "power well 4",
4079		.domains = ICL_PW_4_POWER_DOMAINS,
4080		.ops = &hsw_power_well_ops,
4081		.id = DISP_PW_ID_NONE,
4082		{
4083			.hsw.regs = &hsw_power_well_regs,
4084			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4085			.hsw.has_fuses = true,
4086			.hsw.irq_pipe_mask = BIT(PIPE_C),
4087		},
4088	},
4089};
4090
4091static void
4092tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
4093{
4094	u8 tries = 0;
4095	int ret;
4096
4097	while (1) {
4098		u32 low_val;
4099		u32 high_val = 0;
4100
4101		if (block)
4102			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
4103		else
4104			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
4105
4106		/*
4107		 * The spec states that we should time out the request after
4108		 * 200us, but the function below will time out after 500us.
4109		 */
4110		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
4111					     &high_val);
4112		if (ret == 0) {
4113			if (block &&
4114			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
4115				ret = -EIO;
4116			else
4117				break;
4118		}
4119
4120		if (++tries == 3)
4121			break;
4122
4123		msleep(1);
4124	}
4125
4126	if (ret)
4127		drm_err(&i915->drm, "TC cold %sblock failed\n",
4128			block ? "" : "un");
4129	else
4130		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
4131			    block ? "" : "un");
4132}
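
/*
 * The wrappers below expose the TC cold block/unblock request as a
 * regular power well: enable blocks TC cold, disable unblocks it, and
 * sync_hw replays whatever state the reference count implies. A sketch
 * of the equivalent direct usage (hypothetical caller, for
 * illustration only):
 *
 *	tgl_tc_cold_request(i915, true);	// block TC cold
 *	... access type-C PHY registers ...
 *	tgl_tc_cold_request(i915, false);	// allow TC cold again
 */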
4133
4134static void
4135tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
4136				  struct i915_power_well *power_well)
4137{
4138	tgl_tc_cold_request(i915, true);
4139}
4140
4141static void
4142tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
4143				   struct i915_power_well *power_well)
4144{
4145	tgl_tc_cold_request(i915, false);
4146}
4147
4148static void
4149tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
4150				   struct i915_power_well *power_well)
4151{
4152	if (power_well->count > 0)
4153		tgl_tc_cold_off_power_well_enable(i915, power_well);
4154	else
4155		tgl_tc_cold_off_power_well_disable(i915, power_well);
4156}
4157
4158static bool
4159tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
4160				      struct i915_power_well *power_well)
4161{
4162	/*
4163	 * Not the correct implementation, but there is no way to just read it
4164	 * from PCODE, so return the count to avoid state mismatch errors.
4165	 */
4166	return power_well->count;
4167}
4168
4169static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
4170	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
4171	.enable = tgl_tc_cold_off_power_well_enable,
4172	.disable = tgl_tc_cold_off_power_well_disable,
4173	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
4174};
4175
4176static const struct i915_power_well_desc tgl_power_wells[] = {
4177	{
4178		.name = "always-on",
4179		.always_on = true,
4180		.domains = POWER_DOMAIN_MASK,
4181		.ops = &i9xx_always_on_power_well_ops,
4182		.id = DISP_PW_ID_NONE,
4183	},
4184	{
4185		.name = "power well 1",
4186		/* Handled by the DMC firmware */
4187		.always_on = true,
4188		.domains = 0,
4189		.ops = &hsw_power_well_ops,
4190		.id = SKL_DISP_PW_1,
4191		{
4192			.hsw.regs = &hsw_power_well_regs,
4193			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4194			.hsw.has_fuses = true,
4195		},
4196	},
4197	{
4198		.name = "DC off",
4199		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
4200		.ops = &gen9_dc_off_power_well_ops,
4201		.id = SKL_DISP_DC_OFF,
4202	},
4203	{
4204		.name = "power well 2",
4205		.domains = TGL_PW_2_POWER_DOMAINS,
4206		.ops = &hsw_power_well_ops,
4207		.id = SKL_DISP_PW_2,
4208		{
4209			.hsw.regs = &hsw_power_well_regs,
4210			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4211			.hsw.has_fuses = true,
4212		},
4213	},
4214	{
4215		.name = "power well 3",
4216		.domains = TGL_PW_3_POWER_DOMAINS,
4217		.ops = &hsw_power_well_ops,
4218		.id = ICL_DISP_PW_3,
4219		{
4220			.hsw.regs = &hsw_power_well_regs,
4221			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4222			.hsw.irq_pipe_mask = BIT(PIPE_B),
4223			.hsw.has_vga = true,
4224			.hsw.has_fuses = true,
4225		},
4226	},
4227	{
4228		.name = "DDI A IO",
4229		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4230		.ops = &hsw_power_well_ops,
4231		.id = DISP_PW_ID_NONE,
4232		{
4233			.hsw.regs = &icl_ddi_power_well_regs,
4234			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4235		}
4236	},
4237	{
4238		.name = "DDI B IO",
4239		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4240		.ops = &hsw_power_well_ops,
4241		.id = DISP_PW_ID_NONE,
4242		{
4243			.hsw.regs = &icl_ddi_power_well_regs,
4244			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4245		}
4246	},
4247	{
4248		.name = "DDI C IO",
4249		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
4250		.ops = &hsw_power_well_ops,
4251		.id = DISP_PW_ID_NONE,
4252		{
4253			.hsw.regs = &icl_ddi_power_well_regs,
4254			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4255		}
4256	},
4257	{
4258		.name = "DDI IO TC1",
4259		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4260		.ops = &hsw_power_well_ops,
4261		.id = DISP_PW_ID_NONE,
4262		{
4263			.hsw.regs = &icl_ddi_power_well_regs,
4264			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4265		},
4266	},
4267	{
4268		.name = "DDI IO TC2",
4269		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4270		.ops = &hsw_power_well_ops,
4271		.id = DISP_PW_ID_NONE,
4272		{
4273			.hsw.regs = &icl_ddi_power_well_regs,
4274			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4275		},
4276	},
4277	{
4278		.name = "DDI IO TC3",
4279		.domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
4280		.ops = &hsw_power_well_ops,
4281		.id = DISP_PW_ID_NONE,
4282		{
4283			.hsw.regs = &icl_ddi_power_well_regs,
4284			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4285		},
4286	},
4287	{
4288		.name = "DDI IO TC4",
4289		.domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
4290		.ops = &hsw_power_well_ops,
4291		.id = DISP_PW_ID_NONE,
4292		{
4293			.hsw.regs = &icl_ddi_power_well_regs,
4294			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4295		},
4296	},
4297	{
4298		.name = "DDI IO TC5",
4299		.domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
4300		.ops = &hsw_power_well_ops,
4301		.id = DISP_PW_ID_NONE,
4302		{
4303			.hsw.regs = &icl_ddi_power_well_regs,
4304			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4305		},
4306	},
4307	{
4308		.name = "DDI IO TC6",
4309		.domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
4310		.ops = &hsw_power_well_ops,
4311		.id = DISP_PW_ID_NONE,
4312		{
4313			.hsw.regs = &icl_ddi_power_well_regs,
4314			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4315		},
4316	},
4317	{
4318		.name = "TC cold off",
4319		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4320		.ops = &tgl_tc_cold_off_ops,
4321		.id = TGL_DISP_PW_TC_COLD_OFF,
4322	},
4323	{
4324		.name = "AUX A",
4325		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4326		.ops = &icl_aux_power_well_ops,
4327		.id = DISP_PW_ID_NONE,
4328		{
4329			.hsw.regs = &icl_aux_power_well_regs,
4330			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4331		},
4332	},
4333	{
4334		.name = "AUX B",
4335		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4336		.ops = &icl_aux_power_well_ops,
4337		.id = DISP_PW_ID_NONE,
4338		{
4339			.hsw.regs = &icl_aux_power_well_regs,
4340			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4341		},
4342	},
4343	{
4344		.name = "AUX C",
4345		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4346		.ops = &icl_aux_power_well_ops,
4347		.id = DISP_PW_ID_NONE,
4348		{
4349			.hsw.regs = &icl_aux_power_well_regs,
4350			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4351		},
4352	},
4353	{
4354		.name = "AUX USBC1",
4355		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4356		.ops = &icl_aux_power_well_ops,
4357		.id = DISP_PW_ID_NONE,
4358		{
4359			.hsw.regs = &icl_aux_power_well_regs,
4360			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4361			.hsw.is_tc_tbt = false,
4362		},
4363	},
4364	{
4365		.name = "AUX USBC2",
4366		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4367		.ops = &icl_aux_power_well_ops,
4368		.id = DISP_PW_ID_NONE,
4369		{
4370			.hsw.regs = &icl_aux_power_well_regs,
4371			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4372			.hsw.is_tc_tbt = false,
4373		},
4374	},
4375	{
4376		.name = "AUX USBC3",
4377		.domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
4378		.ops = &icl_aux_power_well_ops,
4379		.id = DISP_PW_ID_NONE,
4380		{
4381			.hsw.regs = &icl_aux_power_well_regs,
4382			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4383			.hsw.is_tc_tbt = false,
4384		},
4385	},
4386	{
4387		.name = "AUX USBC4",
4388		.domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
4389		.ops = &icl_aux_power_well_ops,
4390		.id = DISP_PW_ID_NONE,
4391		{
4392			.hsw.regs = &icl_aux_power_well_regs,
4393			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4394			.hsw.is_tc_tbt = false,
4395		},
4396	},
4397	{
4398		.name = "AUX USBC5",
4399		.domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
4400		.ops = &icl_aux_power_well_ops,
4401		.id = DISP_PW_ID_NONE,
4402		{
4403			.hsw.regs = &icl_aux_power_well_regs,
4404			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4405			.hsw.is_tc_tbt = false,
4406		},
4407	},
4408	{
4409		.name = "AUX USBC6",
4410		.domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
4411		.ops = &icl_aux_power_well_ops,
4412		.id = DISP_PW_ID_NONE,
4413		{
4414			.hsw.regs = &icl_aux_power_well_regs,
4415			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4416			.hsw.is_tc_tbt = false,
4417		},
4418	},
4419	{
4420		.name = "AUX TBT1",
4421		.domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
4422		.ops = &icl_aux_power_well_ops,
4423		.id = DISP_PW_ID_NONE,
4424		{
4425			.hsw.regs = &icl_aux_power_well_regs,
4426			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4427			.hsw.is_tc_tbt = true,
4428		},
4429	},
4430	{
4431		.name = "AUX TBT2",
4432		.domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
4433		.ops = &icl_aux_power_well_ops,
4434		.id = DISP_PW_ID_NONE,
4435		{
4436			.hsw.regs = &icl_aux_power_well_regs,
4437			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4438			.hsw.is_tc_tbt = true,
4439		},
4440	},
4441	{
4442		.name = "AUX TBT3",
4443		.domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
4444		.ops = &icl_aux_power_well_ops,
4445		.id = DISP_PW_ID_NONE,
4446		{
4447			.hsw.regs = &icl_aux_power_well_regs,
4448			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4449			.hsw.is_tc_tbt = true,
4450		},
4451	},
4452	{
4453		.name = "AUX TBT4",
4454		.domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
4455		.ops = &icl_aux_power_well_ops,
4456		.id = DISP_PW_ID_NONE,
4457		{
4458			.hsw.regs = &icl_aux_power_well_regs,
4459			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4460			.hsw.is_tc_tbt = true,
4461		},
4462	},
4463	{
4464		.name = "AUX TBT5",
4465		.domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
4466		.ops = &icl_aux_power_well_ops,
4467		.id = DISP_PW_ID_NONE,
4468		{
4469			.hsw.regs = &icl_aux_power_well_regs,
4470			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4471			.hsw.is_tc_tbt = true,
4472		},
4473	},
4474	{
4475		.name = "AUX TBT6",
4476		.domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
4477		.ops = &icl_aux_power_well_ops,
4478		.id = DISP_PW_ID_NONE,
4479		{
4480			.hsw.regs = &icl_aux_power_well_regs,
4481			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4482			.hsw.is_tc_tbt = true,
4483		},
4484	},
4485	{
4486		.name = "power well 4",
4487		.domains = TGL_PW_4_POWER_DOMAINS,
4488		.ops = &hsw_power_well_ops,
4489		.id = DISP_PW_ID_NONE,
4490		{
4491			.hsw.regs = &hsw_power_well_regs,
4492			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4493			.hsw.has_fuses = true,
4494			.hsw.irq_pipe_mask = BIT(PIPE_C),
4495		}
4496	},
4497	{
4498		.name = "power well 5",
4499		.domains = TGL_PW_5_POWER_DOMAINS,
4500		.ops = &hsw_power_well_ops,
4501		.id = DISP_PW_ID_NONE,
4502		{
4503			.hsw.regs = &hsw_power_well_regs,
4504			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4505			.hsw.has_fuses = true,
4506			.hsw.irq_pipe_mask = BIT(PIPE_D),
4507		},
4508	},
4509};
4510
4511static const struct i915_power_well_desc rkl_power_wells[] = {
4512	{
4513		.name = "always-on",
4514		.always_on = true,
4515		.domains = POWER_DOMAIN_MASK,
4516		.ops = &i9xx_always_on_power_well_ops,
4517		.id = DISP_PW_ID_NONE,
4518	},
4519	{
4520		.name = "power well 1",
4521		/* Handled by the DMC firmware */
4522		.always_on = true,
4523		.domains = 0,
4524		.ops = &hsw_power_well_ops,
4525		.id = SKL_DISP_PW_1,
4526		{
4527			.hsw.regs = &hsw_power_well_regs,
4528			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4529			.hsw.has_fuses = true,
4530		},
4531	},
4532	{
4533		.name = "DC off",
4534		.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
4535		.ops = &gen9_dc_off_power_well_ops,
4536		.id = SKL_DISP_DC_OFF,
4537	},
4538	{
4539		.name = "power well 3",
4540		.domains = RKL_PW_3_POWER_DOMAINS,
4541		.ops = &hsw_power_well_ops,
4542		.id = ICL_DISP_PW_3,
4543		{
4544			.hsw.regs = &hsw_power_well_regs,
4545			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4546			.hsw.irq_pipe_mask = BIT(PIPE_B),
4547			.hsw.has_vga = true,
4548			.hsw.has_fuses = true,
4549		},
4550	},
4551	{
4552		.name = "power well 4",
4553		.domains = RKL_PW_4_POWER_DOMAINS,
4554		.ops = &hsw_power_well_ops,
4555		.id = DISP_PW_ID_NONE,
4556		{
4557			.hsw.regs = &hsw_power_well_regs,
4558			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4559			.hsw.has_fuses = true,
4560			.hsw.irq_pipe_mask = BIT(PIPE_C),
4561		}
4562	},
4563	{
4564		.name = "DDI A IO",
4565		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4566		.ops = &hsw_power_well_ops,
4567		.id = DISP_PW_ID_NONE,
4568		{
4569			.hsw.regs = &icl_ddi_power_well_regs,
4570			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4571		}
4572	},
4573	{
4574		.name = "DDI B IO",
4575		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4576		.ops = &hsw_power_well_ops,
4577		.id = DISP_PW_ID_NONE,
4578		{
4579			.hsw.regs = &icl_ddi_power_well_regs,
4580			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4581		}
4582	},
4583	{
4584		.name = "DDI IO TC1",
4585		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4586		.ops = &hsw_power_well_ops,
4587		.id = DISP_PW_ID_NONE,
4588		{
4589			.hsw.regs = &icl_ddi_power_well_regs,
4590			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4591		},
4592	},
4593	{
4594		.name = "DDI IO TC2",
4595		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4596		.ops = &hsw_power_well_ops,
4597		.id = DISP_PW_ID_NONE,
4598		{
4599			.hsw.regs = &icl_ddi_power_well_regs,
4600			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4601		},
4602	},
4603	{
4604		.name = "AUX A",
4605		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
4606		.ops = &icl_aux_power_well_ops,
4607		.id = DISP_PW_ID_NONE,
4608		{
4609			.hsw.regs = &icl_aux_power_well_regs,
4610			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4611		},
4612	},
4613	{
4614		.name = "AUX B",
4615		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
4616		.ops = &icl_aux_power_well_ops,
4617		.id = DISP_PW_ID_NONE,
4618		{
4619			.hsw.regs = &icl_aux_power_well_regs,
4620			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4621		},
4622	},
4623	{
4624		.name = "AUX USBC1",
4625		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4626		.ops = &icl_aux_power_well_ops,
4627		.id = DISP_PW_ID_NONE,
4628		{
4629			.hsw.regs = &icl_aux_power_well_regs,
4630			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4631		},
4632	},
4633	{
4634		.name = "AUX USBC2",
4635		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4636		.ops = &icl_aux_power_well_ops,
4637		.id = DISP_PW_ID_NONE,
4638		{
4639			.hsw.regs = &icl_aux_power_well_regs,
4640			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4641		},
4642	},
4643};
4644
4645static const struct i915_power_well_desc xelpd_power_wells[] = {
4646	{
4647		.name = "always-on",
4648		.always_on = true,
4649		.domains = POWER_DOMAIN_MASK,
4650		.ops = &i9xx_always_on_power_well_ops,
4651		.id = DISP_PW_ID_NONE,
4652	},
4653	{
4654		.name = "power well 1",
4655		/* Handled by the DMC firmware */
4656		.always_on = true,
4657		.domains = 0,
4658		.ops = &hsw_power_well_ops,
4659		.id = SKL_DISP_PW_1,
4660		{
4661			.hsw.regs = &hsw_power_well_regs,
4662			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4663			.hsw.has_fuses = true,
4664		},
4665	},
4666	{
4667		.name = "DC off",
4668		.domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
4669		.ops = &gen9_dc_off_power_well_ops,
4670		.id = SKL_DISP_DC_OFF,
4671	},
4672	{
4673		.name = "power well 2",
4674		.domains = XELPD_PW_2_POWER_DOMAINS,
4675		.ops = &hsw_power_well_ops,
4676		.id = SKL_DISP_PW_2,
4677		{
4678			.hsw.regs = &hsw_power_well_regs,
4679			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4680			.hsw.has_vga = true,
4681			.hsw.has_fuses = true,
4682		},
4683	},
4684	{
4685		.name = "power well A",
4686		.domains = XELPD_PW_A_POWER_DOMAINS,
4687		.ops = &hsw_power_well_ops,
4688		.id = DISP_PW_ID_NONE,
4689		{
4690			.hsw.regs = &hsw_power_well_regs,
4691			.hsw.idx = XELPD_PW_CTL_IDX_PW_A,
4692			.hsw.irq_pipe_mask = BIT(PIPE_A),
4693			.hsw.has_fuses = true,
4694		},
4695	},
4696	{
4697		.name = "power well B",
4698		.domains = XELPD_PW_B_POWER_DOMAINS,
4699		.ops = &hsw_power_well_ops,
4700		.id = DISP_PW_ID_NONE,
4701		{
4702			.hsw.regs = &hsw_power_well_regs,
4703			.hsw.idx = XELPD_PW_CTL_IDX_PW_B,
4704			.hsw.irq_pipe_mask = BIT(PIPE_B),
4705			.hsw.has_fuses = true,
4706		},
4707	},
4708	{
4709		.name = "power well C",
4710		.domains = XELPD_PW_C_POWER_DOMAINS,
4711		.ops = &hsw_power_well_ops,
4712		.id = DISP_PW_ID_NONE,
4713		{
4714			.hsw.regs = &hsw_power_well_regs,
4715			.hsw.idx = XELPD_PW_CTL_IDX_PW_C,
4716			.hsw.irq_pipe_mask = BIT(PIPE_C),
4717			.hsw.has_fuses = true,
4718		},
4719	},
4720	{
4721		.name = "power well D",
4722		.domains = XELPD_PW_D_POWER_DOMAINS,
4723		.ops = &hsw_power_well_ops,
4724		.id = DISP_PW_ID_NONE,
4725		{
4726			.hsw.regs = &hsw_power_well_regs,
4727			.hsw.idx = XELPD_PW_CTL_IDX_PW_D,
4728			.hsw.irq_pipe_mask = BIT(PIPE_D),
4729			.hsw.has_fuses = true,
4730		},
4731	},
4732	{
4733		.name = "DDI A IO",
4734		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4735		.ops = &hsw_power_well_ops,
4736		.id = DISP_PW_ID_NONE,
4737		{
4738			.hsw.regs = &icl_ddi_power_well_regs,
4739			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4740		}
4741	},
4742	{
4743		.name = "DDI B IO",
4744		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4745		.ops = &hsw_power_well_ops,
4746		.id = DISP_PW_ID_NONE,
4747		{
4748			.hsw.regs = &icl_ddi_power_well_regs,
4749			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4750		}
4751	},
4752	{
4753		.name = "DDI C IO",
4754		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
4755		.ops = &hsw_power_well_ops,
4756		.id = DISP_PW_ID_NONE,
4757		{
4758			.hsw.regs = &icl_ddi_power_well_regs,
4759			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4760		}
4761	},
4762	{
4763		.name = "DDI IO D_XELPD",
4764		.domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
4765		.ops = &hsw_power_well_ops,
4766		.id = DISP_PW_ID_NONE,
4767		{
4768			.hsw.regs = &icl_ddi_power_well_regs,
4769			.hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
4770		}
4771	},
4772	{
4773		.name = "DDI IO E_XELPD",
4774		.domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
4775		.ops = &hsw_power_well_ops,
4776		.id = DISP_PW_ID_NONE,
4777		{
4778			.hsw.regs = &icl_ddi_power_well_regs,
4779			.hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
4780		}
4781	},
4782	{
4783		.name = "DDI IO TC1",
4784		.domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
4785		.ops = &hsw_power_well_ops,
4786		.id = DISP_PW_ID_NONE,
4787		{
4788			.hsw.regs = &icl_ddi_power_well_regs,
4789			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4790		}
4791	},
4792	{
4793		.name = "DDI IO TC2",
4794		.domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
4795		.ops = &hsw_power_well_ops,
4796		.id = DISP_PW_ID_NONE,
4797		{
4798			.hsw.regs = &icl_ddi_power_well_regs,
4799			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4800		}
4801	},
4802	{
4803		.name = "DDI IO TC3",
4804		.domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
4805		.ops = &hsw_power_well_ops,
4806		.id = DISP_PW_ID_NONE,
4807		{
4808			.hsw.regs = &icl_ddi_power_well_regs,
4809			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4810		}
4811	},
4812	{
4813		.name = "DDI IO TC4",
4814		.domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
4815		.ops = &hsw_power_well_ops,
4816		.id = DISP_PW_ID_NONE,
4817		{
4818			.hsw.regs = &icl_ddi_power_well_regs,
4819			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4820		}
4821	},
4822	{
4823		.name = "AUX A",
4824		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
4825		.ops = &icl_aux_power_well_ops,
4826		.id = DISP_PW_ID_NONE,
4827		{
4828			.hsw.regs = &icl_aux_power_well_regs,
4829			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4830		},
4831	},
4832	{
4833		.name = "AUX B",
4834		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
4835		.ops = &icl_aux_power_well_ops,
4836		.id = DISP_PW_ID_NONE,
4837		{
4838			.hsw.regs = &icl_aux_power_well_regs,
4839			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4840		},
4841	},
4842	{
4843		.name = "AUX C",
4844		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4845		.ops = &icl_aux_power_well_ops,
4846		.id = DISP_PW_ID_NONE,
4847		{
4848			.hsw.regs = &icl_aux_power_well_regs,
4849			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4850		},
4851	},
4852	{
4853		.name = "AUX D_XELPD",
4854		.domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
4855		.ops = &icl_aux_power_well_ops,
4856		.id = DISP_PW_ID_NONE,
4857		{
4858			.hsw.regs = &icl_aux_power_well_regs,
4859			.hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
4860		},
4861	},
4862	{
4863		.name = "AUX E_XELPD",
4864		.domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
4865		.ops = &icl_aux_power_well_ops,
4866		.id = DISP_PW_ID_NONE,
4867		{
4868			.hsw.regs = &icl_aux_power_well_regs,
4869			.hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
4870		},
4871	},
4872	{
4873		.name = "AUX USBC1",
4874		.domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
4875		.ops = &icl_aux_power_well_ops,
4876		.id = DISP_PW_ID_NONE,
4877		{
4878			.hsw.regs = &icl_aux_power_well_regs,
4879			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4880		},
4881	},
4882	{
4883		.name = "AUX USBC2",
4884		.domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
4885		.ops = &icl_aux_power_well_ops,
4886		.id = DISP_PW_ID_NONE,
4887		{
4888			.hsw.regs = &icl_aux_power_well_regs,
4889			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4890		},
4891	},
4892	{
4893		.name = "AUX USBC3",
4894		.domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
4895		.ops = &icl_aux_power_well_ops,
4896		.id = DISP_PW_ID_NONE,
4897		{
4898			.hsw.regs = &icl_aux_power_well_regs,
4899			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4900		},
4901	},
4902	{
4903		.name = "AUX USBC4",
4904		.domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
4905		.ops = &icl_aux_power_well_ops,
4906		.id = DISP_PW_ID_NONE,
4907		{
4908			.hsw.regs = &icl_aux_power_well_regs,
4909			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4910		},
4911	},
4912	{
4913		.name = "AUX TBT1",
4914		.domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
4915		.ops = &icl_aux_power_well_ops,
4916		.id = DISP_PW_ID_NONE,
4917		{
4918			.hsw.regs = &icl_aux_power_well_regs,
4919			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4920			.hsw.is_tc_tbt = true,
4921		},
4922	},
4923	{
4924		.name = "AUX TBT2",
4925		.domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
4926		.ops = &icl_aux_power_well_ops,
4927		.id = DISP_PW_ID_NONE,
4928		{
4929			.hsw.regs = &icl_aux_power_well_regs,
4930			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4931			.hsw.is_tc_tbt = true,
4932		},
4933	},
4934	{
4935		.name = "AUX TBT3",
4936		.domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
4937		.ops = &icl_aux_power_well_ops,
4938		.id = DISP_PW_ID_NONE,
4939		{
4940			.hsw.regs = &icl_aux_power_well_regs,
4941			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4942			.hsw.is_tc_tbt = true,
4943		},
4944	},
4945	{
4946		.name = "AUX TBT4",
4947		.domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
4948		.ops = &icl_aux_power_well_ops,
4949		.id = DISP_PW_ID_NONE,
4950		{
4951			.hsw.regs = &icl_aux_power_well_regs,
4952			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4953			.hsw.is_tc_tbt = true,
4954		},
4955	},
4956};
4957
4958static int
4959sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4960				   int disable_power_well)
4961{
4962	if (disable_power_well >= 0)
4963		return !!disable_power_well;
4964
4965	return 1;
4966}
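
/*
 * The disable_power_well module parameter is tri-state: negative (the
 * -1 default) means "auto", which currently resolves to 1 (power wells
 * may be disabled), while 0 and 1 are explicit requests. For example:
 *
 *	sanitize_disable_power_well_option(i915, -1);	// 1 (auto)
 *	sanitize_disable_power_well_option(i915, 0);	// 0
 *	sanitize_disable_power_well_option(i915, 5);	// 1 (!!5)
 */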
4967
4968static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4969			       int enable_dc)
4970{
4971	u32 mask;
4972	int requested_dc;
4973	int max_dc;
4974
4975	if (!HAS_DISPLAY(dev_priv))
4976		return 0;
4977
4978	if (IS_DG1(dev_priv))
4979		max_dc = 3;
4980	else if (DISPLAY_VER(dev_priv) >= 12)
4981		max_dc = 4;
4982	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4983		max_dc = 1;
4984	else if (DISPLAY_VER(dev_priv) >= 9)
4985		max_dc = 2;
4986	else
4987		max_dc = 0;
4988
4989	/*
4990	 * DC9 has a separate HW flow from the rest of the DC states,
4991	 * not depending on the DMC firmware. It's needed by system
4992	 * suspend/resume, so allow it unconditionally.
4993	 */
4994	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
4995		DISPLAY_VER(dev_priv) >= 11 ?
4996	       DC_STATE_EN_DC9 : 0;
4997
4998	if (!dev_priv->params.disable_power_well)
4999		max_dc = 0;
5000
5001	if (enable_dc >= 0 && enable_dc <= max_dc) {
5002		requested_dc = enable_dc;
5003	} else if (enable_dc == -1) {
5004		requested_dc = max_dc;
5005	} else if (enable_dc > max_dc && enable_dc <= 4) {
5006		drm_dbg_kms(&dev_priv->drm,
5007			    "Adjusting requested max DC state (%d->%d)\n",
5008			    enable_dc, max_dc);
5009		requested_dc = max_dc;
5010	} else {
5011		drm_err(&dev_priv->drm,
5012			"Unexpected value for enable_dc (%d)\n", enable_dc);
5013		requested_dc = max_dc;
5014	}
5015
5016	switch (requested_dc) {
5017	case 4:
5018		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
5019		break;
5020	case 3:
5021		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
5022		break;
5023	case 2:
5024		mask |= DC_STATE_EN_UPTO_DC6;
5025		break;
5026	case 1:
5027		mask |= DC_STATE_EN_UPTO_DC5;
5028		break;
5029	}
5030
5031	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
5032
5033	return mask;
5034}
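
/*
 * Worked example, following the logic above: on a non-DG1 platform
 * with DISPLAY_VER() == 12, the default enable_dc=-1 and power well
 * disabling allowed (disable_power_well=1), max_dc is 4 and the
 * returned mask is DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO |
 * DC_STATE_EN_UPTO_DC6; passing enable_dc=1 on the same platform
 * yields DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5 instead.
 */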
5035
5036static int
5037__set_power_wells(struct i915_power_domains *power_domains,
5038		  const struct i915_power_well_desc *power_well_descs,
5039		  int power_well_descs_sz, u64 skip_mask)
5040{
5041	struct drm_i915_private *i915 = container_of(power_domains,
5042						     struct drm_i915_private,
5043						     power_domains);
5044	u64 power_well_ids = 0;
5045	int power_well_count = 0;
5046	int i, plt_idx = 0;
5047
5048	for (i = 0; i < power_well_descs_sz; i++)
5049		if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
5050			power_well_count++;
5051
5052	power_domains->power_well_count = power_well_count;
5053	power_domains->power_wells =
5054				kcalloc(power_well_count,
5055					sizeof(*power_domains->power_wells),
5056					GFP_KERNEL);
5057	if (!power_domains->power_wells)
5058		return -ENOMEM;
5059
5060	for (i = 0; i < power_well_descs_sz; i++) {
5061		enum i915_power_well_id id = power_well_descs[i].id;
5062
5063		if (BIT_ULL(id) & skip_mask)
5064			continue;
5065
5066		power_domains->power_wells[plt_idx++].desc =
5067			&power_well_descs[i];
5068
5069		if (id == DISP_PW_ID_NONE)
5070			continue;
5071
5072		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
5073		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
5074		power_well_ids |= BIT_ULL(id);
5075	}
5076
5077	return 0;
5078}
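
/*
 * __set_power_wells() makes two passes over the descriptor table: the
 * first only counts the wells that survive skip_mask so the array can
 * be allocated with a single kcalloc(), the second fills it in while
 * sanity-checking that every non-NONE well ID is unique and fits in
 * the u64 ID bitmap.
 */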
5079
5080#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
5081	__set_power_wells(power_domains, __power_well_descs, \
5082			  ARRAY_SIZE(__power_well_descs), skip_mask)
5083
5084#define set_power_wells(power_domains, __power_well_descs) \
5085	set_power_wells_mask(power_domains, __power_well_descs, 0)
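
/*
 * Usage sketch, mirroring intel_power_domains_init() below: pass the
 * platform's descriptor array, optionally with a mask of well IDs to
 * skip, e.g.:
 *
 *	err = set_power_wells_mask(power_domains, tgl_power_wells,
 *				   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
 */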
5086
5087/**
5088 * intel_power_domains_init - initializes the power domain structures
5089 * @dev_priv: i915 device instance
5090 *
5091 * Initializes the power domain structures for @dev_priv depending upon the
5092 * supported platform.
5093 */
5094int intel_power_domains_init(struct drm_i915_private *dev_priv)
5095{
5096	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5097	int err;
5098
5099	dev_priv->params.disable_power_well =
5100		sanitize_disable_power_well_option(dev_priv,
5101						   dev_priv->params.disable_power_well);
5102	dev_priv->dmc.allowed_dc_mask =
5103		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
5104
5105	dev_priv->dmc.target_dc_state =
5106		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
5107
5108	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
5109
5110	mutex_init(&power_domains->lock);
5111
5112	INIT_DELAYED_WORK(&power_domains->async_put_work,
5113			  intel_display_power_put_async_work);
5114
5115	/*
5116	 * The enabling order will be from lower to higher indexed wells;
5117	 * the disabling order is reversed.
5118	 */
5119	if (!HAS_DISPLAY(dev_priv)) {
5120		power_domains->power_well_count = 0;
5121		err = 0;
5122	} else if (DISPLAY_VER(dev_priv) >= 13) {
5123		err = set_power_wells(power_domains, xelpd_power_wells);
5124	} else if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
5125		err = set_power_wells_mask(power_domains, tgl_power_wells,
5126					   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
5127	} else if (IS_ROCKETLAKE(dev_priv)) {
5128		err = set_power_wells(power_domains, rkl_power_wells);
5129	} else if (DISPLAY_VER(dev_priv) == 12) {
5130		err = set_power_wells(power_domains, tgl_power_wells);
5131	} else if (DISPLAY_VER(dev_priv) == 11) {
5132		err = set_power_wells(power_domains, icl_power_wells);
5133	} else if (IS_CNL_WITH_PORT_F(dev_priv)) {
5134		err = set_power_wells(power_domains, cnl_power_wells);
5135	} else if (IS_CANNONLAKE(dev_priv)) {
5136		err = set_power_wells_mask(power_domains, cnl_power_wells,
5137					   BIT_ULL(CNL_DISP_PW_DDI_F_IO) |
5138					   BIT_ULL(CNL_DISP_PW_DDI_F_AUX));
5139	} else if (IS_GEMINILAKE(dev_priv)) {
5140		err = set_power_wells(power_domains, glk_power_wells);
5141	} else if (IS_BROXTON(dev_priv)) {
5142		err = set_power_wells(power_domains, bxt_power_wells);
5143	} else if (DISPLAY_VER(dev_priv) == 9) {
5144		err = set_power_wells(power_domains, skl_power_wells);
5145	} else if (IS_CHERRYVIEW(dev_priv)) {
5146		err = set_power_wells(power_domains, chv_power_wells);
5147	} else if (IS_BROADWELL(dev_priv)) {
5148		err = set_power_wells(power_domains, bdw_power_wells);
5149	} else if (IS_HASWELL(dev_priv)) {
5150		err = set_power_wells(power_domains, hsw_power_wells);
5151	} else if (IS_VALLEYVIEW(dev_priv)) {
5152		err = set_power_wells(power_domains, vlv_power_wells);
5153	} else if (IS_I830(dev_priv)) {
5154		err = set_power_wells(power_domains, i830_power_wells);
5155	} else {
5156		err = set_power_wells(power_domains, i9xx_always_on_power_well);
5157	}
5158
5159	return err;
5160}
5161
5162/**
5163 * intel_power_domains_cleanup - clean up power domains resources
5164 * @dev_priv: i915 device instance
5165 *
5166 * Release any resources acquired by intel_power_domains_init()
5167 */
5168void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
5169{
5170	kfree(dev_priv->power_domains.power_wells);
5171}
5172
5173static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
5174{
5175	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5176	struct i915_power_well *power_well;
5177
5178	mutex_lock(&power_domains->lock);
5179	for_each_power_well(dev_priv, power_well) {
5180		power_well->desc->ops->sync_hw(dev_priv, power_well);
5181		power_well->hw_enabled =
5182			power_well->desc->ops->is_enabled(dev_priv, power_well);
5183	}
5184	mutex_unlock(&power_domains->lock);
5185}
5186
5187static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
5188				enum dbuf_slice slice, bool enable)
5189{
5190	i915_reg_t reg = DBUF_CTL_S(slice);
5191	bool state;
5192
5193	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
5194		     enable ? DBUF_POWER_REQUEST : 0);
5195	intel_de_posting_read(dev_priv, reg);
5196	udelay(10);
5197
5198	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
5199	drm_WARN(&dev_priv->drm, enable != state,
5200		 "DBuf slice %d power %s timeout!\n",
5201		 slice, enabledisable(enable));
5202}
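
/*
 * Note the request/acknowledge handshake above: set (or clear)
 * DBUF_POWER_REQUEST, post the write, give the hardware 10us and then
 * verify that DBUF_POWER_STATE followed, warning rather than failing
 * hard on a mismatch.
 */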
5203
5204void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
5205			     u8 req_slices)
5206{
5207	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5208	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
5209	enum dbuf_slice slice;
5210
5211	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
5212		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
5213		 req_slices, slice_mask);
5214
5215	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
5216		    req_slices);
5217
5218	/*
5219	 * This might run in parallel to gen9_dc_off_power_well_enable
5220	 * being called from intel_dp_detect, for instance, which would
5221	 * trigger an assertion due to a race condition:
5222	 * gen9_assert_dbuf_enabled might preempt this after the registers
5223	 * were already updated but dev_priv was not.
5224	 */
5225	mutex_lock(&power_domains->lock);
5226
5227	for_each_dbuf_slice(dev_priv, slice)
5228		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
5229
5230	dev_priv->dbuf.enabled_slices = req_slices;
5231
5232	mutex_unlock(&power_domains->lock);
5233}
5234
5235static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
5236{
5237	dev_priv->dbuf.enabled_slices =
5238		intel_enabled_dbuf_slices_mask(dev_priv);
5239
5240	/*
5241	 * Just power up at least one slice; we will figure out
5242	 * later which slices we have and what we need.
5243	 */
5244	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
5245				dev_priv->dbuf.enabled_slices);
5246}
5247
5248static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
5249{
5250	gen9_dbuf_slices_update(dev_priv, 0);
5251}
5252
5253static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
5254{
5255	enum dbuf_slice slice;
5256
5257	if (IS_ALDERLAKE_P(dev_priv))
5258		return;
5259
5260	for_each_dbuf_slice(dev_priv, slice)
5261		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
5262			     DBUF_TRACKER_STATE_SERVICE_MASK,
5263			     DBUF_TRACKER_STATE_SERVICE(8));
5264}
5265
5266static void icl_mbus_init(struct drm_i915_private *dev_priv)
5267{
5268	unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
5269	u32 mask, val, i;
5270
5271	if (IS_ALDERLAKE_P(dev_priv))
5272		return;
5273
5274	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
5275		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
5276		MBUS_ABOX_B_CREDIT_MASK |
5277		MBUS_ABOX_BW_CREDIT_MASK;
5278	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
5279		MBUS_ABOX_BT_CREDIT_POOL2(16) |
5280		MBUS_ABOX_B_CREDIT(1) |
5281		MBUS_ABOX_BW_CREDIT(1);
5282
5283	/*
5284	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
5285	 * expect us to program the abox_ctl0 register as well, even though
5286	 * we don't have to program other instance-0 registers like BW_BUDDY.
5287	 */
5288	if (DISPLAY_VER(dev_priv) == 12)
5289		abox_regs |= BIT(0);
5290
5291	for_each_set_bit(i, &abox_regs, sizeof(abox_regs) * BITS_PER_BYTE)
5292		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
5293}
5294
5295static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
5296{
5297	u32 val = intel_de_read(dev_priv, LCPLL_CTL);
5298
5299	/*
5300	 * The LCPLL register should be turned on by the BIOS. For now
5301	 * let's just check its state and print errors in case
5302	 * something is wrong.  Don't even try to turn it on.
5303	 */
5304
5305	if (val & LCPLL_CD_SOURCE_FCLK)
5306		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
5307
5308	if (val & LCPLL_PLL_DISABLE)
5309		drm_err(&dev_priv->drm, "LCPLL is disabled\n");
5310
5311	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
5312		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
5313}
5314
5315static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
5316{
5317	struct drm_device *dev = &dev_priv->drm;
5318	struct intel_crtc *crtc;
5319
5320	for_each_intel_crtc(dev, crtc)
5321		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
5322				pipe_name(crtc->pipe));
5323
5324	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
5325			"Display power well on\n");
5326	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
5327			"SPLL enabled\n");
5328	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
5329			"WRPLL1 enabled\n");
5330	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
5331			"WRPLL2 enabled\n");
5332	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
5333			"Panel power on\n");
5334	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
5335			"CPU PWM1 enabled\n");
5336	if (IS_HASWELL(dev_priv))
5337		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
5338				"CPU PWM2 enabled\n");
5339	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
5340			"PCH PWM1 enabled\n");
5341	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
5342			"Utility pin enabled\n");
5343	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
5344			"PCH GTC enabled\n");
5345
5346	/*
5347	 * In theory we can still leave IRQs enabled, as long as only the HPD
5348	 * interrupts remain enabled. We used to check for that, but since it's
5349	 * gen-specific and since we only disable LCPLL after we fully disable
5350	 * the interrupts, the check below should be enough.
5351	 */
5352	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
5353}
5354
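/*
 * D_COMP access differs between platforms: HSW reads D_COMP through MMIO
 * but must write it via the pcode mailbox, while BDW reads and writes the
 * D_COMP_BDW register directly.
 */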
5355static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
5356{
5357	if (IS_HASWELL(dev_priv))
5358		return intel_de_read(dev_priv, D_COMP_HSW);
5359	else
5360		return intel_de_read(dev_priv, D_COMP_BDW);
5361}
5362
5363static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
5364{
5365	if (IS_HASWELL(dev_priv)) {
5366		if (sandybridge_pcode_write(dev_priv,
5367					    GEN6_PCODE_WRITE_D_COMP, val))
5368			drm_dbg_kms(&dev_priv->drm,
5369				    "Failed to write to D_COMP\n");
5370	} else {
5371		intel_de_write(dev_priv, D_COMP_BDW, val);
5372		intel_de_posting_read(dev_priv, D_COMP_BDW);
5373	}
5374}
5375
5376/*
5377 * This function implements pieces of two sequences from BSpec:
5378 * - Sequence for display software to disable LCPLL
5379 * - Sequence for display software to allow package C8+
5380 * The steps implemented here are just the steps that actually touch the LCPLL
5381 * register. Callers should take care of disabling all the display engine
5382 * functions, doing the mode unset, fixing interrupts, etc.
5383 */
5384static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
5385			      bool switch_to_fclk, bool allow_power_down)
5386{
5387	u32 val;
5388
5389	assert_can_disable_lcpll(dev_priv);
5390
5391	val = intel_de_read(dev_priv, LCPLL_CTL);
5392
5393	if (switch_to_fclk) {
5394		val |= LCPLL_CD_SOURCE_FCLK;
5395		intel_de_write(dev_priv, LCPLL_CTL, val);
5396
5397		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
5398				LCPLL_CD_SOURCE_FCLK_DONE, 1))
5399			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
5400
5401		val = intel_de_read(dev_priv, LCPLL_CTL);
5402	}
5403
5404	val |= LCPLL_PLL_DISABLE;
5405	intel_de_write(dev_priv, LCPLL_CTL, val);
5406	intel_de_posting_read(dev_priv, LCPLL_CTL);
5407
5408	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
5409		drm_err(&dev_priv->drm, "LCPLL still locked\n");
5410
5411	val = hsw_read_dcomp(dev_priv);
5412	val |= D_COMP_COMP_DISABLE;
5413	hsw_write_dcomp(dev_priv, val);
5414	ndelay(100);
5415
5416	if (wait_for((hsw_read_dcomp(dev_priv) &
5417		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
5418		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
5419
5420	if (allow_power_down) {
5421		val = intel_de_read(dev_priv, LCPLL_CTL);
5422		val |= LCPLL_POWER_DOWN_ALLOW;
5423		intel_de_write(dev_priv, LCPLL_CTL, val);
5424		intel_de_posting_read(dev_priv, LCPLL_CTL);
5425	}
5426}
5427
5428/*
5429 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
5430 * source.
5431 */
5432static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
5433{
5434	u32 val;
5435
5436	val = intel_de_read(dev_priv, LCPLL_CTL);
5437
5438	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
5439		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
5440		return;
5441
5442	/*
5443	 * Make sure we're not in a PC8 state before disabling PC8; otherwise
5444	 * we'll hang the machine. Grabbing forcewake is enough to prevent PC8.
5445	 */
5446	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
5447
5448	if (val & LCPLL_POWER_DOWN_ALLOW) {
5449		val &= ~LCPLL_POWER_DOWN_ALLOW;
5450		intel_de_write(dev_priv, LCPLL_CTL, val);
5451		intel_de_posting_read(dev_priv, LCPLL_CTL);
5452	}
5453
5454	val = hsw_read_dcomp(dev_priv);
5455	val |= D_COMP_COMP_FORCE;
5456	val &= ~D_COMP_COMP_DISABLE;
5457	hsw_write_dcomp(dev_priv, val);
5458
5459	val = intel_de_read(dev_priv, LCPLL_CTL);
5460	val &= ~LCPLL_PLL_DISABLE;
5461	intel_de_write(dev_priv, LCPLL_CTL, val);
5462
5463	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
5464		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
5465
5466	if (val & LCPLL_CD_SOURCE_FCLK) {
5467		val = intel_de_read(dev_priv, LCPLL_CTL);
5468		val &= ~LCPLL_CD_SOURCE_FCLK;
5469		intel_de_write(dev_priv, LCPLL_CTL, val);
5470
5471		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
5472				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
5473			drm_err(&dev_priv->drm,
5474				"Switching back to LCPLL failed\n");
5475	}
5476
5477	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
5478
5479	intel_update_cdclk(dev_priv);
5480	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
5481}
5482
5483/*
5484 * Package states C8 and deeper are really deep PC states that can only be
5485 * reached when all the devices on the system allow it, so even if the graphics
5486 * device allows PC8+, it doesn't mean the system will actually get to these
5487 * states. Our driver only allows PC8+ when going into runtime PM.
5488 *
5489 * The requirements for PC8+ are that all the outputs are disabled, the power
5490 * well is disabled and most interrupts are disabled, and these are also
5491	 * requirements for runtime PM. When these conditions are met, we manually
5492	 * take care of the rest: we disable the interrupts, clocks and switch the
5493	 * LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug
5494	 * interrupt, we can hard hang the machine.
5495 *
5496 * When we really reach PC8 or deeper states (not just when we allow it) we lose
5497 * the state of some registers, so when we come back from PC8+ we need to
5498 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
5499 * need to take care of the registers kept by RC6. Notice that this happens even
5500 * if we don't put the device in PCI D3 state (which is what currently happens
5501 * because of the runtime PM support).
5502 *
5503	 * For more, read "Display Sequences for Package C8" in the hardware
5504 * documentation.
5505 */
5506static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
5507{
5508	u32 val;
5509
5510	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
5511
5512	if (HAS_PCH_LPT_LP(dev_priv)) {
5513		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5514		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5515		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5516	}
5517
5518	lpt_disable_clkout_dp(dev_priv);
5519	hsw_disable_lcpll(dev_priv, true, true);
5520}
5521
5522static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
5523{
5524	u32 val;
5525
5526	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
5527
5528	hsw_restore_lcpll(dev_priv);
5529	intel_init_pch_refclk(dev_priv);
5530
5531	if (HAS_PCH_LPT_LP(dev_priv)) {
5532		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5533		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
5534		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5535	}
5536}
5537
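/*
 * Set or clear the PCH reset handshake, picking the per-platform register
 * and bits: GEN7_MSG_CTL on IVB, HSW_NDE_RSTWRN_OPT on everything newer.
 */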
5538static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
5539				      bool enable)
5540{
5541	i915_reg_t reg;
5542	u32 reset_bits, val;
5543
5544	if (IS_IVYBRIDGE(dev_priv)) {
5545		reg = GEN7_MSG_CTL;
5546		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
5547	} else {
5548		reg = HSW_NDE_RSTWRN_OPT;
5549		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
5550	}
5551
5552	val = intel_de_read(dev_priv, reg);
5553
5554	if (enable)
5555		val |= reset_bits;
5556	else
5557		val &= ~reset_bits;
5558
5559	intel_de_write(dev_priv, reg, val);
5560}
5561
5562static void skl_display_core_init(struct drm_i915_private *dev_priv,
5563				  bool resume)
5564{
5565	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5566	struct i915_power_well *well;
5567
5568	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5569
5570	/* enable PCH reset handshake */
5571	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5572
5573	if (!HAS_DISPLAY(dev_priv))
5574		return;
5575
5576	/* enable PG1 and Misc I/O */
5577	mutex_lock(&power_domains->lock);
5578
5579	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5580	intel_power_well_enable(dev_priv, well);
5581
5582	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
5583	intel_power_well_enable(dev_priv, well);
5584
5585	mutex_unlock(&power_domains->lock);
5586
5587	intel_cdclk_init_hw(dev_priv);
5588
5589	gen9_dbuf_enable(dev_priv);
5590
5591	if (resume && intel_dmc_has_payload(dev_priv))
5592		intel_dmc_load_program(dev_priv);
5593}
5594
5595static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
5596{
5597	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5598	struct i915_power_well *well;
5599
5600	if (!HAS_DISPLAY(dev_priv))
5601		return;
5602
5603	gen9_disable_dc_states(dev_priv);
5604
5605	gen9_dbuf_disable(dev_priv);
5606
5607	intel_cdclk_uninit_hw(dev_priv);
5608
5609	/* The spec doesn't call for removing the reset handshake flag */
5610	/* disable PG1 and Misc I/O */
5611
5612	mutex_lock(&power_domains->lock);
5613
5614	/*
5615	 * BSpec says to keep the MISC IO power well enabled here, only
5616	 * remove our request for power well 1.
5617	 * Note that even though the driver's request is removed, power well 1
5618	 * may stay enabled after this due to DMC's own request on it.
5619	 */
5620	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5621	intel_power_well_disable(dev_priv, well);
5622
5623	mutex_unlock(&power_domains->lock);
5624
5625	usleep_range(10, 30);		/* 10 us delay per Bspec */
5626}
5627
5628static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5629{
5630	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5631	struct i915_power_well *well;
5632
5633	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5634
5635	/*
5636	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5637	 * or else the reset will hang because there is no PCH to respond.
5638	 * The handshake programming was therefore moved to the initialization
5639	 * sequence; previously it was left up to the BIOS.
5640	 */
5641	intel_pch_reset_handshake(dev_priv, false);
5642
5643	if (!HAS_DISPLAY(dev_priv))
5644		return;
5645
5646	/* Enable PG1 */
5647	mutex_lock(&power_domains->lock);
5648
5649	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5650	intel_power_well_enable(dev_priv, well);
5651
5652	mutex_unlock(&power_domains->lock);
5653
5654	intel_cdclk_init_hw(dev_priv);
5655
5656	gen9_dbuf_enable(dev_priv);
5657
5658	if (resume && intel_dmc_has_payload(dev_priv))
5659		intel_dmc_load_program(dev_priv);
5660}
5661
5662static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
5663{
5664	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5665	struct i915_power_well *well;
5666
5667	if (!HAS_DISPLAY(dev_priv))
5668		return;
5669
5670	gen9_disable_dc_states(dev_priv);
5671
5672	gen9_dbuf_disable(dev_priv);
5673
5674	intel_cdclk_uninit_hw(dev_priv);
5675
5676	/* The spec doesn't call for removing the reset handshake flag */
5677
5678	/*
5679	 * Disable PW1 (PG1).
5680	 * Note that even though the driver's request is removed power well 1
5681	 * may stay enabled after this due to DMC's own request on it.
5682	 */
5683	mutex_lock(&power_domains->lock);
5684
5685	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5686	intel_power_well_disable(dev_priv, well);
5687
5688	mutex_unlock(&power_domains->lock);
5689
5690	usleep_range(10, 30);		/* 10 us delay per Bspec */
5691}
5692
5693static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5694{
5695	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5696	struct i915_power_well *well;
5697
5698	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5699
5700	/* 1. Enable PCH Reset Handshake */
5701	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5702
5703	if (!HAS_DISPLAY(dev_priv))
5704		return;
5705
5706	/* 2-3. */
5707	intel_combo_phy_init(dev_priv);
5708
5709	/*
5710	 * 4. Enable Power Well 1 (PG1).
5711	 *    The AUX IO power wells will be enabled on demand.
5712	 */
5713	mutex_lock(&power_domains->lock);
5714	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5715	intel_power_well_enable(dev_priv, well);
5716	mutex_unlock(&power_domains->lock);
5717
5718	/* 5. Enable CD clock */
5719	intel_cdclk_init_hw(dev_priv);
5720
5721	/* 6. Enable DBUF */
5722	gen9_dbuf_enable(dev_priv);
5723
5724	if (resume && intel_dmc_has_payload(dev_priv))
5725		intel_dmc_load_program(dev_priv);
5726}
5727
5728static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
5729{
5730	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5731	struct i915_power_well *well;
5732
5733	if (!HAS_DISPLAY(dev_priv))
5734		return;
5735
5736	gen9_disable_dc_states(dev_priv);
5737
5738	/* 1. Disable all display engine functions -> already done */
5739
5740	/* 2. Disable DBUF */
5741	gen9_dbuf_disable(dev_priv);
5742
5743	/* 3. Disable CD clock */
5744	intel_cdclk_uninit_hw(dev_priv);
5745
5746	/*
5747	 * 4. Disable Power Well 1 (PG1).
5748	 *    The AUX IO power wells are toggled on demand, so they are already
5749	 *    disabled at this point.
5750	 */
5751	mutex_lock(&power_domains->lock);
5752	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5753	intel_power_well_disable(dev_priv, well);
5754	mutex_unlock(&power_domains->lock);
5755
5756	usleep_range(10, 30);		/* 10 us delay per Bspec */
5757
5758	/* 5. */
5759	intel_combo_phy_uninit(dev_priv);
5760}
5761
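/*
 * BW_BUDDY page masks, keyed by DRAM type and channel count. A zero
 * page_mask terminates each table.
 */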
5762struct buddy_page_mask {
5763	u32 page_mask;
5764	u8 type;
5765	u8 num_channels;
5766};
5767
5768static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5769	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
5770	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,	.page_mask = 0xF },
5771	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5772	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
5773	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
5774	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
5775	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
5776	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
5777	{}
5778};
5779
5780static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5781	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5782	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
5783	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
5784	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
5785	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5786	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
5787	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
5788	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
5789	{}
5790};
5791
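/*
 * Look up the BW_BUDDY page mask matching the detected DRAM configuration
 * and program it for each ABOX instance; if the configuration is unknown,
 * disable the address buddy logic instead.
 */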
5792static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5793{
5794	enum intel_dram_type type = dev_priv->dram_info.type;
5795	u8 num_channels = dev_priv->dram_info.num_channels;
5796	const struct buddy_page_mask *table;
5797	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
5798	int config, i;
5799
5800	if (IS_ALDERLAKE_S(dev_priv) ||
5801	    IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
5802	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
5803		/* Wa_1409767108:tgl,dg1,adl-s */
5804		table = wa_1409767108_buddy_page_masks;
5805	else
5806		table = tgl_buddy_page_masks;
5807
5808	for (config = 0; table[config].page_mask != 0; config++)
5809		if (table[config].num_channels == num_channels &&
5810		    table[config].type == type)
5811			break;
5812
5813	if (table[config].page_mask == 0) {
5814		drm_dbg(&dev_priv->drm,
5815			"Unknown memory configuration; disabling address buddy logic.\n");
5816		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
5817			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
5818				       BW_BUDDY_DISABLE);
5819	} else {
5820		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
5821			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
5822				       table[config].page_mask);
5823
5824			/* Wa_22010178259:tgl,rkl */
5825			intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
5826				     BW_BUDDY_TLB_REQ_TIMER_MASK,
5827				     BW_BUDDY_TLB_REQ_TIMER(0x8));
5828		}
5829	}
5830}
5831
5832static void icl_display_core_init(struct drm_i915_private *dev_priv,
5833				  bool resume)
5834{
5835	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5836	struct i915_power_well *well;
5837	u32 val;
5838
5839	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5840
5841	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
5842	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
5843	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
5844		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
5845			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
5846
5847	/* 1. Enable PCH reset handshake. */
5848	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5849
5850	if (!HAS_DISPLAY(dev_priv))
5851		return;
5852
5853	/* 2. Initialize all combo phys */
5854	intel_combo_phy_init(dev_priv);
5855
5856	/*
5857	 * 3. Enable Power Well 1 (PG1).
5858	 *    The AUX IO power wells will be enabled on demand.
5859	 */
5860	mutex_lock(&power_domains->lock);
5861	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5862	intel_power_well_enable(dev_priv, well);
5863	mutex_unlock(&power_domains->lock);
5864
5865	/* 4. Enable CDCLK. */
5866	intel_cdclk_init_hw(dev_priv);
5867
5868	if (DISPLAY_VER(dev_priv) >= 12)
5869		gen12_dbuf_slices_config(dev_priv);
5870
5871	/* 5. Enable DBUF. */
5872	gen9_dbuf_enable(dev_priv);
5873
5874	/* 6. Setup MBUS. */
5875	icl_mbus_init(dev_priv);
5876
5877	/* 7. Program arbiter BW_BUDDY registers */
5878	if (DISPLAY_VER(dev_priv) >= 12)
5879		tgl_bw_buddy_init(dev_priv);
5880
5881	if (resume && intel_dmc_has_payload(dev_priv))
5882		intel_dmc_load_program(dev_priv);
5883
5884	/* Wa_14011508470 */
5885	if (DISPLAY_VER(dev_priv) == 12) {
5886		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
5887		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
5888		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
5889	}
5890
5891	/* Wa_14011503030:xelpd */
5892	if (DISPLAY_VER(dev_priv) >= 13)
5893		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
5894}
5895
5896static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5897{
5898	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5899	struct i915_power_well *well;
5900
5901	if (!HAS_DISPLAY(dev_priv))
5902		return;
5903
5904	gen9_disable_dc_states(dev_priv);
5905
5906	/* 1. Disable all display engine functions -> already done */
5907
5908	/* 2. Disable DBUF */
5909	gen9_dbuf_disable(dev_priv);
5910
5911	/* 3. Disable CD clock */
5912	intel_cdclk_uninit_hw(dev_priv);
5913
5914	/*
5915	 * 4. Disable Power Well 1 (PG1).
5916	 *    The AUX IO power wells are toggled on demand, so they are already
5917	 *    disabled at this point.
5918	 */
5919	mutex_lock(&power_domains->lock);
5920	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5921	intel_power_well_disable(dev_priv, well);
5922	mutex_unlock(&power_domains->lock);
5923
5924	/* 5. */
5925	intel_combo_phy_uninit(dev_priv);
5926}
5927
5928static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5929{
5930	struct i915_power_well *cmn_bc =
5931		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5932	struct i915_power_well *cmn_d =
5933		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5934
5935	/*
5936	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5937	 * workaround never ever read DISPLAY_PHY_CONTROL, and
5938	 * instead maintain a shadow copy ourselves. Use the actual
5939	 * power well state and lane status to reconstruct the
5940	 * expected initial value.
5941	 */
5942	dev_priv->chv_phy_control =
5943		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5944		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5945		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5946		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5947		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5948
5949	/*
5950	 * If all lanes are disabled we leave the override disabled
5951	 * with all power down bits cleared to match the state we
5952	 * would use after disabling the port. Otherwise enable the
5953	 * override and set the lane powerdown bits according to the
5954	 * current lane status.
5955	 */
5956	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5957		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5958		unsigned int mask;
5959
5960		mask = status & DPLL_PORTB_READY_MASK;
5961		if (mask == 0xf)
5962			mask = 0x0;
5963		else
5964			dev_priv->chv_phy_control |=
5965				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5966
5967		dev_priv->chv_phy_control |=
5968			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5969
5970		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5971		if (mask == 0xf)
5972			mask = 0x0;
5973		else
5974			dev_priv->chv_phy_control |=
5975				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5976
5977		dev_priv->chv_phy_control |=
5978			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5979
5980		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5981
5982		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5983	} else {
5984		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5985	}
5986
5987	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5988		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5989		unsigned int mask;
5990
5991		mask = status & DPLL_PORTD_READY_MASK;
5992
5993		if (mask == 0xf)
5994			mask = 0x0;
5995		else
5996			dev_priv->chv_phy_control |=
5997				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5998
5999		dev_priv->chv_phy_control |=
6000			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
6001
6002		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
6003
6004		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
6005	} else {
6006		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
6007	}
6008
6009	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
6010		    dev_priv->chv_phy_control);
6011
6012	/* Defer application of initial phy_control to enabling the powerwell */
6013}
6014
6015static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
6016{
6017	struct i915_power_well *cmn =
6018		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
6019	struct i915_power_well *disp2d =
6020		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
6021
6022	/* If the display might already be active, skip this */
6023	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
6024	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
6025	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
6026		return;
6027
6028	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
6029
6030	/* cmnlane needs DPLL registers */
6031	disp2d->desc->ops->enable(dev_priv, disp2d);
6032
6033	/*
6034	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
6035	 * Need to assert and de-assert PHY SB reset by gating the
6036	 * common lane power, then un-gating it.
6037	 * Simply ungating isn't enough to reset the PHY enough to get
6038	 * ports and lanes running.
6039	 */
6040	cmn->desc->ops->disable(dev_priv, cmn);
6041}
6042
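/*
 * Read the Punit SSPM0 status for an IP block (VED/ISP) and report
 * whether that block is power gated.
 */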
6043static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
6044{
6045	bool ret;
6046
6047	vlv_punit_get(dev_priv);
6048	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
6049	vlv_punit_put(dev_priv);
6050
6051	return ret;
6052}
6053
6054static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
6055{
6056	drm_WARN(&dev_priv->drm,
6057		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
6058		 "VED not power gated\n");
6059}
6060
6061static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
6062{
6063	static const struct pci_device_id isp_ids[] = {
6064		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
6065		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
6066		{}
6067	};
6068
6069	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
6070		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
6071		 "ISP not power gated\n");
6072}
6073
6074static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
6075
6076/**
6077 * intel_power_domains_init_hw - initialize hardware power domain state
6078 * @i915: i915 device instance
6079 * @resume: true if called from a resume code path, false otherwise
6080 *
6081 * This function initializes the hardware power domain state and enables all
6082 * power wells belonging to the INIT power domain. Power wells in other
6083 * domains (and not in the INIT domain) are referenced or disabled by
6084 * intel_modeset_readout_hw_state(). After that the reference count of each
6085 * power well must match its HW enabled state, see
6086 * intel_power_domains_verify_state().
6087 *
6088 * It will return with power domains disabled (to be enabled later by
6089 * intel_power_domains_enable()) and must be paired with
6090 * intel_power_domains_driver_remove().
6091 */
6092void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
6093{
6094	struct i915_power_domains *power_domains = &i915->power_domains;
6095
6096	power_domains->initializing = true;
6097
6098	if (DISPLAY_VER(i915) >= 11) {
6099		icl_display_core_init(i915, resume);
6100	} else if (IS_CANNONLAKE(i915)) {
6101		cnl_display_core_init(i915, resume);
6102	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6103		bxt_display_core_init(i915, resume);
6104	} else if (DISPLAY_VER(i915) == 9) {
6105		skl_display_core_init(i915, resume);
6106	} else if (IS_CHERRYVIEW(i915)) {
6107		mutex_lock(&power_domains->lock);
6108		chv_phy_control_init(i915);
6109		mutex_unlock(&power_domains->lock);
6110		assert_isp_power_gated(i915);
6111	} else if (IS_VALLEYVIEW(i915)) {
6112		mutex_lock(&power_domains->lock);
6113		vlv_cmnlane_wa(i915);
6114		mutex_unlock(&power_domains->lock);
6115		assert_ved_power_gated(i915);
6116		assert_isp_power_gated(i915);
6117	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
6118		hsw_assert_cdclk(i915);
6119		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
6120	} else if (IS_IVYBRIDGE(i915)) {
6121		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
6122	}
6123
6124	/*
6125	 * Keep all power wells enabled for any dependent HW access during
6126	 * initialization and to make sure we keep BIOS enabled display HW
6127	 * resources powered until display HW readout is complete. We drop
6128	 * this reference in intel_power_domains_enable().
6129	 */
6130	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6131	power_domains->init_wakeref =
6132		intel_display_power_get(i915, POWER_DOMAIN_INIT);
6133
6134	/* Keep all power wells enabled if the user disabled power well toggling. */
6135	if (!i915->params.disable_power_well) {
6136		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
6137		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
6138									      POWER_DOMAIN_INIT);
6139	}
6140	intel_power_domains_sync_hw(i915);
6141
6142	power_domains->initializing = false;
6143}
6144
6145/**
6146 * intel_power_domains_driver_remove - deinitialize hw power domain state
6147 * @i915: i915 device instance
6148 *
6149 * De-initializes the display power domain HW state. It also ensures that the
6150 * device stays powered up so that the driver can be reloaded.
6151 *
6152 * It must be called with power domains already disabled (after a call to
6153 * intel_power_domains_disable()) and must be paired with
6154 * intel_power_domains_init_hw().
6155 */
6156void intel_power_domains_driver_remove(struct drm_i915_private *i915)
6157{
6158	intel_wakeref_t wakeref __maybe_unused =
6159		fetch_and_zero(&i915->power_domains.init_wakeref);
6160
6161	/* Remove the refcount we took to keep power well support disabled. */
6162	if (!i915->params.disable_power_well)
6163		intel_display_power_put(i915, POWER_DOMAIN_INIT,
6164					fetch_and_zero(&i915->power_domains.disable_wakeref));
6165
6166	intel_display_power_flush_work_sync(i915);
6167
6168	intel_power_domains_verify_state(i915);
6169
6170	/* Keep the power well enabled, but cancel its rpm wakeref. */
6171	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
6172}
6173
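/*
 * A sketch of the expected call ordering, based on the kernel-doc in this
 * file (not an authoritative sequence):
 *
 *   driver load:    intel_power_domains_init_hw() -> HW state readout ->
 *                   intel_power_domains_enable()
 *   driver unload:  intel_power_domains_disable() ->
 *                   intel_power_domains_driver_remove()
 *   system suspend: intel_power_domains_disable() ->
 *                   intel_power_domains_suspend()
 *   system resume:  intel_power_domains_resume() ->
 *                   intel_power_domains_enable()
 */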
6174/**
6175 * intel_power_domains_enable - enable toggling of display power wells
6176 * @i915: i915 device instance
6177 *
6178 * Enable the on-demand enabling/disabling of the display power wells. Note that
6179 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
6180 * only at specific points of the display modeset sequence, thus they are not
6181 * affected by the intel_power_domains_enable()/disable() calls. The purpose
6182 * of these function is to keep the rest of power wells enabled until the end
6183 * of display HW readout (which will acquire the power references reflecting
6184 * the current HW state).
6185 */
6186void intel_power_domains_enable(struct drm_i915_private *i915)
6187{
6188	intel_wakeref_t wakeref __maybe_unused =
6189		fetch_and_zero(&i915->power_domains.init_wakeref);
6190
6191	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
6192	intel_power_domains_verify_state(i915);
6193}
6194
6195/**
6196 * intel_power_domains_disable - disable toggling of display power wells
6197 * @i915: i915 device instance
6198 *
6199 * Disable the on-demand enabling/disabling of the display power wells. See
6200 * intel_power_domains_enable() for which power wells this call controls.
6201 */
6202void intel_power_domains_disable(struct drm_i915_private *i915)
6203{
6204	struct i915_power_domains *power_domains = &i915->power_domains;
6205
6206	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6207	power_domains->init_wakeref =
6208		intel_display_power_get(i915, POWER_DOMAIN_INIT);
6209
6210	intel_power_domains_verify_state(i915);
6211}
6212
6213/**
6214 * intel_power_domains_suspend - suspend power domain state
6215 * @i915: i915 device instance
6216 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
6217 *
6218 * This function prepares the hardware power domain state before entering
6219 * system suspend.
6220 *
6221 * It must be called with power domains already disabled (after a call to
6222 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
6223 */
6224void intel_power_domains_suspend(struct drm_i915_private *i915,
6225				 enum i915_drm_suspend_mode suspend_mode)
6226{
6227	struct i915_power_domains *power_domains = &i915->power_domains;
6228	intel_wakeref_t wakeref __maybe_unused =
6229		fetch_and_zero(&power_domains->init_wakeref);
6230
6231	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
6232
6233	/*
6234	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
6235	 * support, don't manually deinit the power domains. This also means the
6236	 * DMC firmware will stay active; it will power down any HW
6237	 * resources as required and also enable deeper system power states
6238	 * that would be blocked if the firmware was inactive.
6239	 */
6240	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
6241	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
6242	    intel_dmc_has_payload(i915)) {
6243		intel_display_power_flush_work(i915);
6244		intel_power_domains_verify_state(i915);
6245		return;
6246	}
6247
6248	/*
6249	 * Even if power well support was disabled, we still want to disable
6250	 * power wells if power domains must be deinitialized for suspend.
6251	 */
6252	if (!i915->params.disable_power_well)
6253		intel_display_power_put(i915, POWER_DOMAIN_INIT,
6254					fetch_and_zero(&i915->power_domains.disable_wakeref));
6255
6256	intel_display_power_flush_work(i915);
6257	intel_power_domains_verify_state(i915);
6258
6259	if (DISPLAY_VER(i915) >= 11)
6260		icl_display_core_uninit(i915);
6261	else if (IS_CANNONLAKE(i915))
6262		cnl_display_core_uninit(i915);
6263	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
6264		bxt_display_core_uninit(i915);
6265	else if (DISPLAY_VER(i915) == 9)
6266		skl_display_core_uninit(i915);
6267
6268	power_domains->display_core_suspended = true;
6269}
6270
6271/**
6272 * intel_power_domains_resume - resume power domain state
6273 * @i915: i915 device instance
6274 *
6275 * This function resumes the hardware power domain state during system resume.
6276 *
6277 * It will return with power domain support disabled (to be enabled later by
6278 * intel_power_domains_enable()) and must be paired with
6279 * intel_power_domains_suspend().
6280 */
6281void intel_power_domains_resume(struct drm_i915_private *i915)
6282{
6283	struct i915_power_domains *power_domains = &i915->power_domains;
6284
6285	if (power_domains->display_core_suspended) {
6286		intel_power_domains_init_hw(i915, true);
6287		power_domains->display_core_suspended = false;
6288	} else {
6289		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6290		power_domains->init_wakeref =
6291			intel_display_power_get(i915, POWER_DOMAIN_INIT);
6292	}
6293
6294	intel_power_domains_verify_state(i915);
6295}
6296
6297#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
6298
6299static void intel_power_domains_dump_info(struct drm_i915_private *i915)
6300{
6301	struct i915_power_domains *power_domains = &i915->power_domains;
6302	struct i915_power_well *power_well;
6303
6304	for_each_power_well(i915, power_well) {
6305		enum intel_display_power_domain domain;
6306
6307		drm_dbg(&i915->drm, "%-25s %d\n",
6308			power_well->desc->name, power_well->count);
6309
6310		for_each_power_domain(domain, power_well->desc->domains)
6311			drm_dbg(&i915->drm, "  %-23s %d\n",
6312				intel_display_power_domain_str(domain),
6313				power_domains->domain_use_count[domain]);
6314	}
6315}
6316
6317/**
6318 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
6319 * @i915: i915 device instance
6320 *
6321 * Verify if the reference count of each power well matches its HW enabled
6322 * state and the total refcount of the domains it belongs to. This must be
6323 * called after modeset HW state sanitization, which is responsible for
6324 * acquiring reference counts for any power wells in use and disabling the
6325 * ones left on by BIOS but not required by any active output.
6326 */
6327static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6328{
6329	struct i915_power_domains *power_domains = &i915->power_domains;
6330	struct i915_power_well *power_well;
6331	bool dump_domain_info;
6332
6333	mutex_lock(&power_domains->lock);
6334
6335	verify_async_put_domains_state(power_domains);
6336
6337	dump_domain_info = false;
6338	for_each_power_well(i915, power_well) {
6339		enum intel_display_power_domain domain;
6340		int domains_count;
6341		bool enabled;
6342
6343		enabled = power_well->desc->ops->is_enabled(i915, power_well);
6344		if ((power_well->count || power_well->desc->always_on) !=
6345		    enabled)
6346			drm_err(&i915->drm,
6347				"power well %s state mismatch (refcount %d/enabled %d)",
6348				power_well->desc->name,
6349				power_well->count, enabled);
6350
6351		domains_count = 0;
6352		for_each_power_domain(domain, power_well->desc->domains)
6353			domains_count += power_domains->domain_use_count[domain];
6354
6355		if (power_well->count != domains_count) {
6356			drm_err(&i915->drm,
6357				"power well %s refcount/domain refcount mismatch "
6358				"(refcount %d/domains refcount %d)\n",
6359				power_well->desc->name, power_well->count,
6360				domains_count);
6361			dump_domain_info = true;
6362		}
6363	}
6364
6365	if (dump_domain_info) {
6366		static bool dumped;
6367
6368		if (!dumped) {
6369			intel_power_domains_dump_info(i915);
6370			dumped = true;
6371		}
6372	}
6373
6374	mutex_unlock(&power_domains->lock);
6375}
6376
6377#else
6378
6379static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6380{
6381}
6382
6383#endif
6384
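/*
 * The helpers below pick the platform-appropriate deep power state for
 * system suspend/resume: DC9 on display version 11+ and GLK/BXT, PC8 on
 * HSW/BDW, plus the SBCLK refclk workaround on CNP..ADP PCHs.
 */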
6385void intel_display_power_suspend_late(struct drm_i915_private *i915)
6386{
6387	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
6388	    IS_BROXTON(i915)) {
6389		bxt_enable_dc9(i915);
6390	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6391		hsw_enable_pc8(i915);
6392	}
6393
6394	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
6395	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
6396		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
6397}
6398
6399void intel_display_power_resume_early(struct drm_i915_private *i915)
6400{
6401	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
6402	    IS_BROXTON(i915)) {
6403		gen9_sanitize_dc_state(i915);
6404		bxt_disable_dc9(i915);
6405	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6406		hsw_disable_pc8(i915);
6407	}
6408
6409	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
6410	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
6411		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
6412}
6413
6414void intel_display_power_suspend(struct drm_i915_private *i915)
6415{
6416	if (DISPLAY_VER(i915) >= 11) {
6417		icl_display_core_uninit(i915);
6418		bxt_enable_dc9(i915);
6419	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6420		bxt_display_core_uninit(i915);
6421		bxt_enable_dc9(i915);
6422	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6423		hsw_enable_pc8(i915);
6424	}
6425}
6426
6427void intel_display_power_resume(struct drm_i915_private *i915)
6428{
6429	if (DISPLAY_VER(i915) >= 11) {
6430		bxt_disable_dc9(i915);
6431		icl_display_core_init(i915, true);
6432		if (intel_dmc_has_payload(i915)) {
6433			if (i915->dmc.allowed_dc_mask &
6434			    DC_STATE_EN_UPTO_DC6)
6435				skl_enable_dc6(i915);
6436			else if (i915->dmc.allowed_dc_mask &
6437				 DC_STATE_EN_UPTO_DC5)
6438				gen9_enable_dc5(i915);
6439		}
6440	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6441		bxt_disable_dc9(i915);
6442		bxt_display_core_init(i915, true);
6443		if (intel_dmc_has_payload(i915) &&
6444		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
6445			gen9_enable_dc5(i915);
6446	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6447		hsw_disable_pc8(i915);
6448	}
6449}
 503 * @domain: power domain to reference
 504 *
 505 * This function grabs a power domain reference for @domain and ensures that the
 506 * power domain and all its parents are powered up. Therefore users should only
 507 * grab a reference to the innermost power domain they need.
 508 *
 509 * Any power domain reference obtained by this function must have a symmetric
 510 * call to intel_display_power_put() to release the reference again.
 511 */
 512intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
 513					enum intel_display_power_domain domain)
 514{
 515	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
 516	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 517
 518	mutex_lock(&power_domains->lock);
 519	__intel_display_power_get_domain(dev_priv, domain);
 520	mutex_unlock(&power_domains->lock);
 521
 522	return wakeref;
 523}
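
/*
 * Illustrative usage sketch, not part of the driver: a get/put pairing
 * around a burst of display HW access. The POWER_DOMAIN_PIPE_A domain is
 * just an example choice.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access pipe A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */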

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * The reference is obtained only if @domain is already enabled: if the domain
 * is currently disabled, no reference is taken and 0 is returned.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
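
/*
 * Illustrative usage sketch, not part of the driver: state readout paths
 * typically use the _if_enabled variant so that looking at the hardware
 * doesn't power it up as a side effect.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
 *	if (!wakeref)
 *		return;		(domain was off, nothing to read out)
 *	... read out hardware state ...
 *	intel_display_power_put(dev_priv, domain, wakeref);
 */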

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);
	struct intel_power_domain_mask async_put_mask;

	power_domains = &dev_priv->display.power.domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	async_put_domains_mask(power_domains, &async_put_mask);
	drm_WARN(&dev_priv->drm,
		 test_bit(domain, async_put_mask.bits),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains,
			  struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     display.power.domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade it to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.power.domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  &power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
		bitmap_copy(power_domains->async_put_domains[0].bits,
			    power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		bitmap_zero(power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	} else {
		/*
		 * Cancel the work that got queued after this one got dequeued,
		 * since here we released the corresponding async-put reference.
		 */
		cancel_delayed_work(&power_domains->async_put_work);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		set_bit(domain, power_domains->async_put_domains[1].bits);
	} else {
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}
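
/*
 * Illustrative usage sketch, not part of the driver: releasing a reference
 * via the asynchronous variant (the intel_display_power_put_async() wrapper
 * declared in the header) lets a follow-up get within the ~100 ms delay
 * reuse the still-enabled power wells instead of toggling them off and on.
 *
 *	wakeref = intel_display_power_get(i915, domain);
 *	... short burst of HW access ...
 *	intel_display_power_put_async(i915, domain, wakeref);
 */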

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}
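
/*
 * Illustrative usage sketch, not part of the driver: paths that must
 * observe the domains actually disabled (e.g. before suspend) follow an
 * async put with a flush:
 *
 *	intel_display_power_put_async(i915, domain, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 */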

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled;
 * it should not be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	intel_wakeref_t __maybe_unused wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);
}

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	intel_wakeref_t wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);

	return true;
}

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    struct intel_power_domain_mask *mask)
{
	enum intel_display_power_domain domain;

	drm_WARN_ON(&i915->drm,
		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, mask) {
		intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		clear_bit(domain, power_domain_set->mask.bits);
	}
}
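
/*
 * Illustrative usage sketch, not part of the driver: the *_in_set helpers
 * record each acquired reference in the set so the caller can drop them all
 * at once, e.g. via the intel_display_power_put_all_in_set() wrapper from
 * the header:
 *
 *	struct intel_display_power_domain_set set = {};
 *
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_TRANSCODER_A);
 *	... use the hardware ...
 *	intel_display_power_put_all_in_set(i915, &set);
 */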

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}
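
/*
 * Illustrative note, not normative: with the module parameter default of
 * disable_power_well=-1 this sanitizes to 1, i.e. unused power wells may be
 * disabled unless the user explicitly passed disable_power_well=0.
 */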

static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	if (IS_DG2(dev_priv))
		max_dc = 1;
	else if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
		DISPLAY_VER(dev_priv) >= 11 ?
	       DC_STATE_EN_DC9 : 0;

	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}
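
/*
 * Illustrative example, not normative: on a DISPLAY_VER >= 12 platform
 * (other than DG1/DG2) with the defaults enable_dc=-1 and
 * disable_power_well=1, max_dc is 4, so the function returns
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6.
 */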
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	dev_priv->display.dmc.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	dev_priv->display.dmc.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	intel_display_power_map_cleanup(&dev_priv->display.power.domains);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well)
		intel_power_well_sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, str_enable_disable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * This might run in parallel with, for instance,
	 * gen9_dc_off_power_well_enable() being called from
	 * intel_dp_detect(). Without the lock that would trip the assertion
	 * in gen9_assert_dbuf_enabled(), which could preempt us after the
	 * registers were already updated but before dev_priv was.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->display.dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->display.dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least 1 slice; we will figure out later which
	 * slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->display.dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
	u32 mask, val, i;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (DISPLAY_VER(dev_priv) == 12)
		abox_regs |= BIT(0);

	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
1334 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
1335 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
1336 * hang the machine.
1337 *
1338 * When we really reach PC8 or deeper states (not just when we allow it) we lose
1339 * the state of some registers, so when we come back from PC8+ we need to
1340 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1341 * need to take care of the registers kept by RC6. Notice that this happens even
1342 * if we don't put the device in PCI D3 state (which is what currently happens
1343 * because of the runtime PM support).
1344 *
1345 * For more, read "Display Sequences for Package C8" on the hardware
1346 * documentation.
1347 */
1348static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
1349{
1350	u32 val;
1351
1352	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
1353
1354	if (HAS_PCH_LPT_LP(dev_priv)) {
1355		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
1356		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
1357		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
1358	}
1359
1360	lpt_disable_clkout_dp(dev_priv);
1361	hsw_disable_lcpll(dev_priv, true, true);
1362}
1363
1364static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
1365{
1366	u32 val;
1367
1368	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
1369
1370	hsw_restore_lcpll(dev_priv);
1371	intel_init_pch_refclk(dev_priv);
1372
1373	if (HAS_PCH_LPT_LP(dev_priv)) {
1374		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
1375		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
1376		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
1377	}
1378}
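
/*
 * Illustrative sketch, not part of the driver: on HSW/BDW the two helpers
 * above are expected to be paired by the runtime PM paths, roughly:
 *
 *	runtime suspend:	hsw_enable_pc8(dev_priv);
 *	runtime resume:		hsw_disable_pc8(dev_priv);
 */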

static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	if (DISPLAY_VER(dev_priv) >= 14)
		reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dgpu's beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		/* Wa_1409767108:tgl,dg1,adl-s */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}
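
/*
 * Illustrative example, not normative: with 2 channels of DDR4 the lookup
 * loop above selects the { .num_channels = 2, .type = INTEL_DRAM_DDR4 }
 * entry, so each BW_BUDDY_PAGE_MASK register is programmed to 0x1F and, on
 * DISPLAY_VER 12, the TLB request timer is set to 0x8 per Wa_22010178259.
 */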

static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12) {
		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
	}

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	intel_dmc_disable_program(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->display.power.chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
	}

	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->display.power.chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might already be active, skip this */
	if (intel_power_well_is_enabled(dev_priv, cmn) &&
	    intel_power_well_is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	intel_power_well_enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	intel_power_well_disable(dev_priv, cmn);
}

static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
1889	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
1890		bxt_display_core_init(i915, resume);
1891	} else if (DISPLAY_VER(i915) == 9) {
1892		skl_display_core_init(i915, resume);
1893	} else if (IS_CHERRYVIEW(i915)) {
1894		mutex_lock(&power_domains->lock);
1895		chv_phy_control_init(i915);
1896		mutex_unlock(&power_domains->lock);
1897		assert_isp_power_gated(i915);
1898	} else if (IS_VALLEYVIEW(i915)) {
1899		mutex_lock(&power_domains->lock);
1900		vlv_cmnlane_wa(i915);
1901		mutex_unlock(&power_domains->lock);
1902		assert_ved_power_gated(i915);
1903		assert_isp_power_gated(i915);
1904	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
1905		hsw_assert_cdclk(i915);
1906		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
1907	} else if (IS_IVYBRIDGE(i915)) {
1908		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
1909	}
1910
1911	/*
1912	 * Keep all power wells enabled for any dependent HW access during
1913	 * initialization and to make sure we keep BIOS enabled display HW
1914	 * resources powered until display HW readout is complete. We drop
1915	 * this reference in intel_power_domains_enable().
1916	 */
1917	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
1918	power_domains->init_wakeref =
1919		intel_display_power_get(i915, POWER_DOMAIN_INIT);
1920
1921	/* If the user disabled power well support, keep all power wells enabled. */
1922	if (!i915->params.disable_power_well) {
1923		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
1924		i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
1925										      POWER_DOMAIN_INIT);
1926	}
1927	intel_power_domains_sync_hw(i915);
1928
1929	power_domains->initializing = false;
1930}
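
/*
 * Illustrative call ordering, pieced together from the kernel-doc in this
 * file (a sketch, not verbatim caller code):
 *
 *	intel_power_domains_init_hw(i915, false);
 *	// ... display HW state readout takes its own references ...
 *	intel_power_domains_enable(i915);	// drops init_wakeref
 *	// ... normal operation ...
 *	intel_power_domains_disable(i915);	// re-takes init_wakeref
 *	intel_power_domains_driver_remove(i915);
 */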
1931
1932/**
1933 * intel_power_domains_driver_remove - deinitialize hw power domain state
1934 * @i915: i915 device instance
1935 *
1936 * De-initializes the display power domain HW state. It also ensures that the
1937 * device stays powered up so that the driver can be reloaded.
1938 *
1939 * It must be called with power domains already disabled (after a call to
1940 * intel_power_domains_disable()) and must be paired with
1941 * intel_power_domains_init_hw().
1942 */
1943void intel_power_domains_driver_remove(struct drm_i915_private *i915)
1944{
1945	intel_wakeref_t wakeref __maybe_unused =
1946		fetch_and_zero(&i915->display.power.domains.init_wakeref);
1947
1948	/* Drop the reference we took to keep all power wells enabled. */
1949	if (!i915->params.disable_power_well)
1950		intel_display_power_put(i915, POWER_DOMAIN_INIT,
1951					fetch_and_zero(&i915->display.power.domains.disable_wakeref));
1952
1953	intel_display_power_flush_work_sync(i915);
1954
1955	intel_power_domains_verify_state(i915);
1956
1957	/* Keep the power well enabled, but cancel its rpm wakeref. */
1958	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1959}
1960
1961/**
1962 * intel_power_domains_sanitize_state - sanitize power domains state
1963 * @i915: i915 device instance
1964 *
1965 * Sanitize the power domains state during driver loading and system resume.
1966 * The function will disable all display power wells that BIOS has enabled
1967 * without a user for them (any user of a power well has taken a reference
1968 * on it by the time this function is called, after the state of all the
1969 * pipe, encoder, etc. HW resources have been sanitized).
1970 */
1971void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
1972{
1973	struct i915_power_domains *power_domains = &i915->display.power.domains;
1974	struct i915_power_well *power_well;
1975
1976	mutex_lock(&power_domains->lock);
1977
1978	for_each_power_well_reverse(i915, power_well) {
1979		if (power_well->desc->always_on || power_well->count ||
1980		    !intel_power_well_is_enabled(i915, power_well))
1981			continue;
1982
1983		drm_dbg_kms(&i915->drm,
1984			    "BIOS left unused %s power well enabled, disabling it\n",
1985			    intel_power_well_name(power_well));
1986		intel_power_well_disable(i915, power_well);
1987	}
1988
1989	mutex_unlock(&power_domains->lock);
1990}
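
/*
 * Note that the wells are walked in reverse above so that dependent wells
 * are disabled before the wells they depend on, i.e. the inverse of the
 * enable order.
 */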
1991
1992/**
1993 * intel_power_domains_enable - enable toggling of display power wells
1994 * @i915: i915 device instance
1995 *
1996 * Enable the on-demand enabling/disabling of the display power wells. Note that
1997 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
1998 * only at specific points of the display modeset sequence, thus they are not
1999 * affected by the intel_power_domains_enable()/disable() calls. The purpose
2000 * of this function is to keep the rest of the power wells enabled until the end
2001 * of display HW readout (which will acquire the power references reflecting
2002 * the current HW state).
2003 */
2004void intel_power_domains_enable(struct drm_i915_private *i915)
2005{
2006	intel_wakeref_t wakeref __maybe_unused =
2007		fetch_and_zero(&i915->display.power.domains.init_wakeref);
2008
2009	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2010	intel_power_domains_verify_state(i915);
2011}
2012
2013/**
2014 * intel_power_domains_disable - disable toggling of display power wells
2015 * @i915: i915 device instance
2016 *
2017 * Disable the on-demand enabling/disabling of the display power wells. See
2018 * intel_power_domains_enable() for which power wells this call controls.
2019 */
2020void intel_power_domains_disable(struct drm_i915_private *i915)
2021{
2022	struct i915_power_domains *power_domains = &i915->display.power.domains;
2023
2024	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
2025	power_domains->init_wakeref =
2026		intel_display_power_get(i915, POWER_DOMAIN_INIT);
2027
2028	intel_power_domains_verify_state(i915);
2029}
2030
2031/**
2032 * intel_power_domains_suspend - suspend power domain state
2033 * @i915: i915 device instance
2034 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
2035 *
2036 * This function prepares the hardware power domain state before entering
2037 * system suspend.
2038 *
2039 * It must be called with power domains already disabled (after a call to
2040 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
2041 */
2042void intel_power_domains_suspend(struct drm_i915_private *i915,
2043				 enum i915_drm_suspend_mode suspend_mode)
2044{
2045	struct i915_power_domains *power_domains = &i915->display.power.domains;
2046	intel_wakeref_t wakeref __maybe_unused =
2047		fetch_and_zero(&power_domains->init_wakeref);
2048
2049	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2050
2051	/*
2052	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
2053	 * support, don't manually deinit the power domains. This also means the
2054	 * DMC firmware will stay active, it will power down any HW
2055	 * resources as required and also enable deeper system power states
2056	 * that would be blocked if the firmware was inactive.
2057	 */
2058	if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
2059	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
2060	    intel_dmc_has_payload(i915)) {
2061		intel_display_power_flush_work(i915);
2062		intel_power_domains_verify_state(i915);
2063		return;
2064	}
2065
2066	/*
2067	 * Even if power well support was disabled, we still want to disable
2068	 * the power wells when the power domains are deinitialized for suspend.
2069	 */
2070	if (!i915->params.disable_power_well)
2071		intel_display_power_put(i915, POWER_DOMAIN_INIT,
2072					fetch_and_zero(&i915->display.power.domains.disable_wakeref));
2073
2074	intel_display_power_flush_work(i915);
2075	intel_power_domains_verify_state(i915);
2076
2077	if (DISPLAY_VER(i915) >= 11)
2078		icl_display_core_uninit(i915);
2079	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
2080		bxt_display_core_uninit(i915);
2081	else if (DISPLAY_VER(i915) == 9)
2082		skl_display_core_uninit(i915);
2083
2084	power_domains->display_core_suspended = true;
2085}
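
/*
 * display_core_suspended set above tells intel_power_domains_resume()
 * below whether a full intel_power_domains_init_hw() is needed or merely
 * re-taking init_wakeref.
 */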
2086
2087/**
2088 * intel_power_domains_resume - resume power domain state
2089 * @i915: i915 device instance
2090 *
2091 * This function resumes the hardware power domain state during system resume.
2092 *
2093 * It will return with power domain support disabled (to be enabled later by
2094 * intel_power_domains_enable()) and must be paired with
2095 * intel_power_domains_suspend().
2096 */
2097void intel_power_domains_resume(struct drm_i915_private *i915)
2098{
2099	struct i915_power_domains *power_domains = &i915->display.power.domains;
2100
2101	if (power_domains->display_core_suspended) {
2102		intel_power_domains_init_hw(i915, true);
2103		power_domains->display_core_suspended = false;
2104	} else {
2105		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
2106		power_domains->init_wakeref =
2107			intel_display_power_get(i915, POWER_DOMAIN_INIT);
2108	}
2109
2110	intel_power_domains_verify_state(i915);
2111}
2112
2113#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2114
2115static void intel_power_domains_dump_info(struct drm_i915_private *i915)
2116{
2117	struct i915_power_domains *power_domains = &i915->display.power.domains;
2118	struct i915_power_well *power_well;
2119
2120	for_each_power_well(i915, power_well) {
2121		enum intel_display_power_domain domain;
2122
2123		drm_dbg(&i915->drm, "%-25s %d\n",
2124			intel_power_well_name(power_well), intel_power_well_refcount(power_well));
2125
2126		for_each_power_domain(domain, intel_power_well_domains(power_well))
2127			drm_dbg(&i915->drm, "  %-23s %d\n",
2128				intel_display_power_domain_str(domain),
2129				power_domains->domain_use_count[domain]);
2130	}
2131}
2132
2133/**
2134 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2135 * @i915: i915 device instance
2136 *
2137 * Verify that the reference count of each power well matches its HW enabled
2138 * state and the total refcount of the domains it belongs to. This must be
2139 * called after modeset HW state sanitization, which is responsible for
2140 * acquiring reference counts for any power wells in use and disabling the
2141 * ones left on by BIOS but not required by any active output.
2142 */
2143static void intel_power_domains_verify_state(struct drm_i915_private *i915)
2144{
2145	struct i915_power_domains *power_domains = &i915->display.power.domains;
2146	struct i915_power_well *power_well;
2147	bool dump_domain_info;
2148
2149	mutex_lock(&power_domains->lock);
2150
2151	verify_async_put_domains_state(power_domains);
2152
2153	dump_domain_info = false;
2154	for_each_power_well(i915, power_well) {
2155		enum intel_display_power_domain domain;
2156		int domains_count;
2157		bool enabled;
2158
2159		enabled = intel_power_well_is_enabled(i915, power_well);
2160		if ((intel_power_well_refcount(power_well) ||
2161		     intel_power_well_is_always_on(power_well)) !=
2162		    enabled)
2163			drm_err(&i915->drm,
2164				"power well %s state mismatch (refcount %d/enabled %d)\n",
2165				intel_power_well_name(power_well),
2166				intel_power_well_refcount(power_well), enabled);
2167
2168		domains_count = 0;
2169		for_each_power_domain(domain, intel_power_well_domains(power_well))
2170			domains_count += power_domains->domain_use_count[domain];
2171
2172		if (intel_power_well_refcount(power_well) != domains_count) {
2173			drm_err(&i915->drm,
2174				"power well %s refcount/domain refcount mismatch "
2175				"(refcount %d/domains refcount %d)\n",
2176				intel_power_well_name(power_well),
2177				intel_power_well_refcount(power_well),
2178				domains_count);
2179			dump_domain_info = true;
2180		}
2181	}
2182
2183	if (dump_domain_info) {
2184		static bool dumped;
2185
2186		if (!dumped) {
2187			intel_power_domains_dump_info(i915);
2188			dumped = true;
2189		}
2190	}
2191
2192	mutex_unlock(&power_domains->lock);
2193}
2194
2195#else
2196
2197static void intel_power_domains_verify_state(struct drm_i915_private *i915)
2198{
2199}
2200
2201#endif
2202
2203void intel_display_power_suspend_late(struct drm_i915_private *i915)
2204{
2205	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
2206	    IS_BROXTON(i915)) {
2207		bxt_enable_dc9(i915);
2208	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2209		hsw_enable_pc8(i915);
2210	}
2211
2212	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2213	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2214		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
2215}
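
/*
 * The SBCLK_RUN_REFCLK_DIS bit set above is cleared again by the matching
 * rmw in intel_display_power_resume_early() below.
 */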
2216
2217void intel_display_power_resume_early(struct drm_i915_private *i915)
2218{
2219	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
2220	    IS_BROXTON(i915)) {
2221		gen9_sanitize_dc_state(i915);
2222		bxt_disable_dc9(i915);
2223	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2224		hsw_disable_pc8(i915);
2225	}
2226
2227	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2228	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2229		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
2230}
2231
2232void intel_display_power_suspend(struct drm_i915_private *i915)
2233{
2234	if (DISPLAY_VER(i915) >= 11) {
2235		icl_display_core_uninit(i915);
2236		bxt_enable_dc9(i915);
2237	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2238		bxt_display_core_uninit(i915);
2239		bxt_enable_dc9(i915);
2240	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2241		hsw_enable_pc8(i915);
2242	}
2243}
2244
2245void intel_display_power_resume(struct drm_i915_private *i915)
2246{
2247	if (DISPLAY_VER(i915) >= 11) {
2248		bxt_disable_dc9(i915);
2249		icl_display_core_init(i915, true);
2250		if (intel_dmc_has_payload(i915)) {
2251			if (i915->display.dmc.allowed_dc_mask &
2252			    DC_STATE_EN_UPTO_DC6)
2253				skl_enable_dc6(i915);
2254			else if (i915->display.dmc.allowed_dc_mask &
2255				 DC_STATE_EN_UPTO_DC5)
2256				gen9_enable_dc5(i915);
2257		}
2258	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2259		bxt_disable_dc9(i915);
2260		bxt_display_core_init(i915, true);
2261		if (intel_dmc_has_payload(i915) &&
2262		    (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2263			gen9_enable_dc5(i915);
2264	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2265		hsw_disable_pc8(i915);
2266	}
2267}
2268
2269void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
2270{
2271	struct i915_power_domains *power_domains = &i915->display.power.domains;
2272	int i;
2273
2274	mutex_lock(&power_domains->lock);
2275
2276	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2277	for (i = 0; i < power_domains->power_well_count; i++) {
2278		struct i915_power_well *power_well;
2279		enum intel_display_power_domain power_domain;
2280
2281		power_well = &power_domains->power_wells[i];
2282		seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
2283			   intel_power_well_refcount(power_well));
2284
2285		for_each_power_domain(power_domain, intel_power_well_domains(power_well))
2286			seq_printf(m, "  %-23s %d\n",
2287				   intel_display_power_domain_str(power_domain),
2288				   power_domains->domain_use_count[power_domain]);
2289	}
2290
2291	mutex_unlock(&power_domains->lock);
2292}
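
/*
 * The dump above presumably backs the power domain info file in debugfs;
 * the debugfs wiring itself lives outside this file.
 */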
2293
2294struct intel_ddi_port_domains {
2295	enum port port_start;
2296	enum port port_end;
2297	enum aux_ch aux_ch_start;
2298	enum aux_ch aux_ch_end;
2299
2300	enum intel_display_power_domain ddi_lanes;
2301	enum intel_display_power_domain ddi_io;
2302	enum intel_display_power_domain aux_io;
2303	enum intel_display_power_domain aux_legacy_usbc;
2304	enum intel_display_power_domain aux_tbt;
2305};
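
/*
 * The lookup helpers below compute a domain as base + (port - port_start)
 * (and similarly for AUX channels), so each per-range domain enum span is
 * assumed to be contiguous and ordered the same way as the corresponding
 * port/aux_ch enum values.
 */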
2306
2307static const struct intel_ddi_port_domains
2308i9xx_port_domains[] = {
2309	{
2310		.port_start = PORT_A,
2311		.port_end = PORT_F,
2312		.aux_ch_start = AUX_CH_A,
2313		.aux_ch_end = AUX_CH_F,
2314
2315		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2316		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2317		.aux_io = POWER_DOMAIN_AUX_IO_A,
2318		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2319		.aux_tbt = POWER_DOMAIN_INVALID,
2320	},
2321};
2322
2323static const struct intel_ddi_port_domains
2324d11_port_domains[] = {
2325	{
2326		.port_start = PORT_A,
2327		.port_end = PORT_B,
2328		.aux_ch_start = AUX_CH_A,
2329		.aux_ch_end = AUX_CH_B,
2330
2331		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2332		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2333		.aux_io = POWER_DOMAIN_AUX_IO_A,
2334		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2335		.aux_tbt = POWER_DOMAIN_INVALID,
2336	}, {
2337		.port_start = PORT_C,
2338		.port_end = PORT_F,
2339		.aux_ch_start = AUX_CH_C,
2340		.aux_ch_end = AUX_CH_F,
2341
2342		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
2343		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
2344		.aux_io = POWER_DOMAIN_AUX_IO_C,
2345		.aux_legacy_usbc = POWER_DOMAIN_AUX_C,
2346		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
2347	},
2348};
2349
2350static const struct intel_ddi_port_domains
2351d12_port_domains[] = {
2352	{
2353		.port_start = PORT_A,
2354		.port_end = PORT_C,
2355		.aux_ch_start = AUX_CH_A,
2356		.aux_ch_end = AUX_CH_C,
2357
2358		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2359		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2360		.aux_io = POWER_DOMAIN_AUX_IO_A,
2361		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2362		.aux_tbt = POWER_DOMAIN_INVALID,
2363	}, {
2364		.port_start = PORT_TC1,
2365		.port_end = PORT_TC6,
2366		.aux_ch_start = AUX_CH_USBC1,
2367		.aux_ch_end = AUX_CH_USBC6,
2368
2369		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2370		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2371		.aux_io = POWER_DOMAIN_INVALID,
2372		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2373		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
2374	},
2375};
2376
2377static const struct intel_ddi_port_domains
2378d13_port_domains[] = {
2379	{
2380		.port_start = PORT_A,
2381		.port_end = PORT_C,
2382		.aux_ch_start = AUX_CH_A,
2383		.aux_ch_end = AUX_CH_C,
2384
2385		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2386		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2387		.aux_io = POWER_DOMAIN_AUX_IO_A,
2388		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2389		.aux_tbt = POWER_DOMAIN_INVALID,
2390	}, {
2391		.port_start = PORT_TC1,
2392		.port_end = PORT_TC4,
2393		.aux_ch_start = AUX_CH_USBC1,
2394		.aux_ch_end = AUX_CH_USBC4,
2395
2396		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2397		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2398		.aux_io = POWER_DOMAIN_INVALID,
2399		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2400		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
2401	}, {
2402		.port_start = PORT_D_XELPD,
2403		.port_end = PORT_E_XELPD,
2404		.aux_ch_start = AUX_CH_D_XELPD,
2405		.aux_ch_end = AUX_CH_E_XELPD,
2406
2407		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
2408		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
2409		.aux_io = POWER_DOMAIN_AUX_IO_D,
2410		.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
2411		.aux_tbt = POWER_DOMAIN_INVALID,
2412	},
2413};
2414
2415static void
2416intel_port_domains_for_platform(struct drm_i915_private *i915,
2417				const struct intel_ddi_port_domains **domains,
2418				int *domains_size)
2419{
2420	if (DISPLAY_VER(i915) >= 13) {
2421		*domains = d13_port_domains;
2422		*domains_size = ARRAY_SIZE(d13_port_domains);
2423	} else if (DISPLAY_VER(i915) >= 12) {
2424		*domains = d12_port_domains;
2425		*domains_size = ARRAY_SIZE(d12_port_domains);
2426	} else if (DISPLAY_VER(i915) >= 11) {
2427		*domains = d11_port_domains;
2428		*domains_size = ARRAY_SIZE(d11_port_domains);
2429	} else {
2430		*domains = i9xx_port_domains;
2431		*domains_size = ARRAY_SIZE(i9xx_port_domains);
2432	}
2433}
2434
2435static const struct intel_ddi_port_domains *
2436intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
2437{
2438	const struct intel_ddi_port_domains *domains;
2439	int domains_size;
2440	int i;
2441
2442	intel_port_domains_for_platform(i915, &domains, &domains_size);
2443	for (i = 0; i < domains_size; i++)
2444		if (port >= domains[i].port_start && port <= domains[i].port_end)
2445			return &domains[i];
2446
2447	return NULL;
2448}
2449
2450enum intel_display_power_domain
2451intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
2452{
2453	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
2454
2455	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
2456		return POWER_DOMAIN_PORT_DDI_IO_A;
2457
2458	return domains->ddi_io + (int)(port - domains->port_start);
2459}
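
/*
 * Worked example (illustrative, assuming a platform using
 * d12_port_domains above):
 *
 *	intel_display_power_ddi_io_domain(i915, PORT_TC3)
 *		== POWER_DOMAIN_PORT_DDI_IO_TC1 + (PORT_TC3 - PORT_TC1)
 */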
2460
2461enum intel_display_power_domain
2462intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
2463{
2464	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
2465
2466	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
2467		return POWER_DOMAIN_PORT_DDI_LANES_A;
2468
2469	return domains->ddi_lanes + (int)(port - domains->port_start);
2470}
2471
2472static const struct intel_ddi_port_domains *
2473intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
2474{
2475	const struct intel_ddi_port_domains *domains;
2476	int domains_size;
2477	int i;
2478
2479	intel_port_domains_for_platform(i915, &domains, &domains_size);
2480	for (i = 0; i < domains_size; i++)
2481		if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
2482			return &domains[i];
2483
2484	return NULL;
2485}
2486
2487enum intel_display_power_domain
2488intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2489{
2490	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2491
2492	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
2493		return POWER_DOMAIN_AUX_IO_A;
2494
2495	return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
2496}
2497
2498enum intel_display_power_domain
2499intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2500{
2501	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2502
2503	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
2504		return POWER_DOMAIN_AUX_A;
2505
2506	return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
2507}
2508
2509enum intel_display_power_domain
2510intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2511{
2512	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2513
2514	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
2515		return POWER_DOMAIN_AUX_TBT1;
2516
2517	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
2518}
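
/*
 * Worked example (illustrative, assuming d12_port_domains): for
 * AUX_CH_USBC3 this resolves to
 * POWER_DOMAIN_AUX_TBT1 + (AUX_CH_USBC3 - AUX_CH_USBC1), i.e. the third
 * TBT AUX domain.
 */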