/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
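
/*
 * Illustrative sketch, not part of the driver: the usage pattern the DOC
 * comment above describes. A power domain reference must bracket any access
 * to hardware in that domain, and every get needs a matching put.
 * POWER_DOMAIN_AUDIO is just an example domain here:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 *
 *	... access audio hardware, guaranteed to be powered up ...
 *
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 *
 * Which power wells this actually toggles is platform specific and is
 * resolved through the tables and ops further down in this file.
 */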

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		for_each_if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
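
/*
 * Illustrative sketch, not part of the driver: the intended use of the
 * check above in hardware state readout, with the relevant modeset locks
 * held and an early bail-out instead of reading unpowered registers:
 *
 *	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		return false;
 *
 *	tmp = I915_READ(PIPECONF(PIPE_A));
 *
 * All other code paths should take a real reference with
 * intel_display_power_get() instead of sampling the state like this.
 */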

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled, runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So we touch the VGA MSR register here, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So we touch the VGA MSR register here, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->id == SKL_DISP_PW_2) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->id == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (intel_wait_for_register(dev_priv,
						    HSW_PWR_WELL_DRIVER,
						    HSW_PWR_WELL_STATE_ENABLED,
						    HSW_PWR_WELL_STATE_ENABLED,
						    20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
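
/*
 * Illustrative sketch, not part of the driver: as the comment above
 * hsw_power_well_post_enable() notes, four registers can request this power
 * well. Conceptually the hardware keeps the well enabled as long as any
 * agent requests it, roughly:
 *
 *	requested = (I915_READ(HSW_PWR_WELL_BIOS) |
 *		     I915_READ(HSW_PWR_WELL_DRIVER) |
 *		     I915_READ(HSW_PWR_WELL_KVMR) |
 *		     I915_READ(HSW_PWR_WELL_DEBUG)) &
 *		    HSW_PWR_WELL_ENABLE_REQUEST;
 *
 * so hsw_set_power_well() clearing HSW_PWR_WELL_DRIVER only drops the
 * driver's own request; the well powers down once no register requests it.
 */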

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
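
/*
 * Illustrative sketch, not part of the driver: each *_POWER_DOMAINS mask
 * above ends up as the ->domains field of an i915_power_well, so testing
 * whether a well covers a domain is a simple bitmask check, e.g.:
 *
 *	bool covered = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS &
 *		       BIT(POWER_DOMAIN_AUDIO);
 *
 * This is exactly the test for_each_power_well() applies when walking the
 * wells for a given domain mask.
 */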

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the dmc keeps returning the old value. Re-read
	 * the register enough times to make sure the write really stuck,
	 * and keep rewriting until we are confident that the state is
	 * exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need at most one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->id;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->id) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->id);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->id);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->id);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
			     !I915_READ(HSW_PWR_WELL_BIOS),
			     "Power well status should not be enabled when the request is to disable, unless set by the BIOS!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->id == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->id == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over from the BIOS, so clear any requests made by it
	 * since the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
		SKL_POWER_WELL_STATE(power_well->id);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->data);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
	else
		bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	WARN_ON(dev_priv->cdclk_freq !=
		dev_priv->display.get_display_clock_speed(dev_priv));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		gen9_dc_off_power_well_enable(dev_priv, power_well);
	else
		gen9_dc_off_power_well_disable(dev_priv, power_well);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * parts of the HW/SW state that will be initialized explicitly
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->id == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If the CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->id;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->id;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}
1669
1670/**
1671 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1672 * @dev_priv: i915 device instance
1673 * @domain: power domain to reference
1674 *
1675 * This function grabs a power domain reference for @domain only if the
1676 * domain is already enabled; it returns false and takes no reference when
1677 * the domain is disabled, without powering it up.
1678 *
1679 * Any power domain reference obtained by this function must have a symmetric
1680 * call to intel_display_power_put() to release the reference again.
1681 */
1682bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1683					enum intel_display_power_domain domain)
1684{
1685	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1686	bool is_enabled;
1687
1688	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1689		return false;
1690
1691	mutex_lock(&power_domains->lock);
1692
1693	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1694		__intel_display_power_get_domain(dev_priv, domain);
1695		is_enabled = true;
1696	} else {
1697		is_enabled = false;
1698	}
1699
1700	mutex_unlock(&power_domains->lock);
1701
1702	if (!is_enabled)
1703		intel_runtime_pm_put(dev_priv);
1704
1705	return is_enabled;
1706}
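
/*
 * Usage sketch (illustrative only, not driver code): readers that merely
 * want to sample state, e.g. for debugfs, use the _if_enabled variant so a
 * powered-down domain is never woken up just for the read.
 */
static bool __maybe_unused example_display_power_peek(struct drm_i915_private *dev_priv)
{
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
		return false; /* domain is off, report without touching hw */

	/* ... read registers backed by the PIPE_A power domain ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
	return true;
}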
1707
1708/**
1709 * intel_display_power_put - release a power domain reference
1710 * @dev_priv: i915 device instance
1711 * @domain: power domain to reference
1712 *
1713 * This function drops the power domain reference obtained by
1714 * intel_display_power_get() and might power down the corresponding hardware
1715 * block right away if this is the last reference.
1716 */
1717void intel_display_power_put(struct drm_i915_private *dev_priv,
1718			     enum intel_display_power_domain domain)
1719{
1720	struct i915_power_domains *power_domains;
1721	struct i915_power_well *power_well;
1722	int i;
1723
1724	power_domains = &dev_priv->power_domains;
1725
1726	mutex_lock(&power_domains->lock);
1727
1728	WARN(!power_domains->domain_use_count[domain],
1729	     "Use count on domain %s is already zero\n",
1730	     intel_display_power_domain_str(domain));
1731	power_domains->domain_use_count[domain]--;
1732
1733	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
1734		intel_power_well_put(dev_priv, power_well);
1735
1736	mutex_unlock(&power_domains->lock);
1737
1738	intel_runtime_pm_put(dev_priv);
1739}
1740
1741#define HSW_DISPLAY_POWER_DOMAINS (			\
1742	BIT(POWER_DOMAIN_PIPE_B) |			\
1743	BIT(POWER_DOMAIN_PIPE_C) |			\
1744	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
1745	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1746	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1747	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
1748	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
1749	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
1750	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1751	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1752	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1753	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1754	BIT(POWER_DOMAIN_VGA) |				\
1755	BIT(POWER_DOMAIN_AUDIO) |			\
1756	BIT(POWER_DOMAIN_INIT))
1757
1758#define BDW_DISPLAY_POWER_DOMAINS (			\
1759	BIT(POWER_DOMAIN_PIPE_B) |			\
1760	BIT(POWER_DOMAIN_PIPE_C) |			\
1761	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1762	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1763	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
1764	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
1765	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
1766	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1767	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1768	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1769	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1770	BIT(POWER_DOMAIN_VGA) |				\
1771	BIT(POWER_DOMAIN_AUDIO) |			\
1772	BIT(POWER_DOMAIN_INIT))
1773
1774#define VLV_DISPLAY_POWER_DOMAINS (		\
1775	BIT(POWER_DOMAIN_PIPE_A) |		\
1776	BIT(POWER_DOMAIN_PIPE_B) |		\
1777	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1778	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1779	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
1780	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
1781	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1782	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1783	BIT(POWER_DOMAIN_PORT_DSI) |		\
1784	BIT(POWER_DOMAIN_PORT_CRT) |		\
1785	BIT(POWER_DOMAIN_VGA) |			\
1786	BIT(POWER_DOMAIN_AUDIO) |		\
1787	BIT(POWER_DOMAIN_AUX_B) |		\
1788	BIT(POWER_DOMAIN_AUX_C) |		\
1789	BIT(POWER_DOMAIN_GMBUS) |		\
1790	BIT(POWER_DOMAIN_INIT))
1791
1792#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1793	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1794	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1795	BIT(POWER_DOMAIN_PORT_CRT) |		\
1796	BIT(POWER_DOMAIN_AUX_B) |		\
1797	BIT(POWER_DOMAIN_AUX_C) |		\
1798	BIT(POWER_DOMAIN_INIT))
1799
1800#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
1801	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1802	BIT(POWER_DOMAIN_AUX_B) |		\
1803	BIT(POWER_DOMAIN_INIT))
1804
1805#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1806	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1807	BIT(POWER_DOMAIN_AUX_B) |		\
1808	BIT(POWER_DOMAIN_INIT))
1809
1810#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
1811	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1812	BIT(POWER_DOMAIN_AUX_C) |		\
1813	BIT(POWER_DOMAIN_INIT))
1814
1815#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
1816	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1817	BIT(POWER_DOMAIN_AUX_C) |		\
1818	BIT(POWER_DOMAIN_INIT))
1819
1820#define CHV_DISPLAY_POWER_DOMAINS (		\
1821	BIT(POWER_DOMAIN_PIPE_A) |		\
1822	BIT(POWER_DOMAIN_PIPE_B) |		\
1823	BIT(POWER_DOMAIN_PIPE_C) |		\
1824	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1825	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1826	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
1827	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
1828	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
1829	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
1830	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1831	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1832	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1833	BIT(POWER_DOMAIN_PORT_DSI) |		\
1834	BIT(POWER_DOMAIN_VGA) |			\
1835	BIT(POWER_DOMAIN_AUDIO) |		\
1836	BIT(POWER_DOMAIN_AUX_B) |		\
1837	BIT(POWER_DOMAIN_AUX_C) |		\
1838	BIT(POWER_DOMAIN_AUX_D) |		\
1839	BIT(POWER_DOMAIN_GMBUS) |		\
1840	BIT(POWER_DOMAIN_INIT))
1841
1842#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
1843	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1844	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1845	BIT(POWER_DOMAIN_AUX_B) |		\
1846	BIT(POWER_DOMAIN_AUX_C) |		\
1847	BIT(POWER_DOMAIN_INIT))
1848
1849#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
1850	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1851	BIT(POWER_DOMAIN_AUX_D) |		\
1852	BIT(POWER_DOMAIN_INIT))
1853
1854static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1855	.sync_hw = i9xx_always_on_power_well_noop,
1856	.enable = i9xx_always_on_power_well_noop,
1857	.disable = i9xx_always_on_power_well_noop,
1858	.is_enabled = i9xx_always_on_power_well_enabled,
1859};
1860
1861static const struct i915_power_well_ops chv_pipe_power_well_ops = {
1862	.sync_hw = chv_pipe_power_well_sync_hw,
1863	.enable = chv_pipe_power_well_enable,
1864	.disable = chv_pipe_power_well_disable,
1865	.is_enabled = chv_pipe_power_well_enabled,
1866};
1867
1868static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1869	.sync_hw = vlv_power_well_sync_hw,
1870	.enable = chv_dpio_cmn_power_well_enable,
1871	.disable = chv_dpio_cmn_power_well_disable,
1872	.is_enabled = vlv_power_well_enabled,
1873};
1874
1875static struct i915_power_well i9xx_always_on_power_well[] = {
1876	{
1877		.name = "always-on",
1878		.always_on = 1,
1879		.domains = POWER_DOMAIN_MASK,
1880		.ops = &i9xx_always_on_power_well_ops,
1881	},
1882};
1883
1884static const struct i915_power_well_ops hsw_power_well_ops = {
1885	.sync_hw = hsw_power_well_sync_hw,
1886	.enable = hsw_power_well_enable,
1887	.disable = hsw_power_well_disable,
1888	.is_enabled = hsw_power_well_enabled,
1889};
1890
1891static const struct i915_power_well_ops skl_power_well_ops = {
1892	.sync_hw = skl_power_well_sync_hw,
1893	.enable = skl_power_well_enable,
1894	.disable = skl_power_well_disable,
1895	.is_enabled = skl_power_well_enabled,
1896};
1897
1898static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1899	.sync_hw = gen9_dc_off_power_well_sync_hw,
1900	.enable = gen9_dc_off_power_well_enable,
1901	.disable = gen9_dc_off_power_well_disable,
1902	.is_enabled = gen9_dc_off_power_well_enabled,
1903};
1904
1905static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
1906	.sync_hw = bxt_dpio_cmn_power_well_sync_hw,
1907	.enable = bxt_dpio_cmn_power_well_enable,
1908	.disable = bxt_dpio_cmn_power_well_disable,
1909	.is_enabled = bxt_dpio_cmn_power_well_enabled,
1910};
1911
1912static struct i915_power_well hsw_power_wells[] = {
1913	{
1914		.name = "always-on",
1915		.always_on = 1,
1916		.domains = POWER_DOMAIN_MASK,
1917		.ops = &i9xx_always_on_power_well_ops,
1918	},
1919	{
1920		.name = "display",
1921		.domains = HSW_DISPLAY_POWER_DOMAINS,
1922		.ops = &hsw_power_well_ops,
1923	},
1924};
1925
1926static struct i915_power_well bdw_power_wells[] = {
1927	{
1928		.name = "always-on",
1929		.always_on = 1,
1930		.domains = POWER_DOMAIN_MASK,
1931		.ops = &i9xx_always_on_power_well_ops,
1932	},
1933	{
1934		.name = "display",
1935		.domains = BDW_DISPLAY_POWER_DOMAINS,
1936		.ops = &hsw_power_well_ops,
1937	},
1938};
1939
1940static const struct i915_power_well_ops vlv_display_power_well_ops = {
1941	.sync_hw = vlv_power_well_sync_hw,
1942	.enable = vlv_display_power_well_enable,
1943	.disable = vlv_display_power_well_disable,
1944	.is_enabled = vlv_power_well_enabled,
1945};
1946
1947static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
1948	.sync_hw = vlv_power_well_sync_hw,
1949	.enable = vlv_dpio_cmn_power_well_enable,
1950	.disable = vlv_dpio_cmn_power_well_disable,
1951	.is_enabled = vlv_power_well_enabled,
1952};
1953
1954static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
1955	.sync_hw = vlv_power_well_sync_hw,
1956	.enable = vlv_power_well_enable,
1957	.disable = vlv_power_well_disable,
1958	.is_enabled = vlv_power_well_enabled,
1959};
1960
1961static struct i915_power_well vlv_power_wells[] = {
1962	{
1963		.name = "always-on",
1964		.always_on = 1,
1965		.domains = POWER_DOMAIN_MASK,
1966		.ops = &i9xx_always_on_power_well_ops,
1967		.id = PUNIT_POWER_WELL_ALWAYS_ON,
1968	},
1969	{
1970		.name = "display",
1971		.domains = VLV_DISPLAY_POWER_DOMAINS,
1972		.id = PUNIT_POWER_WELL_DISP2D,
1973		.ops = &vlv_display_power_well_ops,
1974	},
1975	{
1976		.name = "dpio-tx-b-01",
1977		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1978			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1979			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1980			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1981		.ops = &vlv_dpio_power_well_ops,
1982		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
1983	},
1984	{
1985		.name = "dpio-tx-b-23",
1986		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1987			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1988			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1989			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1990		.ops = &vlv_dpio_power_well_ops,
1991		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
1992	},
1993	{
1994		.name = "dpio-tx-c-01",
1995		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1996			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1997			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1998			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1999		.ops = &vlv_dpio_power_well_ops,
2000		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
2001	},
2002	{
2003		.name = "dpio-tx-c-23",
2004		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2005			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2006			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2007			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2008		.ops = &vlv_dpio_power_well_ops,
2009		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
2010	},
2011	{
2012		.name = "dpio-common",
2013		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2014		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2015		.ops = &vlv_dpio_cmn_power_well_ops,
2016	},
2017};
2018
2019static struct i915_power_well chv_power_wells[] = {
2020	{
2021		.name = "always-on",
2022		.always_on = 1,
2023		.domains = POWER_DOMAIN_MASK,
2024		.ops = &i9xx_always_on_power_well_ops,
2025	},
2026	{
2027		.name = "display",
2028		/*
2029		 * Pipe A power well is the new disp2d well. Pipe B and C
2030		 * power wells don't actually exist. Pipe A power well is
2031		 * required for any pipe to work.
2032		 */
2033		.domains = CHV_DISPLAY_POWER_DOMAINS,
2034		.id = PIPE_A,
2035		.ops = &chv_pipe_power_well_ops,
2036	},
2037	{
2038		.name = "dpio-common-bc",
2039		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2040		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2041		.ops = &chv_dpio_cmn_power_well_ops,
2042	},
2043	{
2044		.name = "dpio-common-d",
2045		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2046		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
2047		.ops = &chv_dpio_cmn_power_well_ops,
2048	},
2049};
2050
2051bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2052				    int power_well_id)
2053{
2054	struct i915_power_well *power_well;
2055	bool ret;
2056
2057	power_well = lookup_power_well(dev_priv, power_well_id);
2058	ret = power_well->ops->is_enabled(dev_priv, power_well);
2059
2060	return ret;
2061}
2062
2063static struct i915_power_well skl_power_wells[] = {
2064	{
2065		.name = "always-on",
2066		.always_on = 1,
2067		.domains = POWER_DOMAIN_MASK,
2068		.ops = &i9xx_always_on_power_well_ops,
2069		.id = SKL_DISP_PW_ALWAYS_ON,
2070	},
2071	{
2072		.name = "power well 1",
2073		/* Handled by the DMC firmware */
2074		.domains = 0,
2075		.ops = &skl_power_well_ops,
2076		.id = SKL_DISP_PW_1,
2077	},
2078	{
2079		.name = "MISC IO power well",
2080		/* Handled by the DMC firmware */
2081		.domains = 0,
2082		.ops = &skl_power_well_ops,
2083		.id = SKL_DISP_PW_MISC_IO,
2084	},
2085	{
2086		.name = "DC off",
2087		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2088		.ops = &gen9_dc_off_power_well_ops,
2089		.id = SKL_DISP_PW_DC_OFF,
2090	},
2091	{
2092		.name = "power well 2",
2093		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2094		.ops = &skl_power_well_ops,
2095		.id = SKL_DISP_PW_2,
2096	},
2097	{
2098		.name = "DDI A/E power well",
2099		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
2100		.ops = &skl_power_well_ops,
2101		.id = SKL_DISP_PW_DDI_A_E,
2102	},
2103	{
2104		.name = "DDI B power well",
2105		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
2106		.ops = &skl_power_well_ops,
2107		.id = SKL_DISP_PW_DDI_B,
2108	},
2109	{
2110		.name = "DDI C power well",
2111		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
2112		.ops = &skl_power_well_ops,
2113		.id = SKL_DISP_PW_DDI_C,
2114	},
2115	{
2116		.name = "DDI D power well",
2117		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
2118		.ops = &skl_power_well_ops,
2119		.id = SKL_DISP_PW_DDI_D,
2120	},
2121};
2122
2123static struct i915_power_well bxt_power_wells[] = {
2124	{
2125		.name = "always-on",
2126		.always_on = 1,
2127		.domains = POWER_DOMAIN_MASK,
2128		.ops = &i9xx_always_on_power_well_ops,
2129	},
2130	{
2131		.name = "power well 1",
2132		.domains = 0,
2133		.ops = &skl_power_well_ops,
2134		.id = SKL_DISP_PW_1,
2135	},
2136	{
2137		.name = "DC off",
2138		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2139		.ops = &gen9_dc_off_power_well_ops,
2140		.id = SKL_DISP_PW_DC_OFF,
2141	},
2142	{
2143		.name = "power well 2",
2144		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2145		.ops = &skl_power_well_ops,
2146		.id = SKL_DISP_PW_2,
2147	},
2148	{
2149		.name = "dpio-common-a",
2150		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2151		.ops = &bxt_dpio_cmn_power_well_ops,
2152		.id = BXT_DPIO_CMN_A,
2153		.data = DPIO_PHY1,
2154	},
2155	{
2156		.name = "dpio-common-bc",
2157		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2158		.ops = &bxt_dpio_cmn_power_well_ops,
2159		.id = BXT_DPIO_CMN_BC,
2160		.data = DPIO_PHY0,
2161	},
2162};
2163
2164static int
2165sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
2166				   int disable_power_well)
2167{
2168	if (disable_power_well >= 0)
2169		return !!disable_power_well;
2170
2171	return 1;
2172}
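
/*
 * i.e. the automatic value i915.disable_power_well=-1 resolves to 1 (allow
 * powering down unused wells), while explicit non-negative values are
 * passed through, normalized to 0/1 by the double negation.
 */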
2173
2174static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2175				    int enable_dc)
2176{
2177	uint32_t mask;
2178	int requested_dc;
2179	int max_dc;
2180
2181	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
2182		max_dc = 2;
2183		mask = 0;
2184	} else if (IS_BROXTON(dev_priv)) {
2185		max_dc = 1;
2186		/*
2187		 * DC9 has a separate HW flow from the rest of the DC states,
2188		 * not depending on the DMC firmware. It's needed by system
2189		 * suspend/resume, so allow it unconditionally.
2190		 */
2191		mask = DC_STATE_EN_DC9;
2192	} else {
2193		max_dc = 0;
2194		mask = 0;
2195	}
2196
2197	if (!i915.disable_power_well)
2198		max_dc = 0;
2199
2200	if (enable_dc >= 0 && enable_dc <= max_dc) {
2201		requested_dc = enable_dc;
2202	} else if (enable_dc == -1) {
2203		requested_dc = max_dc;
2204	} else if (enable_dc > max_dc && enable_dc <= 2) {
2205		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2206			      enable_dc, max_dc);
2207		requested_dc = max_dc;
2208	} else {
2209		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2210		requested_dc = max_dc;
2211	}
2212
2213	if (requested_dc > 1)
2214		mask |= DC_STATE_EN_UPTO_DC6;
2215	if (requested_dc > 0)
2216		mask |= DC_STATE_EN_UPTO_DC5;
2217
2218	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2219
2220	return mask;
2221}
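
/*
 * Worked example: on SKL/KBL with the default i915.enable_dc=-1 and power
 * well support left enabled, max_dc is 2 and this returns
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. On BXT with i915.enable_dc=0
 * only the always-allowed DC_STATE_EN_DC9 bit remains set.
 */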
2222
2223#define set_power_wells(power_domains, __power_wells) ({		\
2224	(power_domains)->power_wells = (__power_wells);			\
2225	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
2226})
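
/*
 * For example, set_power_wells(power_domains, hsw_power_wells) assigns the
 * array and sets power_well_count to ARRAY_SIZE(hsw_power_wells) == 2
 * ("always-on" and "display"). Because ARRAY_SIZE() is taken inside the
 * macro, callers must pass the array itself rather than a pointer to it.
 */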
2227
2228/**
2229 * intel_power_domains_init - initializes the power domain structures
2230 * @dev_priv: i915 device instance
2231 *
2232 * Initializes the power domain structures for @dev_priv depending upon the
2233 * supported platform.
2234 */
2235int intel_power_domains_init(struct drm_i915_private *dev_priv)
2236{
2237	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2238
2239	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
2240						     i915.disable_power_well);
2241	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
2242							    i915.enable_dc);
2243
2244	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
2245
2246	mutex_init(&power_domains->lock);
2247
2248	/*
2249	 * The enabling order will be from lower to higher indexed wells,
2250	 * the disabling order is reversed.
2251	 */
2252	if (IS_HASWELL(dev_priv)) {
2253		set_power_wells(power_domains, hsw_power_wells);
2254	} else if (IS_BROADWELL(dev_priv)) {
2255		set_power_wells(power_domains, bdw_power_wells);
2256	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
2257		set_power_wells(power_domains, skl_power_wells);
2258	} else if (IS_BROXTON(dev_priv)) {
2259		set_power_wells(power_domains, bxt_power_wells);
2260	} else if (IS_CHERRYVIEW(dev_priv)) {
2261		set_power_wells(power_domains, chv_power_wells);
2262	} else if (IS_VALLEYVIEW(dev_priv)) {
2263		set_power_wells(power_domains, vlv_power_wells);
2264	} else {
2265		set_power_wells(power_domains, i9xx_always_on_power_well);
2266	}
2267
2268	return 0;
2269}
2270
2271/**
2272 * intel_power_domains_fini - finalizes the power domain structures
2273 * @dev_priv: i915 device instance
2274 *
2275 * Finalizes the power domain structures for @dev_priv depending upon the
2276 * supported platform. This function also disables runtime pm and ensures that
2277 * the device stays powered up so that the driver can be reloaded.
2278 */
2279void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2280{
2281	struct device *kdev = &dev_priv->drm.pdev->dev;
2282
2283	/*
2284	 * The i915.ko module is still not prepared to be loaded when
2285	 * the power well is not enabled, so just enable it in case
2286	 * we're going to unload/reload.
2287	 * The following also reacquires the RPM reference the core passed
2288	 * to the driver during loading, which is dropped in
2289	 * intel_runtime_pm_enable(). We have to hand back the control of the
2290	 * device to the core with this reference held.
2291	 */
2292	intel_display_set_init_power(dev_priv, true);
2293
2294	/* Remove the refcount we took to keep the power wells always on. */
2295	if (!i915.disable_power_well)
2296		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2297
2298	/*
2299	 * Remove the refcount we took in intel_runtime_pm_enable() in case
2300	 * the platform doesn't support runtime PM.
2301	 */
2302	if (!HAS_RUNTIME_PM(dev_priv))
2303		pm_runtime_put(kdev);
2304}
2305
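/*
 * Synchronize every power well with the state left by the BIOS/firmware and
 * cache whether each well is currently enabled in power_well->hw_enabled.
 */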
2306static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2307{
2308	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2309	struct i915_power_well *power_well;
2310	int i;
2311
2312	mutex_lock(&power_domains->lock);
2313	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
2314		power_well->ops->sync_hw(dev_priv, power_well);
2315		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2316								     power_well);
2317	}
2318	mutex_unlock(&power_domains->lock);
2319}
2320
2321static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2322{
2323	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2324	POSTING_READ(DBUF_CTL);
2325
2326	udelay(10);
2327
2328	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2329		DRM_ERROR("DBuf power enable timeout\n");
2330}
2331
2332static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2333{
2334	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2335	POSTING_READ(DBUF_CTL);
2336
2337	udelay(10);
2338
2339	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2340		DRM_ERROR("DBuf power disable timeout!\n");
2341}
2342
2343static void skl_display_core_init(struct drm_i915_private *dev_priv,
2344				   bool resume)
2345{
2346	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2347	struct i915_power_well *well;
2348	uint32_t val;
2349
2350	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2351
2352	/* enable PCH reset handshake */
2353	val = I915_READ(HSW_NDE_RSTWRN_OPT);
2354	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
2355
2356	/* enable PG1 and Misc I/O */
2357	mutex_lock(&power_domains->lock);
2358
2359	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2360	intel_power_well_enable(dev_priv, well);
2361
2362	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2363	intel_power_well_enable(dev_priv, well);
2364
2365	mutex_unlock(&power_domains->lock);
2366
2367	skl_init_cdclk(dev_priv);
2368
2369	gen9_dbuf_enable(dev_priv);
2370
2371	if (resume && dev_priv->csr.dmc_payload)
2372		intel_csr_load_program(dev_priv);
2373}
2374
2375static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2376{
2377	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2378	struct i915_power_well *well;
2379
2380	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2381
2382	gen9_dbuf_disable(dev_priv);
2383
2384	skl_uninit_cdclk(dev_priv);
2385
2386	/* The spec doesn't call for removing the reset handshake flag */
2387	/* disable PG1 and Misc I/O */
2388
2389	mutex_lock(&power_domains->lock);
2390
2391	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2392	intel_power_well_disable(dev_priv, well);
2393
2394	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2395	intel_power_well_disable(dev_priv, well);
2396
2397	mutex_unlock(&power_domains->lock);
2398}
2399
2400void bxt_display_core_init(struct drm_i915_private *dev_priv,
2401			   bool resume)
2402{
2403	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2404	struct i915_power_well *well;
2405	uint32_t val;
2406
2407	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2408
2409	/*
2410	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
2411	 * or else the reset will hang because there is no PCH to respond.
2412	 * Move the handshake programming to the initialization sequence;
2413	 * previously this was left up to the BIOS.
2414	 */
2415	val = I915_READ(HSW_NDE_RSTWRN_OPT);
2416	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
2417	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2418
2419	/* Enable PG1 */
2420	mutex_lock(&power_domains->lock);
2421
2422	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2423	intel_power_well_enable(dev_priv, well);
2424
2425	mutex_unlock(&power_domains->lock);
2426
2427	bxt_init_cdclk(dev_priv);
2428
2429	gen9_dbuf_enable(dev_priv);
2430
2431	if (resume && dev_priv->csr.dmc_payload)
2432		intel_csr_load_program(dev_priv);
2433}
2434
2435void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2436{
2437	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2438	struct i915_power_well *well;
2439
2440	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2441
2442	gen9_dbuf_disable(dev_priv);
2443
2444	bxt_uninit_cdclk(dev_priv);
2445
2446	/* The spec doesn't call for removing the reset handshake flag */
2447
2448	/* Disable PG1 */
2449	mutex_lock(&power_domains->lock);
2450
2451	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2452	intel_power_well_disable(dev_priv, well);
2453
2454	mutex_unlock(&power_domains->lock);
2455}
2456
2457static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2458{
2459	struct i915_power_well *cmn_bc =
2460		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2461	struct i915_power_well *cmn_d =
2462		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
2463
2464	/*
2465	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
2466	 * workaround never ever read DISPLAY_PHY_CONTROL, and
2467	 * instead maintain a shadow copy ourselves. Use the actual
2468	 * power well state and lane status to reconstruct the
2469	 * expected initial value.
2470	 */
2471	dev_priv->chv_phy_control =
2472		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
2473		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
2474		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
2475		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
2476		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
2477
2478	/*
2479	 * If all lanes are disabled we leave the override disabled
2480	 * with all power down bits cleared to match the state we
2481	 * would use after disabling the port. Otherwise enable the
2482	 * override and set the lane powerdown bits according to the
2483	 * current lane status.
2484	 */
2485	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
2486		uint32_t status = I915_READ(DPLL(PIPE_A));
2487		unsigned int mask;
2488
2489		mask = status & DPLL_PORTB_READY_MASK;
2490		if (mask == 0xf)
2491			mask = 0x0;
2492		else
2493			dev_priv->chv_phy_control |=
2494				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
2495
2496		dev_priv->chv_phy_control |=
2497			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
2498
2499		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
2500		if (mask == 0xf)
2501			mask = 0x0;
2502		else
2503			dev_priv->chv_phy_control |=
2504				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
2505
2506		dev_priv->chv_phy_control |=
2507			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
2508
2509		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
2510
2511		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
2512	} else {
2513		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
2514	}
2515
2516	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
2517		uint32_t status = I915_READ(DPIO_PHY_STATUS);
2518		unsigned int mask;
2519
2520		mask = status & DPLL_PORTD_READY_MASK;
2521
2522		if (mask == 0xf)
2523			mask = 0x0;
2524		else
2525			dev_priv->chv_phy_control |=
2526				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
2527
2528		dev_priv->chv_phy_control |=
2529			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
2530
2531		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
2532
2533		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
2534	} else {
2535		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
2536	}
2537
2538	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2539
2540	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2541		      dev_priv->chv_phy_control);
2542}
2543
2544static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2545{
2546	struct i915_power_well *cmn =
2547		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2548	struct i915_power_well *disp2d =
2549		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
2550
2551	/* If the display might already be active, skip this */
2552	if (cmn->ops->is_enabled(dev_priv, cmn) &&
2553	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
2554	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
2555		return;
2556
2557	DRM_DEBUG_KMS("toggling display PHY side reset\n");
2558
2559	/* cmnlane needs DPLL registers */
2560	disp2d->ops->enable(dev_priv, disp2d);
2561
2562	/*
2563	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
2564	 * Need to assert and de-assert PHY SB reset by gating the
2565	 * common lane power, then un-gating it.
2566	 * Simply ungating isn't enough to reset the PHY enough to get
2567	 * ports and lanes running.
2568	 */
2569	cmn->ops->disable(dev_priv, cmn);
2570}
2571
2572/**
2573 * intel_power_domains_init_hw - initialize hardware power domain state
2574 * @dev_priv: i915 device instance
2575 * @resume: true if called from a resume code path
2576 *
2577 * This function initializes the hardware power domain state and enables all
2578 * power domains using intel_display_set_init_power().
2579 */
2580void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2581{
2582	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2583
2584	power_domains->initializing = true;
2585
2586	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
2587		skl_display_core_init(dev_priv, resume);
2588	} else if (IS_BROXTON(dev_priv)) {
2589		bxt_display_core_init(dev_priv, resume);
2590	} else if (IS_CHERRYVIEW(dev_priv)) {
2591		mutex_lock(&power_domains->lock);
2592		chv_phy_control_init(dev_priv);
2593		mutex_unlock(&power_domains->lock);
2594	} else if (IS_VALLEYVIEW(dev_priv)) {
2595		mutex_lock(&power_domains->lock);
2596		vlv_cmnlane_wa(dev_priv);
2597		mutex_unlock(&power_domains->lock);
2598	}
2599
2600	/* For now, we need the power well to be always enabled. */
2601	intel_display_set_init_power(dev_priv, true);
2602	/* Keep the power wells always on if the user disabled power well support. */
2603	if (!i915.disable_power_well)
2604		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2605	intel_power_domains_sync_hw(dev_priv);
2606	power_domains->initializing = false;
2607}
2608
2609/**
2610 * intel_power_domains_suspend - suspend power domain state
2611 * @dev_priv: i915 device instance
2612 *
2613 * This function prepares the hardware power domain state before entering
2614 * system suspend. It must be paired with intel_power_domains_init_hw().
2615 */
2616void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2617{
2618	/*
2619	 * Even if power well support was disabled we still want to disable
2620	 * power wells while we are system suspended.
2621	 */
2622	if (!i915.disable_power_well)
2623		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2624
2625	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2626		skl_display_core_uninit(dev_priv);
2627	else if (IS_BROXTON(dev_priv))
2628		bxt_display_core_uninit(dev_priv);
2629}
2630
2631/**
2632 * intel_runtime_pm_get - grab a runtime pm reference
2633 * @dev_priv: i915 device instance
2634 *
2635 * This function grabs a device-level runtime pm reference (mostly used for GEM
2636 * code to ensure the GTT or GT is on) and ensures that it is powered up.
2637 *
2638 * Any runtime pm reference obtained by this function must have a symmetric
2639 * call to intel_runtime_pm_put() to release the reference again.
2640 */
2641void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2642{
2643	struct pci_dev *pdev = dev_priv->drm.pdev;
2644	struct device *kdev = &pdev->dev;
2645
2646	pm_runtime_get_sync(kdev);
2647
2648	atomic_inc(&dev_priv->pm.wakeref_count);
2649	assert_rpm_wakelock_held(dev_priv);
2650}
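
/*
 * Usage sketch (illustrative only, not driver code): device-level runtime
 * pm references follow the same symmetric get/put pattern as the display
 * power domains above.
 */
static void __maybe_unused example_runtime_pm_usage(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	/* ... touch hardware that requires the device to be awake ... */

	intel_runtime_pm_put(dev_priv);
}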
2651
2652/**
2653 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2654 * @dev_priv: i915 device instance
2655 *
2656 * This function grabs a device-level runtime pm reference if the device is
2657 * already in use and ensures that it is powered up.
2658 *
2659 * Any runtime pm reference obtained by this function must have a symmetric
2660 * call to intel_runtime_pm_put() to release the reference again.
2661 */
2662bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2663{
2664	struct pci_dev *pdev = dev_priv->drm.pdev;
2665	struct device *kdev = &pdev->dev;
2666
2667	if (IS_ENABLED(CONFIG_PM)) {
2668		int ret = pm_runtime_get_if_in_use(kdev);
2669
2670		/*
2671		 * In cases where runtime PM is disabled by the RPM core and we
2672		 * get an -EINVAL return value we are not supposed to call this
2673		 * function, since the power state is undefined. This applies at
2674		 * the moment to the late/early system suspend/resume handlers.
2675		 */
2676		WARN_ON_ONCE(ret < 0);
2677		if (ret <= 0)
2678			return false;
2679	}
2680
2681	atomic_inc(&dev_priv->pm.wakeref_count);
2682	assert_rpm_wakelock_held(dev_priv);
2683
2684	return true;
2685}
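
/*
 * Usage sketch (illustrative only, not driver code): opportunistic paths
 * that must never resume the device bail out when it is suspended.
 */
static void __maybe_unused example_runtime_pm_peek(struct drm_i915_private *dev_priv)
{
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return; /* device suspended, nothing to sample */

	/* ... sample hardware state without forcing a resume ... */

	intel_runtime_pm_put(dev_priv);
}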
2686
2687/**
2688 * intel_runtime_pm_get_noresume - grab a runtime pm reference
2689 * @dev_priv: i915 device instance
2690 *
2691 * This function grabs a device-level runtime pm reference (mostly used for GEM
2692 * code to ensure the GTT or GT is on).
2693 *
2694 * It will _not_ power up the device but instead only check that it's powered
2695 * on.  Therefore it is only valid to call this function from contexts where
2696 * the device is known to be powered up and where trying to power it up would
2697 * result in hilarity and deadlocks. That pretty much means only the system
2698 * suspend/resume code where this is used to grab runtime pm references for
2699 * delayed setup down in work items.
2700 *
2701 * Any runtime pm reference obtained by this function must have a symmetric
2702 * call to intel_runtime_pm_put() to release the reference again.
2703 */
2704void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2705{
2706	struct pci_dev *pdev = dev_priv->drm.pdev;
2707	struct device *kdev = &pdev->dev;
2708
2709	assert_rpm_wakelock_held(dev_priv);
2710	pm_runtime_get_noresume(kdev);
2711
2712	atomic_inc(&dev_priv->pm.wakeref_count);
2713}
2714
2715/**
2716 * intel_runtime_pm_put - release a runtime pm reference
2717 * @dev_priv: i915 device instance
2718 *
2719 * This function drops the device-level runtime pm reference obtained by
2720 * intel_runtime_pm_get() and might power down the corresponding
2721 * hardware block right away if this is the last reference.
2722 */
2723void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2724{
2725	struct pci_dev *pdev = dev_priv->drm.pdev;
2726	struct device *kdev = &pdev->dev;
2727
2728	assert_rpm_wakelock_held(dev_priv);
2729	atomic_dec(&dev_priv->pm.wakeref_count);
2730
2731	pm_runtime_mark_last_busy(kdev);
2732	pm_runtime_put_autosuspend(kdev);
2733}
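
/*
 * Note that the put above does not suspend the device synchronously: it
 * marks the device busy and relies on the autosuspend delay configured in
 * intel_runtime_pm_enable() below, so short get/put bursts don't bounce
 * the device in and out of runtime suspend.
 */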
2734
2735/**
2736 * intel_runtime_pm_enable - enable runtime pm
2737 * @dev_priv: i915 device instance
2738 *
2739 * This function enables runtime pm at the end of the driver load sequence.
2740 *
2741 * Note that this function does not currently enable runtime pm for the
2742 * subordinate display power domains. That is only done on the first modeset
2743 * using intel_display_set_init_power().
2744 */
2745void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2746{
2747	struct pci_dev *pdev = dev_priv->drm.pdev;
2748	struct device *kdev = &pdev->dev;
2749
2750	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
2751	pm_runtime_mark_last_busy(kdev);
2752
2753	/*
2754	 * Take a permanent reference to disable the RPM functionality and drop
2755	 * it only when unloading the driver. Use the low level get/put helpers,
2756	 * so the driver's own RPM reference tracking asserts also work on
2757	 * platforms without RPM support.
2758	 */
2759	if (!HAS_RUNTIME_PM(dev_priv)) {
2760		pm_runtime_dont_use_autosuspend(kdev);
2761		pm_runtime_get_sync(kdev);
2762	} else {
2763		pm_runtime_use_autosuspend(kdev);
2764	}
2765
2766	/*
2767	 * The core calls the driver load handler with an RPM reference held.
2768	 * We drop that here and will reacquire it during unloading in
2769	 * intel_power_domains_fini().
2770	 */
2771	pm_runtime_put_autosuspend(kdev);
2772}