v5.14.15
   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include "g4x_dp.h"
   7#include "i915_drv.h"
   8#include "intel_de.h"
   9#include "intel_display_types.h"
  10#include "intel_dp.h"
  11#include "intel_dpll.h"
  12#include "intel_pps.h"
  13
  14static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
  15				      enum pipe pipe);
  16
  17static void pps_init_delays(struct intel_dp *intel_dp);
  18static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
  19
  20intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
  21{
  22	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  23	intel_wakeref_t wakeref;
  24
  25	/*
   26	 * See intel_pps_reset_all() for why we need a power domain reference here.
  27	 */
  28	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
  29	mutex_lock(&dev_priv->pps_mutex);
  30
  31	return wakeref;
  32}
  33
  34intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
  35				 intel_wakeref_t wakeref)
  36{
  37	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  38
  39	mutex_unlock(&dev_priv->pps_mutex);
  40	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
  41
  42	return 0;
  43}
  44
  45static void
  46vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  47{
  48	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  49	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  50	enum pipe pipe = intel_dp->pps.pps_pipe;
  51	bool pll_enabled, release_cl_override = false;
  52	enum dpio_phy phy = DPIO_PHY(pipe);
  53	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
  54	u32 DP;
  55
  56	if (drm_WARN(&dev_priv->drm,
  57		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
  58		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
  59		     pipe_name(pipe), dig_port->base.base.base.id,
  60		     dig_port->base.base.name))
  61		return;
  62
  63	drm_dbg_kms(&dev_priv->drm,
  64		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
  65		    pipe_name(pipe), dig_port->base.base.base.id,
  66		    dig_port->base.base.name);
  67
  68	/* Preserve the BIOS-computed detected bit. This is
  69	 * supposed to be read-only.
  70	 */
  71	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
  72	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
  73	DP |= DP_PORT_WIDTH(1);
  74	DP |= DP_LINK_TRAIN_PAT_1;
  75
  76	if (IS_CHERRYVIEW(dev_priv))
  77		DP |= DP_PIPE_SEL_CHV(pipe);
  78	else
  79		DP |= DP_PIPE_SEL(pipe);
  80
  81	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
  82
  83	/*
  84	 * The DPLL for the pipe must be enabled for this to work.
   85	 * So enable it temporarily if it's not already enabled.
  86	 */
  87	if (!pll_enabled) {
  88		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
  89			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
  90
  91		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
  92			drm_err(&dev_priv->drm,
  93				"Failed to force on pll for pipe %c!\n",
  94				pipe_name(pipe));
  95			return;
  96		}
  97	}
  98
  99	/*
 100	 * Similar magic as in intel_dp_enable_port().
 101	 * We _must_ do this port enable + disable trick
 102	 * to make this power sequencer lock onto the port.
 103	 * Otherwise even VDD force bit won't work.
 104	 */
 105	intel_de_write(dev_priv, intel_dp->output_reg, DP);
 106	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 107
 108	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
 109	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 110
 111	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
 112	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 113
 114	if (!pll_enabled) {
 115		vlv_force_pll_off(dev_priv, pipe);
 116
 117		if (release_cl_override)
 118			chv_phy_powergate_ch(dev_priv, phy, ch, false);
 119	}
 120}
 121
 122static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
 123{
 124	struct intel_encoder *encoder;
 125	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
 126
 127	/*
  128	 * We don't have a power sequencer currently.
 129	 * Pick one that's not used by other ports.
 130	 */
 131	for_each_intel_dp(&dev_priv->drm, encoder) {
 132		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 133
 134		if (encoder->type == INTEL_OUTPUT_EDP) {
 135			drm_WARN_ON(&dev_priv->drm,
 136				    intel_dp->pps.active_pipe != INVALID_PIPE &&
 137				    intel_dp->pps.active_pipe !=
 138				    intel_dp->pps.pps_pipe);
 139
 140			if (intel_dp->pps.pps_pipe != INVALID_PIPE)
 141				pipes &= ~(1 << intel_dp->pps.pps_pipe);
 142		} else {
 143			drm_WARN_ON(&dev_priv->drm,
 144				    intel_dp->pps.pps_pipe != INVALID_PIPE);
 145
 146			if (intel_dp->pps.active_pipe != INVALID_PIPE)
 147				pipes &= ~(1 << intel_dp->pps.active_pipe);
 148		}
 149	}
 150
 151	if (pipes == 0)
 152		return INVALID_PIPE;
 153
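	/*
	 * Illustrative example: if only pipe B is still free, pipes == 1 << PIPE_B
	 * and ffs(pipes) - 1 yields PIPE_B.
	 */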
 154	return ffs(pipes) - 1;
 155}
 156
 157static enum pipe
 158vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 159{
 160	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 161	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 162	enum pipe pipe;
 163
 164	lockdep_assert_held(&dev_priv->pps_mutex);
 165
 166	/* We should never land here with regular DP ports */
 167	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 168
 169	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
 170		    intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
 171
 172	if (intel_dp->pps.pps_pipe != INVALID_PIPE)
 173		return intel_dp->pps.pps_pipe;
 174
 175	pipe = vlv_find_free_pps(dev_priv);
 176
 177	/*
 178	 * Didn't find one. This should not happen since there
 179	 * are two power sequencers and up to two eDP ports.
 180	 */
 181	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
 182		pipe = PIPE_A;
 183
 184	vlv_steal_power_sequencer(dev_priv, pipe);
 185	intel_dp->pps.pps_pipe = pipe;
 186
 187	drm_dbg_kms(&dev_priv->drm,
 188		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
 189		    pipe_name(intel_dp->pps.pps_pipe),
 190		    dig_port->base.base.base.id,
 191		    dig_port->base.base.name);
 192
 193	/* init power sequencer on this pipe and port */
 194	pps_init_delays(intel_dp);
 195	pps_init_registers(intel_dp, true);
 196
 197	/*
 198	 * Even vdd force doesn't work until we've made
 199	 * the power sequencer lock in on the port.
 200	 */
 201	vlv_power_sequencer_kick(intel_dp);
 202
 203	return intel_dp->pps.pps_pipe;
 204}
 205
 206static int
 207bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 208{
 209	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 210	int backlight_controller = dev_priv->vbt.backlight.controller;
 211
 212	lockdep_assert_held(&dev_priv->pps_mutex);
 213
 214	/* We should never land here with regular DP ports */
 215	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 216
 217	if (!intel_dp->pps.pps_reset)
 218		return backlight_controller;
 219
 220	intel_dp->pps.pps_reset = false;
 221
 222	/*
 223	 * Only the HW needs to be reprogrammed, the SW state is fixed and
 224	 * has been setup during connector init.
 225	 */
 226	pps_init_registers(intel_dp, false);
 227
 228	return backlight_controller;
 229}
 230
 231typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
 232			       enum pipe pipe);
 233
 234static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
 235			       enum pipe pipe)
 236{
 237	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
 238}
 239
 240static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
 241				enum pipe pipe)
 242{
 243	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
 244}
 245
 246static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
 247			 enum pipe pipe)
 248{
 249	return true;
 250}
 251
 252static enum pipe
 253vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 254		     enum port port,
 255		     vlv_pipe_check pipe_check)
 256{
 257	enum pipe pipe;
 258
 259	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
 260		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
 261			PANEL_PORT_SELECT_MASK;
 262
 263		if (port_sel != PANEL_PORT_SELECT_VLV(port))
 264			continue;
 265
 266		if (!pipe_check(dev_priv, pipe))
 267			continue;
 268
 269		return pipe;
 270	}
 271
 272	return INVALID_PIPE;
 273}
 274
 275static void
 276vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 277{
 278	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 279	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 280	enum port port = dig_port->base.port;
 281
 282	lockdep_assert_held(&dev_priv->pps_mutex);
 283
 284	/* try to find a pipe with this port selected */
 285	/* first pick one where the panel is on */
 286	intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 287						      vlv_pipe_has_pp_on);
 288	/* didn't find one? pick one where vdd is on */
 289	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
 290		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 291							      vlv_pipe_has_vdd_on);
 292	/* didn't find one? pick one with just the correct port */
 293	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
 294		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 295							      vlv_pipe_any);
 296
 297	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
 298	if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
 299		drm_dbg_kms(&dev_priv->drm,
 300			    "no initial power sequencer for [ENCODER:%d:%s]\n",
 301			    dig_port->base.base.base.id,
 302			    dig_port->base.base.name);
 303		return;
 304	}
 305
 306	drm_dbg_kms(&dev_priv->drm,
 307		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
 308		    dig_port->base.base.base.id,
 309		    dig_port->base.base.name,
 310		    pipe_name(intel_dp->pps.pps_pipe));
 311}
 312
 313void intel_pps_reset_all(struct drm_i915_private *dev_priv)
 314{
 315	struct intel_encoder *encoder;
 316
 317	if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
 318		return;
 319
 320	if (!HAS_DISPLAY(dev_priv))
 321		return;
 322
 323	/*
 324	 * We can't grab pps_mutex here due to deadlock with power_domain
 325	 * mutex when power_domain functions are called while holding pps_mutex.
 326	 * That also means that in order to use pps_pipe the code needs to
 327	 * hold both a power domain reference and pps_mutex, and the power domain
 328	 * reference get/put must be done while _not_ holding pps_mutex.
  329	 * pps_{lock,unlock}() do these steps in the correct order, so they
  330	 * should always be used.
 331	 */
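	/*
	 * Illustrative sketch (assuming the with_intel_pps_lock() helper used
	 * elsewhere in this file): a caller that needs the PPS state would
	 * typically do
	 *
	 *	intel_wakeref_t wakeref;
	 *
	 *	with_intel_pps_lock(intel_dp, wakeref)
	 *		wait_panel_power_cycle(intel_dp);
	 *
	 * which takes the power domain reference and pps_mutex in the right
	 * order and releases them in reverse.
	 */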
 332
 333	for_each_intel_dp(&dev_priv->drm, encoder) {
 334		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 335
 336		drm_WARN_ON(&dev_priv->drm,
 337			    intel_dp->pps.active_pipe != INVALID_PIPE);
 338
 339		if (encoder->type != INTEL_OUTPUT_EDP)
 340			continue;
 341
 342		if (DISPLAY_VER(dev_priv) >= 9)
 343			intel_dp->pps.pps_reset = true;
 344		else
 345			intel_dp->pps.pps_pipe = INVALID_PIPE;
 346	}
 347}
 348
 349struct pps_registers {
 350	i915_reg_t pp_ctrl;
 351	i915_reg_t pp_stat;
 352	i915_reg_t pp_on;
 353	i915_reg_t pp_off;
 354	i915_reg_t pp_div;
 355};
 356
 357static void intel_pps_get_registers(struct intel_dp *intel_dp,
 358				    struct pps_registers *regs)
 359{
 360	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 361	int pps_idx = 0;
 362
 363	memset(regs, 0, sizeof(*regs));
 364
 365	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 366		pps_idx = bxt_power_sequencer_idx(intel_dp);
 367	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 368		pps_idx = vlv_power_sequencer_pipe(intel_dp);
 369
 370	regs->pp_ctrl = PP_CONTROL(pps_idx);
 371	regs->pp_stat = PP_STATUS(pps_idx);
 372	regs->pp_on = PP_ON_DELAYS(pps_idx);
 373	regs->pp_off = PP_OFF_DELAYS(pps_idx);
 374
 375	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
 376	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
 377	    INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
 378		regs->pp_div = INVALID_MMIO_REG;
 379	else
 380		regs->pp_div = PP_DIVISOR(pps_idx);
 381}
 382
 383static i915_reg_t
 384_pp_ctrl_reg(struct intel_dp *intel_dp)
 385{
 386	struct pps_registers regs;
 387
 388	intel_pps_get_registers(intel_dp, &regs);
 389
 390	return regs.pp_ctrl;
 391}
 392
 393static i915_reg_t
 394_pp_stat_reg(struct intel_dp *intel_dp)
 395{
 396	struct pps_registers regs;
 397
 398	intel_pps_get_registers(intel_dp, &regs);
 399
 400	return regs.pp_stat;
 401}
 402
 403static bool edp_have_panel_power(struct intel_dp *intel_dp)
 404{
 405	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 406
 407	lockdep_assert_held(&dev_priv->pps_mutex);
 408
 409	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 410	    intel_dp->pps.pps_pipe == INVALID_PIPE)
 411		return false;
 412
 413	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
 414}
 415
 416static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 417{
 418	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 419
 420	lockdep_assert_held(&dev_priv->pps_mutex);
 421
 422	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 423	    intel_dp->pps.pps_pipe == INVALID_PIPE)
 424		return false;
 425
 426	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 427}
 428
 429void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
 430{
 431	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 432
 433	if (!intel_dp_is_edp(intel_dp))
 434		return;
 435
 436	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
 437		drm_WARN(&dev_priv->drm, 1,
 438			 "eDP powered off while attempting aux channel communication.\n");
 439		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
 440			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
 441			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
 442	}
 443}
 444
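/*
 * PP_STATUS mask/value pairs consumed by wait_panel_status() below: each pair
 * describes the idle state the panel power sequencer must reach after a
 * panel power on, power off, or power cycle request.
 */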
 445#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
 446#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
 447
 448#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
 449#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
 450
 451#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 452#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 453
 454static void intel_pps_verify_state(struct intel_dp *intel_dp);
 455
 456static void wait_panel_status(struct intel_dp *intel_dp,
 457				       u32 mask,
 458				       u32 value)
 459{
 460	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 461	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 462
 463	lockdep_assert_held(&dev_priv->pps_mutex);
 464
 465	intel_pps_verify_state(intel_dp);
 466
 467	pp_stat_reg = _pp_stat_reg(intel_dp);
 468	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 469
 470	drm_dbg_kms(&dev_priv->drm,
 471		    "mask %08x value %08x status %08x control %08x\n",
 472		    mask, value,
 473		    intel_de_read(dev_priv, pp_stat_reg),
 474		    intel_de_read(dev_priv, pp_ctrl_reg));
 475
 476	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
 477				       mask, value, 5000))
 478		drm_err(&dev_priv->drm,
 479			"Panel status timeout: status %08x control %08x\n",
 480			intel_de_read(dev_priv, pp_stat_reg),
 481			intel_de_read(dev_priv, pp_ctrl_reg));
 482
 483	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
 484}
 485
 486static void wait_panel_on(struct intel_dp *intel_dp)
 487{
 488	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 489
 490	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
 491	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 492}
 493
 494static void wait_panel_off(struct intel_dp *intel_dp)
 495{
 496	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 497
 498	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
 499	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
 500}
 501
 502static void wait_panel_power_cycle(struct intel_dp *intel_dp)
 503{
 504	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 505	ktime_t panel_power_on_time;
 506	s64 panel_power_off_duration;
 507
 508	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
 509
  510	/* take the difference of current time and panel power off time
  511	 * and then make the panel wait for t11_t12 if needed. */
 512	panel_power_on_time = ktime_get_boottime();
 513	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
 514
 515	/* When we disable the VDD override bit last we have to do the manual
 516	 * wait. */
 517	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
 518		wait_remaining_ms_from_jiffies(jiffies,
 519				       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
 520
 521	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 522}
 523
 524void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
 525{
 526	intel_wakeref_t wakeref;
 527
 528	if (!intel_dp_is_edp(intel_dp))
 529		return;
 530
 531	with_intel_pps_lock(intel_dp, wakeref)
 532		wait_panel_power_cycle(intel_dp);
 533}
 534
 535static void wait_backlight_on(struct intel_dp *intel_dp)
 536{
 537	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
 538				       intel_dp->pps.backlight_on_delay);
 539}
 540
 541static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 542{
 543	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
 544				       intel_dp->pps.backlight_off_delay);
 545}
 546
 547/* Read the current pp_control value, unlocking the register if it
 548 * is locked
 549 */
 550
 551static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 552{
 553	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 554	u32 control;
 555
 556	lockdep_assert_held(&dev_priv->pps_mutex);
 557
 558	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
 559	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
 560			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
 561		control &= ~PANEL_UNLOCK_MASK;
 562		control |= PANEL_UNLOCK_REGS;
 563	}
 564	return control;
 565}
 566
 567/*
 568 * Must be paired with intel_pps_vdd_off_unlocked().
 569 * Must hold pps_mutex around the whole on/off sequence.
 570 * Can be nested with intel_pps_vdd_{on,off}() calls.
 571 */
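/*
 * Illustrative sketch (not part of the driver): a caller that already holds
 * pps_mutex, such as an AUX transfer path, would typically bracket its panel
 * accesses as
 *
 *	vdd = intel_pps_vdd_on_unlocked(intel_dp);
 *	... talk to the panel ...
 *	if (vdd)
 *		intel_pps_vdd_off_unlocked(intel_dp, false);
 *
 * using the return value to decide whether it was the call that actually
 * forced VDD on and therefore owes the matching off.
 */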
 572bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
 573{
 574	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 575	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 576	u32 pp;
 577	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 578	bool need_to_disable = !intel_dp->pps.want_panel_vdd;
 579
 580	lockdep_assert_held(&dev_priv->pps_mutex);
 581
 582	if (!intel_dp_is_edp(intel_dp))
 583		return false;
 584
 585	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
 586	intel_dp->pps.want_panel_vdd = true;
 587
 588	if (edp_have_panel_vdd(intel_dp))
 589		return need_to_disable;
 590
 591	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
 592	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
 593							    intel_aux_power_domain(dig_port));
 594
 595	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
 596		    dig_port->base.base.base.id,
 597		    dig_port->base.base.name);
 598
 599	if (!edp_have_panel_power(intel_dp))
 600		wait_panel_power_cycle(intel_dp);
 601
 602	pp = ilk_get_pp_control(intel_dp);
 603	pp |= EDP_FORCE_VDD;
 604
 605	pp_stat_reg = _pp_stat_reg(intel_dp);
 606	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 607
 608	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 609	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 610	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 611		    intel_de_read(dev_priv, pp_stat_reg),
 612		    intel_de_read(dev_priv, pp_ctrl_reg));
 613	/*
 614	 * If the panel wasn't on, delay before accessing aux channel
 615	 */
 616	if (!edp_have_panel_power(intel_dp)) {
 617		drm_dbg_kms(&dev_priv->drm,
 618			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
 619			    dig_port->base.base.base.id,
 620			    dig_port->base.base.name);
 621		msleep(intel_dp->pps.panel_power_up_delay);
 622	}
 623
 624	return need_to_disable;
 625}
 626
 627/*
 628 * Must be paired with intel_pps_off().
 629 * Nested calls to these functions are not allowed since
 630 * we drop the lock. Caller must use some higher level
 631 * locking to prevent nested calls from other threads.
 632 */
 633void intel_pps_vdd_on(struct intel_dp *intel_dp)
 634{
 635	intel_wakeref_t wakeref;
 636	bool vdd;
 637
 638	if (!intel_dp_is_edp(intel_dp))
 639		return;
 640
 641	vdd = false;
 642	with_intel_pps_lock(intel_dp, wakeref)
 643		vdd = intel_pps_vdd_on_unlocked(intel_dp);
 644	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
 645			dp_to_dig_port(intel_dp)->base.base.base.id,
 646			dp_to_dig_port(intel_dp)->base.base.name);
 647}
 648
 649static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
 650{
 651	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 652	struct intel_digital_port *dig_port =
 653		dp_to_dig_port(intel_dp);
 654	u32 pp;
 655	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 656
 657	lockdep_assert_held(&dev_priv->pps_mutex);
 658
 659	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
 660
 661	if (!edp_have_panel_vdd(intel_dp))
 662		return;
 663
 664	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
 665		    dig_port->base.base.base.id,
 666		    dig_port->base.base.name);
 667
 668	pp = ilk_get_pp_control(intel_dp);
 669	pp &= ~EDP_FORCE_VDD;
 670
 671	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 672	pp_stat_reg = _pp_stat_reg(intel_dp);
 673
 674	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 675	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 676
 677	/* Make sure sequencer is idle before allowing subsequent activity */
 678	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 679		    intel_de_read(dev_priv, pp_stat_reg),
 680		    intel_de_read(dev_priv, pp_ctrl_reg));
 681
 682	if ((pp & PANEL_POWER_ON) == 0)
 683		intel_dp->pps.panel_power_off_time = ktime_get_boottime();
 684
 685	intel_display_power_put(dev_priv,
 686				intel_aux_power_domain(dig_port),
 687				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
 688}
 689
 690void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
 691{
 692	intel_wakeref_t wakeref;
 693
 694	if (!intel_dp_is_edp(intel_dp))
 695		return;
 696
 697	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
 698	/*
 699	 * vdd might still be enabled due to the delayed vdd off.
 700	 * Make sure vdd is actually turned off here.
 701	 */
 702	with_intel_pps_lock(intel_dp, wakeref)
 703		intel_pps_vdd_off_sync_unlocked(intel_dp);
 704}
 705
 706static void edp_panel_vdd_work(struct work_struct *__work)
 707{
 708	struct intel_pps *pps = container_of(to_delayed_work(__work),
 709					     struct intel_pps, panel_vdd_work);
 710	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
 711	intel_wakeref_t wakeref;
 712
 713	with_intel_pps_lock(intel_dp, wakeref) {
 714		if (!intel_dp->pps.want_panel_vdd)
 715			intel_pps_vdd_off_sync_unlocked(intel_dp);
 716	}
 717}
 718
 719static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
 720{
 721	unsigned long delay;
 722
 723	/*
 724	 * Queue the timer to fire a long time from now (relative to the power
 725	 * down delay) to keep the panel power up across a sequence of
 726	 * operations.
 727	 */
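	/*
	 * For example (illustrative numbers): with a 600 ms power cycle delay
	 * the delayed vdd off work is scheduled roughly 3 seconds out.
	 */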
 728	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
 729	schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
 730}
 731
 732/*
 733 * Must be paired with edp_panel_vdd_on().
 734 * Must hold pps_mutex around the whole on/off sequence.
 735 * Can be nested with intel_pps_vdd_{on,off}() calls.
 736 */
 737void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
 738{
 739	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 740
 741	lockdep_assert_held(&dev_priv->pps_mutex);
 742
 743	if (!intel_dp_is_edp(intel_dp))
 744		return;
 745
 746	I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
 747			dp_to_dig_port(intel_dp)->base.base.base.id,
 748			dp_to_dig_port(intel_dp)->base.base.name);
 749
 750	intel_dp->pps.want_panel_vdd = false;
 751
 752	if (sync)
 753		intel_pps_vdd_off_sync_unlocked(intel_dp);
 754	else
 755		edp_panel_vdd_schedule_off(intel_dp);
 756}
 757
 758void intel_pps_on_unlocked(struct intel_dp *intel_dp)
 759{
 760	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 761	u32 pp;
 762	i915_reg_t pp_ctrl_reg;
 763
 764	lockdep_assert_held(&dev_priv->pps_mutex);
 765
 766	if (!intel_dp_is_edp(intel_dp))
 767		return;
 768
 769	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
 770		    dp_to_dig_port(intel_dp)->base.base.base.id,
 771		    dp_to_dig_port(intel_dp)->base.base.name);
 772
 773	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
 774		     "[ENCODER:%d:%s] panel power already on\n",
 775		     dp_to_dig_port(intel_dp)->base.base.base.id,
 776		     dp_to_dig_port(intel_dp)->base.base.name))
 777		return;
 778
 779	wait_panel_power_cycle(intel_dp);
 780
 781	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 782	pp = ilk_get_pp_control(intel_dp);
 783	if (IS_IRONLAKE(dev_priv)) {
 784		/* ILK workaround: disable reset around power sequence */
 785		pp &= ~PANEL_POWER_RESET;
 786		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 787		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 788	}
 789
 790	pp |= PANEL_POWER_ON;
 791	if (!IS_IRONLAKE(dev_priv))
 792		pp |= PANEL_POWER_RESET;
 793
 794	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 795	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 796
 797	wait_panel_on(intel_dp);
 798	intel_dp->pps.last_power_on = jiffies;
 799
 800	if (IS_IRONLAKE(dev_priv)) {
 801		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
 802		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 803		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 804	}
 805}
 806
 807void intel_pps_on(struct intel_dp *intel_dp)
 808{
 809	intel_wakeref_t wakeref;
 810
 811	if (!intel_dp_is_edp(intel_dp))
 812		return;
 813
 814	with_intel_pps_lock(intel_dp, wakeref)
 815		intel_pps_on_unlocked(intel_dp);
 816}
 817
 818void intel_pps_off_unlocked(struct intel_dp *intel_dp)
 819{
 820	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 821	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 822	u32 pp;
 823	i915_reg_t pp_ctrl_reg;
 824
 825	lockdep_assert_held(&dev_priv->pps_mutex);
 826
 827	if (!intel_dp_is_edp(intel_dp))
 828		return;
 829
 830	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
 831		    dig_port->base.base.base.id, dig_port->base.base.name);
 832
 833	drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
 834		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
 835		 dig_port->base.base.base.id, dig_port->base.base.name);
 836
 837	pp = ilk_get_pp_control(intel_dp);
 838	/* We need to switch off panel power _and_ force vdd, for otherwise some
 839	 * panels get very unhappy and cease to work. */
 840	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
 841		EDP_BLC_ENABLE);
 842
 843	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 844
 845	intel_dp->pps.want_panel_vdd = false;
 846
 847	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 848	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 849
 850	wait_panel_off(intel_dp);
 851	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
 852
 853	/* We got a reference when we enabled the VDD. */
 854	intel_display_power_put(dev_priv,
 855				intel_aux_power_domain(dig_port),
 856				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
 857}
 858
 859void intel_pps_off(struct intel_dp *intel_dp)
 860{
 861	intel_wakeref_t wakeref;
 862
 863	if (!intel_dp_is_edp(intel_dp))
 864		return;
 865
 866	with_intel_pps_lock(intel_dp, wakeref)
 867		intel_pps_off_unlocked(intel_dp);
 868}
 869
 870/* Enable backlight in the panel power control. */
 871void intel_pps_backlight_on(struct intel_dp *intel_dp)
 872{
 873	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 874	intel_wakeref_t wakeref;
 875
 876	/*
 877	 * If we enable the backlight right away following a panel power
 878	 * on, we may see slight flicker as the panel syncs with the eDP
 879	 * link.  So delay a bit to make sure the image is solid before
 880	 * allowing it to appear.
 881	 */
 882	wait_backlight_on(intel_dp);
 883
 884	with_intel_pps_lock(intel_dp, wakeref) {
 885		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 886		u32 pp;
 887
 888		pp = ilk_get_pp_control(intel_dp);
 889		pp |= EDP_BLC_ENABLE;
 890
 891		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 892		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 893	}
 894}
 895
 896/* Disable backlight in the panel power control. */
 897void intel_pps_backlight_off(struct intel_dp *intel_dp)
 898{
 899	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 900	intel_wakeref_t wakeref;
 901
 902	if (!intel_dp_is_edp(intel_dp))
 903		return;
 904
 905	with_intel_pps_lock(intel_dp, wakeref) {
 906		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 907		u32 pp;
 908
 909		pp = ilk_get_pp_control(intel_dp);
 910		pp &= ~EDP_BLC_ENABLE;
 911
 912		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 913		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 914	}
 915
 916	intel_dp->pps.last_backlight_off = jiffies;
 917	edp_wait_backlight_off(intel_dp);
 918}
 919
 920/*
 921 * Hook for controlling the panel power control backlight through the bl_power
 922 * sysfs attribute. Take care to handle multiple calls.
 923 */
 924void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
 925{
 926	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 927	struct intel_dp *intel_dp = intel_attached_dp(connector);
 928	intel_wakeref_t wakeref;
 929	bool is_enabled;
 930
 931	is_enabled = false;
 932	with_intel_pps_lock(intel_dp, wakeref)
 933		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
 934	if (is_enabled == enable)
 935		return;
 936
 937	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
 938		    enable ? "enable" : "disable");
 939
 940	if (enable)
 941		intel_pps_backlight_on(intel_dp);
 942	else
 943		intel_pps_backlight_off(intel_dp);
 944}
 945
 946static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 947{
 948	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 949	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 950	enum pipe pipe = intel_dp->pps.pps_pipe;
 951	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
 952
 953	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
 954
 955	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
 956		return;
 957
 958	intel_pps_vdd_off_sync_unlocked(intel_dp);
 959
 960	/*
 961	 * VLV seems to get confused when multiple power sequencers
 962	 * have the same port selected (even if only one has power/vdd
  963	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
  964	 * CHV on the other hand doesn't seem to mind having the same port
 965	 * selected in multiple power sequencers, but let's clear the
 966	 * port select always when logically disconnecting a power sequencer
 967	 * from a port.
 968	 */
 969	drm_dbg_kms(&dev_priv->drm,
 970		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
 971		    pipe_name(pipe), dig_port->base.base.base.id,
 972		    dig_port->base.base.name);
 973	intel_de_write(dev_priv, pp_on_reg, 0);
 974	intel_de_posting_read(dev_priv, pp_on_reg);
 975
 976	intel_dp->pps.pps_pipe = INVALID_PIPE;
 977}
 978
 979static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
 980				      enum pipe pipe)
 981{
 982	struct intel_encoder *encoder;
 983
 984	lockdep_assert_held(&dev_priv->pps_mutex);
 985
 986	for_each_intel_dp(&dev_priv->drm, encoder) {
 987		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 988
 989		drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
 990			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
 991			 pipe_name(pipe), encoder->base.base.id,
 992			 encoder->base.name);
 993
 994		if (intel_dp->pps.pps_pipe != pipe)
 995			continue;
 996
 997		drm_dbg_kms(&dev_priv->drm,
 998			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
 999			    pipe_name(pipe), encoder->base.base.id,
1000			    encoder->base.name);
1001
1002		/* make sure vdd is off before we steal it */
1003		vlv_detach_power_sequencer(intel_dp);
1004	}
1005}
1006
1007void vlv_pps_init(struct intel_encoder *encoder,
1008		  const struct intel_crtc_state *crtc_state)
1009{
1010	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1011	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1012	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1013
1014	lockdep_assert_held(&dev_priv->pps_mutex);
1015
1016	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1017
1018	if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
1019	    intel_dp->pps.pps_pipe != crtc->pipe) {
1020		/*
1021		 * If another power sequencer was being used on this
1022		 * port previously make sure to turn off vdd there while
1023		 * we still have control of it.
1024		 */
1025		vlv_detach_power_sequencer(intel_dp);
1026	}
1027
1028	/*
1029	 * We may be stealing the power
1030	 * sequencer from another port.
1031	 */
1032	vlv_steal_power_sequencer(dev_priv, crtc->pipe);
1033
1034	intel_dp->pps.active_pipe = crtc->pipe;
1035
1036	if (!intel_dp_is_edp(intel_dp))
1037		return;
1038
1039	/* now it's all ours */
1040	intel_dp->pps.pps_pipe = crtc->pipe;
1041
1042	drm_dbg_kms(&dev_priv->drm,
1043		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
1044		    pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
1045		    encoder->base.name);
1046
1047	/* init power sequencer on this pipe and port */
1048	pps_init_delays(intel_dp);
1049	pps_init_registers(intel_dp, true);
1050}
1051
1052static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
1053{
1054	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1055	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1056
1057	lockdep_assert_held(&dev_priv->pps_mutex);
1058
1059	if (!edp_have_panel_vdd(intel_dp))
1060		return;
1061
1062	/*
1063	 * The VDD bit needs a power domain reference, so if the bit is
1064	 * already enabled when we boot or resume, grab this reference and
1065	 * schedule a vdd off, so we don't hold on to the reference
1066	 * indefinitely.
1067	 */
1068	drm_dbg_kms(&dev_priv->drm,
1069		    "VDD left on by BIOS, adjusting state tracking\n");
1070	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
1071	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1072							    intel_aux_power_domain(dig_port));
1073
1074	edp_panel_vdd_schedule_off(intel_dp);
1075}
1076
1077bool intel_pps_have_power(struct intel_dp *intel_dp)
1078{
1079	intel_wakeref_t wakeref;
1080	bool have_power = false;
1081
1082	with_intel_pps_lock(intel_dp, wakeref) {
1083		have_power = edp_have_panel_power(intel_dp) &&
1084						  edp_have_panel_vdd(intel_dp);
1085	}
1086
1087	return have_power;
1088}
1089
1090static void pps_init_timestamps(struct intel_dp *intel_dp)
1091{
1092	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1093	intel_dp->pps.last_power_on = jiffies;
1094	intel_dp->pps.last_backlight_off = jiffies;
1095}
1096
1097static void
1098intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1099{
1100	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1101	u32 pp_on, pp_off, pp_ctl;
1102	struct pps_registers regs;
1103
1104	intel_pps_get_registers(intel_dp, &regs);
1105
1106	pp_ctl = ilk_get_pp_control(intel_dp);
1107
1108	/* Ensure PPS is unlocked */
1109	if (!HAS_DDI(dev_priv))
1110		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1111
1112	pp_on = intel_de_read(dev_priv, regs.pp_on);
1113	pp_off = intel_de_read(dev_priv, regs.pp_off);
1114
1115	/* Pull timing values out of registers */
1116	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1117	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1118	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1119	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1120
1121	if (i915_mmio_reg_valid(regs.pp_div)) {
1122		u32 pp_div;
1123
1124		pp_div = intel_de_read(dev_priv, regs.pp_div);
1125
1126		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1127	} else {
1128		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1129	}
1130}
1131
1132static void
1133intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
1134{
1135	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1136		      state_name,
1137		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1138}
1139
1140static void
1141intel_pps_verify_state(struct intel_dp *intel_dp)
1142{
1143	struct edp_power_seq hw;
1144	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1145
1146	intel_pps_readout_hw_state(intel_dp, &hw);
1147
1148	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1149	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1150		DRM_ERROR("PPS state mismatch\n");
1151		intel_pps_dump_state("sw", sw);
1152		intel_pps_dump_state("hw", &hw);
1153	}
1154}
1155
1156static void pps_init_delays(struct intel_dp *intel_dp)
1157{
1158	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1159	struct edp_power_seq cur, vbt, spec,
1160		*final = &intel_dp->pps.pps_delays;
1161
1162	lockdep_assert_held(&dev_priv->pps_mutex);
1163
1164	/* already initialized? */
1165	if (final->t11_t12 != 0)
1166		return;
1167
1168	intel_pps_readout_hw_state(intel_dp, &cur);
1169
1170	intel_pps_dump_state("cur", &cur);
1171
1172	vbt = dev_priv->vbt.edp.pps;
 1173	/* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
 1174	 * of 500ms appears to be too short. Occasionally the panel
1175	 * just fails to power back on. Increasing the delay to 800ms
1176	 * seems sufficient to avoid this problem.
1177	 */
1178	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
1179		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
1180		drm_dbg_kms(&dev_priv->drm,
1181			    "Increasing T12 panel delay as per the quirk to %d\n",
1182			    vbt.t11_t12);
1183	}
1184	/* T11_T12 delay is special and actually in units of 100ms, but zero
1185	 * based in the hw (so we need to add 100 ms). But the sw vbt
 1186	 * table multiplies it by 1000 to make it in units of 100usec,
1187	 * too. */
1188	vbt.t11_t12 += 100 * 10;
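	/*
	 * Worked example (illustrative): a panel needing a 500 ms power cycle
	 * is stored as t11_t12 = 5000 in the sw table (100 us units); the
	 * += above adds 1000 (100 ms) for the zero-based hw field, giving
	 * 6000, i.e. 600 ms.
	 */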
1189
1190	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
1191	 * our hw here, which are all in 100usec. */
1192	spec.t1_t3 = 210 * 10;
1193	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
1194	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
1195	spec.t10 = 500 * 10;
1196	/* This one is special and actually in units of 100ms, but zero
1197	 * based in the hw (so we need to add 100 ms). But the sw vbt
 1198	 * table multiplies it by 1000 to make it in units of 100usec,
1199	 * too. */
1200	spec.t11_t12 = (510 + 100) * 10;
1201
1202	intel_pps_dump_state("vbt", &vbt);
1203
1204	/* Use the max of the register settings and vbt. If both are
1205	 * unset, fall back to the spec limits. */
1206#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
1207				       spec.field : \
1208				       max(cur.field, vbt.field))
1209	assign_final(t1_t3);
1210	assign_final(t8);
1211	assign_final(t9);
1212	assign_final(t10);
1213	assign_final(t11_t12);
1214#undef assign_final
1215
1216#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
1217	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
1218	intel_dp->pps.backlight_on_delay = get_delay(t8);
1219	intel_dp->pps.backlight_off_delay = get_delay(t9);
1220	intel_dp->pps.panel_power_down_delay = get_delay(t10);
1221	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
1222#undef get_delay
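	/*
	 * get_delay() above rounds the 100 us register units up to ms for the
	 * software waits, e.g. a spec t1_t3 of 2100 (210 ms) becomes
	 * panel_power_up_delay = 210.
	 */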
1223
1224	drm_dbg_kms(&dev_priv->drm,
1225		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1226		    intel_dp->pps.panel_power_up_delay,
1227		    intel_dp->pps.panel_power_down_delay,
1228		    intel_dp->pps.panel_power_cycle_delay);
1229
1230	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
1231		    intel_dp->pps.backlight_on_delay,
1232		    intel_dp->pps.backlight_off_delay);
1233
1234	/*
1235	 * We override the HW backlight delays to 1 because we do manual waits
1236	 * on them. For T8, even BSpec recommends doing it. For T9, if we
1237	 * don't do this, we'll end up waiting for the backlight off delay
1238	 * twice: once when we do the manual sleep, and once when we disable
1239	 * the panel and wait for the PP_STATUS bit to become zero.
1240	 */
1241	final->t8 = 1;
1242	final->t9 = 1;
1243
1244	/*
1245	 * HW has only a 100msec granularity for t11_t12 so round it up
1246	 * accordingly.
1247	 */
1248	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
1249}
1250
1251static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1252{
1253	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1254	u32 pp_on, pp_off, port_sel = 0;
1255	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
1256	struct pps_registers regs;
1257	enum port port = dp_to_dig_port(intel_dp)->base.port;
1258	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
1259
1260	lockdep_assert_held(&dev_priv->pps_mutex);
1261
1262	intel_pps_get_registers(intel_dp, &regs);
1263
1264	/*
1265	 * On some VLV machines the BIOS can leave the VDD
1266	 * enabled even on power sequencers which aren't
1267	 * hooked up to any port. This would mess up the
1268	 * power domain tracking the first time we pick
1269	 * one of these power sequencers for use since
1270	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
1271	 * already on and therefore wouldn't grab the power
1272	 * domain reference. Disable VDD first to avoid this.
1273	 * This also avoids spuriously turning the VDD on as
1274	 * soon as the new power sequencer gets initialized.
1275	 */
1276	if (force_disable_vdd) {
1277		u32 pp = ilk_get_pp_control(intel_dp);
1278
1279		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
1280			 "Panel power already on\n");
1281
1282		if (pp & EDP_FORCE_VDD)
1283			drm_dbg_kms(&dev_priv->drm,
1284				    "VDD already on, disabling first\n");
1285
1286		pp &= ~EDP_FORCE_VDD;
1287
1288		intel_de_write(dev_priv, regs.pp_ctrl, pp);
1289	}
1290
1291	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
1292		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
1293	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
1294		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
1295
1296	/* Haswell doesn't have any port selection bits for the panel
1297	 * power sequencer any more. */
1298	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1299		port_sel = PANEL_PORT_SELECT_VLV(port);
1300	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
1301		switch (port) {
1302		case PORT_A:
1303			port_sel = PANEL_PORT_SELECT_DPA;
1304			break;
1305		case PORT_C:
1306			port_sel = PANEL_PORT_SELECT_DPC;
1307			break;
1308		case PORT_D:
1309			port_sel = PANEL_PORT_SELECT_DPD;
1310			break;
1311		default:
1312			MISSING_CASE(port);
1313			break;
1314		}
1315	}
1316
1317	pp_on |= port_sel;
1318
1319	intel_de_write(dev_priv, regs.pp_on, pp_on);
1320	intel_de_write(dev_priv, regs.pp_off, pp_off);
1321
1322	/*
1323	 * Compute the divisor for the pp clock, simply match the Bspec formula.
1324	 */
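	/*
	 * Illustrative numbers (assuming a 24 MHz raw clock, i.e.
	 * rawclk_freq == 24000): div == 24 and the reference divider field
	 * below is programmed to (100 * 24) / 2 - 1 = 1199.
	 */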
1325	if (i915_mmio_reg_valid(regs.pp_div)) {
1326		intel_de_write(dev_priv, regs.pp_div,
1327			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
1328	} else {
1329		u32 pp_ctl;
1330
1331		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
1332		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
1333		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
1334		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1335	}
1336
1337	drm_dbg_kms(&dev_priv->drm,
1338		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1339		    intel_de_read(dev_priv, regs.pp_on),
1340		    intel_de_read(dev_priv, regs.pp_off),
1341		    i915_mmio_reg_valid(regs.pp_div) ?
1342		    intel_de_read(dev_priv, regs.pp_div) :
1343		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1344}
1345
1346void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1347{
1348	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1349	intel_wakeref_t wakeref;
1350
1351	if (!intel_dp_is_edp(intel_dp))
1352		return;
1353
1354	with_intel_pps_lock(intel_dp, wakeref) {
1355		/*
1356		 * Reinit the power sequencer also on the resume path, in case
1357		 * BIOS did something nasty with it.
1358		 */
1359		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1360			vlv_initial_power_sequencer_setup(intel_dp);
1361
1362		pps_init_delays(intel_dp);
1363		pps_init_registers(intel_dp, false);
1364
1365		intel_pps_vdd_sanitize(intel_dp);
1366	}
1367}
1368
1369void intel_pps_init(struct intel_dp *intel_dp)
1370{
1371	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1372
1373	pps_init_timestamps(intel_dp);
1374
1375	intel_pps_encoder_reset(intel_dp);
1376}
1377
1378void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1379{
1380	int pps_num;
1381	int pps_idx;
1382
1383	if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
1384		return;
1385	/*
1386	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1387	 * everywhere where registers can be write protected.
1388	 */
1389	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1390		pps_num = 2;
1391	else
1392		pps_num = 1;
1393
1394	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
1395		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
1396
1397		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
1398		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
1399	}
1400}
1401
1402void intel_pps_setup(struct drm_i915_private *i915)
1403{
1404	if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
1405		i915->pps_mmio_base = PCH_PPS_BASE;
1406	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1407		i915->pps_mmio_base = VLV_PPS_BASE;
1408	else
1409		i915->pps_mmio_base = PPS_BASE;
1410}
v6.8
   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include "g4x_dp.h"
   7#include "i915_drv.h"
   8#include "i915_reg.h"
   9#include "intel_de.h"
  10#include "intel_display_power_well.h"
  11#include "intel_display_types.h"
  12#include "intel_dp.h"
  13#include "intel_dpio_phy.h"
  14#include "intel_dpll.h"
  15#include "intel_lvds.h"
  16#include "intel_lvds_regs.h"
  17#include "intel_pps.h"
  18#include "intel_pps_regs.h"
  19#include "intel_quirks.h"
  20
  21static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
  22				      enum pipe pipe);
  23
  24static void pps_init_delays(struct intel_dp *intel_dp);
  25static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
  26
  27static const char *pps_name(struct drm_i915_private *i915,
  28			    struct intel_pps *pps)
  29{
  30	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
  31		switch (pps->pps_pipe) {
  32		case INVALID_PIPE:
  33			/*
  34			 * FIXME would be nice if we can guarantee
  35			 * to always have a valid PPS when calling this.
  36			 */
  37			return "PPS <none>";
  38		case PIPE_A:
  39			return "PPS A";
  40		case PIPE_B:
  41			return "PPS B";
  42		default:
  43			MISSING_CASE(pps->pps_pipe);
  44			break;
  45		}
  46	} else {
  47		switch (pps->pps_idx) {
  48		case 0:
  49			return "PPS 0";
  50		case 1:
  51			return "PPS 1";
  52		default:
  53			MISSING_CASE(pps->pps_idx);
  54			break;
  55		}
  56	}
  57
  58	return "PPS <invalid>";
  59}
  60
  61intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
  62{
  63	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  64	intel_wakeref_t wakeref;
  65
  66	/*
   67	 * See intel_pps_reset_all() for why we need a power domain reference here.
  68	 */
  69	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
  70	mutex_lock(&dev_priv->display.pps.mutex);
  71
  72	return wakeref;
  73}
  74
  75intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
  76				 intel_wakeref_t wakeref)
  77{
  78	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  79
  80	mutex_unlock(&dev_priv->display.pps.mutex);
  81	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
  82
  83	return 0;
  84}
  85
  86static void
  87vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  88{
  89	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  90	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  91	enum pipe pipe = intel_dp->pps.pps_pipe;
  92	bool pll_enabled, release_cl_override = false;
  93	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
  94	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
  95	u32 DP;
  96
  97	if (drm_WARN(&dev_priv->drm,
  98		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
  99		     "skipping %s kick due to [ENCODER:%d:%s] being active\n",
 100		     pps_name(dev_priv, &intel_dp->pps),
 101		     dig_port->base.base.base.id, dig_port->base.base.name))
 102		return;
 103
 104	drm_dbg_kms(&dev_priv->drm,
 105		    "kicking %s for [ENCODER:%d:%s]\n",
 106		    pps_name(dev_priv, &intel_dp->pps),
 107		    dig_port->base.base.base.id, dig_port->base.base.name);
 108
 109	/* Preserve the BIOS-computed detected bit. This is
 110	 * supposed to be read-only.
 111	 */
 112	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
 113	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 114	DP |= DP_PORT_WIDTH(1);
 115	DP |= DP_LINK_TRAIN_PAT_1;
 116
 117	if (IS_CHERRYVIEW(dev_priv))
 118		DP |= DP_PIPE_SEL_CHV(pipe);
 119	else
 120		DP |= DP_PIPE_SEL(pipe);
 121
 122	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
 123
 124	/*
 125	 * The DPLL for the pipe must be enabled for this to work.
  126	 * So enable it temporarily if it's not already enabled.
 127	 */
 128	if (!pll_enabled) {
 129		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
 130			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
 131
 132		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
 133			drm_err(&dev_priv->drm,
 134				"Failed to force on PLL for pipe %c!\n",
 135				pipe_name(pipe));
 136			return;
 137		}
 138	}
 139
 140	/*
 141	 * Similar magic as in intel_dp_enable_port().
 142	 * We _must_ do this port enable + disable trick
 143	 * to make this power sequencer lock onto the port.
 144	 * Otherwise even VDD force bit won't work.
 145	 */
 146	intel_de_write(dev_priv, intel_dp->output_reg, DP);
 147	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 148
 149	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
 150	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 151
 152	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
 153	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 154
 155	if (!pll_enabled) {
 156		vlv_force_pll_off(dev_priv, pipe);
 157
 158		if (release_cl_override)
 159			chv_phy_powergate_ch(dev_priv, phy, ch, false);
 160	}
 161}
 162
 163static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
 164{
 165	struct intel_encoder *encoder;
 166	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
 167
 168	/*
  169	 * We don't have a power sequencer currently.
 170	 * Pick one that's not used by other ports.
 171	 */
 172	for_each_intel_dp(&dev_priv->drm, encoder) {
 173		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 174
 175		if (encoder->type == INTEL_OUTPUT_EDP) {
 176			drm_WARN_ON(&dev_priv->drm,
 177				    intel_dp->pps.active_pipe != INVALID_PIPE &&
 178				    intel_dp->pps.active_pipe !=
 179				    intel_dp->pps.pps_pipe);
 180
 181			if (intel_dp->pps.pps_pipe != INVALID_PIPE)
 182				pipes &= ~(1 << intel_dp->pps.pps_pipe);
 183		} else {
 184			drm_WARN_ON(&dev_priv->drm,
 185				    intel_dp->pps.pps_pipe != INVALID_PIPE);
 186
 187			if (intel_dp->pps.active_pipe != INVALID_PIPE)
 188				pipes &= ~(1 << intel_dp->pps.active_pipe);
 189		}
 190	}
 191
 192	if (pipes == 0)
 193		return INVALID_PIPE;
 194
 195	return ffs(pipes) - 1;
 196}
 197
 198static enum pipe
 199vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 200{
 201	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 202	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 203	enum pipe pipe;
 204
 205	lockdep_assert_held(&dev_priv->display.pps.mutex);
 206
 207	/* We should never land here with regular DP ports */
 208	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 209
 210	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
 211		    intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
 212
 213	if (intel_dp->pps.pps_pipe != INVALID_PIPE)
 214		return intel_dp->pps.pps_pipe;
 215
 216	pipe = vlv_find_free_pps(dev_priv);
 217
 218	/*
 219	 * Didn't find one. This should not happen since there
 220	 * are two power sequencers and up to two eDP ports.
 221	 */
 222	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
 223		pipe = PIPE_A;
 224
 225	vlv_steal_power_sequencer(dev_priv, pipe);
 226	intel_dp->pps.pps_pipe = pipe;
 227
 228	drm_dbg_kms(&dev_priv->drm,
 229		    "picked %s for [ENCODER:%d:%s]\n",
 230		    pps_name(dev_priv, &intel_dp->pps),
 231		    dig_port->base.base.base.id, dig_port->base.base.name);
 232
 233	/* init power sequencer on this pipe and port */
 234	pps_init_delays(intel_dp);
 235	pps_init_registers(intel_dp, true);
 236
 237	/*
 238	 * Even vdd force doesn't work until we've made
 239	 * the power sequencer lock in on the port.
 240	 */
 241	vlv_power_sequencer_kick(intel_dp);
 242
 243	return intel_dp->pps.pps_pipe;
 244}
 245
 246static int
 247bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 248{
 249	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 250	int pps_idx = intel_dp->pps.pps_idx;
 251
 252	lockdep_assert_held(&dev_priv->display.pps.mutex);
 253
 254	/* We should never land here with regular DP ports */
 255	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 256
 257	if (!intel_dp->pps.pps_reset)
 258		return pps_idx;
 259
 260	intel_dp->pps.pps_reset = false;
 261
 262	/*
 263	 * Only the HW needs to be reprogrammed, the SW state is fixed and
 264	 * has been setup during connector init.
 265	 */
 266	pps_init_registers(intel_dp, false);
 267
 268	return pps_idx;
 269}
 270
 271typedef bool (*pps_check)(struct drm_i915_private *dev_priv, int pps_idx);
 272
 273static bool pps_has_pp_on(struct drm_i915_private *dev_priv, int pps_idx)
 274{
 275	return intel_de_read(dev_priv, PP_STATUS(pps_idx)) & PP_ON;
 276}
 277
 278static bool pps_has_vdd_on(struct drm_i915_private *dev_priv, int pps_idx)
 279{
 280	return intel_de_read(dev_priv, PP_CONTROL(pps_idx)) & EDP_FORCE_VDD;
 281}
 282
 283static bool pps_any(struct drm_i915_private *dev_priv, int pps_idx)
 284{
 285	return true;
 286}
 287
 288static enum pipe
 289vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 290		     enum port port, pps_check check)
 291{
 292	enum pipe pipe;
 293
 294	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
 295		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
 296			PANEL_PORT_SELECT_MASK;
 297
 298		if (port_sel != PANEL_PORT_SELECT_VLV(port))
 299			continue;
 300
 301		if (!check(dev_priv, pipe))
 302			continue;
 303
 304		return pipe;
 305	}
 306
 307	return INVALID_PIPE;
 308}
 309
 310static void
 311vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 312{
 313	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 314	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 315	enum port port = dig_port->base.port;
 316
 317	lockdep_assert_held(&dev_priv->display.pps.mutex);
 318
 319	/* try to find a pipe with this port selected */
 320	/* first pick one where the panel is on */
 321	intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 322						      pps_has_pp_on);
 323	/* didn't find one? pick one where vdd is on */
 324	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
 325		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 326							      pps_has_vdd_on);
 327	/* didn't find one? pick one with just the correct port */
 328	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
 329		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 330							      pps_any);
 331
 332	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
 333	if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
 334		drm_dbg_kms(&dev_priv->drm,
 335			    "[ENCODER:%d:%s] no initial power sequencer\n",
 336			    dig_port->base.base.base.id, dig_port->base.base.name);
 337		return;
 338	}
 339
 340	drm_dbg_kms(&dev_priv->drm,
 341		    "[ENCODER:%d:%s] initial power sequencer: %s\n",
 342		    dig_port->base.base.base.id, dig_port->base.base.name,
 343		    pps_name(dev_priv, &intel_dp->pps));
 344}
 345
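/* Number of panel power sequencer instances provided by the platform. */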
 346static int intel_num_pps(struct drm_i915_private *i915)
 347{
 348	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
 349		return 2;
 350
 351	if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
 352		return 2;
 353
 354	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
 355		return 1;
 356
 357	if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
 358		return 2;
 359
 360	return 1;
 361}
 362
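/*
 * On ICP+ PCHs prior to MTP the second PPS instance is only usable when it
 * has been selected via the ICP_SECOND_PPS_IO_SELECT bit in SOUTH_CHICKEN1.
 */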
 363static bool intel_pps_is_valid(struct intel_dp *intel_dp)
 364{
 365	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 366
 367	if (intel_dp->pps.pps_idx == 1 &&
 368	    INTEL_PCH_TYPE(i915) >= PCH_ICP &&
 369	    INTEL_PCH_TYPE(i915) < PCH_MTP)
 370		return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
 371
 372	return true;
 373}
 374
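/* Return the first PPS index satisfying @check, or -1 if none does. */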
 375static int
 376bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
 377{
 378	int pps_idx, pps_num = intel_num_pps(i915);
 379
 380	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
 381		if (check(i915, pps_idx))
 382			return pps_idx;
 383	}
 384
 385	return -1;
 386}
 387
 388static bool
 389pps_initial_setup(struct intel_dp *intel_dp)
 390{
 391	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 392	struct intel_connector *connector = intel_dp->attached_connector;
 393	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 394
 395	lockdep_assert_held(&i915->display.pps.mutex);
 396
 397	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
 398		vlv_initial_power_sequencer_setup(intel_dp);
 399		return true;
 400	}
 401
 402	/* first ask the VBT */
 403	if (intel_num_pps(i915) > 1)
 404		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
 405	else
 406		intel_dp->pps.pps_idx = 0;
 407
 408	if (drm_WARN_ON(&i915->drm, intel_dp->pps.pps_idx >= intel_num_pps(i915)))
 409		intel_dp->pps.pps_idx = -1;
 410
 411	/* VBT wasn't parsed yet? pick one where the panel is on */
 412	if (intel_dp->pps.pps_idx < 0)
 413		intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_pp_on);
 414	/* didn't find one? pick one where vdd is on */
 415	if (intel_dp->pps.pps_idx < 0)
 416		intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_vdd_on);
 417	/* didn't find one? pick any */
 418	if (intel_dp->pps.pps_idx < 0) {
 419		intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_any);
 420
 421		drm_dbg_kms(&i915->drm,
 422			    "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
 423			    encoder->base.base.id, encoder->base.name,
 424			    pps_name(i915, &intel_dp->pps));
 425	} else {
 426		drm_dbg_kms(&i915->drm,
 427			    "[ENCODER:%d:%s] initial power sequencer: %s\n",
 428			    encoder->base.base.id, encoder->base.name,
 429			    pps_name(i915, &intel_dp->pps));
 430	}
 431
 432	return intel_pps_is_valid(intel_dp);
 433}
 434
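/*
 * Flag the PPS state of all eDP ports for lazy re-initialization, e.g. after
 * the hw state may have been lost across a display power well cycle.
 */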
 435void intel_pps_reset_all(struct drm_i915_private *dev_priv)
 436{
 437	struct intel_encoder *encoder;
 438
 439	if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
 440		return;
 441
 442	if (!HAS_DISPLAY(dev_priv))
 443		return;
 444
 445	/*
 446	 * We can't grab pps_mutex here due to deadlock with power_domain
 447	 * mutex when power_domain functions are called while holding pps_mutex.
 448	 * That also means that in order to use pps_pipe the code needs to
 449	 * hold both a power domain reference and pps_mutex, and the power domain
 450	 * reference get/put must be done while _not_ holding pps_mutex.
 451	 * pps_{lock,unlock}() do these steps in the correct order, so one
 452	 * should use them always.
 453	 */
 454
 455	for_each_intel_dp(&dev_priv->drm, encoder) {
 456		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 457
 458		drm_WARN_ON(&dev_priv->drm,
 459			    intel_dp->pps.active_pipe != INVALID_PIPE);
 460
 461		if (encoder->type != INTEL_OUTPUT_EDP)
 462			continue;
 463
 464		if (DISPLAY_VER(dev_priv) >= 9)
 465			intel_dp->pps.pps_reset = true;
 466		else
 467			intel_dp->pps.pps_pipe = INVALID_PIPE;
 468	}
 469}
 470
 471struct pps_registers {
 472	i915_reg_t pp_ctrl;
 473	i915_reg_t pp_stat;
 474	i915_reg_t pp_on;
 475	i915_reg_t pp_off;
 476	i915_reg_t pp_div;
 477};
 478
 479static void intel_pps_get_registers(struct intel_dp *intel_dp,
 480				    struct pps_registers *regs)
 481{
 482	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 483	int pps_idx;
 484
 485	memset(regs, 0, sizeof(*regs));
 486
 487	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 488		pps_idx = vlv_power_sequencer_pipe(intel_dp);
 489	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 490		pps_idx = bxt_power_sequencer_idx(intel_dp);
 491	else
 492		pps_idx = intel_dp->pps.pps_idx;
 493
 494	regs->pp_ctrl = PP_CONTROL(pps_idx);
 495	regs->pp_stat = PP_STATUS(pps_idx);
 496	regs->pp_on = PP_ON_DELAYS(pps_idx);
 497	regs->pp_off = PP_OFF_DELAYS(pps_idx);
 498
 499	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
 500	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
 501	    INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
 502		regs->pp_div = INVALID_MMIO_REG;
 503	else
 504		regs->pp_div = PP_DIVISOR(pps_idx);
 505}
 506
 507static i915_reg_t
 508_pp_ctrl_reg(struct intel_dp *intel_dp)
 509{
 510	struct pps_registers regs;
 511
 512	intel_pps_get_registers(intel_dp, &regs);
 513
 514	return regs.pp_ctrl;
 515}
 516
 517static i915_reg_t
 518_pp_stat_reg(struct intel_dp *intel_dp)
 519{
 520	struct pps_registers regs;
 521
 522	intel_pps_get_registers(intel_dp, &regs);
 523
 524	return regs.pp_stat;
 525}
 526
 527static bool edp_have_panel_power(struct intel_dp *intel_dp)
 528{
 529	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 530
 531	lockdep_assert_held(&dev_priv->display.pps.mutex);
 532
 533	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 534	    intel_dp->pps.pps_pipe == INVALID_PIPE)
 535		return false;
 536
 537	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
 538}
 539
 540static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 541{
 542	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 543
 544	lockdep_assert_held(&dev_priv->display.pps.mutex);
 545
 546	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 547	    intel_dp->pps.pps_pipe == INVALID_PIPE)
 548		return false;
 549
 550	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 551}
 552
 553void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
 554{
 555	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 556	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 557
 558	if (!intel_dp_is_edp(intel_dp))
 559		return;
 560
 561	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
 562		drm_WARN(&dev_priv->drm, 1,
 563			 "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
 564			 dig_port->base.base.base.id, dig_port->base.base.name,
 565			 pps_name(dev_priv, &intel_dp->pps));
 566		drm_dbg_kms(&dev_priv->drm,
 567			    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 568			    dig_port->base.base.base.id, dig_port->base.base.name,
 569			    pps_name(dev_priv, &intel_dp->pps),
 570			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
 571			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
 572	}
 573}
 574
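/*
 * PP_STATUS mask/value pairs for the idle states waited on below: panel
 * fully on, panel fully off, and power cycle delay completed.
 */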
 575#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
 576#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
 577
 578#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
 579#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
 580
 581#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 582#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 583
 584static void intel_pps_verify_state(struct intel_dp *intel_dp);
 585
 586static void wait_panel_status(struct intel_dp *intel_dp,
 587			      u32 mask, u32 value)
 588{
 589	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 590	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 591	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 592
 593	lockdep_assert_held(&dev_priv->display.pps.mutex);
 594
 595	intel_pps_verify_state(intel_dp);
 596
 597	pp_stat_reg = _pp_stat_reg(intel_dp);
 598	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 599
 600	drm_dbg_kms(&dev_priv->drm,
 601		    "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 602		    dig_port->base.base.base.id, dig_port->base.base.name,
 603		    pps_name(dev_priv, &intel_dp->pps),
 604		    mask, value,
 605		    intel_de_read(dev_priv, pp_stat_reg),
 606		    intel_de_read(dev_priv, pp_ctrl_reg));
 607
 608	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
 609				       mask, value, 5000))
 610		drm_err(&dev_priv->drm,
 611			"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 612			dig_port->base.base.base.id, dig_port->base.base.name,
 613			pps_name(dev_priv, &intel_dp->pps),
 614			intel_de_read(dev_priv, pp_stat_reg),
 615			intel_de_read(dev_priv, pp_ctrl_reg));
 616
 617	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
 618}
 619
 620static void wait_panel_on(struct intel_dp *intel_dp)
 621{
 622	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 623	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 624
 625	drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power on\n",
 626		    dig_port->base.base.base.id, dig_port->base.base.name,
 627		    pps_name(i915, &intel_dp->pps));
 628	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 629}
 630
 631static void wait_panel_off(struct intel_dp *intel_dp)
 632{
 633	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 634	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 635
 636	drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power off time\n",
 637		    dig_port->base.base.base.id, dig_port->base.base.name,
 638		    pps_name(i915, &intel_dp->pps));
 639	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
 640}
 641
 642static void wait_panel_power_cycle(struct intel_dp *intel_dp)
 643{
 644	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 645	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 646	ktime_t panel_power_on_time;
 647	s64 panel_power_off_duration;
 648
 649	drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power cycle\n",
 650		    dig_port->base.base.base.id, dig_port->base.base.name,
 651		    pps_name(i915, &intel_dp->pps));
 652
 653	/* take the difference of current time and panel power off time
 654	 * and then make panel wait for t11_t12 if needed. */
 655	panel_power_on_time = ktime_get_boottime();
 656	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
 657
 658	/* When we disable the VDD override bit last we have to do the manual
 659	 * wait. */
 660	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
 661		wait_remaining_ms_from_jiffies(jiffies,
 662				       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
 663
 664	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 665}
 666
 667void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
 668{
 669	intel_wakeref_t wakeref;
 670
 671	if (!intel_dp_is_edp(intel_dp))
 672		return;
 673
 674	with_intel_pps_lock(intel_dp, wakeref)
 675		wait_panel_power_cycle(intel_dp);
 676}
 677
 678static void wait_backlight_on(struct intel_dp *intel_dp)
 679{
 680	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
 681				       intel_dp->pps.backlight_on_delay);
 682}
 683
 684static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 685{
 686	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
 687				       intel_dp->pps.backlight_off_delay);
 688}
 689
 690/* Read the current pp_control value, unlocking the register if it
 691 * is locked
 692 */
 693
  694	static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 695{
 696	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 697	u32 control;
 698
 699	lockdep_assert_held(&dev_priv->display.pps.mutex);
 700
 701	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
 702	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
 703			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
 704		control &= ~PANEL_UNLOCK_MASK;
 705		control |= PANEL_UNLOCK_REGS;
 706	}
 707	return control;
 708}
 709
 710/*
 711 * Must be paired with intel_pps_vdd_off_unlocked().
 712 * Must hold pps_mutex around the whole on/off sequence.
 713 * Can be nested with intel_pps_vdd_{on,off}() calls.
 714 */
 715bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
 716{
 717	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 718	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 719	u32 pp;
 720	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 721	bool need_to_disable = !intel_dp->pps.want_panel_vdd;
 722
 723	lockdep_assert_held(&dev_priv->display.pps.mutex);
 724
 725	if (!intel_dp_is_edp(intel_dp))
 726		return false;
 727
 728	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
 729	intel_dp->pps.want_panel_vdd = true;
 730
 731	if (edp_have_panel_vdd(intel_dp))
 732		return need_to_disable;
 733
 734	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
 735	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
 736							    intel_aux_power_domain(dig_port));
 737
 738	pp_stat_reg = _pp_stat_reg(intel_dp);
 739	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 740
 741	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
 742		    dig_port->base.base.base.id, dig_port->base.base.name,
 743		    pps_name(dev_priv, &intel_dp->pps));
 744
 745	if (!edp_have_panel_power(intel_dp))
 746		wait_panel_power_cycle(intel_dp);
 747
 748	pp = ilk_get_pp_control(intel_dp);
 749	pp |= EDP_FORCE_VDD;
 750
 751	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 752	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 753	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 754		    dig_port->base.base.base.id, dig_port->base.base.name,
 755		    pps_name(dev_priv, &intel_dp->pps),
 756		    intel_de_read(dev_priv, pp_stat_reg),
 757		    intel_de_read(dev_priv, pp_ctrl_reg));
 758	/*
 759	 * If the panel wasn't on, delay before accessing aux channel
 760	 */
 761	if (!edp_have_panel_power(intel_dp)) {
 762		drm_dbg_kms(&dev_priv->drm,
 763			    "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
 764			    dig_port->base.base.base.id, dig_port->base.base.name,
 765			    pps_name(dev_priv, &intel_dp->pps));
 766		msleep(intel_dp->pps.panel_power_up_delay);
 767	}
 768
 769	return need_to_disable;
 770}
 771
 772/*
 773 * Must be paired with intel_pps_off().
 774 * Nested calls to these functions are not allowed since
 775 * we drop the lock. Caller must use some higher level
 776 * locking to prevent nested calls from other threads.
 777 */
 778void intel_pps_vdd_on(struct intel_dp *intel_dp)
 779{
 780	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 781	intel_wakeref_t wakeref;
 782	bool vdd;
 783
 784	if (!intel_dp_is_edp(intel_dp))
 785		return;
 786
 787	vdd = false;
 788	with_intel_pps_lock(intel_dp, wakeref)
 789		vdd = intel_pps_vdd_on_unlocked(intel_dp);
 790	I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
 791			dp_to_dig_port(intel_dp)->base.base.base.id,
 792			dp_to_dig_port(intel_dp)->base.base.name,
 793			pps_name(i915, &intel_dp->pps));
 794}
 795
 796static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
 797{
 798	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 799	struct intel_digital_port *dig_port =
 800		dp_to_dig_port(intel_dp);
 801	u32 pp;
 802	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 803
 804	lockdep_assert_held(&dev_priv->display.pps.mutex);
 805
 806	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
 807
 808	if (!edp_have_panel_vdd(intel_dp))
 809		return;
 810
 811	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
 812		    dig_port->base.base.base.id, dig_port->base.base.name,
 813		    pps_name(dev_priv, &intel_dp->pps));
 814
 815	pp = ilk_get_pp_control(intel_dp);
 816	pp &= ~EDP_FORCE_VDD;
 817
 818	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 819	pp_stat_reg = _pp_stat_reg(intel_dp);
 820
 821	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 822	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 823
 824	/* Make sure sequencer is idle before allowing subsequent activity */
 825	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 826		    dig_port->base.base.base.id, dig_port->base.base.name,
 827		    pps_name(dev_priv, &intel_dp->pps),
 828		    intel_de_read(dev_priv, pp_stat_reg),
 829		    intel_de_read(dev_priv, pp_ctrl_reg));
 830
 831	if ((pp & PANEL_POWER_ON) == 0)
 832		intel_dp->pps.panel_power_off_time = ktime_get_boottime();
 833
 834	intel_display_power_put(dev_priv,
 835				intel_aux_power_domain(dig_port),
 836				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
 837}
 838
 839void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
 840{
 841	intel_wakeref_t wakeref;
 842
 843	if (!intel_dp_is_edp(intel_dp))
 844		return;
 845
 846	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
 847	/*
 848	 * vdd might still be enabled due to the delayed vdd off.
 849	 * Make sure vdd is actually turned off here.
 850	 */
 851	with_intel_pps_lock(intel_dp, wakeref)
 852		intel_pps_vdd_off_sync_unlocked(intel_dp);
 853}
 854
 855static void edp_panel_vdd_work(struct work_struct *__work)
 856{
 857	struct intel_pps *pps = container_of(to_delayed_work(__work),
 858					     struct intel_pps, panel_vdd_work);
 859	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
 860	intel_wakeref_t wakeref;
 861
 862	with_intel_pps_lock(intel_dp, wakeref) {
 863		if (!intel_dp->pps.want_panel_vdd)
 864			intel_pps_vdd_off_sync_unlocked(intel_dp);
 865	}
 866}
 867
 868static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
 869{
 870	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 871	unsigned long delay;
 872
 873	/*
 874	 * We may not yet know the real power sequencing delays,
 875	 * so keep VDD enabled until we're done with init.
 876	 */
 877	if (intel_dp->pps.initializing)
 878		return;
 879
 880	/*
 881	 * Queue the timer to fire a long time from now (relative to the power
 882	 * down delay) to keep the panel power up across a sequence of
 883	 * operations.
 884	 */
 885	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
 886	queue_delayed_work(i915->unordered_wq,
 887			   &intel_dp->pps.panel_vdd_work, delay);
 888}
 889
 890/*
 891 * Must be paired with edp_panel_vdd_on().
 892 * Must hold pps_mutex around the whole on/off sequence.
 893 * Can be nested with intel_pps_vdd_{on,off}() calls.
 894 */
 895void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
 896{
 897	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 898
 899	lockdep_assert_held(&dev_priv->display.pps.mutex);
 900
 901	if (!intel_dp_is_edp(intel_dp))
 902		return;
 903
 904	I915_STATE_WARN(dev_priv, !intel_dp->pps.want_panel_vdd,
 905			"[ENCODER:%d:%s] %s VDD not forced on",
 906			dp_to_dig_port(intel_dp)->base.base.base.id,
 907			dp_to_dig_port(intel_dp)->base.base.name,
 908			pps_name(dev_priv, &intel_dp->pps));
 909
 910	intel_dp->pps.want_panel_vdd = false;
 911
 912	if (sync)
 913		intel_pps_vdd_off_sync_unlocked(intel_dp);
 914	else
 915		edp_panel_vdd_schedule_off(intel_dp);
 916}
 917
 918void intel_pps_on_unlocked(struct intel_dp *intel_dp)
 919{
 920	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 921	u32 pp;
 922	i915_reg_t pp_ctrl_reg;
 923
 924	lockdep_assert_held(&dev_priv->display.pps.mutex);
 925
 926	if (!intel_dp_is_edp(intel_dp))
 927		return;
 928
 929	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
 930		    dp_to_dig_port(intel_dp)->base.base.base.id,
 931		    dp_to_dig_port(intel_dp)->base.base.name,
 932		    pps_name(dev_priv, &intel_dp->pps));
 933
 934	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
 935		     "[ENCODER:%d:%s] %s panel power already on\n",
 936		     dp_to_dig_port(intel_dp)->base.base.base.id,
 937		     dp_to_dig_port(intel_dp)->base.base.name,
 938		     pps_name(dev_priv, &intel_dp->pps)))
 939		return;
 940
 941	wait_panel_power_cycle(intel_dp);
 942
 943	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 944	pp = ilk_get_pp_control(intel_dp);
 945	if (IS_IRONLAKE(dev_priv)) {
 946		/* ILK workaround: disable reset around power sequence */
 947		pp &= ~PANEL_POWER_RESET;
 948		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 949		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 950	}
 951
 952	pp |= PANEL_POWER_ON;
 953	if (!IS_IRONLAKE(dev_priv))
 954		pp |= PANEL_POWER_RESET;
 955
 956	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 957	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 958
 959	wait_panel_on(intel_dp);
 960	intel_dp->pps.last_power_on = jiffies;
 961
 962	if (IS_IRONLAKE(dev_priv)) {
 963		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
 964		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 965		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 966	}
 967}
 968
 969void intel_pps_on(struct intel_dp *intel_dp)
 970{
 971	intel_wakeref_t wakeref;
 972
 973	if (!intel_dp_is_edp(intel_dp))
 974		return;
 975
 976	with_intel_pps_lock(intel_dp, wakeref)
 977		intel_pps_on_unlocked(intel_dp);
 978}
 979
 980void intel_pps_off_unlocked(struct intel_dp *intel_dp)
 981{
 982	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 983	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 984	u32 pp;
 985	i915_reg_t pp_ctrl_reg;
 986
 987	lockdep_assert_held(&dev_priv->display.pps.mutex);
 988
 989	if (!intel_dp_is_edp(intel_dp))
 990		return;
 991
 992	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
 993		    dig_port->base.base.base.id, dig_port->base.base.name,
 994		    pps_name(dev_priv, &intel_dp->pps));
 995
 996	drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
 997		 "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
 998		 dig_port->base.base.base.id, dig_port->base.base.name,
 999		 pps_name(dev_priv, &intel_dp->pps));
1000
1001	pp = ilk_get_pp_control(intel_dp);
1002	/* We need to switch off panel power _and_ force vdd, for otherwise some
1003	 * panels get very unhappy and cease to work. */
1004	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1005		EDP_BLC_ENABLE);
1006
1007	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1008
1009	intel_dp->pps.want_panel_vdd = false;
1010
1011	intel_de_write(dev_priv, pp_ctrl_reg, pp);
1012	intel_de_posting_read(dev_priv, pp_ctrl_reg);
1013
1014	wait_panel_off(intel_dp);
1015	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1016
1017	/* We got a reference when we enabled the VDD. */
1018	intel_display_power_put(dev_priv,
1019				intel_aux_power_domain(dig_port),
1020				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
1021}
1022
1023void intel_pps_off(struct intel_dp *intel_dp)
1024{
1025	intel_wakeref_t wakeref;
1026
1027	if (!intel_dp_is_edp(intel_dp))
1028		return;
1029
1030	with_intel_pps_lock(intel_dp, wakeref)
1031		intel_pps_off_unlocked(intel_dp);
1032}
1033
1034/* Enable backlight in the panel power control. */
1035void intel_pps_backlight_on(struct intel_dp *intel_dp)
1036{
1037	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1038	intel_wakeref_t wakeref;
1039
1040	/*
1041	 * If we enable the backlight right away following a panel power
1042	 * on, we may see slight flicker as the panel syncs with the eDP
1043	 * link.  So delay a bit to make sure the image is solid before
1044	 * allowing it to appear.
1045	 */
1046	wait_backlight_on(intel_dp);
1047
1048	with_intel_pps_lock(intel_dp, wakeref) {
1049		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1050		u32 pp;
1051
1052		pp = ilk_get_pp_control(intel_dp);
1053		pp |= EDP_BLC_ENABLE;
1054
1055		intel_de_write(dev_priv, pp_ctrl_reg, pp);
1056		intel_de_posting_read(dev_priv, pp_ctrl_reg);
1057	}
1058}
1059
1060/* Disable backlight in the panel power control. */
1061void intel_pps_backlight_off(struct intel_dp *intel_dp)
1062{
1063	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1064	intel_wakeref_t wakeref;
1065
1066	if (!intel_dp_is_edp(intel_dp))
1067		return;
1068
1069	with_intel_pps_lock(intel_dp, wakeref) {
1070		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1071		u32 pp;
1072
1073		pp = ilk_get_pp_control(intel_dp);
1074		pp &= ~EDP_BLC_ENABLE;
1075
1076		intel_de_write(dev_priv, pp_ctrl_reg, pp);
1077		intel_de_posting_read(dev_priv, pp_ctrl_reg);
1078	}
1079
1080	intel_dp->pps.last_backlight_off = jiffies;
1081	edp_wait_backlight_off(intel_dp);
1082}
1083
1084/*
1085 * Hook for controlling the panel power control backlight through the bl_power
1086 * sysfs attribute. Take care to handle multiple calls.
1087 */
1088void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
1089{
1090	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1091	struct intel_dp *intel_dp = intel_attached_dp(connector);
1092	intel_wakeref_t wakeref;
1093	bool is_enabled;
1094
1095	is_enabled = false;
1096	with_intel_pps_lock(intel_dp, wakeref)
1097		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1098	if (is_enabled == enable)
1099		return;
1100
1101	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
1102		    enable ? "enable" : "disable");
1103
1104	if (enable)
1105		intel_pps_backlight_on(intel_dp);
1106	else
1107		intel_pps_backlight_off(intel_dp);
1108}
1109
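/*
 * Logically disconnect this port from its power sequencer: turn VDD off and
 * clear the PPS port select.
 */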
1110static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
1111{
1112	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1113	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1114	enum pipe pipe = intel_dp->pps.pps_pipe;
1115	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
1116
1117	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1118
1119	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
1120		return;
1121
1122	intel_pps_vdd_off_sync_unlocked(intel_dp);
1123
1124	/*
1125	 * VLV seems to get confused when multiple power sequencers
1126	 * have the same port selected (even if only one has power/vdd
 1127	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 1128	 * CHV, on the other hand, doesn't seem to mind having the same port
1129	 * selected in multiple power sequencers, but let's clear the
1130	 * port select always when logically disconnecting a power sequencer
1131	 * from a port.
1132	 */
1133	drm_dbg_kms(&dev_priv->drm,
1134		    "detaching %s from [ENCODER:%d:%s]\n",
1135		    pps_name(dev_priv, &intel_dp->pps),
1136		    dig_port->base.base.base.id, dig_port->base.base.name);
1137	intel_de_write(dev_priv, pp_on_reg, 0);
1138	intel_de_posting_read(dev_priv, pp_on_reg);
1139
1140	intel_dp->pps.pps_pipe = INVALID_PIPE;
1141}
1142
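/* Detach any other encoder currently using the power sequencer of @pipe. */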
1143static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
1144				      enum pipe pipe)
1145{
1146	struct intel_encoder *encoder;
1147
1148	lockdep_assert_held(&dev_priv->display.pps.mutex);
1149
1150	for_each_intel_dp(&dev_priv->drm, encoder) {
1151		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1152
1153		drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
1154			 "stealing PPS %c from active [ENCODER:%d:%s]\n",
1155			 pipe_name(pipe), encoder->base.base.id,
1156			 encoder->base.name);
1157
1158		if (intel_dp->pps.pps_pipe != pipe)
1159			continue;
1160
1161		drm_dbg_kms(&dev_priv->drm,
1162			    "stealing PPS %c from [ENCODER:%d:%s]\n",
1163			    pipe_name(pipe), encoder->base.base.id,
1164			    encoder->base.name);
1165
1166		/* make sure vdd is off before we steal it */
1167		vlv_detach_power_sequencer(intel_dp);
1168	}
1169}
1170
1171void vlv_pps_init(struct intel_encoder *encoder,
1172		  const struct intel_crtc_state *crtc_state)
1173{
1174	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1175	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1176	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1177
1178	lockdep_assert_held(&dev_priv->display.pps.mutex);
1179
1180	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1181
1182	if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
1183	    intel_dp->pps.pps_pipe != crtc->pipe) {
1184		/*
1185		 * If another power sequencer was being used on this
1186		 * port previously make sure to turn off vdd there while
1187		 * we still have control of it.
1188		 */
1189		vlv_detach_power_sequencer(intel_dp);
1190	}
1191
1192	/*
1193	 * We may be stealing the power
1194	 * sequencer from another port.
1195	 */
1196	vlv_steal_power_sequencer(dev_priv, crtc->pipe);
1197
1198	intel_dp->pps.active_pipe = crtc->pipe;
1199
1200	if (!intel_dp_is_edp(intel_dp))
1201		return;
1202
1203	/* now it's all ours */
1204	intel_dp->pps.pps_pipe = crtc->pipe;
1205
1206	drm_dbg_kms(&dev_priv->drm,
1207		    "initializing %s for [ENCODER:%d:%s]\n",
1208		    pps_name(dev_priv, &intel_dp->pps),
1209		    encoder->base.base.id, encoder->base.name);
1210
1211	/* init power sequencer on this pipe and port */
1212	pps_init_delays(intel_dp);
1213	pps_init_registers(intel_dp, true);
1214}
1215
1216static void pps_vdd_init(struct intel_dp *intel_dp)
1217{
1218	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1219	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1220
1221	lockdep_assert_held(&dev_priv->display.pps.mutex);
1222
1223	if (!edp_have_panel_vdd(intel_dp))
1224		return;
1225
1226	/*
1227	 * The VDD bit needs a power domain reference, so if the bit is
1228	 * already enabled when we boot or resume, grab this reference and
1229	 * schedule a vdd off, so we don't hold on to the reference
1230	 * indefinitely.
1231	 */
1232	drm_dbg_kms(&dev_priv->drm,
1233		    "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
1234		    dig_port->base.base.base.id, dig_port->base.base.name,
1235		    pps_name(dev_priv, &intel_dp->pps));
1236	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
1237	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1238							    intel_aux_power_domain(dig_port));
1239}
1240
1241bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
1242{
1243	intel_wakeref_t wakeref;
1244	bool have_power = false;
1245
1246	with_intel_pps_lock(intel_dp, wakeref) {
1247		have_power = edp_have_panel_power(intel_dp) ||
1248			     edp_have_panel_vdd(intel_dp);
1249	}
1250
1251	return have_power;
1252}
1253
1254static void pps_init_timestamps(struct intel_dp *intel_dp)
1255{
1256	/*
1257	 * Initialize panel power off time to 0, assuming panel power could have
1258	 * been toggled between kernel boot and now only by a previously loaded
1259	 * and removed i915, which has already ensured sufficient power off
1260	 * delay at module remove.
1261	 */
1262	intel_dp->pps.panel_power_off_time = 0;
1263	intel_dp->pps.last_power_on = jiffies;
1264	intel_dp->pps.last_backlight_off = jiffies;
1265}
1266
1267static void
1268intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1269{
1270	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1271	u32 pp_on, pp_off, pp_ctl;
1272	struct pps_registers regs;
1273
1274	intel_pps_get_registers(intel_dp, &regs);
1275
1276	pp_ctl = ilk_get_pp_control(intel_dp);
1277
1278	/* Ensure PPS is unlocked */
1279	if (!HAS_DDI(dev_priv))
1280		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1281
1282	pp_on = intel_de_read(dev_priv, regs.pp_on);
1283	pp_off = intel_de_read(dev_priv, regs.pp_off);
1284
1285	/* Pull timing values out of registers */
1286	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1287	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1288	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1289	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1290
1291	if (i915_mmio_reg_valid(regs.pp_div)) {
1292		u32 pp_div;
1293
1294		pp_div = intel_de_read(dev_priv, regs.pp_div);
1295
1296		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1297	} else {
1298		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1299	}
1300}
1301
1302static void
1303intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
1304		     const struct edp_power_seq *seq)
1305{
1306	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1307
1308	drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1309		    state_name,
1310		    seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1311}
1312
1313static void
1314intel_pps_verify_state(struct intel_dp *intel_dp)
1315{
1316	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1317	struct edp_power_seq hw;
1318	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1319
1320	intel_pps_readout_hw_state(intel_dp, &hw);
1321
1322	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1323	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1324		drm_err(&i915->drm, "PPS state mismatch\n");
1325		intel_pps_dump_state(intel_dp, "sw", sw);
1326		intel_pps_dump_state(intel_dp, "hw", &hw);
1327	}
1328}
1329
1330static bool pps_delays_valid(struct edp_power_seq *delays)
1331{
1332	return delays->t1_t3 || delays->t8 || delays->t9 ||
1333		delays->t10 || delays->t11_t12;
1334}
1335
1336static void pps_init_delays_bios(struct intel_dp *intel_dp,
1337				 struct edp_power_seq *bios)
1338{
1339	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1340
1341	lockdep_assert_held(&dev_priv->display.pps.mutex);
1342
1343	if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
1344		intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
1345
1346	*bios = intel_dp->pps.bios_pps_delays;
1347
1348	intel_pps_dump_state(intel_dp, "bios", bios);
1349}
1350
1351static void pps_init_delays_vbt(struct intel_dp *intel_dp,
1352				struct edp_power_seq *vbt)
1353{
1354	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1355	struct intel_connector *connector = intel_dp->attached_connector;
1356
1357	*vbt = connector->panel.vbt.edp.pps;
1358
1359	if (!pps_delays_valid(vbt))
1360		return;
1361
1362	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
 1363	 * of 500ms appears to be too short. Occasionally the panel
1364	 * just fails to power back on. Increasing the delay to 800ms
1365	 * seems sufficient to avoid this problem.
1366	 */
1367	if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
1368		vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
1369		drm_dbg_kms(&dev_priv->drm,
1370			    "Increasing T12 panel delay as per the quirk to %d\n",
1371			    vbt->t11_t12);
1372	}
1373
1374	/* T11_T12 delay is special and actually in units of 100ms, but zero
1375	 * based in the hw (so we need to add 100 ms). But the sw vbt
1376	 * table multiplies it with 1000 to make it in units of 100usec,
1377	 * too. */
1378	vbt->t11_t12 += 100 * 10;
1379
1380	intel_pps_dump_state(intel_dp, "vbt", vbt);
1381}
1382
1383static void pps_init_delays_spec(struct intel_dp *intel_dp,
1384				 struct edp_power_seq *spec)
1385{
1386	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1387
1388	lockdep_assert_held(&dev_priv->display.pps.mutex);
1389
1390	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
1391	 * our hw here, which are all in 100usec. */
1392	spec->t1_t3 = 210 * 10;
1393	spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
1394	spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
1395	spec->t10 = 500 * 10;
1396	/* This one is special and actually in units of 100ms, but zero
1397	 * based in the hw (so we need to add 100 ms). But the sw vbt
1398	 * table multiplies it with 1000 to make it in units of 100usec,
1399	 * too. */
1400	spec->t11_t12 = (510 + 100) * 10;
1401
1402	intel_pps_dump_state(intel_dp, "spec", spec);
1403}
1404
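/*
 * Compute the final power sequencing delays as the max of the BIOS-programmed
 * registers and the VBT, falling back to the eDP spec limits if both are unset.
 */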
1405static void pps_init_delays(struct intel_dp *intel_dp)
1406{
1407	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1408	struct edp_power_seq cur, vbt, spec,
1409		*final = &intel_dp->pps.pps_delays;
1410
1411	lockdep_assert_held(&dev_priv->display.pps.mutex);
1412
1413	/* already initialized? */
1414	if (pps_delays_valid(final))
1415		return;
1416
1417	pps_init_delays_bios(intel_dp, &cur);
1418	pps_init_delays_vbt(intel_dp, &vbt);
1419	pps_init_delays_spec(intel_dp, &spec);
1420
1421	/* Use the max of the register settings and vbt. If both are
1422	 * unset, fall back to the spec limits. */
1423#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
1424				       spec.field : \
1425				       max(cur.field, vbt.field))
1426	assign_final(t1_t3);
1427	assign_final(t8);
1428	assign_final(t9);
1429	assign_final(t10);
1430	assign_final(t11_t12);
1431#undef assign_final
1432
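	/* Convert the hw delays (units of 100us) to ms for the sw bookkeeping. */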
1433#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
1434	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
1435	intel_dp->pps.backlight_on_delay = get_delay(t8);
1436	intel_dp->pps.backlight_off_delay = get_delay(t9);
1437	intel_dp->pps.panel_power_down_delay = get_delay(t10);
1438	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
1439#undef get_delay
1440
1441	drm_dbg_kms(&dev_priv->drm,
1442		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1443		    intel_dp->pps.panel_power_up_delay,
1444		    intel_dp->pps.panel_power_down_delay,
1445		    intel_dp->pps.panel_power_cycle_delay);
1446
1447	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
1448		    intel_dp->pps.backlight_on_delay,
1449		    intel_dp->pps.backlight_off_delay);
1450
1451	/*
1452	 * We override the HW backlight delays to 1 because we do manual waits
1453	 * on them. For T8, even BSpec recommends doing it. For T9, if we
1454	 * don't do this, we'll end up waiting for the backlight off delay
1455	 * twice: once when we do the manual sleep, and once when we disable
1456	 * the panel and wait for the PP_STATUS bit to become zero.
1457	 */
1458	final->t8 = 1;
1459	final->t9 = 1;
1460
1461	/*
1462	 * HW has only a 100msec granularity for t11_t12 so round it up
1463	 * accordingly.
1464	 */
1465	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
1466}
1467
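/* Program the selected PPS instance with the chosen delays and port select. */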
1468static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1469{
1470	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1471	u32 pp_on, pp_off, port_sel = 0;
1472	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
1473	struct pps_registers regs;
1474	enum port port = dp_to_dig_port(intel_dp)->base.port;
1475	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
1476
1477	lockdep_assert_held(&dev_priv->display.pps.mutex);
1478
1479	intel_pps_get_registers(intel_dp, &regs);
1480
1481	/*
1482	 * On some VLV machines the BIOS can leave the VDD
1483	 * enabled even on power sequencers which aren't
1484	 * hooked up to any port. This would mess up the
1485	 * power domain tracking the first time we pick
1486	 * one of these power sequencers for use since
1487	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
1488	 * already on and therefore wouldn't grab the power
1489	 * domain reference. Disable VDD first to avoid this.
1490	 * This also avoids spuriously turning the VDD on as
1491	 * soon as the new power sequencer gets initialized.
1492	 */
1493	if (force_disable_vdd) {
1494		u32 pp = ilk_get_pp_control(intel_dp);
1495
1496		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
1497			 "Panel power already on\n");
1498
1499		if (pp & EDP_FORCE_VDD)
1500			drm_dbg_kms(&dev_priv->drm,
1501				    "VDD already on, disabling first\n");
1502
1503		pp &= ~EDP_FORCE_VDD;
1504
1505		intel_de_write(dev_priv, regs.pp_ctrl, pp);
1506	}
1507
1508	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
1509		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
1510	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
1511		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
1512
1513	/* Haswell doesn't have any port selection bits for the panel
1514	 * power sequencer any more. */
1515	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1516		port_sel = PANEL_PORT_SELECT_VLV(port);
1517	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
1518		switch (port) {
1519		case PORT_A:
1520			port_sel = PANEL_PORT_SELECT_DPA;
1521			break;
1522		case PORT_C:
1523			port_sel = PANEL_PORT_SELECT_DPC;
1524			break;
1525		case PORT_D:
1526			port_sel = PANEL_PORT_SELECT_DPD;
1527			break;
1528		default:
1529			MISSING_CASE(port);
1530			break;
1531		}
1532	}
1533
1534	pp_on |= port_sel;
1535
1536	intel_de_write(dev_priv, regs.pp_on, pp_on);
1537	intel_de_write(dev_priv, regs.pp_off, pp_off);
1538
1539	/*
1540	 * Compute the divisor for the pp clock, simply match the Bspec formula.
1541	 */
1542	if (i915_mmio_reg_valid(regs.pp_div))
1543		intel_de_write(dev_priv, regs.pp_div,
1544			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
1545	else
1546		intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
1547			     REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
1548					    DIV_ROUND_UP(seq->t11_t12, 1000)));
1549
1550	drm_dbg_kms(&dev_priv->drm,
1551		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1552		    intel_de_read(dev_priv, regs.pp_on),
1553		    intel_de_read(dev_priv, regs.pp_off),
1554		    i915_mmio_reg_valid(regs.pp_div) ?
1555		    intel_de_read(dev_priv, regs.pp_div) :
1556		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1557}
1558
1559void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1560{
1561	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1562	intel_wakeref_t wakeref;
1563
1564	if (!intel_dp_is_edp(intel_dp))
1565		return;
1566
1567	with_intel_pps_lock(intel_dp, wakeref) {
1568		/*
1569		 * Reinit the power sequencer also on the resume path, in case
1570		 * BIOS did something nasty with it.
1571		 */
1572		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1573			vlv_initial_power_sequencer_setup(intel_dp);
1574
1575		pps_init_delays(intel_dp);
1576		pps_init_registers(intel_dp, false);
1577		pps_vdd_init(intel_dp);
1578
1579		if (edp_have_panel_vdd(intel_dp))
1580			edp_panel_vdd_schedule_off(intel_dp);
1581	}
1582}
1583
1584bool intel_pps_init(struct intel_dp *intel_dp)
1585{
1586	intel_wakeref_t wakeref;
1587	bool ret;
1588
1589	intel_dp->pps.initializing = true;
1590	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1591
1592	pps_init_timestamps(intel_dp);
1593
1594	with_intel_pps_lock(intel_dp, wakeref) {
1595		ret = pps_initial_setup(intel_dp);
1596
1597		pps_init_delays(intel_dp);
1598		pps_init_registers(intel_dp, false);
1599		pps_vdd_init(intel_dp);
1600	}
1601
1602	return ret;
1603}
1604
1605static void pps_init_late(struct intel_dp *intel_dp)
1606{
1607	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1608	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1609	struct intel_connector *connector = intel_dp->attached_connector;
1610
1611	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1612		return;
1613
1614	if (intel_num_pps(i915) < 2)
1615		return;
1616
1617	drm_WARN(&i915->drm, connector->panel.vbt.backlight.controller >= 0 &&
1618		 intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
1619		 "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
1620		 encoder->base.base.id, encoder->base.name,
1621		 intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);
1622
1623	if (connector->panel.vbt.backlight.controller >= 0)
1624		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
1625}
1626
1627void intel_pps_init_late(struct intel_dp *intel_dp)
1628{
1629	intel_wakeref_t wakeref;
1630
1631	with_intel_pps_lock(intel_dp, wakeref) {
1632		/* Reinit delays after per-panel info has been parsed from VBT */
1633		pps_init_late(intel_dp);
1634
1635		memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
1636		pps_init_delays(intel_dp);
1637		pps_init_registers(intel_dp, false);
1638
1639		intel_dp->pps.initializing = false;
1640
1641		if (edp_have_panel_vdd(intel_dp))
1642			edp_panel_vdd_schedule_off(intel_dp);
1643	}
1644}
1645
1646void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1647{
1648	int pps_num;
1649	int pps_idx;
1650
1651	if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
1652		return;
1653	/*
1654	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1655	 * everywhere where registers can be write protected.
1656	 */
1657	pps_num = intel_num_pps(dev_priv);
1658
1659	for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
1660		intel_de_rmw(dev_priv, PP_CONTROL(pps_idx),
1661			     PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
1662}
1663
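/* Select the PPS register block (MMIO base) used by this platform. */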
1664void intel_pps_setup(struct drm_i915_private *i915)
1665{
1666	if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
1667		i915->display.pps.mmio_base = PCH_PPS_BASE;
1668	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1669		i915->display.pps.mmio_base = VLV_PPS_BASE;
1670	else
1671		i915->display.pps.mmio_base = PPS_BASE;
1672}
1673
1674void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1675{
1676	i915_reg_t pp_reg;
1677	u32 val;
1678	enum pipe panel_pipe = INVALID_PIPE;
1679	bool locked = true;
1680
1681	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
1682		return;
1683
1684	if (HAS_PCH_SPLIT(dev_priv)) {
1685		u32 port_sel;
1686
1687		pp_reg = PP_CONTROL(0);
1688		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1689
1690		switch (port_sel) {
1691		case PANEL_PORT_SELECT_LVDS:
1692			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1693			break;
1694		case PANEL_PORT_SELECT_DPA:
1695			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1696			break;
1697		case PANEL_PORT_SELECT_DPC:
1698			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1699			break;
1700		case PANEL_PORT_SELECT_DPD:
1701			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1702			break;
1703		default:
1704			MISSING_CASE(port_sel);
1705			break;
1706		}
1707	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1708		/* presumably write lock depends on pipe, not port select */
1709		pp_reg = PP_CONTROL(pipe);
1710		panel_pipe = pipe;
1711	} else {
1712		u32 port_sel;
1713
1714		pp_reg = PP_CONTROL(0);
1715		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1716
1717		drm_WARN_ON(&dev_priv->drm,
1718			    port_sel != PANEL_PORT_SELECT_LVDS);
1719		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1720	}
1721
1722	val = intel_de_read(dev_priv, pp_reg);
1723	if (!(val & PANEL_POWER_ON) ||
1724	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1725		locked = false;
1726
1727	I915_STATE_WARN(dev_priv, panel_pipe == pipe && locked,
1728			"panel assertion failure, pipe %c regs locked\n",
1729			pipe_name(pipe));
1730}