v5.14.15
   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include "g4x_dp.h"
   7#include "i915_drv.h"
 
   8#include "intel_de.h"
 
   9#include "intel_display_types.h"
  10#include "intel_dp.h"
 
  11#include "intel_dpll.h"
  12#include "intel_pps.h"
  13
  14static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
  15				      enum pipe pipe);
  16
  17static void pps_init_delays(struct intel_dp *intel_dp);
  18static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
  19
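    /*
     * intel_pps_lock()/intel_pps_unlock() take a display core power
     * reference and pps_mutex in the correct order (see the comment in
     * intel_pps_reset_all()). Callers typically pair them through the
     * with_intel_pps_lock() helper used throughout this file, e.g.:
     *
     *	with_intel_pps_lock(intel_dp, wakeref)
     *		wait_panel_power_cycle(intel_dp);
     */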
  20intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
  21{
  22	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
  23	intel_wakeref_t wakeref;
  24
  25	/*
  26	 * See intel_pps_reset_all() for why we need a power domain reference here.
  27	 */
  28	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
  29	mutex_lock(&dev_priv->pps_mutex);
  30
  31	return wakeref;
  32}
  33
  34intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
  35				 intel_wakeref_t wakeref)
  36{
  37	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
  38
  39	mutex_unlock(&dev_priv->pps_mutex);
  40	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
  41
  42	return 0;
  43}
  44
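    /*
     * On VLV/CHV the power sequencer doesn't latch onto a port until the
     * port has gone through an enable/disable cycle with its DPLL running;
     * until then even the VDD force bit has no effect. Kick the sequencer
     * by doing a dummy port enable + disable, temporarily forcing the PLL
     * on if needed.
     */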
  45static void
  46vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  47{
  48	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
  49	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  50	enum pipe pipe = intel_dp->pps.pps_pipe;
  51	bool pll_enabled, release_cl_override = false;
  52	enum dpio_phy phy = DPIO_PHY(pipe);
  53	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
  54	u32 DP;
  55
  56	if (drm_WARN(&dev_priv->drm,
  57		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
  58		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
  59		     pipe_name(pipe), dig_port->base.base.base.id,
  60		     dig_port->base.base.name))
  61		return;
  62
  63	drm_dbg_kms(&dev_priv->drm,
  64		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
  65		    pipe_name(pipe), dig_port->base.base.base.id,
  66		    dig_port->base.base.name);
  67
  68	/* Preserve the BIOS-computed detected bit. This is
  69	 * supposed to be read-only.
  70	 */
  71	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
  72	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
  73	DP |= DP_PORT_WIDTH(1);
  74	DP |= DP_LINK_TRAIN_PAT_1;
  75
  76	if (IS_CHERRYVIEW(dev_priv))
  77		DP |= DP_PIPE_SEL_CHV(pipe);
  78	else
  79		DP |= DP_PIPE_SEL(pipe);
  80
  81	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
  82
  83	/*
  84	 * The DPLL for the pipe must be enabled for this to work.
  85	 * So enable it temporarily if it's not already enabled.
  86	 */
  87	if (!pll_enabled) {
  88		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
  89			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
  90
  91		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
  92			drm_err(&dev_priv->drm,
  93				"Failed to force on pll for pipe %c!\n",
  94				pipe_name(pipe));
  95			return;
  96		}
  97	}
  98
  99	/*
 100	 * Similar magic as in intel_dp_enable_port().
 101	 * We _must_ do this port enable + disable trick
 102	 * to make this power sequencer lock onto the port.
 103	 * Otherwise even VDD force bit won't work.
 104	 */
 105	intel_de_write(dev_priv, intel_dp->output_reg, DP);
 106	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 107
 108	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
 109	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 110
 111	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
 112	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 113
 114	if (!pll_enabled) {
 115		vlv_force_pll_off(dev_priv, pipe);
 116
 117		if (release_cl_override)
 118			chv_phy_powergate_ch(dev_priv, phy, ch, false);
 119	}
 120}
 121
 122static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
 123{
 124	struct intel_encoder *encoder;
 125	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
 126
 127	/*
 128	 * We don't have a power sequencer currently.
 129	 * Pick one that's not used by other ports.
 130	 */
 131	for_each_intel_dp(&dev_priv->drm, encoder) {
 132		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 133
 134		if (encoder->type == INTEL_OUTPUT_EDP) {
 135			drm_WARN_ON(&dev_priv->drm,
 136				    intel_dp->pps.active_pipe != INVALID_PIPE &&
 137				    intel_dp->pps.active_pipe !=
 138				    intel_dp->pps.pps_pipe);
 139
 140			if (intel_dp->pps.pps_pipe != INVALID_PIPE)
 141				pipes &= ~(1 << intel_dp->pps.pps_pipe);
 142		} else {
 143			drm_WARN_ON(&dev_priv->drm,
 144				    intel_dp->pps.pps_pipe != INVALID_PIPE);
 145
 146			if (intel_dp->pps.active_pipe != INVALID_PIPE)
 147				pipes &= ~(1 << intel_dp->pps.active_pipe);
 148		}
 149	}
 150
 151	if (pipes == 0)
 152		return INVALID_PIPE;
 153
 154	return ffs(pipes) - 1;
 155}
 156
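    /*
     * Return the pipe whose power sequencer is bound to this eDP port,
     * picking a free one (and stealing it from another port if none is
     * free) and initializing it on first use.
     */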
 157static enum pipe
 158vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 159{
 160	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 161	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 162	enum pipe pipe;
 163
 164	lockdep_assert_held(&dev_priv->pps_mutex);
 165
 166	/* We should never land here with regular DP ports */
 167	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 168
 169	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
 170		    intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
 171
 172	if (intel_dp->pps.pps_pipe != INVALID_PIPE)
 173		return intel_dp->pps.pps_pipe;
 174
 175	pipe = vlv_find_free_pps(dev_priv);
 176
 177	/*
 178	 * Didn't find one. This should not happen since there
 179	 * are two power sequencers and up to two eDP ports.
 180	 */
 181	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
 182		pipe = PIPE_A;
 183
 184	vlv_steal_power_sequencer(dev_priv, pipe);
 185	intel_dp->pps.pps_pipe = pipe;
 186
 187	drm_dbg_kms(&dev_priv->drm,
 188		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
 189		    pipe_name(intel_dp->pps.pps_pipe),
 190		    dig_port->base.base.base.id,
 191		    dig_port->base.base.name);
 192
 193	/* init power sequencer on this pipe and port */
 194	pps_init_delays(intel_dp);
 195	pps_init_registers(intel_dp, true);
 196
 197	/*
 198	 * Even vdd force doesn't work until we've made
 199	 * the power sequencer lock in on the port.
 200	 */
 201	vlv_power_sequencer_kick(intel_dp);
 202
 203	return intel_dp->pps.pps_pipe;
 204}
 205
 206static int
 207bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 208{
 209	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 210	int backlight_controller = dev_priv->vbt.backlight.controller;
 211
 212	lockdep_assert_held(&dev_priv->pps_mutex);
 213
 214	/* We should never land here with regular DP ports */
 215	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 216
 217	if (!intel_dp->pps.pps_reset)
 218		return backlight_controller;
 219
 220	intel_dp->pps.pps_reset = false;
 221
 222	/*
 223	 * Only the HW needs to be reprogrammed, the SW state is fixed and
 224	 * has been setup during connector init.
 225	 */
 226	pps_init_registers(intel_dp, false);
 227
 228	return backlight_controller;
 229}
 230
 231typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
 232			       enum pipe pipe);
 233
 234static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
 235			       enum pipe pipe)
 236{
 237	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
 238}
 239
 240static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
 241				enum pipe pipe)
 242{
 243	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
 244}
 245
 246static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
 247			 enum pipe pipe)
 248{
 249	return true;
 250}
 251
 252static enum pipe
 253vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 254		     enum port port,
 255		     vlv_pipe_check pipe_check)
 256{
 257	enum pipe pipe;
 258
 259	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
 260		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
 261			PANEL_PORT_SELECT_MASK;
 262
 263		if (port_sel != PANEL_PORT_SELECT_VLV(port))
 264			continue;
 265
 266		if (!pipe_check(dev_priv, pipe))
 267			continue;
 268
 269		return pipe;
 270	}
 271
 272	return INVALID_PIPE;
 273}
 274
 275static void
 276vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 277{
 278	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 279	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 280	enum port port = dig_port->base.port;
 281
 282	lockdep_assert_held(&dev_priv->pps_mutex);
 283
 284	/* try to find a pipe with this port selected */
 285	/* first pick one where the panel is on */
 286	intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 287						      vlv_pipe_has_pp_on);
 288	/* didn't find one? pick one where vdd is on */
 289	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
 290		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 291							      vlv_pipe_has_vdd_on);
 292	/* didn't find one? pick one with just the correct port */
 293	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
 294		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 295							      vlv_pipe_any);
 296
 297	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
 298	if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
 299		drm_dbg_kms(&dev_priv->drm,
 300			    "no initial power sequencer for [ENCODER:%d:%s]\n",
 301			    dig_port->base.base.base.id,
 302			    dig_port->base.base.name);
 303		return;
 304	}
 305
 306	drm_dbg_kms(&dev_priv->drm,
 307		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
 308		    dig_port->base.base.base.id,
 309		    dig_port->base.base.name,
 310		    pipe_name(intel_dp->pps.pps_pipe));
 311}
 312
 313void intel_pps_reset_all(struct drm_i915_private *dev_priv)
 314{
 315	struct intel_encoder *encoder;
 316
 317	if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
 318		return;
 319
 320	if (!HAS_DISPLAY(dev_priv))
 321		return;
 322
 323	/*
 324	 * We can't grab pps_mutex here due to deadlock with power_domain
 325	 * mutex when power_domain functions are called while holding pps_mutex.
 326	 * That also means that in order to use pps_pipe the code needs to
 327	 * hold both a power domain reference and pps_mutex, and the power domain
 328	 * reference get/put must be done while _not_ holding pps_mutex.
 329	 * intel_pps_{lock,unlock}() do these steps in the correct order,
 330	 * so always use them.
 331	 */
 332
 333	for_each_intel_dp(&dev_priv->drm, encoder) {
 334		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 335
 336		drm_WARN_ON(&dev_priv->drm,
 337			    intel_dp->pps.active_pipe != INVALID_PIPE);
 338
 339		if (encoder->type != INTEL_OUTPUT_EDP)
 340			continue;
 341
 342		if (DISPLAY_VER(dev_priv) >= 9)
 343			intel_dp->pps.pps_reset = true;
 344		else
 345			intel_dp->pps.pps_pipe = INVALID_PIPE;
 346	}
 347}
 348
 349struct pps_registers {
 350	i915_reg_t pp_ctrl;
 351	i915_reg_t pp_stat;
 352	i915_reg_t pp_on;
 353	i915_reg_t pp_off;
 354	i915_reg_t pp_div;
 355};
 356
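    /*
     * Figure out which power sequencer instance this port uses (the VBT
     * backlight controller on BXT/GLK, the per-pipe sequencer on VLV/CHV,
     * instance 0 otherwise) and fill in its register offsets. pp_div is
     * left invalid on platforms where the power cycle delay lives in
     * PP_CONTROL instead of PP_DIVISOR.
     */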
 357static void intel_pps_get_registers(struct intel_dp *intel_dp,
 358				    struct pps_registers *regs)
 359{
 360	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 361	int pps_idx = 0;
 
 362
 363	memset(regs, 0, sizeof(*regs));
 364
 365	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 366		pps_idx = bxt_power_sequencer_idx(intel_dp);
 367	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 368		pps_idx = vlv_power_sequencer_pipe(intel_dp);
 369
 370	regs->pp_ctrl = PP_CONTROL(pps_idx);
 371	regs->pp_stat = PP_STATUS(pps_idx);
 372	regs->pp_on = PP_ON_DELAYS(pps_idx);
 373	regs->pp_off = PP_OFF_DELAYS(pps_idx);
 374
 375	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
 376	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
 377	    INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
 378		regs->pp_div = INVALID_MMIO_REG;
 379	else
 380		regs->pp_div = PP_DIVISOR(pps_idx);
 381}
 382
 383static i915_reg_t
 384_pp_ctrl_reg(struct intel_dp *intel_dp)
 385{
 386	struct pps_registers regs;
 387
 388	intel_pps_get_registers(intel_dp, &regs);
 389
 390	return regs.pp_ctrl;
 391}
 392
 393static i915_reg_t
 394_pp_stat_reg(struct intel_dp *intel_dp)
 395{
 396	struct pps_registers regs;
 397
 398	intel_pps_get_registers(intel_dp, &regs);
 399
 400	return regs.pp_stat;
 401}
 402
 403static bool edp_have_panel_power(struct intel_dp *intel_dp)
 404{
 405	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 406
 407	lockdep_assert_held(&dev_priv->pps_mutex);
 408
 409	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 410	    intel_dp->pps.pps_pipe == INVALID_PIPE)
 411		return false;
 412
 413	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
 414}
 415
 416static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 417{
 418	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 419
 420	lockdep_assert_held(&dev_priv->pps_mutex);
 421
 422	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 423	    intel_dp->pps.pps_pipe == INVALID_PIPE)
 424		return false;
 425
 426	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 427}
 428
 429void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
 430{
 431	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 432
 433	if (!intel_dp_is_edp(intel_dp))
 434		return;
 435
 436	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
 437		drm_WARN(&dev_priv->drm, 1,
 438			 "eDP powered off while attempting aux channel communication.\n");
 439		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
 440			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
 441			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
 442	}
 443}
 444
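    /*
     * (mask, value) pairs for wait_panel_status(): panel fully on with the
     * sequencer idle, panel fully off with the sequencer idle, and panel
     * off with the power cycle delay complete, respectively.
     */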
 445#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
 446#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
 447
 448#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
 449#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
 450
 451#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 452#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 453
 454static void intel_pps_verify_state(struct intel_dp *intel_dp);
 455
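    /*
     * Poll PP_STATUS until (status & mask) == value, giving up after 5
     * seconds and logging the raw status/control registers on timeout.
     */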
 456static void wait_panel_status(struct intel_dp *intel_dp,
 457				       u32 mask,
 458				       u32 value)
 459{
 460	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 461	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 462
 463	lockdep_assert_held(&dev_priv->pps_mutex);
 464
 465	intel_pps_verify_state(intel_dp);
 466
 467	pp_stat_reg = _pp_stat_reg(intel_dp);
 468	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 469
 470	drm_dbg_kms(&dev_priv->drm,
 471		    "mask %08x value %08x status %08x control %08x\n",
 472		    mask, value,
 473		    intel_de_read(dev_priv, pp_stat_reg),
 474		    intel_de_read(dev_priv, pp_ctrl_reg));
 475
 476	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
 477				       mask, value, 5000))
 478		drm_err(&dev_priv->drm,
 479			"Panel status timeout: status %08x control %08x\n",
 480			intel_de_read(dev_priv, pp_stat_reg),
 481			intel_de_read(dev_priv, pp_ctrl_reg));
 
 482
 483	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
 484}
 485
 486static void wait_panel_on(struct intel_dp *intel_dp)
 487{
 488	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
 489
 490	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
 491	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 492}
 493
 494static void wait_panel_off(struct intel_dp *intel_dp)
 495{
 496	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
 497
 498	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
 499	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
 500}
 501
 502static void wait_panel_power_cycle(struct intel_dp *intel_dp)
 503{
 504	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
 505	ktime_t panel_power_on_time;
 506	s64 panel_power_off_duration;
 507
 508	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
 509
 510	/* take the difference of current time and panel power off time
 511	 * and then make the panel wait for t11_t12 if needed. */
 512	panel_power_on_time = ktime_get_boottime();
 513	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
 514
 515	/* When we disable the VDD override bit last we have to do the manual
 516	 * wait. */
 517	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
 518		wait_remaining_ms_from_jiffies(jiffies,
 519				       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
 520
 521	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 522}
 523
 524void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
 525{
 526	intel_wakeref_t wakeref;
 527
 528	if (!intel_dp_is_edp(intel_dp))
 529		return;
 530
 531	with_intel_pps_lock(intel_dp, wakeref)
 532		wait_panel_power_cycle(intel_dp);
 533}
 534
 535static void wait_backlight_on(struct intel_dp *intel_dp)
 536{
 537	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
 538				       intel_dp->pps.backlight_on_delay);
 539}
 540
 541static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 542{
 543	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
 544				       intel_dp->pps.backlight_off_delay);
 545}
 546
 547/* Read the current pp_control value, unlocking the register if it
 548 * is locked
 549 */
 550
 551static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 552{
 553	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 554	u32 control;
 555
 556	lockdep_assert_held(&dev_priv->pps_mutex);
 557
 558	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
 559	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
 560			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
 561		control &= ~PANEL_UNLOCK_MASK;
 562		control |= PANEL_UNLOCK_REGS;
 563	}
 564	return control;
 565}
 566
 567/*
 568 * Must be paired with intel_pps_vdd_off_unlocked().
 569 * Must hold pps_mutex around the whole on/off sequence.
 570 * Can be nested with intel_pps_vdd_{on,off}() calls.
 571 */
 572bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
 573{
 574	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 575	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 576	u32 pp;
 577	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 578	bool need_to_disable = !intel_dp->pps.want_panel_vdd;
 579
 580	lockdep_assert_held(&dev_priv->pps_mutex);
 581
 582	if (!intel_dp_is_edp(intel_dp))
 583		return false;
 584
 585	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
 586	intel_dp->pps.want_panel_vdd = true;
 587
 588	if (edp_have_panel_vdd(intel_dp))
 589		return need_to_disable;
 590
 591	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
 592	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
 593							    intel_aux_power_domain(dig_port));
 594
 595	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
 596		    dig_port->base.base.base.id,
 597		    dig_port->base.base.name);
 598
 599	if (!edp_have_panel_power(intel_dp))
 600		wait_panel_power_cycle(intel_dp);
 601
 602	pp = ilk_get_pp_control(intel_dp);
 603	pp |= EDP_FORCE_VDD;
 604
 605	pp_stat_reg = _pp_stat_reg(intel_dp);
 606	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 607
 608	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 609	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 610	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 611		    intel_de_read(dev_priv, pp_stat_reg),
 612		    intel_de_read(dev_priv, pp_ctrl_reg));
 613	/*
 614	 * If the panel wasn't on, delay before accessing aux channel
 615	 */
 616	if (!edp_have_panel_power(intel_dp)) {
 617		drm_dbg_kms(&dev_priv->drm,
 618			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
 619			    dig_port->base.base.base.id,
 620			    dig_port->base.base.name);
 621		msleep(intel_dp->pps.panel_power_up_delay);
 622	}
 623
 624	return need_to_disable;
 625}
 626
 627/*
 628 * Must be paired with intel_pps_off().
 
 629 * Nested calls to these functions are not allowed since
 630 * we drop the lock. Caller must use some higher level
 631 * locking to prevent nested calls from other threads.
 632 */
 633void intel_pps_vdd_on(struct intel_dp *intel_dp)
 634{
 
 635	intel_wakeref_t wakeref;
 636	bool vdd;
 637
 638	if (!intel_dp_is_edp(intel_dp))
 639		return;
 640
 641	vdd = false;
 642	with_intel_pps_lock(intel_dp, wakeref)
 643		vdd = intel_pps_vdd_on_unlocked(intel_dp);
 644	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
 645			dp_to_dig_port(intel_dp)->base.base.base.id,
 646			dp_to_dig_port(intel_dp)->base.base.name);
 
 647}
 648
 649static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
 650{
 651	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 652	struct intel_digital_port *dig_port =
 653		dp_to_dig_port(intel_dp);
 654	u32 pp;
 655	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 656
 657	lockdep_assert_held(&dev_priv->pps_mutex);
 658
 659	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
 660
 661	if (!edp_have_panel_vdd(intel_dp))
 662		return;
 663
 664	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
 665		    dig_port->base.base.base.id,
 666		    dig_port->base.base.name);
 667
 668	pp = ilk_get_pp_control(intel_dp);
 669	pp &= ~EDP_FORCE_VDD;
 670
 671	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 672	pp_stat_reg = _pp_stat_reg(intel_dp);
 673
 674	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 675	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 676
 677	/* Make sure sequencer is idle before allowing subsequent activity */
 678	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 679		    intel_de_read(dev_priv, pp_stat_reg),
 680		    intel_de_read(dev_priv, pp_ctrl_reg));
 681
 682	if ((pp & PANEL_POWER_ON) == 0)
 683		intel_dp->pps.panel_power_off_time = ktime_get_boottime();
 684
 685	intel_display_power_put(dev_priv,
 686				intel_aux_power_domain(dig_port),
 687				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
 688}
 689
 690void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
 691{
 692	intel_wakeref_t wakeref;
 693
 694	if (!intel_dp_is_edp(intel_dp))
 695		return;
 696
 697	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
 698	/*
 699	 * vdd might still be enabled due to the delayed vdd off.
 700	 * Make sure vdd is actually turned off here.
 701	 */
 702	with_intel_pps_lock(intel_dp, wakeref)
 703		intel_pps_vdd_off_sync_unlocked(intel_dp);
 704}
 705
 706static void edp_panel_vdd_work(struct work_struct *__work)
 707{
 708	struct intel_pps *pps = container_of(to_delayed_work(__work),
 709					     struct intel_pps, panel_vdd_work);
 710	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
 711	intel_wakeref_t wakeref;
 712
 713	with_intel_pps_lock(intel_dp, wakeref) {
 714		if (!intel_dp->pps.want_panel_vdd)
 715			intel_pps_vdd_off_sync_unlocked(intel_dp);
 716	}
 717}
 718
 719static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
 720{
 721	unsigned long delay;
 722
 723	/*
 724	 * Queue the timer to fire a long time from now (relative to the power
 725	 * down delay) to keep the panel power up across a sequence of
 726	 * operations.
 727	 */
 728	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
 729	schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
 
 730}
 731
 732/*
 733 * Must be paired with intel_pps_vdd_on_unlocked().
 734 * Must hold pps_mutex around the whole on/off sequence.
 735 * Can be nested with intel_pps_vdd_{on,off}() calls.
 736 */
 737void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
 738{
 739	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 740
 741	lockdep_assert_held(&dev_priv->pps_mutex);
 742
 743	if (!intel_dp_is_edp(intel_dp))
 744		return;
 745
 746	I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
 747			dp_to_dig_port(intel_dp)->base.base.base.id,
 748			dp_to_dig_port(intel_dp)->base.base.name);
 749
 750	intel_dp->pps.want_panel_vdd = false;
 751
 752	if (sync)
 753		intel_pps_vdd_off_sync_unlocked(intel_dp);
 754	else
 755		edp_panel_vdd_schedule_off(intel_dp);
 756}
 757
 758void intel_pps_on_unlocked(struct intel_dp *intel_dp)
 759{
 760	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 761	u32 pp;
 762	i915_reg_t pp_ctrl_reg;
 763
 764	lockdep_assert_held(&dev_priv->pps_mutex);
 765
 766	if (!intel_dp_is_edp(intel_dp))
 767		return;
 768
 769	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
 770		    dp_to_dig_port(intel_dp)->base.base.base.id,
 771		    dp_to_dig_port(intel_dp)->base.base.name);
 
 772
 773	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
 774		     "[ENCODER:%d:%s] panel power already on\n",
 775		     dp_to_dig_port(intel_dp)->base.base.base.id,
 776		     dp_to_dig_port(intel_dp)->base.base.name))
 
 777		return;
 778
 779	wait_panel_power_cycle(intel_dp);
 780
 781	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 782	pp = ilk_get_pp_control(intel_dp);
 783	if (IS_IRONLAKE(dev_priv)) {
 784		/* ILK workaround: disable reset around power sequence */
 785		pp &= ~PANEL_POWER_RESET;
 786		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 787		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 788	}
 789
 790	pp |= PANEL_POWER_ON;
 791	if (!IS_IRONLAKE(dev_priv))
 792		pp |= PANEL_POWER_RESET;
 793
 794	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 795	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 796
 797	wait_panel_on(intel_dp);
 798	intel_dp->pps.last_power_on = jiffies;
 799
 800	if (IS_IRONLAKE(dev_priv)) {
 801		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
 802		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 803		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 804	}
 805}
 806
 807void intel_pps_on(struct intel_dp *intel_dp)
 808{
 809	intel_wakeref_t wakeref;
 810
 811	if (!intel_dp_is_edp(intel_dp))
 812		return;
 813
 814	with_intel_pps_lock(intel_dp, wakeref)
 815		intel_pps_on_unlocked(intel_dp);
 816}
 817
 818void intel_pps_off_unlocked(struct intel_dp *intel_dp)
 819{
 820	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 821	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 822	u32 pp;
 823	i915_reg_t pp_ctrl_reg;
 824
 825	lockdep_assert_held(&dev_priv->pps_mutex);
 826
 827	if (!intel_dp_is_edp(intel_dp))
 828		return;
 829
 830	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
 831		    dig_port->base.base.base.id, dig_port->base.base.name);
 832
 833	drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
 834		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
 835		 dig_port->base.base.base.id, dig_port->base.base.name);
 836
 837	pp = ilk_get_pp_control(intel_dp);
 838	/* We need to switch off panel power _and_ force vdd, for otherwise some
 839	 * panels get very unhappy and cease to work. */
 840	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
 841		EDP_BLC_ENABLE);
 842
 843	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 844
 845	intel_dp->pps.want_panel_vdd = false;
 846
 847	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 848	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 849
 850	wait_panel_off(intel_dp);
 851	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
 852
 853	/* We got a reference when we enabled the VDD. */
 854	intel_display_power_put(dev_priv,
 855				intel_aux_power_domain(dig_port),
 856				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
 857}
 858
 859void intel_pps_off(struct intel_dp *intel_dp)
 860{
 861	intel_wakeref_t wakeref;
 862
 863	if (!intel_dp_is_edp(intel_dp))
 864		return;
 865
 866	with_intel_pps_lock(intel_dp, wakeref)
 867		intel_pps_off_unlocked(intel_dp);
 868}
 869
 870/* Enable backlight in the panel power control. */
 871void intel_pps_backlight_on(struct intel_dp *intel_dp)
 872{
 873	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 874	intel_wakeref_t wakeref;
 875
 876	/*
 877	 * If we enable the backlight right away following a panel power
 878	 * on, we may see slight flicker as the panel syncs with the eDP
 879	 * link.  So delay a bit to make sure the image is solid before
 880	 * allowing it to appear.
 881	 */
 882	wait_backlight_on(intel_dp);
 883
 884	with_intel_pps_lock(intel_dp, wakeref) {
 885		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 886		u32 pp;
 887
 888		pp = ilk_get_pp_control(intel_dp);
 889		pp |= EDP_BLC_ENABLE;
 890
 891		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 892		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 893	}
 894}
 895
 896/* Disable backlight in the panel power control. */
 897void intel_pps_backlight_off(struct intel_dp *intel_dp)
 898{
 899	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 900	intel_wakeref_t wakeref;
 901
 902	if (!intel_dp_is_edp(intel_dp))
 903		return;
 904
 905	with_intel_pps_lock(intel_dp, wakeref) {
 906		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 907		u32 pp;
 908
 909		pp = ilk_get_pp_control(intel_dp);
 910		pp &= ~EDP_BLC_ENABLE;
 911
 912		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 913		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 914	}
 915
 916	intel_dp->pps.last_backlight_off = jiffies;
 917	edp_wait_backlight_off(intel_dp);
 918}
 919
 920/*
 921 * Hook for controlling the panel power control backlight through the bl_power
 922 * sysfs attribute. Take care to handle multiple calls.
 923 */
 924void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
 925{
 926	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 927	struct intel_dp *intel_dp = intel_attached_dp(connector);
 928	intel_wakeref_t wakeref;
 929	bool is_enabled;
 930
 931	is_enabled = false;
 932	with_intel_pps_lock(intel_dp, wakeref)
 933		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
 934	if (is_enabled == enable)
 935		return;
 936
 937	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
 938		    enable ? "enable" : "disable");
 939
 940	if (enable)
 941		intel_pps_backlight_on(intel_dp);
 942	else
 943		intel_pps_backlight_off(intel_dp);
 944}
 945
 946static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 947{
 
 948	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 949	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 950	enum pipe pipe = intel_dp->pps.pps_pipe;
 951	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
 952
 953	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
 954
 955	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
 956		return;
 957
 958	intel_pps_vdd_off_sync_unlocked(intel_dp);
 959
 960	/*
 961	 * VLV seems to get confused when multiple power sequencers
 962	 * have the same port selected (even if only one has power/vdd
 963	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 964	 * CHV on the other hand doesn't seem to mind having the same port
 965	 * selected in multiple power sequencers, but let's clear the
 966	 * port select always when logically disconnecting a power sequencer
 967	 * from a port.
 968	 */
 969	drm_dbg_kms(&dev_priv->drm,
 970		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
 971		    pipe_name(pipe), dig_port->base.base.base.id,
 972		    dig_port->base.base.name);
 973	intel_de_write(dev_priv, pp_on_reg, 0);
 974	intel_de_posting_read(dev_priv, pp_on_reg);
 975
 976	intel_dp->pps.pps_pipe = INVALID_PIPE;
 977}
 978
 979static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
 980				      enum pipe pipe)
 981{
 982	struct intel_encoder *encoder;
 983
 984	lockdep_assert_held(&dev_priv->pps_mutex);
 985
 986	for_each_intel_dp(&dev_priv->drm, encoder) {
 987		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 988
 989		drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
 990			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
 991			 pipe_name(pipe), encoder->base.base.id,
 992			 encoder->base.name);
 993
 994		if (intel_dp->pps.pps_pipe != pipe)
 995			continue;
 996
 997		drm_dbg_kms(&dev_priv->drm,
 998			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
 999			    pipe_name(pipe), encoder->base.base.id,
1000			    encoder->base.name);
1001
1002		/* make sure vdd is off before we steal it */
1003		vlv_detach_power_sequencer(intel_dp);
1004	}
1005}
1006
1007void vlv_pps_init(struct intel_encoder *encoder,
1008		  const struct intel_crtc_state *crtc_state)
1009{
1010	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1011	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1012	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1013
1014	lockdep_assert_held(&dev_priv->pps_mutex);
1015
1016	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1017
1018	if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
1019	    intel_dp->pps.pps_pipe != crtc->pipe) {
1020		/*
1021		 * If another power sequencer was being used on this
1022		 * port previously make sure to turn off vdd there while
1023		 * we still have control of it.
1024		 */
1025		vlv_detach_power_sequencer(intel_dp);
1026	}
1027
1028	/*
1029	 * We may be stealing the power
1030	 * sequencer from another port.
1031	 */
1032	vlv_steal_power_sequencer(dev_priv, crtc->pipe);
1033
1034	intel_dp->pps.active_pipe = crtc->pipe;
1035
1036	if (!intel_dp_is_edp(intel_dp))
1037		return;
1038
1039	/* now it's all ours */
1040	intel_dp->pps.pps_pipe = crtc->pipe;
1041
1042	drm_dbg_kms(&dev_priv->drm,
1043		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
1044		    pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
1045		    encoder->base.name);
1046
1047	/* init power sequencer on this pipe and port */
1048	pps_init_delays(intel_dp);
1049	pps_init_registers(intel_dp, true);
1050}
1051
1052static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
1053{
1054	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1055	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1056
1057	lockdep_assert_held(&dev_priv->pps_mutex);
1058
1059	if (!edp_have_panel_vdd(intel_dp))
1060		return;
1061
1062	/*
1063	 * The VDD bit needs a power domain reference, so if the bit is
1064	 * already enabled when we boot or resume, grab this reference and
1065	 * schedule a vdd off, so we don't hold on to the reference
1066	 * indefinitely.
1067	 */
1068	drm_dbg_kms(&dev_priv->drm,
1069		    "VDD left on by BIOS, adjusting state tracking\n");
1070	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
1071	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1072							    intel_aux_power_domain(dig_port));
1073
1074	edp_panel_vdd_schedule_off(intel_dp);
1075}
1076
1077bool intel_pps_have_power(struct intel_dp *intel_dp)
1078{
1079	intel_wakeref_t wakeref;
1080	bool have_power = false;
1081
1082	with_intel_pps_lock(intel_dp, wakeref) {
1083		have_power = edp_have_panel_power(intel_dp) &&
1084						  edp_have_panel_vdd(intel_dp);
1085	}
1086
1087	return have_power;
1088}
1089
1090static void pps_init_timestamps(struct intel_dp *intel_dp)
1091{
1092	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1093	intel_dp->pps.last_power_on = jiffies;
1094	intel_dp->pps.last_backlight_off = jiffies;
1095}
1096
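    /*
     * Read the current panel power sequencing delays back out of the PPS
     * registers into an edp_power_seq. t11_t12 comes from PP_DIVISOR where
     * that register exists, otherwise from the BXT_POWER_CYCLE_DELAY field
     * in PP_CONTROL.
     */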
1097static void
1098intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1099{
1100	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1101	u32 pp_on, pp_off, pp_ctl;
1102	struct pps_registers regs;
1103
1104	intel_pps_get_registers(intel_dp, &regs);
1105
1106	pp_ctl = ilk_get_pp_control(intel_dp);
1107
1108	/* Ensure PPS is unlocked */
1109	if (!HAS_DDI(dev_priv))
1110		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1111
1112	pp_on = intel_de_read(dev_priv, regs.pp_on);
1113	pp_off = intel_de_read(dev_priv, regs.pp_off);
1114
1115	/* Pull timing values out of registers */
1116	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1117	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1118	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1119	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1120
1121	if (i915_mmio_reg_valid(regs.pp_div)) {
1122		u32 pp_div;
1123
1124		pp_div = intel_de_read(dev_priv, regs.pp_div);
1125
1126		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1127	} else {
1128		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1129	}
1130}
1131
1132static void
1133intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
 
1134{
1135	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1136		      state_name,
1137		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1138}
1139
1140static void
1141intel_pps_verify_state(struct intel_dp *intel_dp)
1142{
 
1143	struct edp_power_seq hw;
1144	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1145
1146	intel_pps_readout_hw_state(intel_dp, &hw);
1147
1148	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1149	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1150		DRM_ERROR("PPS state mismatch\n");
1151		intel_pps_dump_state("sw", sw);
1152		intel_pps_dump_state("hw", &hw);
1153	}
1154}
1155
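    /*
     * Compute the software copy of the panel power sequencing delays once
     * per connector: use the max of the current hardware values and the
     * VBT, falling back to the eDP spec limits when both are zero. T8/T9
     * are then overridden to 1 because the driver does those waits
     * manually, and T11_T12 is rounded up to the hardware's 100 ms
     * granularity.
     */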
1156static void pps_init_delays(struct intel_dp *intel_dp)
1157{
1158	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1159	struct edp_power_seq cur, vbt, spec,
1160		*final = &intel_dp->pps.pps_delays;
1161
1162	lockdep_assert_held(&dev_priv->pps_mutex);
1163
1164	/* already initialized? */
1165	if (final->t11_t12 != 0)
1166		return;
1167
1168	intel_pps_readout_hw_state(intel_dp, &cur);
1169
1170	intel_pps_dump_state("cur", &cur);
 
1171
1172	vbt = dev_priv->vbt.edp.pps;
1173	/* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
1174	 * of 500ms appears to be too short. Occasionally the panel
1175	 * just fails to power back on. Increasing the delay to 800ms
1176	 * seems sufficient to avoid this problem.
1177	 */
1178	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
1179		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
1180		drm_dbg_kms(&dev_priv->drm,
1181			    "Increasing T12 panel delay as per the quirk to %d\n",
1182			    vbt.t11_t12);
1183	}
 
1184	/* T11_T12 delay is special and actually in units of 100ms, but zero
1185	 * based in the hw (so we need to add 100 ms). But the sw vbt
1186	 * table multiplies it by 1000 to make it in units of 100usec,
1187	 * too. */
1188	vbt.t11_t12 += 100 * 10;
1189
1190	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
1191	 * our hw here, which are all in 100usec. */
1192	spec.t1_t3 = 210 * 10;
1193	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
1194	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
1195	spec.t10 = 500 * 10;
1196	/* This one is special and actually in units of 100ms, but zero
1197	 * based in the hw (so we need to add 100 ms). But the sw vbt
1198	 * table multiplies it by 1000 to make it in units of 100usec,
1199	 * too. */
1200	spec.t11_t12 = (510 + 100) * 10;
1201
1202	intel_pps_dump_state("vbt", &vbt);
1203
1204	/* Use the max of the register settings and vbt. If both are
1205	 * unset, fall back to the spec limits. */
1206#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
1207				       spec.field : \
1208				       max(cur.field, vbt.field))
1209	assign_final(t1_t3);
1210	assign_final(t8);
1211	assign_final(t9);
1212	assign_final(t10);
1213	assign_final(t11_t12);
1214#undef assign_final
1215
1216#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
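    	/* e.g. the spec t1_t3 of 210 * 10 (in 100 usec units) becomes a 210 ms panel_power_up_delay */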
1217	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
1218	intel_dp->pps.backlight_on_delay = get_delay(t8);
1219	intel_dp->pps.backlight_off_delay = get_delay(t9);
1220	intel_dp->pps.panel_power_down_delay = get_delay(t10);
1221	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
1222#undef get_delay
1223
1224	drm_dbg_kms(&dev_priv->drm,
1225		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1226		    intel_dp->pps.panel_power_up_delay,
1227		    intel_dp->pps.panel_power_down_delay,
1228		    intel_dp->pps.panel_power_cycle_delay);
1229
1230	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
1231		    intel_dp->pps.backlight_on_delay,
1232		    intel_dp->pps.backlight_off_delay);
1233
1234	/*
1235	 * We override the HW backlight delays to 1 because we do manual waits
1236	 * on them. For T8, even BSpec recommends doing it. For T9, if we
1237	 * don't do this, we'll end up waiting for the backlight off delay
1238	 * twice: once when we do the manual sleep, and once when we disable
1239	 * the panel and wait for the PP_STATUS bit to become zero.
1240	 */
1241	final->t8 = 1;
1242	final->t9 = 1;
1243
1244	/*
1245	 * HW has only a 100msec granularity for t11_t12 so round it up
1246	 * accordingly.
1247	 */
1248	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
1249}
1250
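    /*
     * Program the hardware from the software delay state: T1/T3 and T8 go
     * into PP_ON_DELAYS, T9/T10 into PP_OFF_DELAYS, and the power cycle
     * delay into PP_DIVISOR (or PP_CONTROL on BXT/GLK and CNP+), together
     * with the port select bits on platforms that have them. Optionally
     * force VDD off first so power domain tracking stays sane.
     */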
1251static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1252{
1253	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
1254	u32 pp_on, pp_off, port_sel = 0;
1255	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
1256	struct pps_registers regs;
1257	enum port port = dp_to_dig_port(intel_dp)->base.port;
1258	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
1259
1260	lockdep_assert_held(&dev_priv->pps_mutex);
1261
1262	intel_pps_get_registers(intel_dp, &regs);
1263
1264	/*
1265	 * On some VLV machines the BIOS can leave the VDD
1266	 * enabled even on power sequencers which aren't
1267	 * hooked up to any port. This would mess up the
1268	 * power domain tracking the first time we pick
1269	 * one of these power sequencers for use since
1270	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
1271	 * already on and therefore wouldn't grab the power
1272	 * domain reference. Disable VDD first to avoid this.
1273	 * This also avoids spuriously turning the VDD on as
1274	 * soon as the new power sequencer gets initialized.
1275	 */
1276	if (force_disable_vdd) {
1277		u32 pp = ilk_get_pp_control(intel_dp);
1278
1279		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
1280			 "Panel power already on\n");
1281
1282		if (pp & EDP_FORCE_VDD)
1283			drm_dbg_kms(&dev_priv->drm,
1284				    "VDD already on, disabling first\n");
1285
1286		pp &= ~EDP_FORCE_VDD;
1287
1288		intel_de_write(dev_priv, regs.pp_ctrl, pp);
1289	}
1290
1291	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
1292		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
1293	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
1294		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
1295
1296	/* Haswell doesn't have any port selection bits for the panel
1297	 * power sequencer any more. */
1298	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1299		port_sel = PANEL_PORT_SELECT_VLV(port);
1300	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
1301		switch (port) {
1302		case PORT_A:
1303			port_sel = PANEL_PORT_SELECT_DPA;
1304			break;
1305		case PORT_C:
1306			port_sel = PANEL_PORT_SELECT_DPC;
1307			break;
1308		case PORT_D:
1309			port_sel = PANEL_PORT_SELECT_DPD;
1310			break;
1311		default:
1312			MISSING_CASE(port);
1313			break;
1314		}
1315	}
1316
1317	pp_on |= port_sel;
1318
1319	intel_de_write(dev_priv, regs.pp_on, pp_on);
1320	intel_de_write(dev_priv, regs.pp_off, pp_off);
1321
1322	/*
1323	 * Compute the divisor for the pp clock, simply match the Bspec formula.
1324	 */
1325	if (i915_mmio_reg_valid(regs.pp_div)) {
1326		intel_de_write(dev_priv, regs.pp_div,
1327			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
1328	} else {
1329		u32 pp_ctl;
1330
1331		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
1332		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
1333		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
1334		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1335	}
1336
1337	drm_dbg_kms(&dev_priv->drm,
1338		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1339		    intel_de_read(dev_priv, regs.pp_on),
1340		    intel_de_read(dev_priv, regs.pp_off),
1341		    i915_mmio_reg_valid(regs.pp_div) ?
1342		    intel_de_read(dev_priv, regs.pp_div) :
1343		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1344}
1345
1346void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1347{
1348	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1349	intel_wakeref_t wakeref;
1350
1351	if (!intel_dp_is_edp(intel_dp))
1352		return;
1353
1354	with_intel_pps_lock(intel_dp, wakeref) {
1355		/*
1356		 * Reinit the power sequencer also on the resume path, in case
1357		 * BIOS did something nasty with it.
1358		 */
1359		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1360			vlv_initial_power_sequencer_setup(intel_dp);
1361
1362		pps_init_delays(intel_dp);
1363		pps_init_registers(intel_dp, false);
 
1364
1365		intel_pps_vdd_sanitize(intel_dp);
 
1366	}
1367}
1368
1369void intel_pps_init(struct intel_dp *intel_dp)
1370{
1371	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1372
1373	pps_init_timestamps(intel_dp);
1374
1375	intel_pps_encoder_reset(intel_dp);
1376}
1377
1378void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1379{
1380	int pps_num;
1381	int pps_idx;
1382
1383	if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
1384		return;
1385	/*
1386	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1387	 * everywhere where registers can be write protected.
1388	 */
1389	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1390		pps_num = 2;
1391	else
1392		pps_num = 1;
 
1393
1394	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
1395		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
1396
1397		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
1398		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
1399	}
1400}
 
1401
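    /*
     * Pick the MMIO block the PPS registers live in for this platform: the
     * PCH block on PCH split platforms and BXT/GLK, the VLV block on
     * VLV/CHV, and the CPU block otherwise.
     */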
1402void intel_pps_setup(struct drm_i915_private *i915)
1403{
1404	if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
1405		i915->pps_mmio_base = PCH_PPS_BASE;
1406	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1407		i915->pps_mmio_base = VLV_PPS_BASE;
1408	else
1409		i915->pps_mmio_base = PPS_BASE;
1410}
v6.13.7
   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include <linux/debugfs.h>
   7
   8#include "g4x_dp.h"
   9#include "i915_drv.h"
  10#include "i915_reg.h"
  11#include "intel_de.h"
  12#include "intel_display_power_well.h"
  13#include "intel_display_types.h"
  14#include "intel_dp.h"
  15#include "intel_dpio_phy.h"
  16#include "intel_dpll.h"
  17#include "intel_lvds.h"
  18#include "intel_lvds_regs.h"
  19#include "intel_pps.h"
  20#include "intel_pps_regs.h"
  21#include "intel_quirks.h"
  22
  23static void vlv_steal_power_sequencer(struct intel_display *display,
  24				      enum pipe pipe);
  25
  26static void pps_init_delays(struct intel_dp *intel_dp);
  27static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
  28
  29static const char *pps_name(struct intel_dp *intel_dp)
  30{
  31	struct intel_display *display = to_intel_display(intel_dp);
  32	struct intel_pps *pps = &intel_dp->pps;
  33
  34	if (display->platform.valleyview || display->platform.cherryview) {
  35		switch (pps->vlv_pps_pipe) {
  36		case INVALID_PIPE:
  37			/*
  38			 * FIXME would be nice if we can guarantee
  39			 * to always have a valid PPS when calling this.
  40			 */
  41			return "PPS <none>";
  42		case PIPE_A:
  43			return "PPS A";
  44		case PIPE_B:
  45			return "PPS B";
  46		default:
  47			MISSING_CASE(pps->vlv_pps_pipe);
  48			break;
  49		}
  50	} else {
  51		switch (pps->pps_idx) {
  52		case 0:
  53			return "PPS 0";
  54		case 1:
  55			return "PPS 1";
  56		default:
  57			MISSING_CASE(pps->pps_idx);
  58			break;
  59		}
  60	}
  61
  62	return "PPS <invalid>";
  63}
  64
  65intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
  66{
  67	struct intel_display *display = to_intel_display(intel_dp);
  68	struct drm_i915_private *dev_priv = to_i915(display->drm);
  69	intel_wakeref_t wakeref;
  70
  71	/*
  72	 * See vlv_pps_reset_all() why we need a power domain reference here.
  73	 */
  74	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
  75	mutex_lock(&display->pps.mutex);
  76
  77	return wakeref;
  78}
  79
  80intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
  81				 intel_wakeref_t wakeref)
  82{
  83	struct intel_display *display = to_intel_display(intel_dp);
  84	struct drm_i915_private *dev_priv = to_i915(display->drm);
  85
  86	mutex_unlock(&display->pps.mutex);
  87	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
  88
  89	return NULL;
  90}
  91
  92static void
  93vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  94{
  95	struct intel_display *display = to_intel_display(intel_dp);
  96	struct drm_i915_private *dev_priv = to_i915(display->drm);
  97	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  98	enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
  99	bool pll_enabled, release_cl_override = false;
 100	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
 101	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
 102	u32 DP;
 103
 104	if (drm_WARN(display->drm,
 105		     intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
 106		     "skipping %s kick due to [ENCODER:%d:%s] being active\n",
 107		     pps_name(intel_dp),
 108		     dig_port->base.base.base.id, dig_port->base.base.name))
 109		return;
 110
 111	drm_dbg_kms(display->drm,
 112		    "kicking %s for [ENCODER:%d:%s]\n",
 113		    pps_name(intel_dp),
 114		    dig_port->base.base.base.id, dig_port->base.base.name);
 115
 116	/* Preserve the BIOS-computed detected bit. This is
 117	 * supposed to be read-only.
 118	 */
 119	DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
 120	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 121	DP |= DP_PORT_WIDTH(1);
 122	DP |= DP_LINK_TRAIN_PAT_1;
 123
 124	if (display->platform.cherryview)
 125		DP |= DP_PIPE_SEL_CHV(pipe);
 126	else
 127		DP |= DP_PIPE_SEL(pipe);
 128
 129	pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
 130
 131	/*
 132	 * The DPLL for the pipe must be enabled for this to work.
 133	 * So enable it temporarily if it's not already enabled.
 134	 */
 135	if (!pll_enabled) {
 136		release_cl_override = display->platform.cherryview &&
 137			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
 138
 139		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
 140			drm_err(display->drm,
 141				"Failed to force on PLL for pipe %c!\n",
 142				pipe_name(pipe));
 143			return;
 144		}
 145	}
 146
 147	/*
 148	 * Similar magic as in intel_dp_enable_port().
 149	 * We _must_ do this port enable + disable trick
 150	 * to make this power sequencer lock onto the port.
 151	 * Otherwise even VDD force bit won't work.
 152	 */
 153	intel_de_write(display, intel_dp->output_reg, DP);
 154	intel_de_posting_read(display, intel_dp->output_reg);
 155
 156	intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
 157	intel_de_posting_read(display, intel_dp->output_reg);
 158
 159	intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
 160	intel_de_posting_read(display, intel_dp->output_reg);
 161
 162	if (!pll_enabled) {
 163		vlv_force_pll_off(dev_priv, pipe);
 164
 165		if (release_cl_override)
 166			chv_phy_powergate_ch(dev_priv, phy, ch, false);
 167	}
 168}
 169
 170static enum pipe vlv_find_free_pps(struct intel_display *display)
 171{
 172	struct intel_encoder *encoder;
 173	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
 174
 175	/*
 176	 * We don't have a power sequencer currently.
 177	 * Pick one that's not used by other ports.
 178	 */
 179	for_each_intel_dp(display->drm, encoder) {
 180		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 181
 182		if (encoder->type == INTEL_OUTPUT_EDP) {
 183			drm_WARN_ON(display->drm,
 184				    intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
 185				    intel_dp->pps.vlv_active_pipe !=
 186				    intel_dp->pps.vlv_pps_pipe);
 187
 188			if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
 189				pipes &= ~(1 << intel_dp->pps.vlv_pps_pipe);
 190		} else {
 191			drm_WARN_ON(display->drm,
 192				    intel_dp->pps.vlv_pps_pipe != INVALID_PIPE);
 193
 194			if (intel_dp->pps.vlv_active_pipe != INVALID_PIPE)
 195				pipes &= ~(1 << intel_dp->pps.vlv_active_pipe);
 196		}
 197	}
 198
 199	if (pipes == 0)
 200		return INVALID_PIPE;
 201
 202	return ffs(pipes) - 1;
 203}
 204
 205static enum pipe
 206vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 207{
 208	struct intel_display *display = to_intel_display(intel_dp);
 209	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 210	enum pipe pipe;
 211
 212	lockdep_assert_held(&display->pps.mutex);
 213
 214	/* We should never land here with regular DP ports */
 215	drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
 216
 217	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
 218		    intel_dp->pps.vlv_active_pipe != intel_dp->pps.vlv_pps_pipe);
 219
 220	if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
 221		return intel_dp->pps.vlv_pps_pipe;
 222
 223	pipe = vlv_find_free_pps(display);
 224
 225	/*
 226	 * Didn't find one. This should not happen since there
 227	 * are two power sequencers and up to two eDP ports.
 228	 */
 229	if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
 230		pipe = PIPE_A;
 231
 232	vlv_steal_power_sequencer(display, pipe);
 233	intel_dp->pps.vlv_pps_pipe = pipe;
 234
 235	drm_dbg_kms(display->drm,
 236		    "picked %s for [ENCODER:%d:%s]\n",
 237		    pps_name(intel_dp),
 238		    dig_port->base.base.base.id, dig_port->base.base.name);
 
 239
 240	/* init power sequencer on this pipe and port */
 241	pps_init_delays(intel_dp);
 242	pps_init_registers(intel_dp, true);
 243
 244	/*
 245	 * Even vdd force doesn't work until we've made
 246	 * the power sequencer lock in on the port.
 247	 */
 248	vlv_power_sequencer_kick(intel_dp);
 249
 250	return intel_dp->pps.vlv_pps_pipe;
 251}
 252
 253static int
 254bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 255{
 256	struct intel_display *display = to_intel_display(intel_dp);
 257	int pps_idx = intel_dp->pps.pps_idx;
 258
 259	lockdep_assert_held(&display->pps.mutex);
 260
 261	/* We should never land here with regular DP ports */
 262	drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
 263
 264	if (!intel_dp->pps.bxt_pps_reset)
 265		return pps_idx;
 266
 267	intel_dp->pps.bxt_pps_reset = false;
 268
 269	/*
 270	 * Only the HW needs to be reprogrammed, the SW state is fixed and
 271	 * has been setup during connector init.
 272	 */
 273	pps_init_registers(intel_dp, false);
 274
 275	return pps_idx;
 276}
 277
 278typedef bool (*pps_check)(struct intel_display *display, int pps_idx);
 
 279
 280static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
 281{
 282	return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
 283}
 284
 285static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
 286{
 287	return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
 288}
 289
 290static bool pps_any(struct intel_display *display, int pps_idx)
 291{
 292	return true;
 293}
 294
 295static enum pipe
 296vlv_initial_pps_pipe(struct intel_display *display,
 297		     enum port port, pps_check check)
 298{
 299	enum pipe pipe;
 300
 301	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
 302		u32 port_sel = intel_de_read(display,
 303					     PP_ON_DELAYS(display, pipe)) &
 304			PANEL_PORT_SELECT_MASK;
 305
 306		if (port_sel != PANEL_PORT_SELECT_VLV(port))
 307			continue;
 308
 309		if (!check(display, pipe))
 310			continue;
 311
 312		return pipe;
 313	}
 314
 315	return INVALID_PIPE;
 316}
 317
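/*
 * Figure out which pipe's power sequencer the BIOS left hooked up to
 * this eDP port, preferring one that already has panel power on, then
 * one with VDD forced on, then any with the matching port select.
 */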
 318static void
 319vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 320{
 321	struct intel_display *display = to_intel_display(intel_dp);
 322	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 323	enum port port = dig_port->base.port;
 324
 325	lockdep_assert_held(&display->pps.mutex);
 326
 327	/* try to find a pipe with this port selected */
 328	/* first pick one where the panel is on */
 329	intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
 330							  pps_has_pp_on);
 331	/* didn't find one? pick one where vdd is on */
 332	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
 333		intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
 334								  pps_has_vdd_on);
 335	/* didn't find one? pick one with just the correct port */
 336	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
 337		intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
 338								  pps_any);
 339
 340	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
 341	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) {
 342		drm_dbg_kms(display->drm,
 343			    "[ENCODER:%d:%s] no initial power sequencer\n",
 344			    dig_port->base.base.base.id, dig_port->base.base.name);
 345		return;
 346	}
 347
 348	drm_dbg_kms(display->drm,
 349		    "[ENCODER:%d:%s] initial power sequencer: %s\n",
 350		    dig_port->base.base.base.id, dig_port->base.base.name,
 351		    pps_name(intel_dp));
 352}
 353
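/*
 * Number of power sequencer instances provided by the platform/PCH.
 * Note the >= PCH checks overlap, so they must stay ordered from
 * newest to oldest.
 */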
 354static int intel_num_pps(struct intel_display *display)
 355{
 356	struct drm_i915_private *i915 = to_i915(display->drm);
 357
 358	if (display->platform.valleyview || display->platform.cherryview)
 359		return 2;
 360
 361	if (display->platform.geminilake || display->platform.broxton)
 362		return 2;
 363
 364	if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
 365		return 2;
 366
 367	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
 368		return 1;
 369
 370	if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
 371		return 2;
 372
 373	return 1;
 374}
 375
 376static bool intel_pps_is_valid(struct intel_dp *intel_dp)
 377{
 378	struct intel_display *display = to_intel_display(intel_dp);
 379	struct drm_i915_private *i915 = to_i915(display->drm);
 380
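	/*
	 * On ICP..ADP the second PPS instance is only reported as valid
	 * when SOUTH_CHICKEN1 selects its I/O (ICP_SECOND_PPS_IO_SELECT).
	 */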
 381	if (intel_dp->pps.pps_idx == 1 &&
 382	    INTEL_PCH_TYPE(i915) >= PCH_ICP &&
 383	    INTEL_PCH_TYPE(i915) <= PCH_ADP)
 384		return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
 385
 386	return true;
 387}
 388
 389static int
 390bxt_initial_pps_idx(struct intel_display *display, pps_check check)
 391{
 392	int pps_idx, pps_num = intel_num_pps(display);
 393
 394	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
 395		if (check(display, pps_idx))
 396			return pps_idx;
 397	}
 398
 399	return -1;
 400}
 401
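/*
 * Pick the initial power sequencer for this port: from the hardware
 * readout on VLV/CHV, otherwise from the VBT backlight controller with
 * a hardware-state fallback. Returns whether the chosen PPS is usable.
 */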
 402static bool
 403pps_initial_setup(struct intel_dp *intel_dp)
 404{
 405	struct intel_display *display = to_intel_display(intel_dp);
 406	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 407	struct intel_connector *connector = intel_dp->attached_connector;
 408
 409	lockdep_assert_held(&display->pps.mutex);
 410
 411	if (display->platform.valleyview || display->platform.cherryview) {
 412		vlv_initial_power_sequencer_setup(intel_dp);
 413		return true;
 414	}
 415
 416	/* first ask the VBT */
 417	if (intel_num_pps(display) > 1)
 418		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
 419	else
 420		intel_dp->pps.pps_idx = 0;
 421
 422	if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
 423		intel_dp->pps.pps_idx = -1;
 424
 425	/* VBT wasn't parsed yet? pick one where the panel is on */
 426	if (intel_dp->pps.pps_idx < 0)
 427		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
 428	/* didn't find one? pick one where vdd is on */
 429	if (intel_dp->pps.pps_idx < 0)
 430		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
 431	/* didn't find one? pick any */
 432	if (intel_dp->pps.pps_idx < 0) {
 433		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);
 434
 435		drm_dbg_kms(display->drm,
 436			    "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
 437			    encoder->base.base.id, encoder->base.name,
 438			    pps_name(intel_dp));
 439	} else {
 440		drm_dbg_kms(display->drm,
 441			    "[ENCODER:%d:%s] initial power sequencer: %s\n",
 442			    encoder->base.base.id, encoder->base.name,
 443			    pps_name(intel_dp));
 444	}
 445
 446	return intel_pps_is_valid(intel_dp);
 447}
 448
 449void vlv_pps_reset_all(struct intel_display *display)
 450{
 451	struct intel_encoder *encoder;
 452
 453	if (!HAS_DISPLAY(display))
 454		return;
 455
 456	/*
 457	 * We can't grab pps_mutex here due to deadlock with power_domain
 458	 * mutex when power_domain functions are called while holding pps_mutex.
 459	 * That also means that in order to use vlv_pps_pipe the code needs to
 460	 * hold both a power domain reference and pps_mutex, and the power domain
 461	 * reference get/put must be done while _not_ holding pps_mutex.
  462	 * pps_{lock,unlock}() do these steps in the correct order, so they
  463	 * should always be used.
 464	 */
 465
 466	for_each_intel_dp(display->drm, encoder) {
 467		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 468
 469		drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
 470
 471		if (encoder->type == INTEL_OUTPUT_EDP)
 472			intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
 473	}
 474}
 475
 476void bxt_pps_reset_all(struct intel_display *display)
 477{
 478	struct intel_encoder *encoder;
 479
 480	if (!HAS_DISPLAY(display))
 481		return;
 482
 483	/* See vlv_pps_reset_all() for why we can't grab pps_mutex here. */
 484
 485	for_each_intel_dp(display->drm, encoder) {
 486		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 487
 488		if (encoder->type == INTEL_OUTPUT_EDP)
 489			intel_dp->pps.bxt_pps_reset = true;
 490	}
 491}
 492
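/*
 * Register offsets for one PPS instance, resolved by
 * intel_pps_get_registers() based on the platform and the power
 * sequencer currently assigned to the port.
 */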
 493struct pps_registers {
 494	i915_reg_t pp_ctrl;
 495	i915_reg_t pp_stat;
 496	i915_reg_t pp_on;
 497	i915_reg_t pp_off;
 498	i915_reg_t pp_div;
 499};
 500
 501static void intel_pps_get_registers(struct intel_dp *intel_dp,
 502				    struct pps_registers *regs)
 503{
 504	struct intel_display *display = to_intel_display(intel_dp);
 505	struct drm_i915_private *dev_priv = to_i915(display->drm);
 506	int pps_idx;
 507
 508	memset(regs, 0, sizeof(*regs));
 509
 510	if (display->platform.valleyview || display->platform.cherryview)
 511		pps_idx = vlv_power_sequencer_pipe(intel_dp);
 512	else if (display->platform.geminilake || display->platform.broxton)
 513		pps_idx = bxt_power_sequencer_idx(intel_dp);
 514	else
 515		pps_idx = intel_dp->pps.pps_idx;
 516
 517	regs->pp_ctrl = PP_CONTROL(display, pps_idx);
 518	regs->pp_stat = PP_STATUS(display, pps_idx);
 519	regs->pp_on = PP_ON_DELAYS(display, pps_idx);
 520	regs->pp_off = PP_OFF_DELAYS(display, pps_idx);
 521
 522	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
 523	if (display->platform.geminilake || display->platform.broxton ||
 524	    INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
 525		regs->pp_div = INVALID_MMIO_REG;
 526	else
 527		regs->pp_div = PP_DIVISOR(display, pps_idx);
 528}
 529
 530static i915_reg_t
 531_pp_ctrl_reg(struct intel_dp *intel_dp)
 532{
 533	struct pps_registers regs;
 534
 535	intel_pps_get_registers(intel_dp, &regs);
 536
 537	return regs.pp_ctrl;
 538}
 539
 540static i915_reg_t
 541_pp_stat_reg(struct intel_dp *intel_dp)
 542{
 543	struct pps_registers regs;
 544
 545	intel_pps_get_registers(intel_dp, &regs);
 546
 547	return regs.pp_stat;
 548}
 549
 550static bool edp_have_panel_power(struct intel_dp *intel_dp)
 551{
 552	struct intel_display *display = to_intel_display(intel_dp);
 553
 554	lockdep_assert_held(&display->pps.mutex);
 555
 556	if ((display->platform.valleyview || display->platform.cherryview) &&
 557	    intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
 558		return false;
 559
 560	return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
 561}
 562
 563static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 564{
 565	struct intel_display *display = to_intel_display(intel_dp);
 566
 567	lockdep_assert_held(&display->pps.mutex);
 568
 569	if ((display->platform.valleyview || display->platform.cherryview) &&
 570	    intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
 571		return false;
 572
 573	return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 574}
 575
 576void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
 577{
 578	struct intel_display *display = to_intel_display(intel_dp);
 579	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 580
 581	if (!intel_dp_is_edp(intel_dp))
 582		return;
 583
 584	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
 585		drm_WARN(display->drm, 1,
 586			 "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
 587			 dig_port->base.base.base.id, dig_port->base.base.name,
 588			 pps_name(intel_dp));
 589		drm_dbg_kms(display->drm,
 590			    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 591			    dig_port->base.base.base.id, dig_port->base.base.name,
 592			    pps_name(intel_dp),
 593			    intel_de_read(display, _pp_stat_reg(intel_dp)),
 594			    intel_de_read(display, _pp_ctrl_reg(intel_dp)));
 595	}
 596}
 597
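/*
 * PP_STATUS mask/value pairs passed to wait_panel_status() to wait for
 * the panel to reach the "on", "off" and "power cycle done" idle states.
 */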
 598#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
 599#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
 600
 601#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
 602#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
 603
 604#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 605#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 606
 607static void intel_pps_verify_state(struct intel_dp *intel_dp);
 608
 609static void wait_panel_status(struct intel_dp *intel_dp,
 610			      u32 mask, u32 value)
 611{
 612	struct intel_display *display = to_intel_display(intel_dp);
 613	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 614	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 615
 616	lockdep_assert_held(&display->pps.mutex);
 617
 618	intel_pps_verify_state(intel_dp);
 619
 620	pp_stat_reg = _pp_stat_reg(intel_dp);
 621	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 622
 623	drm_dbg_kms(display->drm,
 624		    "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 625		    dig_port->base.base.base.id, dig_port->base.base.name,
 626		    pps_name(intel_dp),
 627		    mask, value,
 628		    intel_de_read(display, pp_stat_reg),
 629		    intel_de_read(display, pp_ctrl_reg));
 630
 631	if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
 632		drm_err(display->drm,
 633			"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 634			dig_port->base.base.base.id, dig_port->base.base.name,
 635			pps_name(intel_dp),
 636			intel_de_read(display, pp_stat_reg),
 637			intel_de_read(display, pp_ctrl_reg));
 638
 639	drm_dbg_kms(display->drm, "Wait complete\n");
 640}
 641
 642static void wait_panel_on(struct intel_dp *intel_dp)
 643{
 644	struct intel_display *display = to_intel_display(intel_dp);
 645	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 646
 647	drm_dbg_kms(display->drm,
 648		    "[ENCODER:%d:%s] %s wait for panel power on\n",
 649		    dig_port->base.base.base.id, dig_port->base.base.name,
 650		    pps_name(intel_dp));
 651	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 652}
 653
 654static void wait_panel_off(struct intel_dp *intel_dp)
 655{
 656	struct intel_display *display = to_intel_display(intel_dp);
 657	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 658
 659	drm_dbg_kms(display->drm,
 660		    "[ENCODER:%d:%s] %s wait for panel power off time\n",
 661		    dig_port->base.base.base.id, dig_port->base.base.name,
 662		    pps_name(intel_dp));
 663	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
 664}
 665
 666static void wait_panel_power_cycle(struct intel_dp *intel_dp)
 667{
 668	struct intel_display *display = to_intel_display(intel_dp);
 669	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 670	ktime_t panel_power_on_time;
 671	s64 panel_power_off_duration;
 672
 673	drm_dbg_kms(display->drm,
 674		    "[ENCODER:%d:%s] %s wait for panel power cycle\n",
 675		    dig_port->base.base.base.id, dig_port->base.base.name,
 676		    pps_name(intel_dp));
 677
  678	/* Take the difference between the current time and the panel power off
  679	 * time, and then make the panel wait for t11_t12 if needed. */
 680	panel_power_on_time = ktime_get_boottime();
 681	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
 682
 683	/* When we disable the VDD override bit last we have to do the manual
 684	 * wait. */
 685	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
 686		wait_remaining_ms_from_jiffies(jiffies,
 687				       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
 688
 689	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 690}
 691
 692void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
 693{
 694	intel_wakeref_t wakeref;
 695
 696	if (!intel_dp_is_edp(intel_dp))
 697		return;
 698
 699	with_intel_pps_lock(intel_dp, wakeref)
 700		wait_panel_power_cycle(intel_dp);
 701}
 702
 703static void wait_backlight_on(struct intel_dp *intel_dp)
 704{
 705	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
 706				       intel_dp->pps.backlight_on_delay);
 707}
 708
 709static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 710{
 711	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
 712				       intel_dp->pps.backlight_off_delay);
 713}
 714
  715	/* Read the current pp_control value, unlocking the register if it
  716	 * is locked.
  717	 */
  719	static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 720{
 721	struct intel_display *display = to_intel_display(intel_dp);
 722	u32 control;
 723
 724	lockdep_assert_held(&display->pps.mutex);
 725
 726	control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
 727	if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
 728			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
 729		control &= ~PANEL_UNLOCK_MASK;
 730		control |= PANEL_UNLOCK_REGS;
 731	}
 732	return control;
 733}
 734
 735/*
 736 * Must be paired with intel_pps_vdd_off_unlocked().
 737 * Must hold pps_mutex around the whole on/off sequence.
 738 * Can be nested with intel_pps_vdd_{on,off}() calls.
 739 */
 740bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
 741{
 742	struct intel_display *display = to_intel_display(intel_dp);
 743	struct drm_i915_private *dev_priv = to_i915(display->drm);
 744	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 745	u32 pp;
 746	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 747	bool need_to_disable = !intel_dp->pps.want_panel_vdd;
 748
 749	lockdep_assert_held(&display->pps.mutex);
 750
 751	if (!intel_dp_is_edp(intel_dp))
 752		return false;
 753
 754	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
 755	intel_dp->pps.want_panel_vdd = true;
 756
 757	if (edp_have_panel_vdd(intel_dp))
 758		return need_to_disable;
 759
 760	drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
 761	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
 762							    intel_aux_power_domain(dig_port));
 763
 764	pp_stat_reg = _pp_stat_reg(intel_dp);
 765	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 766
 767	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
 768		    dig_port->base.base.base.id, dig_port->base.base.name,
 769		    pps_name(intel_dp));
 770
 771	if (!edp_have_panel_power(intel_dp))
 772		wait_panel_power_cycle(intel_dp);
 773
 774	pp = ilk_get_pp_control(intel_dp);
 775	pp |= EDP_FORCE_VDD;
 776
 777	intel_de_write(display, pp_ctrl_reg, pp);
 778	intel_de_posting_read(display, pp_ctrl_reg);
 779	drm_dbg_kms(display->drm,
 780		    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 781		    dig_port->base.base.base.id, dig_port->base.base.name,
 782		    pps_name(intel_dp),
 783		    intel_de_read(display, pp_stat_reg),
 784		    intel_de_read(display, pp_ctrl_reg));
 785	/*
 786	 * If the panel wasn't on, delay before accessing aux channel
 787	 */
 788	if (!edp_have_panel_power(intel_dp)) {
 789		drm_dbg_kms(display->drm,
 790			    "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
 791			    dig_port->base.base.base.id, dig_port->base.base.name,
 792			    pps_name(intel_dp));
 793		msleep(intel_dp->pps.panel_power_up_delay);
 794	}
 795
 796	return need_to_disable;
 797}
 798
 799/*
 800 * Must be paired with intel_pps_vdd_off() or - to disable
 801 * both VDD and panel power - intel_pps_off().
 802 * Nested calls to these functions are not allowed since
 803 * we drop the lock. Caller must use some higher level
 804 * locking to prevent nested calls from other threads.
 805 */
 806void intel_pps_vdd_on(struct intel_dp *intel_dp)
 807{
 808	struct intel_display *display = to_intel_display(intel_dp);
 809	intel_wakeref_t wakeref;
 810	bool vdd;
 811
 812	if (!intel_dp_is_edp(intel_dp))
 813		return;
 814
 815	vdd = false;
 816	with_intel_pps_lock(intel_dp, wakeref)
 817		vdd = intel_pps_vdd_on_unlocked(intel_dp);
 818	INTEL_DISPLAY_STATE_WARN(display, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
 819				 dp_to_dig_port(intel_dp)->base.base.base.id,
 820				 dp_to_dig_port(intel_dp)->base.base.name,
 821				 pps_name(intel_dp));
 822}
 823
 824static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
 825{
 826	struct intel_display *display = to_intel_display(intel_dp);
 827	struct drm_i915_private *dev_priv = to_i915(display->drm);
 828	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 829	u32 pp;
 830	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 831
 832	lockdep_assert_held(&display->pps.mutex);
 833
 834	drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);
 835
 836	if (!edp_have_panel_vdd(intel_dp))
 837		return;
 838
 839	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
 840		    dig_port->base.base.base.id, dig_port->base.base.name,
 841		    pps_name(intel_dp));
 842
 843	pp = ilk_get_pp_control(intel_dp);
 844	pp &= ~EDP_FORCE_VDD;
 845
 846	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 847	pp_stat_reg = _pp_stat_reg(intel_dp);
 848
 849	intel_de_write(display, pp_ctrl_reg, pp);
 850	intel_de_posting_read(display, pp_ctrl_reg);
 851
 852	/* Make sure sequencer is idle before allowing subsequent activity */
 853	drm_dbg_kms(display->drm,
 854		    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 855		    dig_port->base.base.base.id, dig_port->base.base.name,
 856		    pps_name(intel_dp),
 857		    intel_de_read(display, pp_stat_reg),
 858		    intel_de_read(display, pp_ctrl_reg));
 859
 860	if ((pp & PANEL_POWER_ON) == 0) {
 861		intel_dp->pps.panel_power_off_time = ktime_get_boottime();
 862		intel_dp_invalidate_source_oui(intel_dp);
 863	}
 864
 865	intel_display_power_put(dev_priv,
 866				intel_aux_power_domain(dig_port),
 867				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
 868}
 869
 870void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
 871{
 872	intel_wakeref_t wakeref;
 873
 874	if (!intel_dp_is_edp(intel_dp))
 875		return;
 876
 877	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
 878	/*
 879	 * vdd might still be enabled due to the delayed vdd off.
 880	 * Make sure vdd is actually turned off here.
 881	 */
 882	with_intel_pps_lock(intel_dp, wakeref)
 883		intel_pps_vdd_off_sync_unlocked(intel_dp);
 884}
 885
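/* Delayed work that drops the forced VDD once nobody has asked to keep it on. */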
 886static void edp_panel_vdd_work(struct work_struct *__work)
 887{
 888	struct intel_pps *pps = container_of(to_delayed_work(__work),
 889					     struct intel_pps, panel_vdd_work);
 890	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
 891	intel_wakeref_t wakeref;
 892
 893	with_intel_pps_lock(intel_dp, wakeref) {
 894		if (!intel_dp->pps.want_panel_vdd)
 895			intel_pps_vdd_off_sync_unlocked(intel_dp);
 896	}
 897}
 898
 899static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
 900{
 901	struct intel_display *display = to_intel_display(intel_dp);
 902	struct drm_i915_private *i915 = to_i915(display->drm);
 903	unsigned long delay;
 904
 905	/*
 906	 * We may not yet know the real power sequencing delays,
 907	 * so keep VDD enabled until we're done with init.
 908	 */
 909	if (intel_dp->pps.initializing)
 910		return;
 911
 912	/*
 913	 * Queue the timer to fire a long time from now (relative to the power
 914	 * down delay) to keep the panel power up across a sequence of
 915	 * operations.
 916	 */
 917	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
 918	queue_delayed_work(i915->unordered_wq,
 919			   &intel_dp->pps.panel_vdd_work, delay);
 920}
 921
 922/*
  923	 * Must be paired with intel_pps_vdd_on_unlocked().
 924 * Must hold pps_mutex around the whole on/off sequence.
 925 * Can be nested with intel_pps_vdd_{on,off}() calls.
 926 */
 927void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
 928{
 929	struct intel_display *display = to_intel_display(intel_dp);
 930
 931	lockdep_assert_held(&display->pps.mutex);
 932
 933	if (!intel_dp_is_edp(intel_dp))
 934		return;
 935
 936	INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
 937				 "[ENCODER:%d:%s] %s VDD not forced on",
 938				 dp_to_dig_port(intel_dp)->base.base.base.id,
 939				 dp_to_dig_port(intel_dp)->base.base.name,
 940				 pps_name(intel_dp));
 941
 942	intel_dp->pps.want_panel_vdd = false;
 943
 944	if (sync)
 945		intel_pps_vdd_off_sync_unlocked(intel_dp);
 946	else
 947		edp_panel_vdd_schedule_off(intel_dp);
 948}
 949
 950void intel_pps_vdd_off(struct intel_dp *intel_dp)
 951{
 952	intel_wakeref_t wakeref;
 953
 954	if (!intel_dp_is_edp(intel_dp))
 955		return;
 956
 957	with_intel_pps_lock(intel_dp, wakeref)
 958		intel_pps_vdd_off_unlocked(intel_dp, false);
 959}
 960
 961void intel_pps_on_unlocked(struct intel_dp *intel_dp)
 962{
 963	struct intel_display *display = to_intel_display(intel_dp);
 964	u32 pp;
 965	i915_reg_t pp_ctrl_reg;
 966
 967	lockdep_assert_held(&display->pps.mutex);
 968
 969	if (!intel_dp_is_edp(intel_dp))
 970		return;
 971
 972	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
 973		    dp_to_dig_port(intel_dp)->base.base.base.id,
 974		    dp_to_dig_port(intel_dp)->base.base.name,
 975		    pps_name(intel_dp));
 976
 977	if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
 978		     "[ENCODER:%d:%s] %s panel power already on\n",
 979		     dp_to_dig_port(intel_dp)->base.base.base.id,
 980		     dp_to_dig_port(intel_dp)->base.base.name,
 981		     pps_name(intel_dp)))
 982		return;
 983
 984	wait_panel_power_cycle(intel_dp);
 985
 986	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 987	pp = ilk_get_pp_control(intel_dp);
 988	if (display->platform.ironlake) {
 989		/* ILK workaround: disable reset around power sequence */
 990		pp &= ~PANEL_POWER_RESET;
 991		intel_de_write(display, pp_ctrl_reg, pp);
 992		intel_de_posting_read(display, pp_ctrl_reg);
 993	}
 994
 995	/*
 996	 * WA: 22019252566
 997	 * Disable DPLS gating around power sequence.
 998	 */
 999	if (IS_DISPLAY_VER(display, 13, 14))
1000		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
1001			     0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
1002
1003	pp |= PANEL_POWER_ON;
1004	if (!display->platform.ironlake)
1005		pp |= PANEL_POWER_RESET;
1006
1007	intel_de_write(display, pp_ctrl_reg, pp);
1008	intel_de_posting_read(display, pp_ctrl_reg);
1009
1010	wait_panel_on(intel_dp);
1011	intel_dp->pps.last_power_on = jiffies;
1012
1013	if (IS_DISPLAY_VER(display, 13, 14))
1014		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
1015			     PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);
1016
1017	if (display->platform.ironlake) {
1018		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1019		intel_de_write(display, pp_ctrl_reg, pp);
1020		intel_de_posting_read(display, pp_ctrl_reg);
1021	}
1022}
1023
1024void intel_pps_on(struct intel_dp *intel_dp)
1025{
1026	intel_wakeref_t wakeref;
1027
1028	if (!intel_dp_is_edp(intel_dp))
1029		return;
1030
1031	with_intel_pps_lock(intel_dp, wakeref)
1032		intel_pps_on_unlocked(intel_dp);
1033}
1034
1035void intel_pps_off_unlocked(struct intel_dp *intel_dp)
1036{
1037	struct intel_display *display = to_intel_display(intel_dp);
1038	struct drm_i915_private *dev_priv = to_i915(display->drm);
1039	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1040	u32 pp;
1041	i915_reg_t pp_ctrl_reg;
1042
1043	lockdep_assert_held(&display->pps.mutex);
1044
1045	if (!intel_dp_is_edp(intel_dp))
1046		return;
1047
1048	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
1049		    dig_port->base.base.base.id, dig_port->base.base.name,
1050		    pps_name(intel_dp));
1051
1052	drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
1053		 "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
1054		 dig_port->base.base.base.id, dig_port->base.base.name,
1055		 pps_name(intel_dp));
1056
1057	pp = ilk_get_pp_control(intel_dp);
 1058	/* We need to switch off panel power _and_ force vdd, otherwise some
 1059	 * panels get very unhappy and cease to work. */
1060	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1061		EDP_BLC_ENABLE);
1062
1063	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1064
1065	intel_dp->pps.want_panel_vdd = false;
1066
1067	intel_de_write(display, pp_ctrl_reg, pp);
1068	intel_de_posting_read(display, pp_ctrl_reg);
1069
1070	wait_panel_off(intel_dp);
1071	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1072
1073	intel_dp_invalidate_source_oui(intel_dp);
1074
1075	/* We got a reference when we enabled the VDD. */
1076	intel_display_power_put(dev_priv,
1077				intel_aux_power_domain(dig_port),
1078				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
1079}
1080
1081void intel_pps_off(struct intel_dp *intel_dp)
1082{
1083	intel_wakeref_t wakeref;
1084
1085	if (!intel_dp_is_edp(intel_dp))
1086		return;
1087
1088	with_intel_pps_lock(intel_dp, wakeref)
1089		intel_pps_off_unlocked(intel_dp);
1090}
1091
1092/* Enable backlight in the panel power control. */
1093void intel_pps_backlight_on(struct intel_dp *intel_dp)
1094{
1095	struct intel_display *display = to_intel_display(intel_dp);
1096	intel_wakeref_t wakeref;
1097
1098	/*
1099	 * If we enable the backlight right away following a panel power
1100	 * on, we may see slight flicker as the panel syncs with the eDP
1101	 * link.  So delay a bit to make sure the image is solid before
1102	 * allowing it to appear.
1103	 */
1104	wait_backlight_on(intel_dp);
1105
1106	with_intel_pps_lock(intel_dp, wakeref) {
1107		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1108		u32 pp;
1109
1110		pp = ilk_get_pp_control(intel_dp);
1111		pp |= EDP_BLC_ENABLE;
1112
1113		intel_de_write(display, pp_ctrl_reg, pp);
1114		intel_de_posting_read(display, pp_ctrl_reg);
1115	}
1116}
1117
1118/* Disable backlight in the panel power control. */
1119void intel_pps_backlight_off(struct intel_dp *intel_dp)
1120{
1121	struct intel_display *display = to_intel_display(intel_dp);
1122	intel_wakeref_t wakeref;
1123
1124	if (!intel_dp_is_edp(intel_dp))
1125		return;
1126
1127	with_intel_pps_lock(intel_dp, wakeref) {
1128		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1129		u32 pp;
1130
1131		pp = ilk_get_pp_control(intel_dp);
1132		pp &= ~EDP_BLC_ENABLE;
1133
1134		intel_de_write(display, pp_ctrl_reg, pp);
1135		intel_de_posting_read(display, pp_ctrl_reg);
1136	}
1137
1138	intel_dp->pps.last_backlight_off = jiffies;
1139	edp_wait_backlight_off(intel_dp);
1140}
1141
1142/*
1143 * Hook for controlling the panel power control backlight through the bl_power
1144 * sysfs attribute. Take care to handle multiple calls.
1145 */
1146void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
1147{
1148	struct intel_display *display = to_intel_display(connector);
1149	struct intel_dp *intel_dp = intel_attached_dp(connector);
1150	intel_wakeref_t wakeref;
1151	bool is_enabled;
1152
1153	is_enabled = false;
1154	with_intel_pps_lock(intel_dp, wakeref)
1155		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1156	if (is_enabled == enable)
1157		return;
1158
1159	drm_dbg_kms(display->drm, "panel power control backlight %s\n",
1160		    str_enable_disable(enable));
1161
1162	if (enable)
1163		intel_pps_backlight_on(intel_dp);
1164	else
1165		intel_pps_backlight_off(intel_dp);
1166}
1167
1168static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
1169{
1170	struct intel_display *display = to_intel_display(intel_dp);
1171	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1172	enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
1173	i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);
1174
1175	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
1176
1177	if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
1178		return;
1179
1180	intel_pps_vdd_off_sync_unlocked(intel_dp);
1181
1182	/*
1183	 * VLV seems to get confused when multiple power sequencers
1184	 * have the same port selected (even if only one has power/vdd
 1185	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 1186	 * CHV, on the other hand, doesn't seem to mind having the same port
 1187	 * selected in multiple power sequencers, but let's always clear the
 1188	 * port select when logically disconnecting a power sequencer
1189	 * from a port.
1190	 */
1191	drm_dbg_kms(display->drm,
1192		    "detaching %s from [ENCODER:%d:%s]\n",
1193		    pps_name(intel_dp),
1194		    dig_port->base.base.base.id, dig_port->base.base.name);
1195	intel_de_write(display, pp_on_reg, 0);
1196	intel_de_posting_read(display, pp_on_reg);
1197
1198	intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
1199}
1200
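/*
 * Detach the given pipe's power sequencer from whichever eDP port is
 * currently using it, so it can be taken over by a new owner.
 */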
1201static void vlv_steal_power_sequencer(struct intel_display *display,
1202				      enum pipe pipe)
1203{
1204	struct intel_encoder *encoder;
1205
1206	lockdep_assert_held(&display->pps.mutex);
1207
1208	for_each_intel_dp(display->drm, encoder) {
1209		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1210
1211		drm_WARN(display->drm, intel_dp->pps.vlv_active_pipe == pipe,
1212			 "stealing PPS %c from active [ENCODER:%d:%s]\n",
1213			 pipe_name(pipe), encoder->base.base.id,
1214			 encoder->base.name);
1215
1216		if (intel_dp->pps.vlv_pps_pipe != pipe)
1217			continue;
1218
1219		drm_dbg_kms(display->drm,
1220			    "stealing PPS %c from [ENCODER:%d:%s]\n",
1221			    pipe_name(pipe), encoder->base.base.id,
1222			    encoder->base.name);
1223
1224		/* make sure vdd is off before we steal it */
1225		vlv_detach_power_sequencer(intel_dp);
1226	}
1227}
1228
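/*
 * Return the pipe this DP port is currently driving according to the
 * port register, or INVALID_PIPE if the port is disabled.
 */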
1229static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
1230{
1231	struct intel_display *display = to_intel_display(intel_dp);
1232	struct drm_i915_private *dev_priv = to_i915(display->drm);
1233	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1234	enum pipe pipe;
1235
1236	if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
1237				encoder->port, &pipe))
1238		return pipe;
1239
1240	return INVALID_PIPE;
1241}
1242
1243/* Call on all DP, not just eDP */
1244void vlv_pps_pipe_init(struct intel_dp *intel_dp)
1245{
1246	intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
1247	intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
1248}
1249
1250/* Call on all DP, not just eDP */
1251void vlv_pps_pipe_reset(struct intel_dp *intel_dp)
1252{
1253	intel_wakeref_t wakeref;
1254
1255	with_intel_pps_lock(intel_dp, wakeref)
1256		intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
1257}
1258
1259enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp)
1260{
1261	enum pipe pipe;
1262
1263	/*
1264	 * Figure out the current pipe for the initial backlight setup. If the
1265	 * current pipe isn't valid, try the PPS pipe, and if that fails just
1266	 * assume pipe A.
1267	 */
1268	pipe = vlv_active_pipe(intel_dp);
1269
1270	if (pipe != PIPE_A && pipe != PIPE_B)
1271		pipe = intel_dp->pps.vlv_pps_pipe;
1272
1273	if (pipe != PIPE_A && pipe != PIPE_B)
1274		pipe = PIPE_A;
1275
1276	return pipe;
1277}
1278
1279/* Call on all DP, not just eDP */
1280void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder,
1281				  const struct intel_crtc_state *crtc_state)
1282{
1283	struct intel_display *display = to_intel_display(encoder);
1284	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1285	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1286
1287	lockdep_assert_held(&display->pps.mutex);
1288
1289	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
1290
1291	if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE &&
1292	    intel_dp->pps.vlv_pps_pipe != crtc->pipe) {
1293		/*
1294		 * If another power sequencer was being used on this
1295		 * port previously make sure to turn off vdd there while
1296		 * we still have control of it.
1297		 */
1298		vlv_detach_power_sequencer(intel_dp);
1299	}
1300
1301	/*
1302	 * We may be stealing the power
1303	 * sequencer from another port.
1304	 */
1305	vlv_steal_power_sequencer(display, crtc->pipe);
1306
1307	intel_dp->pps.vlv_active_pipe = crtc->pipe;
1308
1309	if (!intel_dp_is_edp(intel_dp))
1310		return;
1311
1312	/* now it's all ours */
1313	intel_dp->pps.vlv_pps_pipe = crtc->pipe;
1314
1315	drm_dbg_kms(display->drm,
1316		    "initializing %s for [ENCODER:%d:%s]\n",
1317		    pps_name(intel_dp),
1318		    encoder->base.base.id, encoder->base.name);
1319
1320	/* init power sequencer on this pipe and port */
1321	pps_init_delays(intel_dp);
1322	pps_init_registers(intel_dp, true);
1323}
1324
1325/* Call on all DP, not just eDP */
1326void vlv_pps_port_disable(struct intel_encoder *encoder,
1327			  const struct intel_crtc_state *crtc_state)
1328{
1329	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1330
1331	intel_wakeref_t wakeref;
1332
1333	with_intel_pps_lock(intel_dp, wakeref)
1334		intel_dp->pps.vlv_active_pipe = INVALID_PIPE;
1335}
1336
1337static void pps_vdd_init(struct intel_dp *intel_dp)
1338{
1339	struct intel_display *display = to_intel_display(intel_dp);
1340	struct drm_i915_private *dev_priv = to_i915(display->drm);
1341	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1342
1343	lockdep_assert_held(&display->pps.mutex);
1344
1345	if (!edp_have_panel_vdd(intel_dp))
1346		return;
1347
1348	/*
1349	 * The VDD bit needs a power domain reference, so if the bit is
1350	 * already enabled when we boot or resume, grab this reference and
1351	 * schedule a vdd off, so we don't hold on to the reference
1352	 * indefinitely.
1353	 */
1354	drm_dbg_kms(display->drm,
1355		    "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
1356		    dig_port->base.base.base.id, dig_port->base.base.name,
1357		    pps_name(intel_dp));
1358	drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
1359	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1360							    intel_aux_power_domain(dig_port));
1361}
1362
1363bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
1364{
1365	intel_wakeref_t wakeref;
1366	bool have_power = false;
1367
1368	with_intel_pps_lock(intel_dp, wakeref) {
1369		have_power = edp_have_panel_power(intel_dp) ||
1370			     edp_have_panel_vdd(intel_dp);
1371	}
1372
1373	return have_power;
1374}
1375
1376static void pps_init_timestamps(struct intel_dp *intel_dp)
1377{
1378	/*
1379	 * Initialize panel power off time to 0, assuming panel power could have
1380	 * been toggled between kernel boot and now only by a previously loaded
1381	 * and removed i915, which has already ensured sufficient power off
1382	 * delay at module remove.
1383	 */
1384	intel_dp->pps.panel_power_off_time = 0;
1385	intel_dp->pps.last_power_on = jiffies;
1386	intel_dp->pps.last_backlight_off = jiffies;
1387}
1388
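/*
 * Read the power sequencing delays back from the PPS registers, in the
 * same 100usec units the hardware uses; the power cycle delay (t11_t12)
 * is converted from its 100ms register units.
 */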
1389static void
1390intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1391{
1392	struct intel_display *display = to_intel_display(intel_dp);
1393	u32 pp_on, pp_off, pp_ctl;
1394	struct pps_registers regs;
1395
1396	intel_pps_get_registers(intel_dp, &regs);
1397
1398	pp_ctl = ilk_get_pp_control(intel_dp);
1399
1400	/* Ensure PPS is unlocked */
1401	if (!HAS_DDI(display))
1402		intel_de_write(display, regs.pp_ctrl, pp_ctl);
1403
1404	pp_on = intel_de_read(display, regs.pp_on);
1405	pp_off = intel_de_read(display, regs.pp_off);
1406
1407	/* Pull timing values out of registers */
1408	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1409	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1410	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1411	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1412
1413	if (i915_mmio_reg_valid(regs.pp_div)) {
1414		u32 pp_div;
1415
1416		pp_div = intel_de_read(display, regs.pp_div);
1417
1418		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1419	} else {
1420		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1421	}
1422}
1423
1424static void
1425intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
1426		     const struct edp_power_seq *seq)
1427{
1428	struct intel_display *display = to_intel_display(intel_dp);
1429
1430	drm_dbg_kms(display->drm,
1431		    "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1432		    state_name,
1433		    seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1434}
1435
1436static void
1437intel_pps_verify_state(struct intel_dp *intel_dp)
1438{
1439	struct intel_display *display = to_intel_display(intel_dp);
1440	struct edp_power_seq hw;
1441	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1442
1443	intel_pps_readout_hw_state(intel_dp, &hw);
1444
1445	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1446	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1447		drm_err(display->drm, "PPS state mismatch\n");
1448		intel_pps_dump_state(intel_dp, "sw", sw);
1449		intel_pps_dump_state(intel_dp, "hw", &hw);
1450	}
1451}
1452
1453static bool pps_delays_valid(struct edp_power_seq *delays)
1454{
1455	return delays->t1_t3 || delays->t8 || delays->t9 ||
1456		delays->t10 || delays->t11_t12;
1457}
1458
1459static void pps_init_delays_bios(struct intel_dp *intel_dp,
1460				 struct edp_power_seq *bios)
1461{
1462	struct intel_display *display = to_intel_display(intel_dp);
1463
1464	lockdep_assert_held(&display->pps.mutex);
1465
1466	if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
1467		intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
1468
1469	*bios = intel_dp->pps.bios_pps_delays;
1470
1471	intel_pps_dump_state(intel_dp, "bios", bios);
1472}
1473
1474static void pps_init_delays_vbt(struct intel_dp *intel_dp,
1475				struct edp_power_seq *vbt)
1476{
1477	struct intel_display *display = to_intel_display(intel_dp);
1478	struct intel_connector *connector = intel_dp->attached_connector;
1479
1480	*vbt = connector->panel.vbt.edp.pps;
1481
1482	if (!pps_delays_valid(vbt))
1483		return;
1484
 1485	/* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
 1486	 * of 500ms appears to be too short. Occasionally the panel
1487	 * just fails to power back on. Increasing the delay to 800ms
1488	 * seems sufficient to avoid this problem.
1489	 */
1490	if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
1491		vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
1492		drm_dbg_kms(display->drm,
1493			    "Increasing T12 panel delay as per the quirk to %d\n",
1494			    vbt->t11_t12);
1495	}
1496
1497	/* T11_T12 delay is special and actually in units of 100ms, but zero
1498	 * based in the hw (so we need to add 100 ms). But the sw vbt
 1499	 * table multiplies it by 1000 to make it in units of 100usec,
1500	 * too. */
1501	vbt->t11_t12 += 100 * 10;
1502
1503	intel_pps_dump_state(intel_dp, "vbt", vbt);
1504}
1505
1506static void pps_init_delays_spec(struct intel_dp *intel_dp,
1507				 struct edp_power_seq *spec)
1508{
1509	struct intel_display *display = to_intel_display(intel_dp);
1510
1511	lockdep_assert_held(&display->pps.mutex);
1512
1513	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
1514	 * our hw here, which are all in 100usec. */
1515	spec->t1_t3 = 210 * 10;
1516	spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
1517	spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
1518	spec->t10 = 500 * 10;
1519	/* This one is special and actually in units of 100ms, but zero
1520	 * based in the hw (so we need to add 100 ms). But the sw vbt
 1521	 * table multiplies it by 1000 to make it in units of 100usec,
1522	 * too. */
1523	spec->t11_t12 = (510 + 100) * 10;
1524
1525	intel_pps_dump_state(intel_dp, "spec", spec);
1526}
1527
1528static void pps_init_delays(struct intel_dp *intel_dp)
1529{
1530	struct intel_display *display = to_intel_display(intel_dp);
1531	struct edp_power_seq cur, vbt, spec,
1532		*final = &intel_dp->pps.pps_delays;
1533
1534	lockdep_assert_held(&display->pps.mutex);
1535
1536	/* already initialized? */
1537	if (pps_delays_valid(final))
1538		return;
1539
1540	pps_init_delays_bios(intel_dp, &cur);
1541	pps_init_delays_vbt(intel_dp, &vbt);
1542	pps_init_delays_spec(intel_dp, &spec);
1543
1544	/* Use the max of the register settings and vbt. If both are
1545	 * unset, fall back to the spec limits. */
1546#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
1547				       spec.field : \
1548				       max(cur.field, vbt.field))
1549	assign_final(t1_t3);
1550	assign_final(t8);
1551	assign_final(t9);
1552	assign_final(t10);
1553	assign_final(t11_t12);
1554#undef assign_final
1555
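	/* Convert the delays from 100usec units to ms, rounding up. */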
1556#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
1557	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
1558	intel_dp->pps.backlight_on_delay = get_delay(t8);
1559	intel_dp->pps.backlight_off_delay = get_delay(t9);
1560	intel_dp->pps.panel_power_down_delay = get_delay(t10);
1561	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
1562#undef get_delay
1563
1564	drm_dbg_kms(display->drm,
1565		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1566		    intel_dp->pps.panel_power_up_delay,
1567		    intel_dp->pps.panel_power_down_delay,
1568		    intel_dp->pps.panel_power_cycle_delay);
1569
1570	drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n",
1571		    intel_dp->pps.backlight_on_delay,
1572		    intel_dp->pps.backlight_off_delay);
1573
1574	/*
1575	 * We override the HW backlight delays to 1 because we do manual waits
1576	 * on them. For T8, even BSpec recommends doing it. For T9, if we
1577	 * don't do this, we'll end up waiting for the backlight off delay
1578	 * twice: once when we do the manual sleep, and once when we disable
1579	 * the panel and wait for the PP_STATUS bit to become zero.
1580	 */
1581	final->t8 = 1;
1582	final->t9 = 1;
1583
1584	/*
1585	 * HW has only a 100msec granularity for t11_t12 so round it up
1586	 * accordingly.
1587	 */
1588	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
1589}
1590
1591static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1592{
1593	struct intel_display *display = to_intel_display(intel_dp);
1594	struct drm_i915_private *dev_priv = to_i915(display->drm);
1595	u32 pp_on, pp_off, port_sel = 0;
1596	int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
1597	struct pps_registers regs;
1598	enum port port = dp_to_dig_port(intel_dp)->base.port;
1599	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
1600
1601	lockdep_assert_held(&display->pps.mutex);
1602
1603	intel_pps_get_registers(intel_dp, &regs);
1604
1605	/*
1606	 * On some VLV machines the BIOS can leave the VDD
1607	 * enabled even on power sequencers which aren't
1608	 * hooked up to any port. This would mess up the
1609	 * power domain tracking the first time we pick
1610	 * one of these power sequencers for use since
1611	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
1612	 * already on and therefore wouldn't grab the power
1613	 * domain reference. Disable VDD first to avoid this.
1614	 * This also avoids spuriously turning the VDD on as
1615	 * soon as the new power sequencer gets initialized.
1616	 */
1617	if (force_disable_vdd) {
1618		u32 pp = ilk_get_pp_control(intel_dp);
1619
1620		drm_WARN(display->drm, pp & PANEL_POWER_ON,
1621			 "Panel power already on\n");
1622
1623		if (pp & EDP_FORCE_VDD)
1624			drm_dbg_kms(display->drm,
1625				    "VDD already on, disabling first\n");
1626
1627		pp &= ~EDP_FORCE_VDD;
1628
1629		intel_de_write(display, regs.pp_ctrl, pp);
1630	}
1631
1632	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
1633		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
1634	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
1635		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
1636
1637	/* Haswell doesn't have any port selection bits for the panel
1638	 * power sequencer any more. */
1639	if (display->platform.valleyview || display->platform.cherryview) {
1640		port_sel = PANEL_PORT_SELECT_VLV(port);
1641	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
1642		switch (port) {
1643		case PORT_A:
1644			port_sel = PANEL_PORT_SELECT_DPA;
1645			break;
1646		case PORT_C:
1647			port_sel = PANEL_PORT_SELECT_DPC;
1648			break;
1649		case PORT_D:
1650			port_sel = PANEL_PORT_SELECT_DPD;
1651			break;
1652		default:
1653			MISSING_CASE(port);
1654			break;
1655		}
1656	}
1657
1658	pp_on |= port_sel;
1659
1660	intel_de_write(display, regs.pp_on, pp_on);
1661	intel_de_write(display, regs.pp_off, pp_off);
1662
1663	/*
 1664	 * Compute the divisor for the pp clock, simply matching the Bspec formula.
1665	 */
1666	if (i915_mmio_reg_valid(regs.pp_div))
1667		intel_de_write(display, regs.pp_div,
1668			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
1669	else
1670		intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
1671			     REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
1672					    DIV_ROUND_UP(seq->t11_t12, 1000)));
1673
1674	drm_dbg_kms(display->drm,
1675		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1676		    intel_de_read(display, regs.pp_on),
1677		    intel_de_read(display, regs.pp_off),
1678		    i915_mmio_reg_valid(regs.pp_div) ?
1679		    intel_de_read(display, regs.pp_div) :
1680		    (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1681}
1682
1683void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1684{
1685	struct intel_display *display = to_intel_display(intel_dp);
1686	intel_wakeref_t wakeref;
1687
1688	if (!intel_dp_is_edp(intel_dp))
1689		return;
1690
1691	with_intel_pps_lock(intel_dp, wakeref) {
1692		/*
1693		 * Reinit the power sequencer also on the resume path, in case
1694		 * BIOS did something nasty with it.
1695		 */
1696		if (display->platform.valleyview || display->platform.cherryview)
1697			vlv_initial_power_sequencer_setup(intel_dp);
1698
1699		pps_init_delays(intel_dp);
1700		pps_init_registers(intel_dp, false);
1701		pps_vdd_init(intel_dp);
1702
1703		if (edp_have_panel_vdd(intel_dp))
1704			edp_panel_vdd_schedule_off(intel_dp);
1705	}
1706}
1707
1708bool intel_pps_init(struct intel_dp *intel_dp)
1709{
1710	intel_wakeref_t wakeref;
1711	bool ret;
1712
1713	intel_dp->pps.initializing = true;
1714	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1715
1716	pps_init_timestamps(intel_dp);
1717
1718	with_intel_pps_lock(intel_dp, wakeref) {
1719		ret = pps_initial_setup(intel_dp);
1720
1721		pps_init_delays(intel_dp);
1722		pps_init_registers(intel_dp, false);
1723		pps_vdd_init(intel_dp);
1724	}
1725
1726	return ret;
1727}
1728
1729static void pps_init_late(struct intel_dp *intel_dp)
1730{
1731	struct intel_display *display = to_intel_display(intel_dp);
1732	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1733	struct intel_connector *connector = intel_dp->attached_connector;
1734
1735	if (display->platform.valleyview || display->platform.cherryview)
1736		return;
1737
1738	if (intel_num_pps(display) < 2)
1739		return;
1740
1741	drm_WARN(display->drm,
1742		 connector->panel.vbt.backlight.controller >= 0 &&
1743		 intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
1744		 "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
1745		 encoder->base.base.id, encoder->base.name,
1746		 intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);
1747
1748	if (connector->panel.vbt.backlight.controller >= 0)
1749		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
1750}
1751
1752void intel_pps_init_late(struct intel_dp *intel_dp)
1753{
1754	intel_wakeref_t wakeref;
1755
1756	with_intel_pps_lock(intel_dp, wakeref) {
1757		/* Reinit delays after per-panel info has been parsed from VBT */
1758		pps_init_late(intel_dp);
1759
1760		memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
1761		pps_init_delays(intel_dp);
1762		pps_init_registers(intel_dp, false);
1763
1764		intel_dp->pps.initializing = false;
1765
1766		if (edp_have_panel_vdd(intel_dp))
1767			edp_panel_vdd_schedule_off(intel_dp);
1768	}
1769}
1770
1771void intel_pps_unlock_regs_wa(struct intel_display *display)
1772{
1773	int pps_num;
1774	int pps_idx;
1775
1776	if (!HAS_DISPLAY(display) || HAS_DDI(display))
1777		return;
1778	/*
1779	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
 1780	 * wherever registers can be write protected.
1781	 */
1782	pps_num = intel_num_pps(display);
1783
1784	for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
1785		intel_de_rmw(display, PP_CONTROL(display, pps_idx),
1786			     PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
1787}
1788
1789void intel_pps_setup(struct intel_display *display)
1790{
1791	struct drm_i915_private *i915 = to_i915(display->drm);
1792
1793	if (HAS_PCH_SPLIT(i915) || display->platform.geminilake || display->platform.broxton)
1794		display->pps.mmio_base = PCH_PPS_BASE;
1795	else if (display->platform.valleyview || display->platform.cherryview)
1796		display->pps.mmio_base = VLV_PPS_BASE;
1797	else
1798		display->pps.mmio_base = PPS_BASE;
1799}
1800
1801static int intel_pps_show(struct seq_file *m, void *data)
1802{
1803	struct intel_connector *connector = m->private;
1804	struct intel_dp *intel_dp = intel_attached_dp(connector);
1805
1806	if (connector->base.status != connector_status_connected)
1807		return -ENODEV;
1808
1809	seq_printf(m, "Panel power up delay: %d\n",
1810		   intel_dp->pps.panel_power_up_delay);
1811	seq_printf(m, "Panel power down delay: %d\n",
1812		   intel_dp->pps.panel_power_down_delay);
1813	seq_printf(m, "Backlight on delay: %d\n",
1814		   intel_dp->pps.backlight_on_delay);
1815	seq_printf(m, "Backlight off delay: %d\n",
1816		   intel_dp->pps.backlight_off_delay);
1817
1818	return 0;
1819}
1820DEFINE_SHOW_ATTRIBUTE(intel_pps);
1821
1822void intel_pps_connector_debugfs_add(struct intel_connector *connector)
1823{
1824	struct dentry *root = connector->base.debugfs_entry;
1825	int connector_type = connector->base.connector_type;
1826
1827	if (connector_type == DRM_MODE_CONNECTOR_eDP)
1828		debugfs_create_file("i915_panel_timings", 0444, root,
1829				    connector, &intel_pps_fops);
1830}
1831
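/*
 * Assert that the power sequencer registers are not left locked (write
 * protected) while the panel on the given pipe is powered on.
 */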
1832void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
1833{
1834	struct drm_i915_private *dev_priv = to_i915(display->drm);
1835	i915_reg_t pp_reg;
1836	u32 val;
1837	enum pipe panel_pipe = INVALID_PIPE;
1838	bool locked = true;
1839
1840	if (drm_WARN_ON(display->drm, HAS_DDI(display)))
1841		return;
1842
1843	if (HAS_PCH_SPLIT(dev_priv)) {
1844		u32 port_sel;
1845
1846		pp_reg = PP_CONTROL(display, 0);
1847		port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
1848			PANEL_PORT_SELECT_MASK;
1849
1850		switch (port_sel) {
1851		case PANEL_PORT_SELECT_LVDS:
1852			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1853			break;
1854		case PANEL_PORT_SELECT_DPA:
1855			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1856			break;
1857		case PANEL_PORT_SELECT_DPC:
1858			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1859			break;
1860		case PANEL_PORT_SELECT_DPD:
1861			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1862			break;
1863		default:
1864			MISSING_CASE(port_sel);
1865			break;
1866		}
1867	} else if (display->platform.valleyview || display->platform.cherryview) {
1868		/* presumably write lock depends on pipe, not port select */
1869		pp_reg = PP_CONTROL(display, pipe);
1870		panel_pipe = pipe;
1871	} else {
1872		u32 port_sel;
1873
1874		pp_reg = PP_CONTROL(display, 0);
1875		port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
1876			PANEL_PORT_SELECT_MASK;
1877
1878		drm_WARN_ON(display->drm,
1879			    port_sel != PANEL_PORT_SELECT_LVDS);
1880		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1881	}
1882
1883	val = intel_de_read(display, pp_reg);
1884	if (!(val & PANEL_POWER_ON) ||
1885	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1886		locked = false;
1887
1888	INTEL_DISPLAY_STATE_WARN(display, panel_pipe == pipe && locked,
1889				 "panel assertion failure, pipe %c regs locked\n",
1890				 pipe_name(pipe));
1891}