   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include "g4x_dp.h"
   7#include "i915_drv.h"
   8#include "intel_de.h"
   9#include "intel_display_types.h"
  10#include "intel_dp.h"
  11#include "intel_dpll.h"
  12#include "intel_pps.h"
  13
  14static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
  15				      enum pipe pipe);
  16
  17static void pps_init_delays(struct intel_dp *intel_dp);
  18static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
  19
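/*
 * Grab a display power reference and the pps_mutex. The returned wakeref
 * must be handed back to intel_pps_unlock(). See intel_pps_reset_all() for
 * why the power domain reference is needed.
 */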
  20intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
  21{
  22	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  23	intel_wakeref_t wakeref;
  24
  25	/*
   26	 * See intel_pps_reset_all() for why we need a power domain reference here.
  27	 */
  28	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
  29	mutex_lock(&dev_priv->pps_mutex);
  30
  31	return wakeref;
  32}
  33
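/*
 * Release the pps_mutex and drop the display power reference taken by
 * intel_pps_lock(). Always returns 0 so callers can clear their wakeref.
 */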
  34intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
  35				 intel_wakeref_t wakeref)
  36{
  37	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  38
  39	mutex_unlock(&dev_priv->pps_mutex);
  40	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
  41
  42	return 0;
  43}
  44
  45static void
  46vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  47{
  48	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
  49	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  50	enum pipe pipe = intel_dp->pps.pps_pipe;
  51	bool pll_enabled, release_cl_override = false;
  52	enum dpio_phy phy = DPIO_PHY(pipe);
  53	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
  54	u32 DP;
  55
  56	if (drm_WARN(&dev_priv->drm,
  57		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
  58		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
  59		     pipe_name(pipe), dig_port->base.base.base.id,
  60		     dig_port->base.base.name))
  61		return;
  62
  63	drm_dbg_kms(&dev_priv->drm,
  64		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
  65		    pipe_name(pipe), dig_port->base.base.base.id,
  66		    dig_port->base.base.name);
  67
  68	/* Preserve the BIOS-computed detected bit. This is
  69	 * supposed to be read-only.
  70	 */
  71	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
  72	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
  73	DP |= DP_PORT_WIDTH(1);
  74	DP |= DP_LINK_TRAIN_PAT_1;
  75
  76	if (IS_CHERRYVIEW(dev_priv))
  77		DP |= DP_PIPE_SEL_CHV(pipe);
  78	else
  79		DP |= DP_PIPE_SEL(pipe);
  80
  81	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
  82
  83	/*
  84	 * The DPLL for the pipe must be enabled for this to work.
   85	 * So enable it temporarily if it's not already enabled.
  86	 */
  87	if (!pll_enabled) {
  88		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
  89			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
  90
  91		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
  92			drm_err(&dev_priv->drm,
  93				"Failed to force on pll for pipe %c!\n",
  94				pipe_name(pipe));
  95			return;
  96		}
  97	}
  98
  99	/*
 100	 * Similar magic as in intel_dp_enable_port().
 101	 * We _must_ do this port enable + disable trick
 102	 * to make this power sequencer lock onto the port.
 103	 * Otherwise even VDD force bit won't work.
 104	 */
 105	intel_de_write(dev_priv, intel_dp->output_reg, DP);
 106	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 107
 108	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
 109	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 110
 111	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
 112	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 113
 114	if (!pll_enabled) {
 115		vlv_force_pll_off(dev_priv, pipe);
 116
 117		if (release_cl_override)
 118			chv_phy_powergate_ch(dev_priv, phy, ch, false);
 119	}
 120}
 121
 122static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
 123{
 124	struct intel_encoder *encoder;
 125	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
 126
 127	/*
  128	 * We don't have a power sequencer currently.
 129	 * Pick one that's not used by other ports.
 130	 */
 131	for_each_intel_dp(&dev_priv->drm, encoder) {
 132		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 133
 134		if (encoder->type == INTEL_OUTPUT_EDP) {
 135			drm_WARN_ON(&dev_priv->drm,
 136				    intel_dp->pps.active_pipe != INVALID_PIPE &&
 137				    intel_dp->pps.active_pipe !=
 138				    intel_dp->pps.pps_pipe);
 139
 140			if (intel_dp->pps.pps_pipe != INVALID_PIPE)
 141				pipes &= ~(1 << intel_dp->pps.pps_pipe);
 142		} else {
 143			drm_WARN_ON(&dev_priv->drm,
 144				    intel_dp->pps.pps_pipe != INVALID_PIPE);
 145
 146			if (intel_dp->pps.active_pipe != INVALID_PIPE)
 147				pipes &= ~(1 << intel_dp->pps.active_pipe);
 148		}
 149	}
 150
 151	if (pipes == 0)
 152		return INVALID_PIPE;
 153
 154	return ffs(pipes) - 1;
 155}
 156
 157static enum pipe
 158vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 159{
 160	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 161	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 162	enum pipe pipe;
 163
 164	lockdep_assert_held(&dev_priv->pps_mutex);
 165
 166	/* We should never land here with regular DP ports */
 167	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 168
 169	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
 170		    intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
 171
 172	if (intel_dp->pps.pps_pipe != INVALID_PIPE)
 173		return intel_dp->pps.pps_pipe;
 174
 175	pipe = vlv_find_free_pps(dev_priv);
 176
 177	/*
 178	 * Didn't find one. This should not happen since there
 179	 * are two power sequencers and up to two eDP ports.
 180	 */
 181	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
 182		pipe = PIPE_A;
 183
 184	vlv_steal_power_sequencer(dev_priv, pipe);
 185	intel_dp->pps.pps_pipe = pipe;
 186
 187	drm_dbg_kms(&dev_priv->drm,
 188		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
 189		    pipe_name(intel_dp->pps.pps_pipe),
 190		    dig_port->base.base.base.id,
 191		    dig_port->base.base.name);
 192
 193	/* init power sequencer on this pipe and port */
 194	pps_init_delays(intel_dp);
 195	pps_init_registers(intel_dp, true);
 196
 197	/*
 198	 * Even vdd force doesn't work until we've made
 199	 * the power sequencer lock in on the port.
 200	 */
 201	vlv_power_sequencer_kick(intel_dp);
 202
 203	return intel_dp->pps.pps_pipe;
 204}
 205
 206static int
 207bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 208{
 209	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 210	int backlight_controller = dev_priv->vbt.backlight.controller;
 211
 212	lockdep_assert_held(&dev_priv->pps_mutex);
 213
 214	/* We should never land here with regular DP ports */
 215	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 216
 217	if (!intel_dp->pps.pps_reset)
 218		return backlight_controller;
 219
 220	intel_dp->pps.pps_reset = false;
 221
 222	/*
  223	 * Only the HW needs to be reprogrammed; the SW state is fixed and
  224	 * has been set up during connector init.
 225	 */
 226	pps_init_registers(intel_dp, false);
 227
 228	return backlight_controller;
 229}
 230
 231typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
 232			       enum pipe pipe);
 233
 234static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
 235			       enum pipe pipe)
 236{
 237	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
 238}
 239
 240static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
 241				enum pipe pipe)
 242{
 243	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
 244}
 245
 246static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
 247			 enum pipe pipe)
 248{
 249	return true;
 250}
 251
 252static enum pipe
 253vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 254		     enum port port,
 255		     vlv_pipe_check pipe_check)
 256{
 257	enum pipe pipe;
 258
 259	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
 260		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
 261			PANEL_PORT_SELECT_MASK;
 262
 263		if (port_sel != PANEL_PORT_SELECT_VLV(port))
 264			continue;
 265
 266		if (!pipe_check(dev_priv, pipe))
 267			continue;
 268
 269		return pipe;
 270	}
 271
 272	return INVALID_PIPE;
 273}
 274
 275static void
 276vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 277{
 278	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 279	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 280	enum port port = dig_port->base.port;
 281
 282	lockdep_assert_held(&dev_priv->pps_mutex);
 283
 284	/* try to find a pipe with this port selected */
 285	/* first pick one where the panel is on */
 286	intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 287						      vlv_pipe_has_pp_on);
 288	/* didn't find one? pick one where vdd is on */
 289	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
 290		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 291							      vlv_pipe_has_vdd_on);
 292	/* didn't find one? pick one with just the correct port */
 293	if (intel_dp->pps.pps_pipe == INVALID_PIPE)
 294		intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 295							      vlv_pipe_any);
 296
 297	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
 298	if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
 299		drm_dbg_kms(&dev_priv->drm,
 300			    "no initial power sequencer for [ENCODER:%d:%s]\n",
 301			    dig_port->base.base.base.id,
 302			    dig_port->base.base.name);
 303		return;
 304	}
 305
 306	drm_dbg_kms(&dev_priv->drm,
 307		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
 308		    dig_port->base.base.base.id,
 309		    dig_port->base.base.name,
 310		    pipe_name(intel_dp->pps.pps_pipe));
 311}
 312
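/*
 * Mark the PPS hardware state as lost for every eDP encoder so that the
 * power sequencer registers get reprogrammed on next use. Only relevant
 * on LP platforms, where the PPS state can be lost when the display power
 * well goes down.
 */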
 313void intel_pps_reset_all(struct drm_i915_private *dev_priv)
 314{
 315	struct intel_encoder *encoder;
 316
 317	if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
 318		return;
 319
 320	if (!HAS_DISPLAY(dev_priv))
 321		return;
 322
 323	/*
 324	 * We can't grab pps_mutex here due to deadlock with power_domain
 325	 * mutex when power_domain functions are called while holding pps_mutex.
 326	 * That also means that in order to use pps_pipe the code needs to
 327	 * hold both a power domain reference and pps_mutex, and the power domain
 328	 * reference get/put must be done while _not_ holding pps_mutex.
  329	 * intel_pps_{lock,unlock}() do these steps in the correct order,
  330	 * so always use them.
 331	 */
 332
 333	for_each_intel_dp(&dev_priv->drm, encoder) {
 334		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 335
 336		drm_WARN_ON(&dev_priv->drm,
 337			    intel_dp->pps.active_pipe != INVALID_PIPE);
 338
 339		if (encoder->type != INTEL_OUTPUT_EDP)
 340			continue;
 341
 342		if (DISPLAY_VER(dev_priv) >= 9)
 343			intel_dp->pps.pps_reset = true;
 344		else
 345			intel_dp->pps.pps_pipe = INVALID_PIPE;
 346	}
 347}
 348
 349struct pps_registers {
 350	i915_reg_t pp_ctrl;
 351	i915_reg_t pp_stat;
 352	i915_reg_t pp_on;
 353	i915_reg_t pp_off;
 354	i915_reg_t pp_div;
 355};
 356
 357static void intel_pps_get_registers(struct intel_dp *intel_dp,
 358				    struct pps_registers *regs)
 359{
 360	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 361	int pps_idx = 0;
 362
 363	memset(regs, 0, sizeof(*regs));
 364
 365	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 366		pps_idx = bxt_power_sequencer_idx(intel_dp);
 367	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 368		pps_idx = vlv_power_sequencer_pipe(intel_dp);
 369
 370	regs->pp_ctrl = PP_CONTROL(pps_idx);
 371	regs->pp_stat = PP_STATUS(pps_idx);
 372	regs->pp_on = PP_ON_DELAYS(pps_idx);
 373	regs->pp_off = PP_OFF_DELAYS(pps_idx);
 374
 375	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
 376	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
 377	    INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
 378		regs->pp_div = INVALID_MMIO_REG;
 379	else
 380		regs->pp_div = PP_DIVISOR(pps_idx);
 381}
 382
 383static i915_reg_t
 384_pp_ctrl_reg(struct intel_dp *intel_dp)
 385{
 386	struct pps_registers regs;
 387
 388	intel_pps_get_registers(intel_dp, &regs);
 389
 390	return regs.pp_ctrl;
 391}
 392
 393static i915_reg_t
 394_pp_stat_reg(struct intel_dp *intel_dp)
 395{
 396	struct pps_registers regs;
 397
 398	intel_pps_get_registers(intel_dp, &regs);
 399
 400	return regs.pp_stat;
 401}
 402
 403static bool edp_have_panel_power(struct intel_dp *intel_dp)
 404{
 405	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 406
 407	lockdep_assert_held(&dev_priv->pps_mutex);
 408
 409	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 410	    intel_dp->pps.pps_pipe == INVALID_PIPE)
 411		return false;
 412
 413	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
 414}
 415
 416static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 417{
 418	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 419
 420	lockdep_assert_held(&dev_priv->pps_mutex);
 421
 422	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 423	    intel_dp->pps.pps_pipe == INVALID_PIPE)
 424		return false;
 425
 426	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 427}
 428
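/*
 * Warn if an eDP panel has neither panel power nor VDD enabled while AUX
 * channel communication is being attempted. Caller must hold pps_mutex.
 */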
 429void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
 430{
 431	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 432
 433	if (!intel_dp_is_edp(intel_dp))
 434		return;
 435
 436	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
 437		drm_WARN(&dev_priv->drm, 1,
 438			 "eDP powered off while attempting aux channel communication.\n");
 439		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
 440			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
 441			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
 442	}
 443}
 444
 445#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
 446#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
 447
 448#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
 449#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
 450
 451#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 452#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 453
 454static void intel_pps_verify_state(struct intel_dp *intel_dp);
 455
 456static void wait_panel_status(struct intel_dp *intel_dp,
 457				       u32 mask,
 458				       u32 value)
 459{
 460	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 461	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 462
 463	lockdep_assert_held(&dev_priv->pps_mutex);
 464
 465	intel_pps_verify_state(intel_dp);
 466
 467	pp_stat_reg = _pp_stat_reg(intel_dp);
 468	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 469
 470	drm_dbg_kms(&dev_priv->drm,
 471		    "mask %08x value %08x status %08x control %08x\n",
 472		    mask, value,
 473		    intel_de_read(dev_priv, pp_stat_reg),
 474		    intel_de_read(dev_priv, pp_ctrl_reg));
 475
 476	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
 477				       mask, value, 5000))
 478		drm_err(&dev_priv->drm,
 479			"Panel status timeout: status %08x control %08x\n",
 480			intel_de_read(dev_priv, pp_stat_reg),
 481			intel_de_read(dev_priv, pp_ctrl_reg));
 482
 483	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
 484}
 485
 486static void wait_panel_on(struct intel_dp *intel_dp)
 487{
 488	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 489
 490	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
 491	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 492}
 493
 494static void wait_panel_off(struct intel_dp *intel_dp)
 495{
 496	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 497
 498	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
 499	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
 500}
 501
 502static void wait_panel_power_cycle(struct intel_dp *intel_dp)
 503{
 504	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 505	ktime_t panel_power_on_time;
 506	s64 panel_power_off_duration;
 507
 508	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
 509
  510	/* take the difference of current time and panel power off time
  511	 * and then make the panel wait for t11_t12 if needed. */
 512	panel_power_on_time = ktime_get_boottime();
 513	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
 514
 515	/* When we disable the VDD override bit last we have to do the manual
 516	 * wait. */
 517	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
 518		wait_remaining_ms_from_jiffies(jiffies,
 519				       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
 520
 521	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 522}
 523
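/*
 * Wait out the remaining panel power cycle delay (T11/T12) before the
 * panel may be powered up again.
 */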
 524void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
 525{
 526	intel_wakeref_t wakeref;
 527
 528	if (!intel_dp_is_edp(intel_dp))
 529		return;
 530
 531	with_intel_pps_lock(intel_dp, wakeref)
 532		wait_panel_power_cycle(intel_dp);
 533}
 534
 535static void wait_backlight_on(struct intel_dp *intel_dp)
 536{
 537	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
 538				       intel_dp->pps.backlight_on_delay);
 539}
 540
 541static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 542{
 543	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
 544				       intel_dp->pps.backlight_off_delay);
 545}
 546
  547/* Read the current pp_control value, unlocking the register if it
  548 * is locked.
  549 */
  550
  551static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 552{
 553	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 554	u32 control;
 555
 556	lockdep_assert_held(&dev_priv->pps_mutex);
 557
 558	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
 559	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
 560			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
 561		control &= ~PANEL_UNLOCK_MASK;
 562		control |= PANEL_UNLOCK_REGS;
 563	}
 564	return control;
 565}
 566
 567/*
 568 * Must be paired with intel_pps_vdd_off_unlocked().
 569 * Must hold pps_mutex around the whole on/off sequence.
 570 * Can be nested with intel_pps_vdd_{on,off}() calls.
 571 */
 572bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
 573{
 574	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 575	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 576	u32 pp;
 577	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 578	bool need_to_disable = !intel_dp->pps.want_panel_vdd;
 579
 580	lockdep_assert_held(&dev_priv->pps_mutex);
 581
 582	if (!intel_dp_is_edp(intel_dp))
 583		return false;
 584
 585	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
 586	intel_dp->pps.want_panel_vdd = true;
 587
 588	if (edp_have_panel_vdd(intel_dp))
 589		return need_to_disable;
 590
 591	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
 592	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
 593							    intel_aux_power_domain(dig_port));
 594
 595	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
 596		    dig_port->base.base.base.id,
 597		    dig_port->base.base.name);
 598
 599	if (!edp_have_panel_power(intel_dp))
 600		wait_panel_power_cycle(intel_dp);
 601
 602	pp = ilk_get_pp_control(intel_dp);
 603	pp |= EDP_FORCE_VDD;
 604
 605	pp_stat_reg = _pp_stat_reg(intel_dp);
 606	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 607
 608	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 609	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 610	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 611		    intel_de_read(dev_priv, pp_stat_reg),
 612		    intel_de_read(dev_priv, pp_ctrl_reg));
 613	/*
  614	 * If the panel wasn't on, delay before accessing the aux channel.
 615	 */
 616	if (!edp_have_panel_power(intel_dp)) {
 617		drm_dbg_kms(&dev_priv->drm,
 618			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
 619			    dig_port->base.base.base.id,
 620			    dig_port->base.base.name);
 621		msleep(intel_dp->pps.panel_power_up_delay);
 622	}
 623
 624	return need_to_disable;
 625}
 626
 627/*
 628 * Must be paired with intel_pps_off().
 629 * Nested calls to these functions are not allowed since
 630 * we drop the lock. Caller must use some higher level
 631 * locking to prevent nested calls from other threads.
 632 */
 633void intel_pps_vdd_on(struct intel_dp *intel_dp)
 634{
 635	intel_wakeref_t wakeref;
 636	bool vdd;
 637
 638	if (!intel_dp_is_edp(intel_dp))
 639		return;
 640
 641	vdd = false;
 642	with_intel_pps_lock(intel_dp, wakeref)
 643		vdd = intel_pps_vdd_on_unlocked(intel_dp);
 644	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
 645			dp_to_dig_port(intel_dp)->base.base.base.id,
 646			dp_to_dig_port(intel_dp)->base.base.name);
 647}
 648
 649static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
 650{
 651	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 652	struct intel_digital_port *dig_port =
 653		dp_to_dig_port(intel_dp);
 654	u32 pp;
 655	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 656
 657	lockdep_assert_held(&dev_priv->pps_mutex);
 658
 659	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
 660
 661	if (!edp_have_panel_vdd(intel_dp))
 662		return;
 663
 664	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
 665		    dig_port->base.base.base.id,
 666		    dig_port->base.base.name);
 667
 668	pp = ilk_get_pp_control(intel_dp);
 669	pp &= ~EDP_FORCE_VDD;
 670
 671	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 672	pp_stat_reg = _pp_stat_reg(intel_dp);
 673
 674	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 675	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 676
 677	/* Make sure sequencer is idle before allowing subsequent activity */
 678	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 679		    intel_de_read(dev_priv, pp_stat_reg),
 680		    intel_de_read(dev_priv, pp_ctrl_reg));
 681
 682	if ((pp & PANEL_POWER_ON) == 0)
 683		intel_dp->pps.panel_power_off_time = ktime_get_boottime();
 684
 685	intel_display_power_put(dev_priv,
 686				intel_aux_power_domain(dig_port),
 687				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
 688}
 689
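/*
 * Synchronously turn the panel VDD off: cancel any pending delayed VDD off
 * work and force VDD off immediately, dropping the AUX power reference.
 */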
 690void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
 691{
 692	intel_wakeref_t wakeref;
 693
 694	if (!intel_dp_is_edp(intel_dp))
 695		return;
 696
 697	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
 698	/*
 699	 * vdd might still be enabled due to the delayed vdd off.
 700	 * Make sure vdd is actually turned off here.
 701	 */
 702	with_intel_pps_lock(intel_dp, wakeref)
 703		intel_pps_vdd_off_sync_unlocked(intel_dp);
 704}
 705
 706static void edp_panel_vdd_work(struct work_struct *__work)
 707{
 708	struct intel_pps *pps = container_of(to_delayed_work(__work),
 709					     struct intel_pps, panel_vdd_work);
 710	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
 711	intel_wakeref_t wakeref;
 712
 713	with_intel_pps_lock(intel_dp, wakeref) {
 714		if (!intel_dp->pps.want_panel_vdd)
 715			intel_pps_vdd_off_sync_unlocked(intel_dp);
 716	}
 717}
 718
 719static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
 720{
 721	unsigned long delay;
 722
 723	/*
 724	 * Queue the timer to fire a long time from now (relative to the power
 725	 * down delay) to keep the panel power up across a sequence of
 726	 * operations.
 727	 */
 728	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
 729	schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
 730}
 731
 732/*
  733 * Must be paired with intel_pps_vdd_on_unlocked().
 734 * Must hold pps_mutex around the whole on/off sequence.
 735 * Can be nested with intel_pps_vdd_{on,off}() calls.
 736 */
 737void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
 738{
 739	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 740
 741	lockdep_assert_held(&dev_priv->pps_mutex);
 742
 743	if (!intel_dp_is_edp(intel_dp))
 744		return;
 745
 746	I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
 747			dp_to_dig_port(intel_dp)->base.base.base.id,
 748			dp_to_dig_port(intel_dp)->base.base.name);
 749
 750	intel_dp->pps.want_panel_vdd = false;
 751
 752	if (sync)
 753		intel_pps_vdd_off_sync_unlocked(intel_dp);
 754	else
 755		edp_panel_vdd_schedule_off(intel_dp);
 756}
 757
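/*
 * Turn the eDP panel power on. Caller must hold pps_mutex; see
 * intel_pps_on() for the locked variant.
 */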
 758void intel_pps_on_unlocked(struct intel_dp *intel_dp)
 759{
 760	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 761	u32 pp;
 762	i915_reg_t pp_ctrl_reg;
 763
 764	lockdep_assert_held(&dev_priv->pps_mutex);
 765
 766	if (!intel_dp_is_edp(intel_dp))
 767		return;
 768
 769	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
 770		    dp_to_dig_port(intel_dp)->base.base.base.id,
 771		    dp_to_dig_port(intel_dp)->base.base.name);
 772
 773	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
 774		     "[ENCODER:%d:%s] panel power already on\n",
 775		     dp_to_dig_port(intel_dp)->base.base.base.id,
 776		     dp_to_dig_port(intel_dp)->base.base.name))
 777		return;
 778
 779	wait_panel_power_cycle(intel_dp);
 780
 781	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 782	pp = ilk_get_pp_control(intel_dp);
 783	if (IS_IRONLAKE(dev_priv)) {
 784		/* ILK workaround: disable reset around power sequence */
 785		pp &= ~PANEL_POWER_RESET;
 786		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 787		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 788	}
 789
 790	pp |= PANEL_POWER_ON;
 791	if (!IS_IRONLAKE(dev_priv))
 792		pp |= PANEL_POWER_RESET;
 793
 794	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 795	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 796
 797	wait_panel_on(intel_dp);
 798	intel_dp->pps.last_power_on = jiffies;
 799
 800	if (IS_IRONLAKE(dev_priv)) {
 801		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
 802		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 803		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 804	}
 805}
 806
 807void intel_pps_on(struct intel_dp *intel_dp)
 808{
 809	intel_wakeref_t wakeref;
 810
 811	if (!intel_dp_is_edp(intel_dp))
 812		return;
 813
 814	with_intel_pps_lock(intel_dp, wakeref)
 815		intel_pps_on_unlocked(intel_dp);
 816}
 817
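/*
 * Turn the eDP panel power off, also clearing the VDD force and backlight
 * enable bits. Caller must hold pps_mutex; see intel_pps_off() for the
 * locked variant.
 */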
 818void intel_pps_off_unlocked(struct intel_dp *intel_dp)
 819{
 820	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 821	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 822	u32 pp;
 823	i915_reg_t pp_ctrl_reg;
 824
 825	lockdep_assert_held(&dev_priv->pps_mutex);
 826
 827	if (!intel_dp_is_edp(intel_dp))
 828		return;
 829
 830	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
 831		    dig_port->base.base.base.id, dig_port->base.base.name);
 832
 833	drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
 834		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
 835		 dig_port->base.base.base.id, dig_port->base.base.name);
 836
 837	pp = ilk_get_pp_control(intel_dp);
 838	/* We need to switch off panel power _and_ force vdd, for otherwise some
 839	 * panels get very unhappy and cease to work. */
 840	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
 841		EDP_BLC_ENABLE);
 842
 843	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 844
 845	intel_dp->pps.want_panel_vdd = false;
 846
 847	intel_de_write(dev_priv, pp_ctrl_reg, pp);
 848	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 849
 850	wait_panel_off(intel_dp);
 851	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
 852
 853	/* We got a reference when we enabled the VDD. */
 854	intel_display_power_put(dev_priv,
 855				intel_aux_power_domain(dig_port),
 856				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
 857}
 858
 859void intel_pps_off(struct intel_dp *intel_dp)
 860{
 861	intel_wakeref_t wakeref;
 862
 863	if (!intel_dp_is_edp(intel_dp))
 864		return;
 865
 866	with_intel_pps_lock(intel_dp, wakeref)
 867		intel_pps_off_unlocked(intel_dp);
 868}
 869
 870/* Enable backlight in the panel power control. */
 871void intel_pps_backlight_on(struct intel_dp *intel_dp)
 872{
 873	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 874	intel_wakeref_t wakeref;
 875
 876	/*
 877	 * If we enable the backlight right away following a panel power
 878	 * on, we may see slight flicker as the panel syncs with the eDP
 879	 * link.  So delay a bit to make sure the image is solid before
 880	 * allowing it to appear.
 881	 */
 882	wait_backlight_on(intel_dp);
 883
 884	with_intel_pps_lock(intel_dp, wakeref) {
 885		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 886		u32 pp;
 887
 888		pp = ilk_get_pp_control(intel_dp);
 889		pp |= EDP_BLC_ENABLE;
 890
 891		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 892		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 893	}
 894}
 895
 896/* Disable backlight in the panel power control. */
 897void intel_pps_backlight_off(struct intel_dp *intel_dp)
 898{
 899	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 900	intel_wakeref_t wakeref;
 901
 902	if (!intel_dp_is_edp(intel_dp))
 903		return;
 904
 905	with_intel_pps_lock(intel_dp, wakeref) {
 906		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 907		u32 pp;
 908
 909		pp = ilk_get_pp_control(intel_dp);
 910		pp &= ~EDP_BLC_ENABLE;
 911
 912		intel_de_write(dev_priv, pp_ctrl_reg, pp);
 913		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 914	}
 915
 916	intel_dp->pps.last_backlight_off = jiffies;
 917	edp_wait_backlight_off(intel_dp);
 918}
 919
 920/*
 921 * Hook for controlling the panel power control backlight through the bl_power
 922 * sysfs attribute. Take care to handle multiple calls.
 923 */
 924void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
 925{
 926	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 927	struct intel_dp *intel_dp = intel_attached_dp(connector);
 928	intel_wakeref_t wakeref;
 929	bool is_enabled;
 930
 931	is_enabled = false;
 932	with_intel_pps_lock(intel_dp, wakeref)
 933		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
 934	if (is_enabled == enable)
 935		return;
 936
 937	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
 938		    enable ? "enable" : "disable");
 939
 940	if (enable)
 941		intel_pps_backlight_on(intel_dp);
 942	else
 943		intel_pps_backlight_off(intel_dp);
 944}
 945
 946static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 947{
 948	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 949	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 950	enum pipe pipe = intel_dp->pps.pps_pipe;
 951	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
 952
 953	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
 954
 955	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
 956		return;
 957
 958	intel_pps_vdd_off_sync_unlocked(intel_dp);
 959
 960	/*
 961	 * VLV seems to get confused when multiple power sequencers
 962	 * have the same port selected (even if only one has power/vdd
  963	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
  964	 * CHV on the other hand doesn't seem to mind having the same port
  965	 * selected in multiple power sequencers, but let's always clear the
  966	 * port select when logically disconnecting a power sequencer
 967	 * from a port.
 968	 */
 969	drm_dbg_kms(&dev_priv->drm,
 970		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
 971		    pipe_name(pipe), dig_port->base.base.base.id,
 972		    dig_port->base.base.name);
 973	intel_de_write(dev_priv, pp_on_reg, 0);
 974	intel_de_posting_read(dev_priv, pp_on_reg);
 975
 976	intel_dp->pps.pps_pipe = INVALID_PIPE;
 977}
 978
 979static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
 980				      enum pipe pipe)
 981{
 982	struct intel_encoder *encoder;
 983
 984	lockdep_assert_held(&dev_priv->pps_mutex);
 985
 986	for_each_intel_dp(&dev_priv->drm, encoder) {
 987		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 988
 989		drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
 990			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
 991			 pipe_name(pipe), encoder->base.base.id,
 992			 encoder->base.name);
 993
 994		if (intel_dp->pps.pps_pipe != pipe)
 995			continue;
 996
 997		drm_dbg_kms(&dev_priv->drm,
 998			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
 999			    pipe_name(pipe), encoder->base.base.id,
1000			    encoder->base.name);
1001
1002		/* make sure vdd is off before we steal it */
1003		vlv_detach_power_sequencer(intel_dp);
1004	}
1005}
1006
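/*
 * VLV/CHV: bind the power sequencer of the crtc's pipe to this port,
 * detaching any power sequencer previously used by the port and stealing
 * the new one from other ports if necessary.
 */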
1007void vlv_pps_init(struct intel_encoder *encoder,
1008		  const struct intel_crtc_state *crtc_state)
1009{
1010	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1011	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1012	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1013
1014	lockdep_assert_held(&dev_priv->pps_mutex);
1015
1016	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1017
1018	if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
1019	    intel_dp->pps.pps_pipe != crtc->pipe) {
1020		/*
1021		 * If another power sequencer was being used on this
 1022		 * port previously, make sure to turn off vdd there while
1023		 * we still have control of it.
1024		 */
1025		vlv_detach_power_sequencer(intel_dp);
1026	}
1027
1028	/*
1029	 * We may be stealing the power
1030	 * sequencer from another port.
1031	 */
1032	vlv_steal_power_sequencer(dev_priv, crtc->pipe);
1033
1034	intel_dp->pps.active_pipe = crtc->pipe;
1035
1036	if (!intel_dp_is_edp(intel_dp))
1037		return;
1038
1039	/* now it's all ours */
1040	intel_dp->pps.pps_pipe = crtc->pipe;
1041
1042	drm_dbg_kms(&dev_priv->drm,
1043		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
1044		    pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
1045		    encoder->base.name);
1046
1047	/* init power sequencer on this pipe and port */
1048	pps_init_delays(intel_dp);
1049	pps_init_registers(intel_dp, true);
1050}
1051
1052static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
1053{
1054	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1055	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1056
1057	lockdep_assert_held(&dev_priv->pps_mutex);
1058
1059	if (!edp_have_panel_vdd(intel_dp))
1060		return;
1061
1062	/*
1063	 * The VDD bit needs a power domain reference, so if the bit is
1064	 * already enabled when we boot or resume, grab this reference and
1065	 * schedule a vdd off, so we don't hold on to the reference
1066	 * indefinitely.
1067	 */
1068	drm_dbg_kms(&dev_priv->drm,
1069		    "VDD left on by BIOS, adjusting state tracking\n");
1070	drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
1071	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1072							    intel_aux_power_domain(dig_port));
1073
1074	edp_panel_vdd_schedule_off(intel_dp);
1075}
1076
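/*
 * Check, under the PPS lock, whether the eDP panel currently has both
 * panel power and VDD enabled.
 */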
1077bool intel_pps_have_power(struct intel_dp *intel_dp)
1078{
1079	intel_wakeref_t wakeref;
1080	bool have_power = false;
1081
1082	with_intel_pps_lock(intel_dp, wakeref) {
1083		have_power = edp_have_panel_power(intel_dp) &&
1084						  edp_have_panel_vdd(intel_dp);
1085	}
1086
1087	return have_power;
1088}
1089
1090static void pps_init_timestamps(struct intel_dp *intel_dp)
1091{
1092	intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1093	intel_dp->pps.last_power_on = jiffies;
1094	intel_dp->pps.last_backlight_off = jiffies;
1095}
1096
1097static void
1098intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1099{
1100	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1101	u32 pp_on, pp_off, pp_ctl;
1102	struct pps_registers regs;
1103
1104	intel_pps_get_registers(intel_dp, &regs);
1105
1106	pp_ctl = ilk_get_pp_control(intel_dp);
1107
1108	/* Ensure PPS is unlocked */
1109	if (!HAS_DDI(dev_priv))
1110		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1111
1112	pp_on = intel_de_read(dev_priv, regs.pp_on);
1113	pp_off = intel_de_read(dev_priv, regs.pp_off);
1114
1115	/* Pull timing values out of registers */
1116	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1117	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1118	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1119	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1120
1121	if (i915_mmio_reg_valid(regs.pp_div)) {
1122		u32 pp_div;
1123
1124		pp_div = intel_de_read(dev_priv, regs.pp_div);
1125
1126		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1127	} else {
1128		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1129	}
1130}
1131
1132static void
1133intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
1134{
1135	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1136		      state_name,
1137		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1138}
1139
1140static void
1141intel_pps_verify_state(struct intel_dp *intel_dp)
1142{
1143	struct edp_power_seq hw;
1144	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1145
1146	intel_pps_readout_hw_state(intel_dp, &hw);
1147
1148	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1149	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1150		DRM_ERROR("PPS state mismatch\n");
1151		intel_pps_dump_state("sw", sw);
1152		intel_pps_dump_state("hw", &hw);
1153	}
1154}
1155
1156static void pps_init_delays(struct intel_dp *intel_dp)
1157{
1158	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1159	struct edp_power_seq cur, vbt, spec,
1160		*final = &intel_dp->pps.pps_delays;
1161
1162	lockdep_assert_held(&dev_priv->pps_mutex);
1163
1164	/* already initialized? */
1165	if (final->t11_t12 != 0)
1166		return;
1167
1168	intel_pps_readout_hw_state(intel_dp, &cur);
1169
1170	intel_pps_dump_state("cur", &cur);
1171
1172	vbt = dev_priv->vbt.edp.pps;
 1173	/* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
 1174	 * of 500ms appears to be too short. Occasionally the panel
1175	 * just fails to power back on. Increasing the delay to 800ms
1176	 * seems sufficient to avoid this problem.
1177	 */
1178	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
1179		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
1180		drm_dbg_kms(&dev_priv->drm,
1181			    "Increasing T12 panel delay as per the quirk to %d\n",
1182			    vbt.t11_t12);
1183	}
1184	/* T11_T12 delay is special and actually in units of 100ms, but zero
1185	 * based in the hw (so we need to add 100 ms). But the sw vbt
 1186	 * table multiplies it by 1000 to make it in units of 100usec,
1187	 * too. */
1188	vbt.t11_t12 += 100 * 10;
1189
1190	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
1191	 * our hw here, which are all in 100usec. */
1192	spec.t1_t3 = 210 * 10;
1193	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
1194	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
1195	spec.t10 = 500 * 10;
1196	/* This one is special and actually in units of 100ms, but zero
1197	 * based in the hw (so we need to add 100 ms). But the sw vbt
 1198	 * table multiplies it by 1000 to make it in units of 100usec,
1199	 * too. */
1200	spec.t11_t12 = (510 + 100) * 10;
1201
1202	intel_pps_dump_state("vbt", &vbt);
1203
1204	/* Use the max of the register settings and vbt. If both are
1205	 * unset, fall back to the spec limits. */
1206#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
1207				       spec.field : \
1208				       max(cur.field, vbt.field))
1209	assign_final(t1_t3);
1210	assign_final(t8);
1211	assign_final(t9);
1212	assign_final(t10);
1213	assign_final(t11_t12);
1214#undef assign_final
1215
1216#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
1217	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
1218	intel_dp->pps.backlight_on_delay = get_delay(t8);
1219	intel_dp->pps.backlight_off_delay = get_delay(t9);
1220	intel_dp->pps.panel_power_down_delay = get_delay(t10);
1221	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
1222#undef get_delay
1223
1224	drm_dbg_kms(&dev_priv->drm,
1225		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1226		    intel_dp->pps.panel_power_up_delay,
1227		    intel_dp->pps.panel_power_down_delay,
1228		    intel_dp->pps.panel_power_cycle_delay);
1229
1230	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
1231		    intel_dp->pps.backlight_on_delay,
1232		    intel_dp->pps.backlight_off_delay);
1233
1234	/*
1235	 * We override the HW backlight delays to 1 because we do manual waits
1236	 * on them. For T8, even BSpec recommends doing it. For T9, if we
1237	 * don't do this, we'll end up waiting for the backlight off delay
1238	 * twice: once when we do the manual sleep, and once when we disable
1239	 * the panel and wait for the PP_STATUS bit to become zero.
1240	 */
1241	final->t8 = 1;
1242	final->t9 = 1;
1243
1244	/*
1245	 * HW has only a 100msec granularity for t11_t12 so round it up
1246	 * accordingly.
1247	 */
1248	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
1249}
1250
1251static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1252{
1253	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1254	u32 pp_on, pp_off, port_sel = 0;
1255	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
1256	struct pps_registers regs;
1257	enum port port = dp_to_dig_port(intel_dp)->base.port;
1258	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
1259
1260	lockdep_assert_held(&dev_priv->pps_mutex);
1261
1262	intel_pps_get_registers(intel_dp, &regs);
1263
1264	/*
1265	 * On some VLV machines the BIOS can leave the VDD
1266	 * enabled even on power sequencers which aren't
1267	 * hooked up to any port. This would mess up the
1268	 * power domain tracking the first time we pick
1269	 * one of these power sequencers for use since
1270	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
1271	 * already on and therefore wouldn't grab the power
1272	 * domain reference. Disable VDD first to avoid this.
1273	 * This also avoids spuriously turning the VDD on as
1274	 * soon as the new power sequencer gets initialized.
1275	 */
1276	if (force_disable_vdd) {
1277		u32 pp = ilk_get_pp_control(intel_dp);
1278
1279		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
1280			 "Panel power already on\n");
1281
1282		if (pp & EDP_FORCE_VDD)
1283			drm_dbg_kms(&dev_priv->drm,
1284				    "VDD already on, disabling first\n");
1285
1286		pp &= ~EDP_FORCE_VDD;
1287
1288		intel_de_write(dev_priv, regs.pp_ctrl, pp);
1289	}
1290
1291	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
1292		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
1293	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
1294		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
1295
1296	/* Haswell doesn't have any port selection bits for the panel
1297	 * power sequencer any more. */
1298	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1299		port_sel = PANEL_PORT_SELECT_VLV(port);
1300	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
1301		switch (port) {
1302		case PORT_A:
1303			port_sel = PANEL_PORT_SELECT_DPA;
1304			break;
1305		case PORT_C:
1306			port_sel = PANEL_PORT_SELECT_DPC;
1307			break;
1308		case PORT_D:
1309			port_sel = PANEL_PORT_SELECT_DPD;
1310			break;
1311		default:
1312			MISSING_CASE(port);
1313			break;
1314		}
1315	}
1316
1317	pp_on |= port_sel;
1318
1319	intel_de_write(dev_priv, regs.pp_on, pp_on);
1320	intel_de_write(dev_priv, regs.pp_off, pp_off);
1321
1322	/*
1323	 * Compute the divisor for the pp clock, simply match the Bspec formula.
1324	 */
1325	if (i915_mmio_reg_valid(regs.pp_div)) {
1326		intel_de_write(dev_priv, regs.pp_div,
1327			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
1328	} else {
1329		u32 pp_ctl;
1330
1331		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
1332		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
1333		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
1334		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1335	}
1336
1337	drm_dbg_kms(&dev_priv->drm,
1338		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1339		    intel_de_read(dev_priv, regs.pp_on),
1340		    intel_de_read(dev_priv, regs.pp_off),
1341		    i915_mmio_reg_valid(regs.pp_div) ?
1342		    intel_de_read(dev_priv, regs.pp_div) :
1343		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1344}
1345
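/*
 * Reinitialize the power sequencer delays and registers for an eDP
 * encoder, and sanitize a VDD left enabled by the BIOS. Used on init and
 * resume in case the BIOS messed with the PPS state.
 */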
1346void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1347{
1348	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1349	intel_wakeref_t wakeref;
1350
1351	if (!intel_dp_is_edp(intel_dp))
1352		return;
1353
1354	with_intel_pps_lock(intel_dp, wakeref) {
1355		/*
1356		 * Reinit the power sequencer also on the resume path, in case
1357		 * BIOS did something nasty with it.
1358		 */
1359		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1360			vlv_initial_power_sequencer_setup(intel_dp);
1361
1362		pps_init_delays(intel_dp);
1363		pps_init_registers(intel_dp, false);
1364
1365		intel_pps_vdd_sanitize(intel_dp);
1366	}
1367}
1368
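/*
 * One-time PPS initialization for an eDP connector: set up the delayed
 * VDD off work and the timestamps, then program the initial state via
 * intel_pps_encoder_reset().
 */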
1369void intel_pps_init(struct intel_dp *intel_dp)
1370{
1371	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1372
1373	pps_init_timestamps(intel_dp);
1374
1375	intel_pps_encoder_reset(intel_dp);
1376}
1377
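/*
 * Workaround: write the unlock key into every PP_CONTROL instance so that
 * subsequent PPS register writes take effect on platforms with write
 * protected panel power registers (e.g. CPT/PPT).
 */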
1378void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1379{
1380	int pps_num;
1381	int pps_idx;
1382
1383	if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
1384		return;
1385	/*
1386	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1387	 * everywhere where registers can be write protected.
 1388	 * everywhere registers can be write protected.
1389	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1390		pps_num = 2;
1391	else
1392		pps_num = 1;
1393
1394	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
1395		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
1396
1397		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
1398		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
1399	}
1400}
1401
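/* Select the PPS MMIO register base for this platform. */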
1402void intel_pps_setup(struct drm_i915_private *i915)
1403{
1404	if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
1405		i915->pps_mmio_base = PCH_PPS_BASE;
1406	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1407		i915->pps_mmio_base = VLV_PPS_BASE;
1408	else
1409		i915->pps_mmio_base = PPS_BASE;
1410}