   1/*
   2 * Copyright © 2014 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include <drm/drm_atomic_helper.h>
  25
  26#include "display/intel_dp.h"
  27
  28#include "i915_drv.h"
  29#include "intel_atomic.h"
  30#include "intel_de.h"
  31#include "intel_display_types.h"
  32#include "intel_dp_aux.h"
  33#include "intel_hdmi.h"
  34#include "intel_psr.h"
  35#include "intel_sprite.h"
  36#include "skl_universal_plane.h"
  37
  38/**
  39 * DOC: Panel Self Refresh (PSR/SRD)
  40 *
   41 * Since Haswell the display controller supports Panel Self-Refresh on
   42 * display panels which have a remote frame buffer (RFB) implemented
   43 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
   44 * lower standby states when the system is idle but the display is on, as
   45 * it eliminates display refresh requests to DDR memory completely, as long
   46 * as the frame buffer for that display is unchanged.
  47 *
  48 * Panel Self Refresh must be supported by both Hardware (source) and
  49 * Panel (sink).
  50 *
  51 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
  52 * to power down the link and memory controller. For DSI panels the same idea
  53 * is called "manual mode".
  54 *
  55 * The implementation uses the hardware-based PSR support which automatically
  56 * enters/exits self-refresh mode. The hardware takes care of sending the
  57 * required DP aux message and could even retrain the link (that part isn't
  58 * enabled yet though). The hardware also keeps track of any frontbuffer
  59 * changes to know when to exit self-refresh mode again. Unfortunately that
  60 * part doesn't work too well, hence why the i915 PSR support uses the
  61 * software frontbuffer tracking to make sure it doesn't miss a screen
  62 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
  63 * get called by the frontbuffer tracking code. Note that because of locking
  64 * issues the self-refresh re-enable code is done from a work queue, which
   65 * must be correctly synchronized/cancelled when shutting down the pipe.
  66 *
  67 * DC3CO (DC3 clock off)
  68 *
   69 * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
   70 * the clock off automatically during the PSR2 idle state.
   71 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
  72 * entry/exit allows the HW to enter a low-power state even when page flipping
  73 * periodically (for instance a 30fps video playback scenario).
  74 *
   75 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
   76 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
   77 * frames. If no other flip occurs and that work function executes, DC3CO is
   78 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
   79 * of another flip.
   80 * Front buffer modifications do not trigger DC3CO activation on purpose, as it
   81 * would bring a lot of complexity and most modern systems will only
   82 * use page flips.
  83 */
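/*
 * Rough sketch of the frontbuffer tracking integration described above
 * (illustrative only; the exact call chain lives in intel_frontbuffer.c):
 *
 *   frontbuffer write      -> intel_frontbuffer_invalidate()
 *                               -> intel_psr_invalidate()  (PSR exits)
 *   flush / flip completes -> intel_frontbuffer_flush()
 *                               -> intel_psr_flush()
 *                                    -> schedule_work(&psr.work) to re-enable
 */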
  84
  85static bool psr_global_enabled(struct intel_dp *intel_dp)
  86{
  87	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
  88
  89	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
  90	case I915_PSR_DEBUG_DEFAULT:
  91		return i915->params.enable_psr;
  92	case I915_PSR_DEBUG_DISABLE:
  93		return false;
  94	default:
  95		return true;
  96	}
  97}
  98
  99static bool psr2_global_enabled(struct intel_dp *intel_dp)
 100{
 101	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 102	case I915_PSR_DEBUG_DISABLE:
 103	case I915_PSR_DEBUG_FORCE_PSR1:
 104		return false;
 105	default:
 106		return true;
 107	}
 108}
 109
 110static void psr_irq_control(struct intel_dp *intel_dp)
 111{
 112	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 113	enum transcoder trans_shift;
 114	i915_reg_t imr_reg;
 115	u32 mask, val;
 116
 117	/*
  118	 * gen12+ has registers relative to the transcoder, one per transcoder,
  119	 * using the same bit definition: handle it as TRANSCODER_EDP to force
  120	 * a 0 shift in the bit definition.
 121	 */
 122	if (DISPLAY_VER(dev_priv) >= 12) {
 123		trans_shift = 0;
 124		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
 125	} else {
 126		trans_shift = intel_dp->psr.transcoder;
 127		imr_reg = EDP_PSR_IMR;
 128	}
 129
 130	mask = EDP_PSR_ERROR(trans_shift);
 131	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
 132		mask |= EDP_PSR_POST_EXIT(trans_shift) |
 133			EDP_PSR_PRE_ENTRY(trans_shift);
 134
 135	/* Warning: it is masking/setting reserved bits too */
 136	val = intel_de_read(dev_priv, imr_reg);
 137	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
 138	val |= ~mask;
 139	intel_de_write(dev_priv, imr_reg, val);
 140}
 141
 142static void psr_event_print(struct drm_i915_private *i915,
 143			    u32 val, bool psr2_enabled)
 144{
 145	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
 146	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
 147		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
 148	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
 149		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
 150	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
 151		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
 152	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
 153		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
 154	if (val & PSR_EVENT_GRAPHICS_RESET)
 155		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
 156	if (val & PSR_EVENT_PCH_INTERRUPT)
 157		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
 158	if (val & PSR_EVENT_MEMORY_UP)
 159		drm_dbg_kms(&i915->drm, "\tMemory up\n");
 160	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
 161		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
 162	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
 163		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
 164	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
 165		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
 166	if (val & PSR_EVENT_REGISTER_UPDATE)
 167		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
 168	if (val & PSR_EVENT_HDCP_ENABLE)
 169		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
 170	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
 171		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
 172	if (val & PSR_EVENT_VBI_ENABLE)
 173		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
 174	if (val & PSR_EVENT_LPSP_MODE_EXIT)
 175		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
 176	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
 177		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
 178}
 179
 180void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 181{
 182	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 183	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 184	ktime_t time_ns =  ktime_get();
 185	enum transcoder trans_shift;
 186	i915_reg_t imr_reg;
 187
 188	if (DISPLAY_VER(dev_priv) >= 12) {
 189		trans_shift = 0;
 190		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
 191	} else {
 192		trans_shift = intel_dp->psr.transcoder;
 193		imr_reg = EDP_PSR_IMR;
 194	}
 195
 196	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
 197		intel_dp->psr.last_entry_attempt = time_ns;
 198		drm_dbg_kms(&dev_priv->drm,
 199			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
 200			    transcoder_name(cpu_transcoder));
 201	}
 202
 203	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
 204		intel_dp->psr.last_exit = time_ns;
 205		drm_dbg_kms(&dev_priv->drm,
 206			    "[transcoder %s] PSR exit completed\n",
 207			    transcoder_name(cpu_transcoder));
 208
 209		if (DISPLAY_VER(dev_priv) >= 9) {
 210			u32 val = intel_de_read(dev_priv,
 211						PSR_EVENT(cpu_transcoder));
 212			bool psr2_enabled = intel_dp->psr.psr2_enabled;
 213
 214			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
 215				       val);
 216			psr_event_print(dev_priv, val, psr2_enabled);
 217		}
 218	}
 219
 220	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
 221		u32 val;
 222
 223		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
 224			 transcoder_name(cpu_transcoder));
 225
 226		intel_dp->psr.irq_aux_error = true;
 227
  228		/*
  229		 * If this interrupt is not masked it will keep
  230		 * firing so fast that it prevents the scheduled
  231		 * work from running.
  232		 * Also, after a PSR error we don't want to arm PSR
  233		 * again, so we don't care about unmasking the interrupt
  234		 * or unsetting irq_aux_error.
  235		 */
 236		val = intel_de_read(dev_priv, imr_reg);
 237		val |= EDP_PSR_ERROR(trans_shift);
 238		intel_de_write(dev_priv, imr_reg, val);
 239
 240		schedule_work(&intel_dp->psr.work);
 241	}
 242}
 243
 244static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
 245{
 246	u8 alpm_caps = 0;
 247
 248	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
 249			      &alpm_caps) != 1)
 250		return false;
 251	return alpm_caps & DP_ALPM_CAP;
 252}
 253
 254static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
 255{
 256	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 257	u8 val = 8; /* assume the worst if we can't read the value */
 258
 259	if (drm_dp_dpcd_readb(&intel_dp->aux,
 260			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
 261		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
 262	else
 263		drm_dbg_kms(&i915->drm,
 264			    "Unable to get sink synchronization latency, assuming 8 frames\n");
 265	return val;
 266}
 267
 268static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
 269{
 270	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 271	u16 val;
 272	ssize_t r;
 273
  274	/*
  275	 * Return the default X granularity if granularity is not required or
  276	 * if the DPCD read fails.
  277	 */
 278	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
 279		return 4;
 280
 281	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
 282	if (r != 2)
 283		drm_dbg_kms(&i915->drm,
 284			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
 285
 286	/*
 287	 * Spec says that if the value read is 0 the default granularity should
 288	 * be used instead.
 289	 */
 290	if (r != 2 || val == 0)
 291		val = 4;
 292
 293	return val;
 294}
 295
 296void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 297{
 298	struct drm_i915_private *dev_priv =
 299		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
 300
 301	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
 302			 sizeof(intel_dp->psr_dpcd));
 303
 304	if (!intel_dp->psr_dpcd[0])
 305		return;
 306	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
 307		    intel_dp->psr_dpcd[0]);
 308
 309	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
 310		drm_dbg_kms(&dev_priv->drm,
 311			    "PSR support not currently available for this panel\n");
 312		return;
 313	}
 314
 315	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
 316		drm_dbg_kms(&dev_priv->drm,
 317			    "Panel lacks power state control, PSR cannot be enabled\n");
 318		return;
 319	}
 320
 321	intel_dp->psr.sink_support = true;
 322	intel_dp->psr.sink_sync_latency =
 323		intel_dp_get_sink_sync_latency(intel_dp);
 324
 325	if (DISPLAY_VER(dev_priv) >= 9 &&
 326	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
 327		bool y_req = intel_dp->psr_dpcd[1] &
 328			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
 329		bool alpm = intel_dp_get_alpm_status(intel_dp);
 330
  331		/*
  332		 * All panels that support PSR version 03h (PSR2 +
  333		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
  334		 * only sure that it is going to be used when required by the
  335		 * panel. This way the panel is capable of doing selective
  336		 * updates without an aux frame sync.
  337		 *
  338		 * To support PSR version 02h, and PSR version 03h panels
  339		 * without the Y-coordinate requirement, we would need to
  340		 * enable GTC first.
  341		 */
 342		intel_dp->psr.sink_psr2_support = y_req && alpm;
 343		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
 344			    intel_dp->psr.sink_psr2_support ? "" : "not ");
 345
 346		if (intel_dp->psr.sink_psr2_support) {
 347			intel_dp->psr.colorimetry_support =
 348				intel_dp_get_colorimetry_status(intel_dp);
 349			intel_dp->psr.su_x_granularity =
 350				intel_dp_get_su_x_granulartiy(intel_dp);
 351		}
 352	}
 353}
 354
 355static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 356{
 357	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 358	u32 aux_clock_divider, aux_ctl;
 359	int i;
 360	static const u8 aux_msg[] = {
 361		[0] = DP_AUX_NATIVE_WRITE << 4,
 362		[1] = DP_SET_POWER >> 8,
 363		[2] = DP_SET_POWER & 0xff,
 364		[3] = 1 - 1,
 365		[4] = DP_SET_POWER_D0,
 366	};
 367	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
 368			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
 369			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
 370			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
 371
 372	BUILD_BUG_ON(sizeof(aux_msg) > 20);
 373	for (i = 0; i < sizeof(aux_msg); i += 4)
 374		intel_de_write(dev_priv,
 375			       EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
 376			       intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 377
 378	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 379
 380	/* Start with bits set for DDI_AUX_CTL register */
 381	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
 382					     aux_clock_divider);
 383
 384	/* Select only valid bits for SRD_AUX_CTL */
 385	aux_ctl &= psr_aux_mask;
 386	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
 387		       aux_ctl);
 388}
 389
 390static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 391{
 392	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 393	u8 dpcd_val = DP_PSR_ENABLE;
 394
 395	/* Enable ALPM at sink for psr2 */
 396	if (intel_dp->psr.psr2_enabled) {
 397		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
 398				   DP_ALPM_ENABLE |
 399				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
 400
 401		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
 402	} else {
 403		if (intel_dp->psr.link_standby)
 404			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
 405
 406		if (DISPLAY_VER(dev_priv) >= 8)
 407			dpcd_val |= DP_PSR_CRC_VERIFICATION;
 408	}
 409
 410	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
 411
 412	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 413}
 414
 415static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
 416{
 417	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 418	u32 val = 0;
 419
 420	if (DISPLAY_VER(dev_priv) >= 11)
 421		val |= EDP_PSR_TP4_TIME_0US;
 422
 423	if (dev_priv->params.psr_safest_params) {
 424		val |= EDP_PSR_TP1_TIME_2500us;
 425		val |= EDP_PSR_TP2_TP3_TIME_2500us;
 426		goto check_tp3_sel;
 427	}
 428
 429	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
 430		val |= EDP_PSR_TP1_TIME_0us;
 431	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
 432		val |= EDP_PSR_TP1_TIME_100us;
 433	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
 434		val |= EDP_PSR_TP1_TIME_500us;
 435	else
 436		val |= EDP_PSR_TP1_TIME_2500us;
 437
 438	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
 439		val |= EDP_PSR_TP2_TP3_TIME_0us;
 440	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
 441		val |= EDP_PSR_TP2_TP3_TIME_100us;
 442	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
 443		val |= EDP_PSR_TP2_TP3_TIME_500us;
 444	else
 445		val |= EDP_PSR_TP2_TP3_TIME_2500us;
 446
 447check_tp3_sel:
 448	if (intel_dp_source_supports_hbr2(intel_dp) &&
 449	    drm_dp_tps3_supported(intel_dp->dpcd))
 450		val |= EDP_PSR_TP1_TP3_SEL;
 451	else
 452		val |= EDP_PSR_TP1_TP2_SEL;
 453
 454	return val;
 455}
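/*
 * Illustrative example: a VBT tp1 wakeup time of 200 us does not match the
 * 0/100 us checks above, so intel_psr1_get_tp_time() selects
 * EDP_PSR_TP1_TIME_500us, i.e. the smallest bucket that still covers the
 * requested wakeup time.
 */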
 456
 457static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
 458{
 459	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 460	int idle_frames;
 461
 462	/* Let's use 6 as the minimum to cover all known cases including the
 463	 * off-by-one issue that HW has in some cases.
 464	 */
 465	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
 466	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
 467
 468	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
 469		idle_frames = 0xf;
 470
 471	return idle_frames;
 472}
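/*
 * Illustrative example: with vbt.psr.idle_frames == 2 and a sink sync
 * latency of 8 frames, psr_compute_idle_frames() returns
 * max(max(6, 2), 8 + 1) == 9, which still fits the 4-bit idle frame field.
 */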
 473
 474static void hsw_activate_psr1(struct intel_dp *intel_dp)
 475{
 476	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 477	u32 max_sleep_time = 0x1f;
 478	u32 val = EDP_PSR_ENABLE;
 479
 480	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
 481
 482	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
 483	if (IS_HASWELL(dev_priv))
 484		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 485
 486	if (intel_dp->psr.link_standby)
 487		val |= EDP_PSR_LINK_STANDBY;
 488
 489	val |= intel_psr1_get_tp_time(intel_dp);
 490
 491	if (DISPLAY_VER(dev_priv) >= 8)
 492		val |= EDP_PSR_CRC_ENABLE;
 493
 494	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
 495		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
 496	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
 497}
 498
 499static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
 500{
 501	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 502	u32 val = 0;
 503
 504	if (dev_priv->params.psr_safest_params)
 505		return EDP_PSR2_TP2_TIME_2500us;
 506
 507	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
 508	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
 509		val |= EDP_PSR2_TP2_TIME_50us;
 510	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
 511		val |= EDP_PSR2_TP2_TIME_100us;
 512	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
 513		val |= EDP_PSR2_TP2_TIME_500us;
 514	else
 515		val |= EDP_PSR2_TP2_TIME_2500us;
 516
 517	return val;
 518}
 519
 520static void hsw_activate_psr2(struct intel_dp *intel_dp)
 521{
 522	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 523	u32 val;
 524
 525	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
 526
 527	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
 528	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
 529		val |= EDP_Y_COORDINATE_ENABLE;
 530
 531	val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
 532	val |= intel_psr2_get_tp_time(intel_dp);
 533
 534	if (DISPLAY_VER(dev_priv) >= 12) {
  535		/*
  536		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the default
  537		 * values from BSpec. In order to set an optimal power
  538		 * consumption, modes lower than 4K resolution need to decrease
  539		 * IO_BUFFER_WAKE and FAST_WAKE, and modes higher than 4K
  540		 * resolution need to increase IO_BUFFER_WAKE and FAST_WAKE.
  541		 */
 542		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
 543		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
 544		val |= TGL_EDP_PSR2_FAST_WAKE(7);
 545	} else if (DISPLAY_VER(dev_priv) >= 9) {
 546		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
 547		val |= EDP_PSR2_FAST_WAKE(7);
 548	}
 549
 550	if (intel_dp->psr.psr2_sel_fetch_enabled) {
 551		/* WA 1408330847 */
 552		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
 553		    IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
 554			intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
 555				     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
 556				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);
 557
 558		intel_de_write(dev_priv,
 559			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
 560			       PSR2_MAN_TRK_CTL_ENABLE);
 561	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
 562		intel_de_write(dev_priv,
 563			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
 564	}
 565
  566	/*
  567	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
  568	 * recommends keeping this bit unset while PSR2 is enabled.
  569	 */
 570	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
 571
 572	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
 573}
 574
 575static bool
 576transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
 577{
 578	if (DISPLAY_VER(dev_priv) < 9)
 579		return false;
 580	else if (DISPLAY_VER(dev_priv) >= 12)
 581		return trans == TRANSCODER_A;
 582	else
 583		return trans == TRANSCODER_EDP;
 584}
 585
 586static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
 587{
 588	if (!cstate || !cstate->hw.active)
 589		return 0;
 590
 591	return DIV_ROUND_UP(1000 * 1000,
 592			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
 593}
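/*
 * Illustrative example: for a 60 Hz mode intel_get_frame_time_us() returns
 * DIV_ROUND_UP(1000000, 60) == 16667 us, so the "at least 6 idle frames"
 * DC5/DC6 requirement used below corresponds to roughly 100 ms.
 */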
 594
 595static void psr2_program_idle_frames(struct intel_dp *intel_dp,
 596				     u32 idle_frames)
 597{
 598	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 599	u32 val;
 600
 601	idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
 602	val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
 603	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
 604	val |= idle_frames;
 605	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
 606}
 607
 608static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
 609{
 610	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 611
 612	psr2_program_idle_frames(intel_dp, 0);
 613	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 614}
 615
 616static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
 617{
 618	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 619
 620	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 621	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
 622}
 623
 624static void tgl_dc3co_disable_work(struct work_struct *work)
 625{
 626	struct intel_dp *intel_dp =
 627		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
 628
 629	mutex_lock(&intel_dp->psr.lock);
 630	/* If delayed work is pending, it is not idle */
 631	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
 632		goto unlock;
 633
 634	tgl_psr2_disable_dc3co(intel_dp);
 635unlock:
 636	mutex_unlock(&intel_dp->psr.lock);
 637}
 638
 639static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
 640{
 641	if (!intel_dp->psr.dc3co_exitline)
 642		return;
 643
 644	cancel_delayed_work(&intel_dp->psr.dc3co_work);
  645	/* Before PSR2 exit, disallow DC3CO */
 646	tgl_psr2_disable_dc3co(intel_dp);
 647}
 648
 649static bool
 650dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
 651			      struct intel_crtc_state *crtc_state)
 652{
 653	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 654	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 655	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 656	enum port port = dig_port->base.port;
 657
 658	if (IS_ALDERLAKE_P(dev_priv))
 659		return pipe <= PIPE_B && port <= PORT_B;
 660	else
 661		return pipe == PIPE_A && port == PORT_A;
 662}
 663
 664static void
 665tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
 666				  struct intel_crtc_state *crtc_state)
 667{
 668	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
 669	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 670	u32 exit_scanlines;
 671
 672	/*
 673	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
 674	 * disable DC3CO until the changed dc3co activating/deactivating sequence
 675	 * is applied. B.Specs:49196
 676	 */
 677	return;
 678
 679	/*
  680	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
 681	 * TODO: when the issue is addressed, this restriction should be removed.
 682	 */
 683	if (crtc_state->enable_psr2_sel_fetch)
 684		return;
 685
 686	if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
 687		return;
 688
 689	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
 690		return;
 691
 692	/*
 693	 * DC3CO Exit time 200us B.Spec 49196
 694	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
 695	 */
 696	exit_scanlines =
 697		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
 698
 699	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
 700		return;
 701
 702	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
 703}
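/*
 * Illustrative example (assuming standard 1920x1080@60 CEA timings, 148.5 MHz
 * pixel clock and 2200 pixel htotal, i.e. a ~14.8 us line time):
 * exit_scanlines = ROUNDUP(200 / 14.8) + 1 = 15, so dc3co_exitline would be
 * 1080 - 15 = 1065 once the FIXME above is resolved.
 */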
 704
 705static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
 706					      struct intel_crtc_state *crtc_state)
 707{
 708	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
 709	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 710	struct intel_plane_state *plane_state;
 711	struct intel_plane *plane;
 712	int i;
 713
 714	if (!dev_priv->params.enable_psr2_sel_fetch &&
 715	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
 716		drm_dbg_kms(&dev_priv->drm,
 717			    "PSR2 sel fetch not enabled, disabled by parameter\n");
 718		return false;
 719	}
 720
 721	if (crtc_state->uapi.async_flip) {
 722		drm_dbg_kms(&dev_priv->drm,
 723			    "PSR2 sel fetch not enabled, async flip enabled\n");
 724		return false;
 725	}
 726
 727	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
 728		if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
 729			drm_dbg_kms(&dev_priv->drm,
 730				    "PSR2 sel fetch not enabled, plane rotated\n");
 731			return false;
 732		}
 733	}
 734
 735	/* Wa_14010254185 Wa_14010103792 */
 736	if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
 737		drm_dbg_kms(&dev_priv->drm,
 738			    "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
 739		return false;
 740	}
 741
 742	return crtc_state->enable_psr2_sel_fetch = true;
 743}
 744
 745static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 746				    struct intel_crtc_state *crtc_state)
 747{
 748	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 749	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
 750	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
 751	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
 752
 753	if (!intel_dp->psr.sink_psr2_support)
 754		return false;
 755
  756	/* JSL and EHL only support eDP 1.3 */
 757	if (IS_JSL_EHL(dev_priv)) {
 758		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
 759		return false;
 760	}
 761
 762	/* Wa_16011181250 */
 763	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv)) {
 764		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
 765		return false;
 766	}
 767
 768	/*
  769	 * We are missing the implementation of some workarounds needed to enable
  770	 * PSR2 on Alderlake_P; until they are ready, PSR2 should be kept disabled.
 771	 */
 772	if (IS_ALDERLAKE_P(dev_priv)) {
 773		drm_dbg_kms(&dev_priv->drm, "PSR2 is missing the implementation of workarounds\n");
 774		return false;
 775	}
 776
 777	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
 778		drm_dbg_kms(&dev_priv->drm,
 779			    "PSR2 not supported in transcoder %s\n",
 780			    transcoder_name(crtc_state->cpu_transcoder));
 781		return false;
 782	}
 783
 784	if (!psr2_global_enabled(intel_dp)) {
 785		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
 786		return false;
 787	}
 788
 789	/*
 790	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
 791	 * resolution requires DSC to be enabled, priority is given to DSC
 792	 * over PSR2.
 793	 */
 794	if (crtc_state->dsc.compression_enable) {
 795		drm_dbg_kms(&dev_priv->drm,
 796			    "PSR2 cannot be enabled since DSC is enabled\n");
 797		return false;
 798	}
 799
 800	if (crtc_state->crc_enabled) {
 801		drm_dbg_kms(&dev_priv->drm,
 802			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
 803		return false;
 804	}
 805
 806	if (DISPLAY_VER(dev_priv) >= 12) {
 807		psr_max_h = 5120;
 808		psr_max_v = 3200;
 809		max_bpp = 30;
 810	} else if (DISPLAY_VER(dev_priv) >= 10) {
 811		psr_max_h = 4096;
 812		psr_max_v = 2304;
 813		max_bpp = 24;
 814	} else if (DISPLAY_VER(dev_priv) == 9) {
 815		psr_max_h = 3640;
 816		psr_max_v = 2304;
 817		max_bpp = 24;
 818	}
 819
 820	if (crtc_state->pipe_bpp > max_bpp) {
 821		drm_dbg_kms(&dev_priv->drm,
 822			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
 823			    crtc_state->pipe_bpp, max_bpp);
 824		return false;
 825	}
 826
 827	/*
 828	 * HW sends SU blocks of size four scan lines, which means the starting
 829	 * X coordinate and Y granularity requirements will always be met. We
  830	 * only need to validate that the SU block width is a multiple of
  831	 * the X granularity.
 832	 */
 833	if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
 834		drm_dbg_kms(&dev_priv->drm,
 835			    "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
 836			    crtc_hdisplay, intel_dp->psr.su_x_granularity);
 837		return false;
 838	}
 839
 840	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
 841		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
 842		    !HAS_PSR_HW_TRACKING(dev_priv)) {
 843			drm_dbg_kms(&dev_priv->drm,
 844				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
 845			return false;
 846		}
 847	}
 848
 849	/* Wa_2209313811 */
 850	if (!crtc_state->enable_psr2_sel_fetch &&
 851	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
  852		drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported on this Display stepping\n");
 853		return false;
 854	}
 855
 856	if (!crtc_state->enable_psr2_sel_fetch &&
 857	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
 858		drm_dbg_kms(&dev_priv->drm,
 859			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
 860			    crtc_hdisplay, crtc_vdisplay,
 861			    psr_max_h, psr_max_v);
 862		return false;
 863	}
 864
 865	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
 866	return true;
 867}
 868
 869void intel_psr_compute_config(struct intel_dp *intel_dp,
 870			      struct intel_crtc_state *crtc_state)
 871{
 872	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 873	const struct drm_display_mode *adjusted_mode =
 874		&crtc_state->hw.adjusted_mode;
 875	int psr_setup_time;
 876
 877	/*
  878	 * Current PSR panels don't work reliably with VRR enabled,
  879	 * so if VRR is enabled, do not enable PSR.
 880	 */
 881	if (crtc_state->vrr.enable)
 882		return;
 883
 884	if (!CAN_PSR(intel_dp))
 885		return;
 886
 887	if (!psr_global_enabled(intel_dp)) {
 888		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
 889		return;
 890	}
 891
 892	if (intel_dp->psr.sink_not_reliable) {
 893		drm_dbg_kms(&dev_priv->drm,
 894			    "PSR sink implementation is not reliable\n");
 895		return;
 896	}
 897
 898	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 899		drm_dbg_kms(&dev_priv->drm,
 900			    "PSR condition failed: Interlaced mode enabled\n");
 901		return;
 902	}
 903
 904	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
 905	if (psr_setup_time < 0) {
 906		drm_dbg_kms(&dev_priv->drm,
 907			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
 908			    intel_dp->psr_dpcd[1]);
 909		return;
 910	}
 911
 912	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
 913	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
 914		drm_dbg_kms(&dev_priv->drm,
 915			    "PSR condition failed: PSR setup time (%d us) too long\n",
 916			    psr_setup_time);
 917		return;
 918	}
 919
 920	crtc_state->has_psr = true;
 921	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
 922	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
 923}
 924
 925void intel_psr_get_config(struct intel_encoder *encoder,
 926			  struct intel_crtc_state *pipe_config)
 927{
 928	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 929	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 930	struct intel_dp *intel_dp;
 931	u32 val;
 932
 933	if (!dig_port)
 934		return;
 935
 936	intel_dp = &dig_port->dp;
 937	if (!CAN_PSR(intel_dp))
 938		return;
 939
 940	mutex_lock(&intel_dp->psr.lock);
 941	if (!intel_dp->psr.enabled)
 942		goto unlock;
 943
 944	/*
  945	 * Not possible to read the EDP_PSR/PSR2_CTL registers here, as PSR is
  946	 * dynamically enabled/disabled by frontbuffer tracking and others.
 947	 */
 948	pipe_config->has_psr = true;
 949	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
 950	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
 951
 952	if (!intel_dp->psr.psr2_enabled)
 953		goto unlock;
 954
 955	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
 956		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
 957		if (val & PSR2_MAN_TRK_CTL_ENABLE)
 958			pipe_config->enable_psr2_sel_fetch = true;
 959	}
 960
 961	if (DISPLAY_VER(dev_priv) >= 12) {
 962		val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
 963		val &= EXITLINE_MASK;
 964		pipe_config->dc3co_exitline = val;
 965	}
 966unlock:
 967	mutex_unlock(&intel_dp->psr.lock);
 968}
 969
 970static void intel_psr_activate(struct intel_dp *intel_dp)
 971{
 972	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 973	enum transcoder transcoder = intel_dp->psr.transcoder;
 974
 975	if (transcoder_has_psr2(dev_priv, transcoder))
 976		drm_WARN_ON(&dev_priv->drm,
 977			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
 978
 979	drm_WARN_ON(&dev_priv->drm,
 980		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
 981	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
 982	lockdep_assert_held(&intel_dp->psr.lock);
 983
  984	/* psr1 and psr2 are mutually exclusive. */
 985	if (intel_dp->psr.psr2_enabled)
 986		hsw_activate_psr2(intel_dp);
 987	else
 988		hsw_activate_psr1(intel_dp);
 989
 990	intel_dp->psr.active = true;
 991}
 992
 993static void intel_psr_enable_source(struct intel_dp *intel_dp)
 994{
 995	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 996	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 997	u32 mask;
 998
  999	/* Only HSW and BDW have PSR AUX registers that need to be setup.
 1000	 * SKL+ use hardcoded values for PSR AUX transactions.
 1001	 */
1002	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1003		hsw_psr_setup_aux(intel_dp);
1004
1005	if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) {
1006		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
1007		u32 chicken = intel_de_read(dev_priv, reg);
1008
1009		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
1010			   PSR2_ADD_VERTICAL_LINE_COUNT;
1011		intel_de_write(dev_priv, reg, chicken);
1012	}
1013
 1014	/*
 1015	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
 1016	 * mask LPSP to avoid a dependency on other drivers that might block
 1017	 * runtime_pm, besides preventing other HW tracking issues, now that we
 1018	 * can rely on frontbuffer tracking.
 1019	 */
1020	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1021	       EDP_PSR_DEBUG_MASK_HPD |
1022	       EDP_PSR_DEBUG_MASK_LPSP |
1023	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1024
1025	if (DISPLAY_VER(dev_priv) < 11)
1026		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1027
1028	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
1029		       mask);
1030
1031	psr_irq_control(intel_dp);
1032
1033	if (intel_dp->psr.dc3co_exitline) {
1034		u32 val;
1035
1036		/*
1037		 * TODO: if future platforms supports DC3CO in more than one
1038		 * transcoder, EXITLINE will need to be unset when disabling PSR
1039		 */
1040		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
1041		val &= ~EXITLINE_MASK;
1042		val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
1043		val |= EXITLINE_ENABLE;
1044		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
1045	}
1046
1047	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1048		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1049			     intel_dp->psr.psr2_sel_fetch_enabled ?
1050			     IGNORE_PSR2_HW_TRACKING : 0);
1051}
1052
1053static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1054{
1055	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1056	u32 val;
1057
1058	/*
1059	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1060	 * will still keep the error set even after the reset done in the
1061	 * irq_preinstall and irq_uninstall hooks.
 1062	 * Enabling PSR in this situation causes the screen to freeze the
 1063	 * first time the PSR HW tries to activate, so let's keep PSR disabled
 1064	 * to avoid any rendering problems.
1065	 */
1066	if (DISPLAY_VER(dev_priv) >= 12) {
1067		val = intel_de_read(dev_priv,
1068				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
1069		val &= EDP_PSR_ERROR(0);
1070	} else {
1071		val = intel_de_read(dev_priv, EDP_PSR_IIR);
1072		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
1073	}
1074	if (val) {
1075		intel_dp->psr.sink_not_reliable = true;
1076		drm_dbg_kms(&dev_priv->drm,
1077			    "PSR interruption error set, not enabling PSR\n");
1078		return false;
1079	}
1080
1081	return true;
1082}
1083
1084static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1085				    const struct intel_crtc_state *crtc_state,
1086				    const struct drm_connector_state *conn_state)
1087{
1088	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1089	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1090	struct intel_encoder *encoder = &dig_port->base;
1091	u32 val;
1092
1093	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1094
1095	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1096	intel_dp->psr.busy_frontbuffer_bits = 0;
1097	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1098	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1099	/* DC5/DC6 requires at least 6 idle frames */
1100	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1101	intel_dp->psr.dc3co_exit_delay = val;
1102	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1103	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1104
1105	if (!psr_interrupt_error_check(intel_dp))
1106		return;
1107
1108	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1109		    intel_dp->psr.psr2_enabled ? "2" : "1");
1110	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1111				     &intel_dp->psr.vsc);
1112	intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
1113	intel_psr_enable_sink(intel_dp);
1114	intel_psr_enable_source(intel_dp);
1115	intel_dp->psr.enabled = true;
1116	intel_dp->psr.paused = false;
1117
1118	intel_psr_activate(intel_dp);
1119}
1120
1121/**
1122 * intel_psr_enable - Enable PSR
1123 * @intel_dp: Intel DP
1124 * @crtc_state: new CRTC state
1125 * @conn_state: new CONNECTOR state
1126 *
1127 * This function can only be called after the pipe is fully trained and enabled.
1128 */
1129void intel_psr_enable(struct intel_dp *intel_dp,
1130		      const struct intel_crtc_state *crtc_state,
1131		      const struct drm_connector_state *conn_state)
1132{
1133	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1134
1135	if (!CAN_PSR(intel_dp))
1136		return;
1137
1138	if (!crtc_state->has_psr)
1139		return;
1140
1141	drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
1142
1143	mutex_lock(&intel_dp->psr.lock);
1144	intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
1145	mutex_unlock(&intel_dp->psr.lock);
1146}
1147
1148static void intel_psr_exit(struct intel_dp *intel_dp)
1149{
1150	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1151	u32 val;
1152
1153	if (!intel_dp->psr.active) {
1154		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1155			val = intel_de_read(dev_priv,
1156					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1157			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1158		}
1159
1160		val = intel_de_read(dev_priv,
1161				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1162		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1163
1164		return;
1165	}
1166
1167	if (intel_dp->psr.psr2_enabled) {
1168		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1169		val = intel_de_read(dev_priv,
1170				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1171		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1172		val &= ~EDP_PSR2_ENABLE;
1173		intel_de_write(dev_priv,
1174			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1175	} else {
1176		val = intel_de_read(dev_priv,
1177				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1178		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1179		val &= ~EDP_PSR_ENABLE;
1180		intel_de_write(dev_priv,
1181			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1182	}
1183	intel_dp->psr.active = false;
1184}
1185
1186static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1187{
1188	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1189	i915_reg_t psr_status;
1190	u32 psr_status_mask;
1191
1192	if (intel_dp->psr.psr2_enabled) {
1193		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1194		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1195	} else {
1196		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1197		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1198	}
1199
1200	/* Wait till PSR is idle */
1201	if (intel_de_wait_for_clear(dev_priv, psr_status,
1202				    psr_status_mask, 2000))
1203		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1204}
1205
1206static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1207{
1208	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1209
1210	lockdep_assert_held(&intel_dp->psr.lock);
1211
1212	if (!intel_dp->psr.enabled)
1213		return;
1214
1215	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1216		    intel_dp->psr.psr2_enabled ? "2" : "1");
1217
1218	intel_psr_exit(intel_dp);
1219	intel_psr_wait_exit_locked(intel_dp);
1220
1221	/* WA 1408330847 */
1222	if (intel_dp->psr.psr2_sel_fetch_enabled &&
1223	    (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
1224	     IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
1225		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
1226			     DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
1227
1228	/* Disable PSR on Sink */
1229	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1230
1231	if (intel_dp->psr.psr2_enabled)
1232		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1233
1234	intel_dp->psr.enabled = false;
1235}
1236
1237/**
1238 * intel_psr_disable - Disable PSR
1239 * @intel_dp: Intel DP
1240 * @old_crtc_state: old CRTC state
1241 *
1242 * This function needs to be called before disabling pipe.
1243 */
1244void intel_psr_disable(struct intel_dp *intel_dp,
1245		       const struct intel_crtc_state *old_crtc_state)
1246{
1247	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1248
1249	if (!old_crtc_state->has_psr)
1250		return;
1251
1252	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1253		return;
1254
1255	mutex_lock(&intel_dp->psr.lock);
1256
1257	intel_psr_disable_locked(intel_dp);
1258
1259	mutex_unlock(&intel_dp->psr.lock);
1260	cancel_work_sync(&intel_dp->psr.work);
1261	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1262}
1263
1264/**
1265 * intel_psr_pause - Pause PSR
1266 * @intel_dp: Intel DP
1267 *
 1268 * This function needs to be called after enabling PSR.
1269 */
1270void intel_psr_pause(struct intel_dp *intel_dp)
1271{
1272	struct intel_psr *psr = &intel_dp->psr;
1273
1274	if (!CAN_PSR(intel_dp))
1275		return;
1276
1277	mutex_lock(&psr->lock);
1278
1279	if (!psr->enabled) {
1280		mutex_unlock(&psr->lock);
1281		return;
1282	}
1283
1284	intel_psr_exit(intel_dp);
1285	intel_psr_wait_exit_locked(intel_dp);
1286	psr->paused = true;
1287
1288	mutex_unlock(&psr->lock);
1289
1290	cancel_work_sync(&psr->work);
1291	cancel_delayed_work_sync(&psr->dc3co_work);
1292}
1293
1294/**
1295 * intel_psr_resume - Resume PSR
1296 * @intel_dp: Intel DP
1297 *
 1298 * This function needs to be called after pausing PSR.
1299 */
1300void intel_psr_resume(struct intel_dp *intel_dp)
1301{
1302	struct intel_psr *psr = &intel_dp->psr;
1303
1304	if (!CAN_PSR(intel_dp))
1305		return;
1306
1307	mutex_lock(&psr->lock);
1308
1309	if (!psr->paused)
1310		goto unlock;
1311
1312	psr->paused = false;
1313	intel_psr_activate(intel_dp);
1314
1315unlock:
1316	mutex_unlock(&psr->lock);
1317}
1318
1319static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1320{
1321	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1322
1323	if (DISPLAY_VER(dev_priv) >= 9)
1324		/*
1325		 * Display WA #0884: skl+
1326		 * This documented WA for bxt can be safely applied
1327		 * broadly so we can force HW tracking to exit PSR
1328		 * instead of disabling and re-enabling.
 1329		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
 1330		 * but it makes more sense to write to the currently
 1331		 * active pipe.
1332		 */
1333		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1334	else
1335		/*
 1336		 * A write to CURSURFLIVE does not cause HW tracking to exit PSR
 1337		 * on older gens, so do the manual exit instead.
1338		 */
1339		intel_psr_exit(intel_dp);
1340}
1341
1342void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
1343					const struct intel_crtc_state *crtc_state,
1344					const struct intel_plane_state *plane_state,
1345					int color_plane)
1346{
1347	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1348	enum pipe pipe = plane->pipe;
1349	const struct drm_rect *clip;
1350	u32 val, offset;
1351	int ret, x, y;
1352
1353	if (!crtc_state->enable_psr2_sel_fetch)
1354		return;
1355
1356	val = plane_state ? plane_state->ctl : 0;
1357	val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
1358	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
1359	if (!val || plane->id == PLANE_CURSOR)
1360		return;
1361
1362	clip = &plane_state->psr2_sel_fetch_area;
1363
1364	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1365	val |= plane_state->uapi.dst.x1;
1366	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1367
1368	/* TODO: consider auxiliary surfaces */
1369	x = plane_state->uapi.src.x1 >> 16;
1370	y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
1371	ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
1372	if (ret)
1373		drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
1374			      ret);
1375	val = y << 16 | x;
1376	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1377			  val);
1378
1379	/* Sizes are 0 based */
1380	val = (drm_rect_height(clip) - 1) << 16;
1381	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1382	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1383}
1384
1385void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1386{
1387	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1388
1389	if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
1390	    !crtc_state->enable_psr2_sel_fetch)
1391		return;
1392
1393	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
1394		       crtc_state->psr2_man_track_ctl);
1395}
1396
1397static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1398				  struct drm_rect *clip, bool full_update)
1399{
1400	u32 val = PSR2_MAN_TRK_CTL_ENABLE;
1401
1402	if (full_update) {
1403		val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1404		goto exit;
1405	}
1406
1407	if (clip->y1 == -1)
1408		goto exit;
1409
1410	drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1411
1412	val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1413	val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1414	val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1415exit:
1416	crtc_state->psr2_man_track_ctl = val;
1417}
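/*
 * Illustrative example: a 4-line-aligned clip with y1 == 64 and y2 == 128
 * is programmed above as SU_REGION_START_ADDR(64 / 4 + 1) == 17 and
 * SU_REGION_END_ADDR(128 / 4 + 1) == 33.
 */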
1418
1419static void clip_area_update(struct drm_rect *overlap_damage_area,
1420			     struct drm_rect *damage_area)
1421{
1422	if (overlap_damage_area->y1 == -1) {
1423		overlap_damage_area->y1 = damage_area->y1;
1424		overlap_damage_area->y2 = damage_area->y2;
1425		return;
1426	}
1427
1428	if (damage_area->y1 < overlap_damage_area->y1)
1429		overlap_damage_area->y1 = damage_area->y1;
1430
1431	if (damage_area->y2 > overlap_damage_area->y2)
1432		overlap_damage_area->y2 = damage_area->y2;
1433}
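/*
 * Illustrative example: merging a damage area of y1 = 100, y2 = 200 into an
 * overlap area of y1 = 150, y2 = 400 widens the overlap to y1 = 100,
 * y2 = 400; an overlap area still at its y1 == -1 initial value is simply
 * replaced.
 */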
1434
1435int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1436				struct intel_crtc *crtc)
1437{
1438	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1439	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1440	struct intel_plane_state *new_plane_state, *old_plane_state;
1441	struct intel_plane *plane;
1442	bool full_update = false;
1443	int i, ret;
1444
1445	if (!crtc_state->enable_psr2_sel_fetch)
1446		return 0;
1447
1448	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
1449	if (ret)
1450		return ret;
1451
1452	/*
1453	 * Calculate minimal selective fetch area of each plane and calculate
1454	 * the pipe damaged area.
1455	 * In the next loop the plane selective fetch area will actually be set
 1456	 * using the whole pipe damaged area.
1457	 */
1458	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1459					     new_plane_state, i) {
1460		struct drm_rect src, damaged_area = { .y1 = -1 };
1461		struct drm_mode_rect *damaged_clips;
1462		u32 num_clips, j;
1463
1464		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1465			continue;
1466
1467		if (!new_plane_state->uapi.visible &&
1468		    !old_plane_state->uapi.visible)
1469			continue;
1470
1471		/*
1472		 * TODO: Not clear how to handle planes with negative position,
1473		 * also planes are not updated if they have a negative X
 1474		 * position, so for now do a full update in these cases.
1475		 */
1476		if (new_plane_state->uapi.dst.y1 < 0 ||
1477		    new_plane_state->uapi.dst.x1 < 0) {
1478			full_update = true;
1479			break;
1480		}
1481
1482		num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
1483
1484		/*
 1485		 * If the visibility changed or the plane moved, mark the whole
 1486		 * plane area as damaged, as it needs a complete redraw in both
 1487		 * the old and new positions.
1488		 */
1489		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
1490		    !drm_rect_equals(&new_plane_state->uapi.dst,
1491				     &old_plane_state->uapi.dst)) {
1492			if (old_plane_state->uapi.visible) {
1493				damaged_area.y1 = old_plane_state->uapi.dst.y1;
1494				damaged_area.y2 = old_plane_state->uapi.dst.y2;
1495				clip_area_update(&pipe_clip, &damaged_area);
1496			}
1497
1498			if (new_plane_state->uapi.visible) {
1499				damaged_area.y1 = new_plane_state->uapi.dst.y1;
1500				damaged_area.y2 = new_plane_state->uapi.dst.y2;
1501				clip_area_update(&pipe_clip, &damaged_area);
1502			}
1503			continue;
1504		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
1505			   (!num_clips &&
1506			    new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
1507			/*
 1508			 * If the plane doesn't have damaged areas but the
1509			 * framebuffer changed or alpha changed, mark the whole
1510			 * plane area as damaged.
1511			 */
1512			damaged_area.y1 = new_plane_state->uapi.dst.y1;
1513			damaged_area.y2 = new_plane_state->uapi.dst.y2;
1514			clip_area_update(&pipe_clip, &damaged_area);
1515			continue;
1516		}
1517
1518		drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
1519		damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
1520
1521		for (j = 0; j < num_clips; j++) {
1522			struct drm_rect clip;
1523
1524			clip.x1 = damaged_clips[j].x1;
1525			clip.y1 = damaged_clips[j].y1;
1526			clip.x2 = damaged_clips[j].x2;
1527			clip.y2 = damaged_clips[j].y2;
1528			if (drm_rect_intersect(&clip, &src))
1529				clip_area_update(&damaged_area, &clip);
1530		}
1531
1532		if (damaged_area.y1 == -1)
1533			continue;
1534
1535		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
1536		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
1537		clip_area_update(&pipe_clip, &damaged_area);
1538	}
1539
1540	if (full_update)
1541		goto skip_sel_fetch_set_loop;
1542
1543	/* It must be aligned to 4 lines */
1544	pipe_clip.y1 -= pipe_clip.y1 % 4;
1545	if (pipe_clip.y2 % 4)
1546		pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
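	/* e.g. (illustrative) a pipe_clip of y1 = 10, y2 = 21 becomes y1 = 8,
	 * y2 = 24 after the 4-line alignment above.
	 */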
1547
1548	/*
 1549	 * Now that we have the pipe damaged area, check if it intersects with
 1550	 * every plane; if it does, set the plane's selective fetch area.
1551	 */
1552	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1553					     new_plane_state, i) {
1554		struct drm_rect *sel_fetch_area, inter;
1555
1556		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
1557		    !new_plane_state->uapi.visible)
1558			continue;
1559
1560		inter = pipe_clip;
1561		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
1562			continue;
1563
1564		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
1565		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
1566		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
1567	}
1568
1569skip_sel_fetch_set_loop:
1570	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
1571	return 0;
1572}
1573
1574/**
1575 * intel_psr_update - Update PSR state
1576 * @intel_dp: Intel DP
1577 * @crtc_state: new CRTC state
1578 * @conn_state: new CONNECTOR state
1579 *
1580 * This function will update the PSR state, disabling, enabling or switching the
1581 * PSR version when executing fastsets. For full modesets, intel_psr_disable() and
1582 * intel_psr_enable() should be called instead.
1583 */
1584void intel_psr_update(struct intel_dp *intel_dp,
1585		      const struct intel_crtc_state *crtc_state,
1586		      const struct drm_connector_state *conn_state)
1587{
1588	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1589	struct intel_psr *psr = &intel_dp->psr;
1590	bool enable, psr2_enable;
1591
1592	if (!CAN_PSR(intel_dp))
1593		return;
1594
1595	mutex_lock(&intel_dp->psr.lock);
1596
1597	enable = crtc_state->has_psr;
1598	psr2_enable = crtc_state->has_psr2;
1599
1600	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
1601	    crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
1602		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
1603		if (crtc_state->crc_enabled && psr->enabled)
1604			psr_force_hw_tracking_exit(intel_dp);
1605		else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) {
1606			/*
1607			 * Activate PSR again after a force exit when enabling
1608			 * CRC in older gens
1609			 */
1610			if (!intel_dp->psr.active &&
1611			    !intel_dp->psr.busy_frontbuffer_bits)
1612				schedule_work(&intel_dp->psr.work);
1613		}
1614
1615		goto unlock;
1616	}
1617
1618	if (psr->enabled)
1619		intel_psr_disable_locked(intel_dp);
1620
1621	if (enable)
1622		intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
1623
1624unlock:
1625	mutex_unlock(&intel_dp->psr.lock);
1626}
1627
1628/**
1629 * psr_wait_for_idle - wait for PSR1 to idle
1630 * @intel_dp: Intel DP
1631 * @out_value: PSR status in case of failure
1632 *
1633 * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
1634 *
1635 */
1636static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
1637{
1638	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1639
1640	/*
1641	 * From bspec: Panel Self Refresh (BDW+)
1642	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
1643	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
1644	 * defensive enough to cover everything.
1645	 */
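	/*
	 * As a rough illustration, at a 60 Hz refresh rate that worst case
	 * is about 16.7 ms + 6 ms + 1.5 ms ~= 24 ms, so the 50 ms timeout
	 * below leaves roughly a 2x margin.
	 */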
1646	return __intel_wait_for_register(&dev_priv->uncore,
1647					 EDP_PSR_STATUS(intel_dp->psr.transcoder),
1648					 EDP_PSR_STATUS_STATE_MASK,
1649					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
1650					 out_value);
1651}
1652
1653/**
1654 * intel_psr_wait_for_idle - wait for PSR1 to idle
1655 * @new_crtc_state: new CRTC state
1656 *
1657 * This function is expected to be called from pipe_update_start() where it is
1658 * not expected to race with PSR enable or disable.
1659 */
1660void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
1661{
1662	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
1663	struct intel_encoder *encoder;
1664
1665	if (!new_crtc_state->has_psr)
1666		return;
1667
1668	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1669					     new_crtc_state->uapi.encoder_mask) {
1670		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1671		u32 psr_status;
1672
1673		mutex_lock(&intel_dp->psr.lock);
1674		if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
1675			mutex_unlock(&intel_dp->psr.lock);
1676			continue;
1677		}
1678
1679		/* when the PSR1 is enabled */
1680		if (psr_wait_for_idle(intel_dp, &psr_status))
1681			drm_err(&dev_priv->drm,
1682				"PSR idle timed out 0x%x, atomic update may fail\n",
1683				psr_status);
1684		mutex_unlock(&intel_dp->psr.lock);
1685	}
1686}
1687
1688static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
1689{
1690	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1691	i915_reg_t reg;
1692	u32 mask;
1693	int err;
1694
1695	if (!intel_dp->psr.enabled)
1696		return false;
1697
1698	if (intel_dp->psr.psr2_enabled) {
1699		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1700		mask = EDP_PSR2_STATUS_STATE_MASK;
1701	} else {
1702		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1703		mask = EDP_PSR_STATUS_STATE_MASK;
1704	}
1705
1706	mutex_unlock(&intel_dp->psr.lock);
1707
1708	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
1709	if (err)
1710		drm_err(&dev_priv->drm,
1711			"Timed out waiting for PSR Idle for re-enable\n");
1712
1713	/* After the unlocked wait, verify that PSR is still wanted! */
1714	mutex_lock(&intel_dp->psr.lock);
1715	return err == 0 && intel_dp->psr.enabled;
1716}
1717
1718static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
1719{
1720	struct drm_connector_list_iter conn_iter;
1721	struct drm_device *dev = &dev_priv->drm;
1722	struct drm_modeset_acquire_ctx ctx;
1723	struct drm_atomic_state *state;
1724	struct drm_connector *conn;
1725	int err = 0;
1726
1727	state = drm_atomic_state_alloc(dev);
1728	if (!state)
1729		return -ENOMEM;
1730
1731	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1732	state->acquire_ctx = &ctx;
1733
1734retry:
1735
1736	drm_connector_list_iter_begin(dev, &conn_iter);
1737	drm_for_each_connector_iter(conn, &conn_iter) {
1738		struct drm_connector_state *conn_state;
1739		struct drm_crtc_state *crtc_state;
1740
1741		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
1742			continue;
1743
1744		conn_state = drm_atomic_get_connector_state(state, conn);
1745		if (IS_ERR(conn_state)) {
1746			err = PTR_ERR(conn_state);
1747			break;
1748		}
1749
1750		if (!conn_state->crtc)
1751			continue;
1752
1753		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
1754		if (IS_ERR(crtc_state)) {
1755			err = PTR_ERR(crtc_state);
1756			break;
1757		}
1758
1759		/* Mark mode as changed to trigger a pipe->update() */
1760		crtc_state->mode_changed = true;
1761	}
1762	drm_connector_list_iter_end(&conn_iter);
1763
1764	if (err == 0)
1765		err = drm_atomic_commit(state);
1766
1767	if (err == -EDEADLK) {
1768		drm_atomic_state_clear(state);
1769		err = drm_modeset_backoff(&ctx);
1770		if (!err)
1771			goto retry;
1772	}
1773
1774	drm_modeset_drop_locks(&ctx);
1775	drm_modeset_acquire_fini(&ctx);
1776	drm_atomic_state_put(state);
1777
1778	return err;
1779}
1780
1781int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
1782{
1783	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1784	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
1785	u32 old_mode;
1786	int ret;
1787
1788	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
1789	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1790		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
1791		return -EINVAL;
1792	}
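	/*
	 * For instance (illustration only), a value such as
	 * I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_FORCE_PSR1 passes the check
	 * above, while any value with bits outside the IRQ flag and the
	 * mode mask set is rejected with -EINVAL.
	 */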
1793
1794	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
1795	if (ret)
1796		return ret;
1797
1798	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
1799	intel_dp->psr.debug = val;
1800
1801	/*
1802	 * Do it right away if it's already enabled, otherwise it will be done
1803	 * when enabling the source.
1804	 */
1805	if (intel_dp->psr.enabled)
1806		psr_irq_control(intel_dp);
1807
1808	mutex_unlock(&intel_dp->psr.lock);
1809
1810	if (old_mode != mode)
1811		ret = intel_psr_fastset_force(dev_priv);
1812
1813	return ret;
1814}
1815
1816static void intel_psr_handle_irq(struct intel_dp *intel_dp)
1817{
1818	struct intel_psr *psr = &intel_dp->psr;
1819
1820	intel_psr_disable_locked(intel_dp);
1821	psr->sink_not_reliable = true;
1822	/* let's make sure that the sink is awake */
1823	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
1824}
1825
1826static void intel_psr_work(struct work_struct *work)
1827{
1828	struct intel_dp *intel_dp =
1829		container_of(work, typeof(*intel_dp), psr.work);
1830
1831	mutex_lock(&intel_dp->psr.lock);
1832
1833	if (!intel_dp->psr.enabled)
1834		goto unlock;
1835
1836	if (READ_ONCE(intel_dp->psr.irq_aux_error))
1837		intel_psr_handle_irq(intel_dp);
1838
1839	/*
1840	 * We have to make sure PSR is ready for re-enable
1841	 * otherwise it stays disabled until the next full enable/disable cycle.
1842	 * PSR might take some time to get fully disabled
1843	 * and be ready for re-enable.
1844	 */
1845	if (!__psr_wait_for_idle_locked(intel_dp))
1846		goto unlock;
1847
1848	/*
1849	 * The delayed work can race with an invalidate hence we need to
1850	 * recheck. Since psr_flush first clears this and then reschedules we
1851	 * won't ever miss a flush when bailing out here.
1852	 */
1853	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
1854		goto unlock;
1855
1856	intel_psr_activate(intel_dp);
1857unlock:
1858	mutex_unlock(&intel_dp->psr.lock);
1859}
1860
1861/**
1862 * intel_psr_invalidate - Invalidate PSR
1863 * @dev_priv: i915 device
1864 * @frontbuffer_bits: frontbuffer plane tracking bits
1865 * @origin: which operation caused the invalidate
1866 *
1867 * Since the hardware frontbuffer tracking has gaps we need to integrate
1868 * with the software frontbuffer tracking. This function gets called every
1869 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
1870 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
1871 *
1872 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1873 */
1874void intel_psr_invalidate(struct drm_i915_private *dev_priv,
1875			  unsigned frontbuffer_bits, enum fb_op_origin origin)
1876{
1877	struct intel_encoder *encoder;
1878
1879	if (origin == ORIGIN_FLIP)
1880		return;
1881
1882	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1883		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
1884		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1885
1886		mutex_lock(&intel_dp->psr.lock);
1887		if (!intel_dp->psr.enabled) {
1888			mutex_unlock(&intel_dp->psr.lock);
1889			continue;
1890		}
1891
1892		pipe_frontbuffer_bits &=
1893			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
1894		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
1895
1896		if (pipe_frontbuffer_bits)
1897			intel_psr_exit(intel_dp);
1898
1899		mutex_unlock(&intel_dp->psr.lock);
1900	}
1901}
1902/*
1903 * When we completely rely on PSR2 S/W tracking in the future,
1904 * intel_psr_flush() will also invalidate and flush the PSR for the
1905 * ORIGIN_FLIP event, therefore tgl_dc3co_flush() will need to be changed
1906 * accordingly.
1907 */
1908static void
1909tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
1910		enum fb_op_origin origin)
1911{
1912	mutex_lock(&intel_dp->psr.lock);
1913
1914	if (!intel_dp->psr.dc3co_exitline)
1915		goto unlock;
1916
1917	if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
1918		goto unlock;
1919
1920	/*
1921	 * Every frontbuffer flush flip event modifies the delay of the delayed
1922	 * work; when the delayed work finally runs it means the display has been idle.
1923	 */
1924	if (!(frontbuffer_bits &
1925	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
1926		goto unlock;
1927
1928	tgl_psr2_enable_dc3co(intel_dp);
1929	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
1930			 intel_dp->psr.dc3co_exit_delay);
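	/*
	 * Each flip that gets here re-arms dc3co_work with the full exit
	 * delay, so the work (tgl_dc3co_disable_work) only runs once no
	 * flip has occurred for dc3co_exit_delay, i.e. once the display
	 * has been idle.
	 */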
1931
1932unlock:
1933	mutex_unlock(&intel_dp->psr.lock);
1934}
1935
1936/**
1937 * intel_psr_flush - Flush PSR
1938 * @dev_priv: i915 device
1939 * @frontbuffer_bits: frontbuffer plane tracking bits
1940 * @origin: which operation caused the flush
1941 *
1942 * Since the hardware frontbuffer tracking has gaps we need to integrate
1943 * with the software frontbuffer tracking. This function gets called every
1944 * time frontbuffer rendering has completed and flushed out to memory. PSR
1945 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
1946 *
1947 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1948 */
1949void intel_psr_flush(struct drm_i915_private *dev_priv,
1950		     unsigned frontbuffer_bits, enum fb_op_origin origin)
1951{
1952	struct intel_encoder *encoder;
1953
1954	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1955		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
1956		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1957
1958		if (origin == ORIGIN_FLIP) {
1959			tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
1960			continue;
1961		}
1962
1963		mutex_lock(&intel_dp->psr.lock);
1964		if (!intel_dp->psr.enabled) {
1965			mutex_unlock(&intel_dp->psr.lock);
1966			continue;
1967		}
1968
1969		pipe_frontbuffer_bits &=
1970			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
1971		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
1972
1973		/*
1974		 * If the PSR is paused by an explicit intel_psr_paused() call,
1975		 * we have to ensure that the PSR is not activated until
1976		 * intel_psr_resume() is called.
1977		 */
1978		if (intel_dp->psr.paused) {
1979			mutex_unlock(&intel_dp->psr.lock);
1980			continue;
1981		}
1982
1983		/* By definition flush = invalidate + flush */
1984		if (pipe_frontbuffer_bits)
1985			psr_force_hw_tracking_exit(intel_dp);
1986
1987		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
1988			schedule_work(&intel_dp->psr.work);
1989		mutex_unlock(&intel_dp->psr.lock);
1990	}
1991}
1992
1993/**
1994 * intel_psr_init - Init basic PSR work and mutex.
1995 * @intel_dp: Intel DP
1996 *
1997 * This function is called after the connector has been initialized
1998 * (connector initialization handles the connector capabilities) and it
1999 * initializes the basic PSR state for each DP encoder.
2000 */
2001void intel_psr_init(struct intel_dp *intel_dp)
2002{
2003	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2004	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2005
2006	if (!HAS_PSR(dev_priv))
2007		return;
2008
2009	/*
2010	 * HSW spec explicitly says PSR is tied to port A.
2011	 * BDW+ platforms have an instance of PSR registers per transcoder, but
2012	 * BDW, GEN9 and GEN11 are not validated by the HW team on transcoders
2013	 * other than the eDP one.
2014	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2015	 * so let's keep it hardcoded to PORT_A on those platforms.
2016	 * GEN12, however, supports an instance of PSR registers per transcoder.
2017	 */
2018	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2019		drm_dbg_kms(&dev_priv->drm,
2020			    "PSR condition failed: Port not supported\n");
2021		return;
2022	}
2023
2024	intel_dp->psr.source_support = true;
2025
2026	if (IS_HASWELL(dev_priv))
2027		/*
2028		 * HSW doesn't have PSR registers in the same space as the
2029		 * transcoders, so set this to a value that, when subtracted from
2030		 * the register in transcoder space, results in the right offset for HSW.
2031		 */
2032		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
2033
2034	if (dev_priv->params.enable_psr == -1)
2035		if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
2036			dev_priv->params.enable_psr = 0;
2037
2038	/* Set link_standby x link_off defaults */
2039	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2040		/* HSW and BDW require workarounds that we don't implement. */
2041		intel_dp->psr.link_standby = false;
2042	else if (DISPLAY_VER(dev_priv) < 12)
2043		/* For new platforms up to TGL let's respect VBT back again */
2044		intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
2045
2046	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2047	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2048	mutex_init(&intel_dp->psr.lock);
2049}
2050
2051static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2052					   u8 *status, u8 *error_status)
2053{
2054	struct drm_dp_aux *aux = &intel_dp->aux;
2055	int ret;
2056
2057	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2058	if (ret != 1)
2059		return ret;
2060
2061	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2062	if (ret != 1)
2063		return ret;
2064
2065	*status = *status & DP_PSR_SINK_STATE_MASK;
2066
2067	return 0;
2068}
2069
2070static void psr_alpm_check(struct intel_dp *intel_dp)
2071{
2072	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2073	struct drm_dp_aux *aux = &intel_dp->aux;
2074	struct intel_psr *psr = &intel_dp->psr;
2075	u8 val;
2076	int r;
2077
2078	if (!psr->psr2_enabled)
2079		return;
2080
2081	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2082	if (r != 1) {
2083		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2084		return;
2085	}
2086
2087	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2088		intel_psr_disable_locked(intel_dp);
2089		psr->sink_not_reliable = true;
2090		drm_dbg_kms(&dev_priv->drm,
2091			    "ALPM lock timeout error, disabling PSR\n");
2092
2093		/* Clearing error */
2094		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2095	}
2096}
2097
2098static void psr_capability_changed_check(struct intel_dp *intel_dp)
2099{
2100	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2101	struct intel_psr *psr = &intel_dp->psr;
2102	u8 val;
2103	int r;
2104
2105	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2106	if (r != 1) {
2107		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2108		return;
2109	}
2110
2111	if (val & DP_PSR_CAPS_CHANGE) {
2112		intel_psr_disable_locked(intel_dp);
2113		psr->sink_not_reliable = true;
2114		drm_dbg_kms(&dev_priv->drm,
2115			    "Sink PSR capability changed, disabling PSR\n");
2116
2117		/* Clearing it */
2118		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2119	}
2120}
2121
2122void intel_psr_short_pulse(struct intel_dp *intel_dp)
2123{
2124	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2125	struct intel_psr *psr = &intel_dp->psr;
2126	u8 status, error_status;
2127	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2128			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2129			  DP_PSR_LINK_CRC_ERROR;
2130
2131	if (!CAN_PSR(intel_dp))
2132		return;
2133
2134	mutex_lock(&psr->lock);
2135
2136	if (!psr->enabled)
2137		goto exit;
2138
2139	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2140		drm_err(&dev_priv->drm,
2141			"Error reading PSR status or error status\n");
2142		goto exit;
2143	}
2144
2145	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2146		intel_psr_disable_locked(intel_dp);
2147		psr->sink_not_reliable = true;
2148	}
2149
2150	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2151		drm_dbg_kms(&dev_priv->drm,
2152			    "PSR sink internal error, disabling PSR\n");
2153	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2154		drm_dbg_kms(&dev_priv->drm,
2155			    "PSR RFB storage error, disabling PSR\n");
2156	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2157		drm_dbg_kms(&dev_priv->drm,
2158			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
2159	if (error_status & DP_PSR_LINK_CRC_ERROR)
2160		drm_dbg_kms(&dev_priv->drm,
2161			    "PSR Link CRC error, disabling PSR\n");
2162
2163	if (error_status & ~errors)
2164		drm_err(&dev_priv->drm,
2165			"PSR_ERROR_STATUS unhandled errors %x\n",
2166			error_status & ~errors);
2167	/* clear status register */
2168	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2169
2170	psr_alpm_check(intel_dp);
2171	psr_capability_changed_check(intel_dp);
2172
2173exit:
2174	mutex_unlock(&psr->lock);
2175}
2176
2177bool intel_psr_enabled(struct intel_dp *intel_dp)
2178{
2179	bool ret;
2180
2181	if (!CAN_PSR(intel_dp))
2182		return false;
2183
2184	mutex_lock(&intel_dp->psr.lock);
2185	ret = intel_dp->psr.enabled;
2186	mutex_unlock(&intel_dp->psr.lock);
2187
2188	return ret;
2189}
v6.13.7
   1/*
   2 * Copyright © 2014 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include <linux/debugfs.h>
  25
  26#include <drm/drm_atomic_helper.h>
  27#include <drm/drm_damage_helper.h>
  28#include <drm/drm_debugfs.h>
  29
  30#include "i915_drv.h"
  31#include "i915_reg.h"
  32#include "intel_alpm.h"
  33#include "intel_atomic.h"
  34#include "intel_crtc.h"
  35#include "intel_cursor_regs.h"
  36#include "intel_ddi.h"
  37#include "intel_de.h"
  38#include "intel_display_irq.h"
  39#include "intel_display_types.h"
  40#include "intel_dp.h"
  41#include "intel_dp_aux.h"
  42#include "intel_frontbuffer.h"
  43#include "intel_hdmi.h"
  44#include "intel_psr.h"
  45#include "intel_psr_regs.h"
  46#include "intel_snps_phy.h"
  47#include "skl_universal_plane.h"
  48
  49/**
  50 * DOC: Panel Self Refresh (PSR/SRD)
  51 *
  52 * Since Haswell Display controller supports Panel Self-Refresh on display
  53 * panels witch have a remote frame buffer (RFB) implemented according to PSR
  54 * spec in eDP1.3. PSR feature allows the display to go to lower standby states
  55 * when system is idle but display is on as it eliminates display refresh
  56 * request to DDR memory completely as long as the frame buffer for that
  57 * display is unchanged.
  58 *
  59 * Panel Self Refresh must be supported by both Hardware (source) and
  60 * Panel (sink).
  61 *
  62 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
  63 * to power down the link and memory controller. For DSI panels the same idea
  64 * is called "manual mode".
  65 *
  66 * The implementation uses the hardware-based PSR support which automatically
  67 * enters/exits self-refresh mode. The hardware takes care of sending the
  68 * required DP aux message and could even retrain the link (that part isn't
  69 * enabled yet though). The hardware also keeps track of any frontbuffer
  70 * changes to know when to exit self-refresh mode again. Unfortunately that
  71 * part doesn't work too well, hence why the i915 PSR support uses the
  72 * software frontbuffer tracking to make sure it doesn't miss a screen
  73 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
  74 * get called by the frontbuffer tracking code. Note that because of locking
  75 * issues the self-refresh re-enable code is done from a work queue, which
  76 * must be correctly synchronized/cancelled when shutting down the pipe."
  77 *
  78 * DC3CO (DC3 clock off)
  79 *
  80 * On top of PSR2, GEN12 adds a intermediate power savings state that turns
  81 * clock off automatically during PSR2 idle state.
  82 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
  83 * entry/exit allows the HW to enter a low-power state even when page flipping
  84 * periodically (for instance a 30fps video playback scenario).
  85 *
  86 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in it),
  87 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
  88 * frames. If no other flip occurs and the function above is executed, DC3CO is
  89 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
  90 * of another flip.
  91 * Front buffer modifications do not trigger DC3CO activation on purpose as it
  92 * would bring a lot of complexity and most of the modern systems will only
  93 * use page flips.
  94 */
  95
  96/*
  97 * Description of PSR mask bits:
  98 *
  99 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
 100 *
 101 *  When unmasked (nearly) all display register writes (eg. even
 102 *  SWF) trigger a PSR exit. Some registers are excluded from this
 103 *  and they have a more specific mask (described below). On icl+
 104 *  this bit no longer exists and is effectively always set.
 105 *
 106 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
 107 *
 108 *  When unmasked (nearly) all pipe/plane register writes
 109 *  trigger a PSR exit. Some plane registers are excluded from this
 110 *  and they have a more specific mask (described below).
 111 *
 112 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
 113 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
 114 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
 115 *
 116 *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
 117 *  SPR_SURF/CURBASE are not included in this and instead are
 118 *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
 119 *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
 120 *
 121 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
 122 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
 123 *
 124 *  When unmasked PSR is blocked as long as the sprite
 125 *  plane is enabled. skl+ with their universal planes no
 126 *  longer have a mask bit like this, and no plane being
 127 *  enabled blocks PSR.
 128 *
 129 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
 130 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
 131 *
 132 *  When unmasked CURPOS writes trigger a PSR exit. On skl+
 133 *  this bit doesn't exist but CURPOS is included in the
 134 *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
 135 *
 136 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
 137 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
 138 *
 139 *  When unmasked PSR is blocked as long as vblank and/or vsync
 140 *  interrupt is unmasked in IMR *and* enabled in IER.
 141 *
 142 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
 143 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
 144 *
 145 *  Selects whether PSR exit generates an extra vblank before
 146 *  the first frame is transmitted. Also note the opposite polarity
 147 *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 148 *  unmasked==do not generate the extra vblank).
 149 *
 150 *  With DC states enabled the extra vblank happens after link training,
 151 *  with DC states disabled it happens immediately upon PSR exit trigger.
 152 *  No idea as of now why there is a difference. HSW/BDW (which don't
 153 *  even have DMC) always generate it after link training. Go figure.
 154 *
 155 *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
 156 *  and thus won't latch until the first vblank. So with DC states
 157 *  enabled the register effectively uses the reset value during DC5
 158 *  exit+PSR exit sequence, and thus the bit does nothing until
 159 *  latched by the vblank that it was trying to prevent from being
 160 *  generated in the first place. So we should probably call this
 161 *  one a chicken/egg bit instead on skl+.
 162 *
 163 *  In standby mode (as opposed to link-off) this makes no difference
 164 *  as the timing generator keeps running the whole time generating
 165 *  normal periodic vblanks.
 166 *
 167 *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
 168 *  and doing so makes the behaviour match the skl+ reset value.
 169 *
 170 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
 171 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
 172 *
 173 *  On BDW without this bit no vblanks whatsoever are
 174 *  generated after PSR exit. On HSW this has no apparent effect.
 175 *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
 176 *
 177 * The rest of the bits are more self-explanatory and/or
 178 * irrelevant for normal operation.
 179 *
 180 * Description of intel_crtc_state variables. has_psr, has_panel_replay and
 181 * has_sel_update:
 182 *
 183 *  has_psr (alone):					PSR1
 184 *  has_psr + has_sel_update:				PSR2
 185 *  has_psr + has_panel_replay:				Panel Replay
 186 *  has_psr + has_panel_replay + has_sel_update:	Panel Replay Selective Update
 187 *
 188 * Description of some intel_psr variables: enabled, panel_replay_enabled,
 189 * sel_update_enabled
 190 *
 191 *  enabled (alone):						PSR1
 192 *  enabled + sel_update_enabled:				PSR2
 193 *  enabled + panel_replay_enabled:				Panel Replay
 194 *  enabled + panel_replay_enabled + sel_update_enabled:	Panel Replay SU
 195 */
 196
 197#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
 198			   (intel_dp)->psr.source_support)
 199
 200bool intel_encoder_can_psr(struct intel_encoder *encoder)
 201{
 202	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
 203		return CAN_PSR(enc_to_intel_dp(encoder)) ||
 204		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
 205	else
 206		return false;
 207}
 208
 209bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
 210				  const struct intel_crtc_state *crtc_state)
 211{
 212	/*
 213	 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
 214	 * the output is enabled. For non-eDP outputs the main link is always
 215	 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
 216	 * for eDP.
 217	 *
 218	 * TODO:
 219	 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
 220	 *   the ALPM with main-link off mode is not enabled.
 221	 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
 222	 *   main-link off mode is added for it and this mode gets enabled.
 223	 */
 224	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
 225	       intel_encoder_can_psr(encoder);
 226}
 227
 228static bool psr_global_enabled(struct intel_dp *intel_dp)
 229{
 230	struct intel_display *display = to_intel_display(intel_dp);
 231	struct intel_connector *connector = intel_dp->attached_connector;
 232
 233	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 234	case I915_PSR_DEBUG_DEFAULT:
 235		if (display->params.enable_psr == -1)
 236			return intel_dp_is_edp(intel_dp) ?
 237				connector->panel.vbt.psr.enable :
 238				true;
 239		return display->params.enable_psr;
 240	case I915_PSR_DEBUG_DISABLE:
 241		return false;
 242	default:
 243		return true;
 244	}
 245}
 246
 247static bool psr2_global_enabled(struct intel_dp *intel_dp)
 248{
 249	struct intel_display *display = to_intel_display(intel_dp);
 250
 251	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 252	case I915_PSR_DEBUG_DISABLE:
 253	case I915_PSR_DEBUG_FORCE_PSR1:
 254		return false;
 255	default:
 256		if (display->params.enable_psr == 1)
 257			return false;
 258		return true;
 259	}
 260}
 261
 262static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
 263{
 264	struct intel_display *display = to_intel_display(intel_dp);
 265
 266	if (display->params.enable_psr != -1)
 267		return false;
 268
 269	return true;
 270}
 271
 272static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
 273{
 274	struct intel_display *display = to_intel_display(intel_dp);
 275
 276	if ((display->params.enable_psr != -1) ||
 277	    (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
 278		return false;
 279	return true;
 280}
 281
 282static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
 283{
 284	struct intel_display *display = to_intel_display(intel_dp);
 285
 286	return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
 287		EDP_PSR_ERROR(intel_dp->psr.transcoder);
 288}
 289
 290static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
 291{
 292	struct intel_display *display = to_intel_display(intel_dp);
 293
 294	return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
 295		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
 296}
 297
 298static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
 299{
 300	struct intel_display *display = to_intel_display(intel_dp);
 301
 302	return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
 303		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
 304}
 305
 306static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
 307{
 308	struct intel_display *display = to_intel_display(intel_dp);
 309
 310	return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
 311		EDP_PSR_MASK(intel_dp->psr.transcoder);
 312}
 313
 314static i915_reg_t psr_ctl_reg(struct intel_display *display,
 315			      enum transcoder cpu_transcoder)
 316{
 317	if (DISPLAY_VER(display) >= 8)
 318		return EDP_PSR_CTL(display, cpu_transcoder);
 319	else
 320		return HSW_SRD_CTL;
 321}
 322
 323static i915_reg_t psr_debug_reg(struct intel_display *display,
 324				enum transcoder cpu_transcoder)
 325{
 326	if (DISPLAY_VER(display) >= 8)
 327		return EDP_PSR_DEBUG(display, cpu_transcoder);
 328	else
 329		return HSW_SRD_DEBUG;
 330}
 331
 332static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
 333				   enum transcoder cpu_transcoder)
 334{
 335	if (DISPLAY_VER(display) >= 8)
 336		return EDP_PSR_PERF_CNT(display, cpu_transcoder);
 337	else
 338		return HSW_SRD_PERF_CNT;
 339}
 340
 341static i915_reg_t psr_status_reg(struct intel_display *display,
 342				 enum transcoder cpu_transcoder)
 343{
 344	if (DISPLAY_VER(display) >= 8)
 345		return EDP_PSR_STATUS(display, cpu_transcoder);
 346	else
 347		return HSW_SRD_STATUS;
 348}
 349
 350static i915_reg_t psr_imr_reg(struct intel_display *display,
 351			      enum transcoder cpu_transcoder)
 352{
 353	if (DISPLAY_VER(display) >= 12)
 354		return TRANS_PSR_IMR(display, cpu_transcoder);
 355	else
 356		return EDP_PSR_IMR;
 357}
 358
 359static i915_reg_t psr_iir_reg(struct intel_display *display,
 360			      enum transcoder cpu_transcoder)
 361{
 362	if (DISPLAY_VER(display) >= 12)
 363		return TRANS_PSR_IIR(display, cpu_transcoder);
 364	else
 365		return EDP_PSR_IIR;
 366}
 367
 368static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
 369				  enum transcoder cpu_transcoder)
 370{
 371	if (DISPLAY_VER(display) >= 8)
 372		return EDP_PSR_AUX_CTL(display, cpu_transcoder);
 373	else
 374		return HSW_SRD_AUX_CTL;
 375}
 376
 377static i915_reg_t psr_aux_data_reg(struct intel_display *display,
 378				   enum transcoder cpu_transcoder, int i)
 379{
 380	if (DISPLAY_VER(display) >= 8)
 381		return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
 382	else
 383		return HSW_SRD_AUX_DATA(i);
 384}
 385
 386static void psr_irq_control(struct intel_dp *intel_dp)
 387{
 388	struct intel_display *display = to_intel_display(intel_dp);
 389	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 390	u32 mask;
 391
 392	if (intel_dp->psr.panel_replay_enabled)
 393		return;
 394
 395	mask = psr_irq_psr_error_bit_get(intel_dp);
 396	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
 397		mask |= psr_irq_post_exit_bit_get(intel_dp) |
 398			psr_irq_pre_entry_bit_get(intel_dp);
 399
 400	intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
 401		     psr_irq_mask_get(intel_dp), ~mask);
 402}
 403
 404static void psr_event_print(struct intel_display *display,
 405			    u32 val, bool sel_update_enabled)
 406{
 407	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
 408	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
 409		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
 410	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
 411		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
 412	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
 413		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
 414	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
 415		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
 416	if (val & PSR_EVENT_GRAPHICS_RESET)
 417		drm_dbg_kms(display->drm, "\tGraphics reset\n");
 418	if (val & PSR_EVENT_PCH_INTERRUPT)
 419		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
 420	if (val & PSR_EVENT_MEMORY_UP)
 421		drm_dbg_kms(display->drm, "\tMemory up\n");
 422	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
 423		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
 424	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
 425		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
 426	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
 427		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
 428	if (val & PSR_EVENT_REGISTER_UPDATE)
 429		drm_dbg_kms(display->drm, "\tRegister updated\n");
 430	if (val & PSR_EVENT_HDCP_ENABLE)
 431		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
 432	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
 433		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
 434	if (val & PSR_EVENT_VBI_ENABLE)
 435		drm_dbg_kms(display->drm, "\tVBI enabled\n");
 436	if (val & PSR_EVENT_LPSP_MODE_EXIT)
 437		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
 438	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
 439		drm_dbg_kms(display->drm, "\tPSR disabled\n");
 440}
 441
 442void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 443{
 444	struct intel_display *display = to_intel_display(intel_dp);
 445	struct drm_i915_private *dev_priv = to_i915(display->drm);
 446	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 447	ktime_t time_ns =  ktime_get();
 448
 449	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
 450		intel_dp->psr.last_entry_attempt = time_ns;
 451		drm_dbg_kms(display->drm,
 452			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
 453			    transcoder_name(cpu_transcoder));
 454	}
 455
 456	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
 457		intel_dp->psr.last_exit = time_ns;
 458		drm_dbg_kms(display->drm,
 459			    "[transcoder %s] PSR exit completed\n",
 460			    transcoder_name(cpu_transcoder));
 461
 462		if (DISPLAY_VER(display) >= 9) {
 463			u32 val;
 464
 465			val = intel_de_rmw(dev_priv,
 466					   PSR_EVENT(dev_priv, cpu_transcoder),
 467					   0, 0);
 468
 469			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
 470		}
 471	}
 472
 473	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
 474		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
 475			 transcoder_name(cpu_transcoder));
 476
 477		intel_dp->psr.irq_aux_error = true;
 478
 479		/*
 480		 * If this interrupt is not masked it will keep
 481		 * firing so fast that it prevents the scheduled
 482		 * work from running.
 483		 * Also, after a PSR error we don't want to arm PSR
 484		 * again, so we don't care about unmasking the interrupt
 485		 * or clearing irq_aux_error.
 486		 */
 487		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
 488			     0, psr_irq_psr_error_bit_get(intel_dp));
 489
 490		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
 491	}
 492}
 493
 494static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
 495{
 496	struct intel_display *display = to_intel_display(intel_dp);
 497	u8 val = 8; /* assume the worst if we can't read the value */
 498
 499	if (drm_dp_dpcd_readb(&intel_dp->aux,
 500			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
 501		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
 502	else
 503		drm_dbg_kms(display->drm,
 504			    "Unable to get sink synchronization latency, assuming 8 frames\n");
 505	return val;
 506}
 507
 508static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
 509{
 510	u8 su_capability = 0;
 511
 512	if (intel_dp->psr.sink_panel_replay_su_support)
 513		drm_dp_dpcd_readb(&intel_dp->aux,
 514				  DP_PANEL_PANEL_REPLAY_CAPABILITY,
 515				  &su_capability);
 516	else
 517		su_capability = intel_dp->psr_dpcd[1];
 518
 519	return su_capability;
 520}
 521
 522static unsigned int
 523intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
 524{
 525	return intel_dp->psr.sink_panel_replay_su_support ?
 526		DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
 527		DP_PSR2_SU_X_GRANULARITY;
 528}
 529
 530static unsigned int
 531intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
 532{
 533	return intel_dp->psr.sink_panel_replay_su_support ?
 534		DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
 535		DP_PSR2_SU_Y_GRANULARITY;
 536}
 537
 538/*
 539 * Note: bits related to granularity are the same in the panel replay and PSR
 540 * registers. Rely on the PSR definitions for these "common" bits.
 541 */
 542static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
 543{
 544	struct intel_display *display = to_intel_display(intel_dp);
 545	ssize_t r;
 546	u16 w;
 547	u8 y;
 548
 549	/*
 550	 * TODO: Do we need to take into account panel supporting both PSR and
 551	 * Panel replay?
 552	 */
 553
 554	/*
 555	 * If the sink doesn't have specific granularity requirements, set the
 556	 * legacy ones.
 557	 */
 558	if (!(intel_dp_get_su_capability(intel_dp) &
 559	      DP_PSR2_SU_GRANULARITY_REQUIRED)) {
 560		/* As PSR2 HW sends full lines, we do not care about x granularity */
 561		w = 4;
 562		y = 4;
 563		goto exit;
 564	}
 565
 566	r = drm_dp_dpcd_read(&intel_dp->aux,
 567			     intel_dp_get_su_x_granularity_offset(intel_dp),
 568			     &w, 2);
 569	if (r != 2)
 570		drm_dbg_kms(display->drm,
 571			    "Unable to read selective update x granularity\n");
 572	/*
 573	 * Spec says that if the value read is 0 the default granularity should
 574	 * be used instead.
 575	 */
 576	if (r != 2 || w == 0)
 577		w = 4;
 578
 579	r = drm_dp_dpcd_read(&intel_dp->aux,
 580			     intel_dp_get_su_y_granularity_offset(intel_dp),
 581			     &y, 1);
 582	if (r != 1) {
 583		drm_dbg_kms(display->drm,
 584			    "Unable to read selective update y granularity\n");
 585		y = 4;
 586	}
 587	if (y == 0)
 588		y = 1;
 589
 590exit:
 591	intel_dp->psr.su_w_granularity = w;
 592	intel_dp->psr.su_y_granularity = y;
 593}
 594
 595static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
 596{
 597	struct intel_display *display = to_intel_display(intel_dp);
 598
 599	if (intel_dp_is_edp(intel_dp)) {
 600		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
 601			drm_dbg_kms(display->drm,
 602				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
 603			return;
 604		}
 605
 606		if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
 607			drm_dbg_kms(display->drm,
 608				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
 609			return;
 610		}
 611	}
 612
 613	intel_dp->psr.sink_panel_replay_support = true;
 614
 615	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
 616		intel_dp->psr.sink_panel_replay_su_support = true;
 617
 618	drm_dbg_kms(display->drm,
 619		    "Panel replay %sis supported by panel\n",
 620		    intel_dp->psr.sink_panel_replay_su_support ?
 621		    "selective_update " : "");
 622}
 623
 624static void _psr_init_dpcd(struct intel_dp *intel_dp)
 625{
 626	struct intel_display *display = to_intel_display(intel_dp);
 627
 628	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
 629		    intel_dp->psr_dpcd[0]);
 630
 631	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
 632		drm_dbg_kms(display->drm,
 633			    "PSR support not currently available for this panel\n");
 634		return;
 635	}
 636
 637	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
 638		drm_dbg_kms(display->drm,
 639			    "Panel lacks power state control, PSR cannot be enabled\n");
 640		return;
 641	}
 642
 643	intel_dp->psr.sink_support = true;
 644	intel_dp->psr.sink_sync_latency =
 645		intel_dp_get_sink_sync_latency(intel_dp);
 646
 647	if (DISPLAY_VER(display) >= 9 &&
 648	    intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
 649		bool y_req = intel_dp->psr_dpcd[1] &
 650			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
 651
 652		/*
 653		 * All panels that support PSR version 03h (PSR2 +
 654		 * Y-coordinate) can handle Y-coordinates in VSC but we are
 655		 * only sure that it is going to be used when required by the
 656		 * panel. This way the panel is capable of doing a selective
 657		 * update without an aux frame sync.
 658		 *
 659		 * To support PSR version 02h and PSR version 03h panels
 660		 * without the Y-coordinate requirement we would need to enable
 661		 * GTC first.
 662		 */
 663		intel_dp->psr.sink_psr2_support = y_req &&
 664			intel_alpm_aux_wake_supported(intel_dp);
 665		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
 666			    intel_dp->psr.sink_psr2_support ? "" : "not ");
 667	}
 668}
 669
 670void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 671{
 672	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
 673			 sizeof(intel_dp->psr_dpcd));
 674	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
 675			  &intel_dp->pr_dpcd);
 676
 677	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
 678		_panel_replay_init_dpcd(intel_dp);
 679
 680	if (intel_dp->psr_dpcd[0])
 681		_psr_init_dpcd(intel_dp);
 682
 683	if (intel_dp->psr.sink_psr2_support ||
 684	    intel_dp->psr.sink_panel_replay_su_support)
 685		intel_dp_get_su_granularity(intel_dp);
 686}
 687
 688static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 689{
 690	struct intel_display *display = to_intel_display(intel_dp);
 691	struct drm_i915_private *dev_priv = to_i915(display->drm);
 692	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 693	u32 aux_clock_divider, aux_ctl;
 694	/* write DP_SET_POWER=D0 */
 695	static const u8 aux_msg[] = {
 696		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
 697		[1] = (DP_SET_POWER >> 8) & 0xff,
 698		[2] = DP_SET_POWER & 0xff,
 699		[3] = 1 - 1,
 700		[4] = DP_SET_POWER_D0,
 701	};
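	/*
	 * Layout note (illustrative): this is a standard DP AUX native-write
	 * header, i.e. the 4-bit command plus the 20-bit DP_SET_POWER address
	 * in bytes 0-2, the transfer length minus one ("1 - 1" for a single
	 * data byte) in byte 3, and the DP_SET_POWER_D0 payload in byte 4.
	 */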
 702	int i;
 703
 704	BUILD_BUG_ON(sizeof(aux_msg) > 20);
 705	for (i = 0; i < sizeof(aux_msg); i += 4)
 706		intel_de_write(dev_priv,
 707			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
 708			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
 709
 710	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 711
 712	/* Start with bits set for DDI_AUX_CTL register */
 713	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
 714					     aux_clock_divider);
 715
 716	/* Select only valid bits for SRD_AUX_CTL */
 717	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
 718		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
 719		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
 720		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
 721
 722	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
 723		       aux_ctl);
 724}
 725
 726static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
 727{
 728	struct intel_display *display = to_intel_display(intel_dp);
 729
 730	if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
 731	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
 732		return false;
 733
 734	return panel_replay ?
 735		intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
 736		intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
 737		psr2_su_region_et_global_enabled(intel_dp);
 738}
 739
 740static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
 741				      const struct intel_crtc_state *crtc_state)
 742{
 743	u8 val = DP_PANEL_REPLAY_ENABLE |
 744		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
 745		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
 746		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
 747		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
 748	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
 749
 750	if (crtc_state->has_sel_update)
 751		val |= DP_PANEL_REPLAY_SU_ENABLE;
 752
 753	if (crtc_state->enable_psr2_su_region_et)
 754		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
 755
 756	if (crtc_state->req_psr2_sdp_prior_scanline)
 757		panel_replay_config2 |=
 758			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
 759
 760	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
 761
 762	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
 763			   panel_replay_config2);
 764}
 765
 766static void _psr_enable_sink(struct intel_dp *intel_dp,
 767			     const struct intel_crtc_state *crtc_state)
 768{
 769	struct intel_display *display = to_intel_display(intel_dp);
 770	u8 val = 0;
 771
 772	if (crtc_state->has_sel_update) {
 773		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
 774	} else {
 775		if (intel_dp->psr.link_standby)
 776			val |= DP_PSR_MAIN_LINK_ACTIVE;
 777
 778		if (DISPLAY_VER(display) >= 8)
 779			val |= DP_PSR_CRC_VERIFICATION;
 780	}
 781
 782	if (crtc_state->req_psr2_sdp_prior_scanline)
 783		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
 784
 785	if (crtc_state->enable_psr2_su_region_et)
 786		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
 787
 788	if (intel_dp->psr.entry_setup_frames > 0)
 789		val |= DP_PSR_FRAME_CAPTURE;
 790	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
 791
 792	val |= DP_PSR_ENABLE;
 793	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
 794}
 795
 796static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
 797				       const struct intel_crtc_state *crtc_state)
 798{
 799	u8 val;
 800
 801	/*
 802	 * eDP Panel Replay always uses ALPM.
 803	 * PSR2 uses ALPM but PSR1 doesn't.
 804	 */
 805	if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
 806					   !crtc_state->has_sel_update))
 807		return;
 808
 809	val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
 810
 811	if (crtc_state->has_panel_replay)
 812		val |= DP_ALPM_MODE_AUX_LESS;
 813
 814	drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
 815}
 816
 817void intel_psr_enable_sink(struct intel_dp *intel_dp,
 818			   const struct intel_crtc_state *crtc_state)
 819{
 820	intel_psr_enable_sink_alpm(intel_dp, crtc_state);
 821
 822	crtc_state->has_panel_replay ?
 823		_panel_replay_enable_sink(intel_dp, crtc_state) :
 824		_psr_enable_sink(intel_dp, crtc_state);
 825
 826	if (intel_dp_is_edp(intel_dp))
 827		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 828}
 829
 830static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
 831{
 832	struct intel_display *display = to_intel_display(intel_dp);
 833	struct intel_connector *connector = intel_dp->attached_connector;
 834	struct drm_i915_private *dev_priv = to_i915(display->drm);
 835	u32 val = 0;
 836
 837	if (DISPLAY_VER(display) >= 11)
 838		val |= EDP_PSR_TP4_TIME_0us;
 839
 840	if (display->params.psr_safest_params) {
 841		val |= EDP_PSR_TP1_TIME_2500us;
 842		val |= EDP_PSR_TP2_TP3_TIME_2500us;
 843		goto check_tp3_sel;
 844	}
 845
 846	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
 847		val |= EDP_PSR_TP1_TIME_0us;
 848	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
 849		val |= EDP_PSR_TP1_TIME_100us;
 850	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
 851		val |= EDP_PSR_TP1_TIME_500us;
 852	else
 853		val |= EDP_PSR_TP1_TIME_2500us;
 854
 855	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
 856		val |= EDP_PSR_TP2_TP3_TIME_0us;
 857	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
 858		val |= EDP_PSR_TP2_TP3_TIME_100us;
 859	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
 860		val |= EDP_PSR_TP2_TP3_TIME_500us;
 861	else
 862		val |= EDP_PSR_TP2_TP3_TIME_2500us;
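	/*
	 * For example, a VBT tp2/tp3 wakeup time of 40 us falls into the
	 * 100 us bucket above: values are rounded up to the next supported
	 * programming.
	 */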
 863
 864	/*
 865	 * WA 0479: hsw,bdw
 866	 * "Do not skip both TP1 and TP2/TP3"
 867	 */
 868	if (DISPLAY_VER(dev_priv) < 9 &&
 869	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
 870	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
 871		val |= EDP_PSR_TP2_TP3_TIME_100us;
 872
 873check_tp3_sel:
 874	if (intel_dp_source_supports_tps3(dev_priv) &&
 875	    drm_dp_tps3_supported(intel_dp->dpcd))
 876		val |= EDP_PSR_TP_TP1_TP3;
 877	else
 878		val |= EDP_PSR_TP_TP1_TP2;
 879
 880	return val;
 881}
 882
 883static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
 884{
 885	struct intel_display *display = to_intel_display(intel_dp);
 886	struct intel_connector *connector = intel_dp->attached_connector;
 887	int idle_frames;
 888
 889	/* Let's use 6 as the minimum to cover all known cases including the
 890	 * off-by-one issue that HW has in some cases.
 891	 */
 892	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
 893	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
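	/*
	 * Worked example: with vbt.psr.idle_frames = 2 and a sink sync
	 * latency of 8 frames this gives max(max(6, 2), 8 + 1) = 9 idle
	 * frames before PSR entry.
	 */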
 894
 895	if (drm_WARN_ON(display->drm, idle_frames > 0xf))
 896		idle_frames = 0xf;
 897
 898	return idle_frames;
 899}
 900
 901static void hsw_activate_psr1(struct intel_dp *intel_dp)
 902{
 903	struct intel_display *display = to_intel_display(intel_dp);
 904	struct drm_i915_private *dev_priv = to_i915(display->drm);
 905	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 906	u32 max_sleep_time = 0x1f;
 907	u32 val = EDP_PSR_ENABLE;
 908
 909	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
 910
 911	if (DISPLAY_VER(display) < 20)
 912		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
 913
 914	if (IS_HASWELL(dev_priv))
 915		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 916
 917	if (intel_dp->psr.link_standby)
 918		val |= EDP_PSR_LINK_STANDBY;
 919
 920	val |= intel_psr1_get_tp_time(intel_dp);
 921
 922	if (DISPLAY_VER(display) >= 8)
 923		val |= EDP_PSR_CRC_ENABLE;
 924
 925	if (DISPLAY_VER(display) >= 20)
 926		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
 927
 928	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
 929		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
 930}
 931
 932static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
 933{
 934	struct intel_display *display = to_intel_display(intel_dp);
 935	struct intel_connector *connector = intel_dp->attached_connector;
 936	u32 val = 0;
 937
 938	if (display->params.psr_safest_params)
 939		return EDP_PSR2_TP2_TIME_2500us;
 940
 941	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
 942	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
 943		val |= EDP_PSR2_TP2_TIME_50us;
 944	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
 945		val |= EDP_PSR2_TP2_TIME_100us;
 946	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
 947		val |= EDP_PSR2_TP2_TIME_500us;
 948	else
 949		val |= EDP_PSR2_TP2_TIME_2500us;
 950
 951	return val;
 952}
 953
 954static int psr2_block_count_lines(struct intel_dp *intel_dp)
 955{
 956	return intel_dp->alpm_parameters.io_wake_lines < 9 &&
 957		intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
 958}
 959
 960static int psr2_block_count(struct intel_dp *intel_dp)
 961{
 962	return psr2_block_count_lines(intel_dp) / 4;
 963}
 964
 965static u8 frames_before_su_entry(struct intel_dp *intel_dp)
 966{
 967	u8 frames_before_su_entry;
 968
 969	frames_before_su_entry = max_t(u8,
 970				       intel_dp->psr.sink_sync_latency + 1,
 971				       2);
 972
 973	/* Entry setup frames must be at least 1 less than frames before SU entry */
 974	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
 975		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
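	/*
	 * Example: a sink sync latency of 3 frames gives max(3 + 1, 2) = 4;
	 * if entry_setup_frames is also 4, the value is bumped to 5 so it
	 * stays strictly larger than the entry setup frames.
	 */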
 976
 977	return frames_before_su_entry;
 978}
 979
 980static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
 981{
 982	struct intel_display *display = to_intel_display(intel_dp);
 983	struct intel_psr *psr = &intel_dp->psr;
 984	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 985
 986	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
 987		u32 val = psr->su_region_et_enabled ?
 988			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
 989
 990		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
 991			val |= EDP_PSR2_SU_SDP_SCANLINE;
 992
 993		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
 994			       val);
 995	}
 996
 997	intel_de_rmw(display,
 998		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
 999		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
1000
1001	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
1002		     TRANS_DP2_PANEL_REPLAY_ENABLE);
1003}
1004
1005static void hsw_activate_psr2(struct intel_dp *intel_dp)
1006{
1007	struct intel_display *display = to_intel_display(intel_dp);
1008	struct drm_i915_private *dev_priv = to_i915(display->drm);
1009	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1010	u32 val = EDP_PSR2_ENABLE;
1011	u32 psr_val = 0;
1012
1013	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
1014
1015	if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
1016		val |= EDP_SU_TRACK_ENABLE;
1017
1018	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
1019		val |= EDP_Y_COORDINATE_ENABLE;
1020
1021	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
1022
1023	val |= intel_psr2_get_tp_time(intel_dp);
1024
1025	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
1026		if (psr2_block_count(intel_dp) > 2)
1027			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
1028		else
1029			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
1030	}
1031
1032	/* Wa_22012278275:adl-p */
1033	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
1034		static const u8 map[] = {
1035			2, /* 5 lines */
1036			1, /* 6 lines */
1037			0, /* 7 lines */
1038			3, /* 8 lines */
1039			6, /* 9 lines */
1040			5, /* 10 lines */
1041			4, /* 11 lines */
1042			7, /* 12 lines */
1043		};
1044		/*
1045		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
1046		 * comments below for more information
1047		 */
1048		int tmp;
1049
1050		tmp = map[intel_dp->alpm_parameters.io_wake_lines -
1051			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
1052		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
1053
1054		tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
1055		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
1056	} else if (DISPLAY_VER(display) >= 20) {
1057		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1058	} else if (DISPLAY_VER(display) >= 12) {
1059		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1060		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1061	} else if (DISPLAY_VER(display) >= 9) {
1062		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
1063		val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
1064	}
1065
1066	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
1067		val |= EDP_PSR2_SU_SDP_SCANLINE;
1068
1069	if (DISPLAY_VER(display) >= 20)
1070		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
1071
1072	if (intel_dp->psr.psr2_sel_fetch_enabled) {
1073		u32 tmp;
1074
1075		tmp = intel_de_read(display,
1076				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1077		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
1078	} else if (HAS_PSR2_SEL_FETCH(display)) {
1079		intel_de_write(display,
1080			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
1081	}
1082
1083	if (intel_dp->psr.su_region_et_enabled)
1084		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
1085
1086	/*
1087	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
1088	 * recommends keeping this bit unset while PSR2 is enabled.
1089	 */
1090	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
1091
1092	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
1093}
1094
1095static bool
1096transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1097{
1098	struct drm_i915_private *dev_priv = to_i915(display->drm);
1099
1100	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
1101		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1102	else if (DISPLAY_VER(display) >= 12)
1103		return cpu_transcoder == TRANSCODER_A;
1104	else if (DISPLAY_VER(display) >= 9)
1105		return cpu_transcoder == TRANSCODER_EDP;
1106	else
1107		return false;
1108}
1109
1110static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1111{
1112	if (!crtc_state->hw.active)
1113		return 0;
1114
1115	return DIV_ROUND_UP(1000 * 1000,
1116			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1117}
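/*
 * For example, a 60 Hz mode gives DIV_ROUND_UP(1000000, 60) = 16667 us per
 * frame, while an inactive CRTC reports 0.
 */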
1118
1119static void psr2_program_idle_frames(struct intel_dp *intel_dp,
1120				     u32 idle_frames)
1121{
1122	struct intel_display *display = to_intel_display(intel_dp);
1123	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1124
1125	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
1126		     EDP_PSR2_IDLE_FRAMES_MASK,
1127		     EDP_PSR2_IDLE_FRAMES(idle_frames));
1128}
1129
1130static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
1131{
1132	struct intel_display *display = to_intel_display(intel_dp);
1133	struct drm_i915_private *dev_priv = to_i915(display->drm);
1134
1135	psr2_program_idle_frames(intel_dp, 0);
1136	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
1137}
1138
1139static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
1140{
1141	struct intel_display *display = to_intel_display(intel_dp);
1142	struct drm_i915_private *dev_priv = to_i915(display->drm);
1143
1144	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1145	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
1146}
1147
1148static void tgl_dc3co_disable_work(struct work_struct *work)
1149{
1150	struct intel_dp *intel_dp =
1151		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1152
1153	mutex_lock(&intel_dp->psr.lock);
1154	/* If delayed work is pending, it is not idle */
1155	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1156		goto unlock;
1157
1158	tgl_psr2_disable_dc3co(intel_dp);
1159unlock:
1160	mutex_unlock(&intel_dp->psr.lock);
1161}
1162
1163static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1164{
1165	if (!intel_dp->psr.dc3co_exitline)
1166		return;
1167
1168	cancel_delayed_work(&intel_dp->psr.dc3co_work);
1169	/* Before PSR2 exit disallow dc3co */
1170	tgl_psr2_disable_dc3co(intel_dp);
1171}
1172
1173static bool
1174dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1175			      struct intel_crtc_state *crtc_state)
1176{
1177	struct intel_display *display = to_intel_display(intel_dp);
1178	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1179	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1180	struct drm_i915_private *dev_priv = to_i915(display->drm);
1181	enum port port = dig_port->base.port;
1182
1183	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
1184		return pipe <= PIPE_B && port <= PORT_B;
1185	else
1186		return pipe == PIPE_A && port == PORT_A;
1187}
1188
1189static void
1190tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1191				  struct intel_crtc_state *crtc_state)
1192{
1193	struct intel_display *display = to_intel_display(intel_dp);
1194	struct drm_i915_private *dev_priv = to_i915(display->drm);
1195	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1196	struct i915_power_domains *power_domains = &display->power.domains;
1197	u32 exit_scanlines;
1198
1199	/*
1200	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1201	 * disable DC3CO until the changed dc3co activating/deactivating sequence
1202	 * is applied. B.Specs:49196
1203	 */
1204	return;
1205
1206	/*
1207	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1208	 * TODO: when the issue is addressed, this restriction should be removed.
1209	 */
1210	if (crtc_state->enable_psr2_sel_fetch)
1211		return;
1212
1213	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1214		return;
1215
1216	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1217		return;
1218
1219	/* Wa_16011303918:adl-p */
1220	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
1221		return;
1222
1223	/*
1224	 * DC3CO Exit time 200us B.Spec 49196
1225	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1226	 */
1227	exit_scanlines =
1228		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
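	/*
	 * Worked example, assuming hypothetical 1920x1080@60 timings with a
	 * 2200 pixel htotal and a 148.5 MHz pixel clock: one scanline takes
	 * ~14.8 us, so exit_scanlines = ROUNDUP(200 / 14.8) + 1 = 15, well
	 * below vdisplay.
	 */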
1229
1230	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
1231		return;
1232
1233	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1234}
1235
1236static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1237					      struct intel_crtc_state *crtc_state)
1238{
1239	struct intel_display *display = to_intel_display(intel_dp);
1240
1241	if (!display->params.enable_psr2_sel_fetch &&
1242	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1243		drm_dbg_kms(display->drm,
1244			    "PSR2 sel fetch not enabled, disabled by parameter\n");
1245		return false;
1246	}
1247
1248	if (crtc_state->uapi.async_flip) {
1249		drm_dbg_kms(display->drm,
1250			    "PSR2 sel fetch not enabled, async flip enabled\n");
1251		return false;
1252	}
1253
1254	return crtc_state->enable_psr2_sel_fetch = true;
1255}
1256
1257static bool psr2_granularity_check(struct intel_dp *intel_dp,
1258				   struct intel_crtc_state *crtc_state)
1259{
1260	struct intel_display *display = to_intel_display(intel_dp);
1261	struct drm_i915_private *dev_priv = to_i915(display->drm);
1262	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1263	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1264	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1265	u16 y_granularity = 0;
1266
1267	/* PSR2 HW only sends full lines so we only need to validate the width */
1268	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1269		return false;
1270
1271	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1272		return false;
1273
1274	/* HW tracking is only aligned to 4 lines */
1275	if (!crtc_state->enable_psr2_sel_fetch)
1276		return intel_dp->psr.su_y_granularity == 4;
1277
1278	/*
1279	 * adl_p and mtl platforms have 1 line granularity.
1280	 * For other platforms with SW tracking we can adjust the y coordinates
1281	 * to match the sink requirement if it is a multiple of 4.
1282	 */
1283	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
1284		y_granularity = intel_dp->psr.su_y_granularity;
1285	else if (intel_dp->psr.su_y_granularity <= 2)
1286		y_granularity = 4;
1287	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1288		y_granularity = intel_dp->psr.su_y_granularity;
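	/*
	 * E.g. a hypothetical sink reporting su_y_granularity = 2 is rounded
	 * up to a 4 line granularity here, while a sink reporting 8 (already
	 * a multiple of 4) keeps its native value on these older platforms.
	 */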
1289
1290	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1291		return false;
1292
1293	if (crtc_state->dsc.compression_enable &&
1294	    vdsc_cfg->slice_height % y_granularity)
1295		return false;
1296
1297	crtc_state->su_y_granularity = y_granularity;
1298	return true;
1299}
1300
1301static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1302							struct intel_crtc_state *crtc_state)
1303{
1304	struct intel_display *display = to_intel_display(intel_dp);
1305	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1306	u32 hblank_total, hblank_ns, req_ns;
1307
1308	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1309	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1310
1311	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1312	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
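	/*
	 * Worked example, assuming a hypothetical 4 lane HBR2 link
	 * (port_clock 540000 kHz, i.e. a 540 MHz symbol clock):
	 * req_ns = ((60 / 4) + 11) * 1000 / 540 = ~48 ns, which typical
	 * hblank durations exceed by far more than the 100 ns margin below.
	 */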
1313
1314	if ((hblank_ns - req_ns) > 100)
1315		return true;
1316
1317	/* Not supported <13 / Wa_22012279113:adl-p */
1318	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1319		return false;
1320
1321	crtc_state->req_psr2_sdp_prior_scanline = true;
1322	return true;
1323}
1324
1325static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1326					const struct drm_display_mode *adjusted_mode)
1327{
1328	struct intel_display *display = to_intel_display(intel_dp);
1329	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1330	int entry_setup_frames = 0;
1331
1332	if (psr_setup_time < 0) {
1333		drm_dbg_kms(display->drm,
1334			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1335			    intel_dp->psr_dpcd[1]);
1336		return -ETIME;
1337	}
1338
1339	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1340	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1341		if (DISPLAY_VER(display) >= 20) {
1342			/* setup entry frames can be up to 3 frames */
1343			entry_setup_frames = 1;
1344			drm_dbg_kms(display->drm,
1345				    "PSR setup entry frames %d\n",
1346				    entry_setup_frames);
1347		} else {
1348			drm_dbg_kms(display->drm,
1349				    "PSR condition failed: PSR setup time (%d us) too long\n",
1350				    psr_setup_time);
1351			return -ETIME;
1352		}
1353	}
1354
1355	return entry_setup_frames;
1356}
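/*
 * Worked example with a hypothetical 1080p60 panel reporting a 330 us PSR
 * setup time: at ~14.8 us per scanline that is 23 lines of setup, which fits
 * in the 1125 - 1080 - 1 = 44 line vblank, so no extra entry setup frames
 * are needed.
 */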
1357
1358static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
1359				       const struct intel_crtc_state *crtc_state,
1360				       bool aux_less)
1361{
1362	struct intel_display *display = to_intel_display(intel_dp);
1363	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
1364		crtc_state->hw.adjusted_mode.crtc_vblank_start;
1365	int wake_lines;
1366
1367	if (aux_less)
1368		wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
1369	else
1370		wake_lines = DISPLAY_VER(display) < 20 ?
1371			psr2_block_count_lines(intel_dp) :
1372			intel_dp->alpm_parameters.io_wake_lines;
1373
1374	if (crtc_state->req_psr2_sdp_prior_scanline)
1375		vblank -= 1;
1376
1377	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1378	if (vblank < wake_lines)
1379		return false;
1380
1381	return true;
1382}
1383
1384static bool alpm_config_valid(struct intel_dp *intel_dp,
1385			      const struct intel_crtc_state *crtc_state,
1386			      bool aux_less)
1387{
1388	struct intel_display *display = to_intel_display(intel_dp);
1389
1390	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
1391		drm_dbg_kms(display->drm,
1392			    "PSR2/Panel Replay not enabled, unable to use long enough wake times\n");
1393		return false;
1394	}
1395
1396	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
1397		drm_dbg_kms(display->drm,
1398			    "PSR2/Panel Replay not enabled, too short vblank time\n");
1399		return false;
1400	}
1401
1402	return true;
1403}
1404
1405static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1406				    struct intel_crtc_state *crtc_state)
1407{
1408	struct intel_display *display = to_intel_display(intel_dp);
1409	struct drm_i915_private *dev_priv = to_i915(display->drm);
1410	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1411	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1412	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1413
1414	if (!intel_dp->psr.sink_psr2_support)
1415		return false;
1416
1417	/* JSL and EHL only supports eDP 1.3 */
1418	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1419		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
1420		return false;
1421	}
1422
1423	/* Wa_16011181250 */
1424	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1425	    IS_DG2(dev_priv)) {
1426		drm_dbg_kms(display->drm,
1427			    "PSR2 is defeatured for this platform\n");
1428		return false;
1429	}
1430
1431	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1432		drm_dbg_kms(display->drm,
1433			    "PSR2 not completely functional in this stepping\n");
1434		return false;
1435	}
1436
1437	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
1438		drm_dbg_kms(display->drm,
1439			    "PSR2 not supported in transcoder %s\n",
1440			    transcoder_name(crtc_state->cpu_transcoder));
1441		return false;
1442	}
1443
1444	/*
1445	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1446	 * resolution requires DSC to be enabled, priority is given to DSC
1447	 * over PSR2.
1448	 */
1449	if (crtc_state->dsc.compression_enable &&
1450	    (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1451		drm_dbg_kms(display->drm,
1452			    "PSR2 cannot be enabled since DSC is enabled\n");
1453		return false;
1454	}
1455
1456	if (DISPLAY_VER(display) >= 20) {
1457		psr_max_h = crtc_hdisplay;
1458		psr_max_v = crtc_vdisplay;
1459		max_bpp = crtc_state->pipe_bpp;
1460	} else if (IS_DISPLAY_VER(display, 12, 14)) {
1461		psr_max_h = 5120;
1462		psr_max_v = 3200;
1463		max_bpp = 30;
1464	} else if (IS_DISPLAY_VER(display, 10, 11)) {
1465		psr_max_h = 4096;
1466		psr_max_v = 2304;
1467		max_bpp = 24;
1468	} else if (DISPLAY_VER(display) == 9) {
1469		psr_max_h = 3640;
1470		psr_max_v = 2304;
1471		max_bpp = 24;
1472	}
1473
1474	if (crtc_state->pipe_bpp > max_bpp) {
1475		drm_dbg_kms(display->drm,
1476			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1477			    crtc_state->pipe_bpp, max_bpp);
1478		return false;
1479	}
1480
1481	/* Wa_16011303918:adl-p */
1482	if (crtc_state->vrr.enable &&
1483	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
1484		drm_dbg_kms(display->drm,
1485			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1486		return false;
1487	}
1488
1489	if (!alpm_config_valid(intel_dp, crtc_state, false))
1490		return false;
1491
1492	if (!crtc_state->enable_psr2_sel_fetch &&
1493	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1494		drm_dbg_kms(display->drm,
1495			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1496			    crtc_hdisplay, crtc_vdisplay,
1497			    psr_max_h, psr_max_v);
1498		return false;
1499	}
1500
1501	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1502
1503	return true;
1504}
1505
1506static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
1507					  struct intel_crtc_state *crtc_state)
1508{
1509	struct intel_display *display = to_intel_display(intel_dp);
1510
1511	if (HAS_PSR2_SEL_FETCH(display) &&
1512	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1513	    !HAS_PSR_HW_TRACKING(display)) {
1514		drm_dbg_kms(display->drm,
1515			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
1516		goto unsupported;
1517	}
1518
1519	if (!psr2_global_enabled(intel_dp)) {
1520		drm_dbg_kms(display->drm,
1521			    "Selective update disabled by flag\n");
1522		goto unsupported;
1523	}
1524
1525	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
1526		goto unsupported;
1527
1528	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1529		drm_dbg_kms(display->drm,
1530			    "Selective update not enabled, SDP indication does not fit in hblank\n");
1531		goto unsupported;
1532	}
1533
1534	if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
1535					     !intel_dp->psr.sink_panel_replay_su_support))
1536		goto unsupported;
1537
1538	if (crtc_state->crc_enabled) {
1539		drm_dbg_kms(display->drm,
1540			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
1541		goto unsupported;
1542	}
1543
1544	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1545		drm_dbg_kms(display->drm,
1546			    "Selective update not enabled, SU granularity not compatible\n");
1547		goto unsupported;
1548	}
1549
1550	crtc_state->enable_psr2_su_region_et =
1551		psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
1552
1553	return true;
1554
1555unsupported:
1556	crtc_state->enable_psr2_sel_fetch = false;
1557	return false;
1558}
1559
1560static bool _psr_compute_config(struct intel_dp *intel_dp,
1561				struct intel_crtc_state *crtc_state)
1562{
1563	struct intel_display *display = to_intel_display(intel_dp);
1564	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1565	int entry_setup_frames;
1566
1567	/*
1568	 * Current PSR panels don't work reliably with VRR enabled,
1569	 * so if VRR is enabled, do not enable PSR.
1570	 */
1571	if (crtc_state->vrr.enable)
1572		return false;
1573
1574	if (!CAN_PSR(intel_dp))
1575		return false;
1576
1577	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1578
1579	if (entry_setup_frames >= 0) {
1580		intel_dp->psr.entry_setup_frames = entry_setup_frames;
1581	} else {
1582		drm_dbg_kms(display->drm,
1583			    "PSR condition failed: PSR setup timing not met\n");
1584		return false;
1585	}
1586
1587	return true;
1588}
1589
1590static bool
1591_panel_replay_compute_config(struct intel_dp *intel_dp,
1592			     const struct intel_crtc_state *crtc_state,
1593			     const struct drm_connector_state *conn_state)
1594{
1595	struct intel_display *display = to_intel_display(intel_dp);
1596	struct intel_connector *connector =
1597		to_intel_connector(conn_state->connector);
1598	struct intel_hdcp *hdcp = &connector->hdcp;
1599
1600	if (!CAN_PANEL_REPLAY(intel_dp))
1601		return false;
1602
1603	if (!panel_replay_global_enabled(intel_dp)) {
1604		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
1605		return false;
1606	}
1607
1608	if (!intel_dp_is_edp(intel_dp))
1609		return true;
1610
1611	/* Remaining checks are for eDP only */
1612
1613	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
1614	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
1615		return false;
1616
1617	/* 128b/132b Panel Replay is not supported on eDP */
1618	if (intel_dp_is_uhbr(crtc_state)) {
1619		drm_dbg_kms(display->drm,
1620			    "Panel Replay is not supported with 128b/132b\n");
1621		return false;
1622	}
1623
1624	/* HW will not allow Panel Replay on eDP when HDCP is enabled */
1625	if (conn_state->content_protection ==
1626	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
1627	    (conn_state->content_protection ==
1628	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
1629	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
1630		drm_dbg_kms(display->drm,
1631			    "Panel Replay is not supported with HDCP\n");
1632		return false;
1633	}
1634
1635	if (!alpm_config_valid(intel_dp, crtc_state, true))
1636		return false;
1637
1638	if (crtc_state->crc_enabled) {
1639		drm_dbg_kms(display->drm,
1640			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
1641		return false;
1642	}
1643
1644	return true;
1645}
1646
1647void intel_psr_compute_config(struct intel_dp *intel_dp,
1648			      struct intel_crtc_state *crtc_state,
1649			      struct drm_connector_state *conn_state)
1650{
1651	struct intel_display *display = to_intel_display(intel_dp);
1652	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1653
1654	if (!psr_global_enabled(intel_dp)) {
1655		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
1656		return;
1657	}
1658
1659	if (intel_dp->psr.sink_not_reliable) {
1660		drm_dbg_kms(display->drm,
1661			    "PSR sink implementation is not reliable\n");
1662		return;
1663	}
1664
1665	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1666		drm_dbg_kms(display->drm,
1667			    "PSR condition failed: Interlaced mode enabled\n");
1668		return;
1669	}
1670
1671	/*
1672	 * FIXME figure out what is wrong with PSR+joiner and
1673	 * fix it. Presumably something related to the fact that
1674	 * PSR is a transcoder level feature.
1675	 */
1676	if (crtc_state->joiner_pipes) {
1677		drm_dbg_kms(display->drm,
1678			    "PSR disabled due to joiner\n");
1679		return;
1680	}
1681
1682	crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
1683								    crtc_state,
1684								    conn_state);
1685
1686	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1687		_psr_compute_config(intel_dp, crtc_state);
1688
1689	if (!crtc_state->has_psr)
1690		return;
1691
1692	crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
1693}
1694
1695void intel_psr_get_config(struct intel_encoder *encoder,
1696			  struct intel_crtc_state *pipe_config)
1697{
1698	struct intel_display *display = to_intel_display(encoder);
1699	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1700	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1701	struct intel_dp *intel_dp;
1702	u32 val;
1703
1704	if (!dig_port)
1705		return;
1706
1707	intel_dp = &dig_port->dp;
1708	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1709		return;
1710
1711	mutex_lock(&intel_dp->psr.lock);
1712	if (!intel_dp->psr.enabled)
1713		goto unlock;
1714
1715	if (intel_dp->psr.panel_replay_enabled) {
1716		pipe_config->has_psr = pipe_config->has_panel_replay = true;
1717	} else {
1718		/*
1719		 * Not possible to read EDP_PSR/PSR2_CTL registers as they are
1720		 * enabled/disabled because of frontbuffer tracking and others.
1721		 */
1722		pipe_config->has_psr = true;
1723	}
1724
1725	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
1726	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1727
1728	if (!intel_dp->psr.sel_update_enabled)
1729		goto unlock;
1730
1731	if (HAS_PSR2_SEL_FETCH(display)) {
1732		val = intel_de_read(display,
1733				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
1734		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1735			pipe_config->enable_psr2_sel_fetch = true;
1736	}
1737
1738	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
1739
1740	if (DISPLAY_VER(display) >= 12) {
1741		val = intel_de_read(display,
1742				    TRANS_EXITLINE(display, cpu_transcoder));
1743		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1744	}
1745unlock:
1746	mutex_unlock(&intel_dp->psr.lock);
1747}
1748
1749static void intel_psr_activate(struct intel_dp *intel_dp)
1750{
1751	struct intel_display *display = to_intel_display(intel_dp);
1752	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1753
1754	drm_WARN_ON(display->drm,
1755		    transcoder_has_psr2(display, cpu_transcoder) &&
1756		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
1757
1758	drm_WARN_ON(display->drm,
1759		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
1760
1761	drm_WARN_ON(display->drm, intel_dp->psr.active);
1762
1763	lockdep_assert_held(&intel_dp->psr.lock);
1764
1765	/* psr1, psr2 and panel-replay are mutually exclusive. */
1766	if (intel_dp->psr.panel_replay_enabled)
1767		dg2_activate_panel_replay(intel_dp);
1768	else if (intel_dp->psr.sel_update_enabled)
1769		hsw_activate_psr2(intel_dp);
1770	else
1771		hsw_activate_psr1(intel_dp);
1772
1773	intel_dp->psr.active = true;
1774}
1775
1776static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1777{
1778	switch (intel_dp->psr.pipe) {
1779	case PIPE_A:
1780		return LATENCY_REPORTING_REMOVED_PIPE_A;
1781	case PIPE_B:
1782		return LATENCY_REPORTING_REMOVED_PIPE_B;
1783	case PIPE_C:
1784		return LATENCY_REPORTING_REMOVED_PIPE_C;
1785	case PIPE_D:
1786		return LATENCY_REPORTING_REMOVED_PIPE_D;
1787	default:
1788		MISSING_CASE(intel_dp->psr.pipe);
1789		return 0;
1790	}
1791}
1792
1793/*
1794 * Wa_16013835468
1795 * Wa_14015648006
1796 */
1797static void wm_optimization_wa(struct intel_dp *intel_dp,
1798			       const struct intel_crtc_state *crtc_state)
1799{
1800	struct intel_display *display = to_intel_display(intel_dp);
1801	bool set_wa_bit = false;
1802
1803	/* Wa_14015648006 */
1804	if (IS_DISPLAY_VER(display, 11, 14))
1805		set_wa_bit |= crtc_state->wm_level_disabled;
1806
1807	/* Wa_16013835468 */
1808	if (DISPLAY_VER(display) == 12)
1809		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1810			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1811
1812	if (set_wa_bit)
1813		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1814			     0, wa_16013835468_bit_get(intel_dp));
1815	else
1816		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
1817			     wa_16013835468_bit_get(intel_dp), 0);
1818}
1819
1820static void intel_psr_enable_source(struct intel_dp *intel_dp,
1821				    const struct intel_crtc_state *crtc_state)
1822{
1823	struct intel_display *display = to_intel_display(intel_dp);
1824	struct drm_i915_private *dev_priv = to_i915(display->drm);
1825	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1826	u32 mask = 0;
1827
1828	/*
1829	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1830	 * SKL+ use hardcoded values for PSR AUX transactions.
1831	 */
1832	if (DISPLAY_VER(display) < 9)
1833		hsw_psr_setup_aux(intel_dp);
1834
1835	/*
1836	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1837	 * mask LPSP to avoid dependency on other drivers that might block
1838	 * runtime_pm besides preventing other hw tracking issues, now that we
1839	 * can rely on frontbuffer tracking.
1840	 *
1841	 * From bspec prior to LunarLake:
1842	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1843	 * panel replay mode.
1844	 *
1845	 * From bspec beyond LunarLake:
1846	 * Panel Replay on DP: No bits are applicable
1847	 * Panel Replay on eDP: All bits are applicable
1848	 */
1849	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
1850		mask = EDP_PSR_DEBUG_MASK_HPD;
1851
1852	if (intel_dp_is_edp(intel_dp)) {
1853		mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1854
1855		/*
1856		 * For some unknown reason on HSW non-ULT (or at least on
1857		 * Dell Latitude E6540) external displays start to flicker
1858		 * when PSR is enabled on the eDP. SR/PC6 residency is much
1859		 * higher than should be possible with an external display.
1860		 * As a workaround leave LPSP unmasked to prevent PSR entry
1861		 * when external displays are active.
1862		 */
1863		if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
1864			mask |= EDP_PSR_DEBUG_MASK_LPSP;
1865
1866		if (DISPLAY_VER(display) < 20)
1867			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1868
1869		/*
1870		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1871		 * registers in order to keep the CURSURFLIVE tricks working :(
1872		 */
1873		if (IS_DISPLAY_VER(display, 9, 10))
1874			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1875
1876		/* allow PSR with sprite enabled */
1877		if (IS_HASWELL(dev_priv))
1878			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1879	}
1880
1881	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
1882
1883	psr_irq_control(intel_dp);
1884
1885	/*
1886	 * TODO: if future platforms support DC3CO in more than one
1887	 * transcoder, EXITLINE will need to be unset when disabling PSR
1888	 */
1889	if (intel_dp->psr.dc3co_exitline)
1890		intel_de_rmw(display,
1891			     TRANS_EXITLINE(display, cpu_transcoder),
1892			     EXITLINE_MASK,
1893			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1894
1895	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
1896		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1897			     intel_dp->psr.psr2_sel_fetch_enabled ?
1898			     IGNORE_PSR2_HW_TRACKING : 0);
1899
1900	if (intel_dp_is_edp(intel_dp))
1901		intel_alpm_configure(intel_dp, crtc_state);
1902
1903	/*
1904	 * Wa_16013835468
1905	 * Wa_14015648006
1906	 */
1907	wm_optimization_wa(intel_dp, crtc_state);
1908
1909	if (intel_dp->psr.sel_update_enabled) {
1910		if (DISPLAY_VER(display) == 9)
1911			intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0,
1912				     PSR2_VSC_ENABLE_PROG_HEADER |
1913				     PSR2_ADD_VERTICAL_LINE_COUNT);
1914
1915		/*
1916		 * Wa_16014451276:adlp,mtl[a0,b0]
1917		 * All supported adlp panels have 1-based X granularity, this may
1918		 * cause issues if non-supported panels are used.
1919		 */
1920		if (!intel_dp->psr.panel_replay_enabled &&
1921		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
1922		     IS_ALDERLAKE_P(dev_priv)))
1923			intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1924				     0, ADLP_1_BASED_X_GRANULARITY);
1925
1926		/* Wa_16012604467:adlp,mtl[a0,b0] */
1927		if (!intel_dp->psr.panel_replay_enabled &&
1928		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
1929			intel_de_rmw(display,
1930				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
1931				     0,
1932				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1933		else if (IS_ALDERLAKE_P(dev_priv))
1934			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
1935				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1936	}
1937}
1938
1939static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1940{
1941	struct intel_display *display = to_intel_display(intel_dp);
1942	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1943	u32 val;
1944
1945	if (intel_dp->psr.panel_replay_enabled)
1946		goto no_err;
1947
1948	/*
1949	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1950	 * will still keep the error set even after the reset done in the
1951	 * irq_preinstall and irq_uninstall hooks.
1952	 * And enabling in this situation causes the screen to freeze the
1953	 * first time that PSR HW tries to activate, so let's keep PSR disabled
1954	 * to avoid any rendering problems.
1955	 */
1956	val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
1957	val &= psr_irq_psr_error_bit_get(intel_dp);
1958	if (val) {
1959		intel_dp->psr.sink_not_reliable = true;
1960		drm_dbg_kms(display->drm,
1961			    "PSR interruption error set, not enabling PSR\n");
1962		return false;
1963	}
1964
1965no_err:
1966	return true;
1967}
1968
1969static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1970				    const struct intel_crtc_state *crtc_state)
1971{
1972	struct intel_display *display = to_intel_display(intel_dp);
1973	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1974	u32 val;
1975
1976	drm_WARN_ON(display->drm, intel_dp->psr.enabled);
1977
1978	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
1979	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1980	intel_dp->psr.busy_frontbuffer_bits = 0;
1981	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1982	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1983	/* DC5/DC6 requires at least 6 idle frames */
1984	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1985	intel_dp->psr.dc3co_exit_delay = val;
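	/*
	 * E.g. at a hypothetical 60 Hz refresh rate (~16667 us per frame)
	 * this works out to roughly 100 ms worth of jiffies for the DC3CO
	 * exit delay.
	 */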
1986	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1987	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1988	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
1989	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1990	intel_dp->psr.req_psr2_sdp_prior_scanline =
1991		crtc_state->req_psr2_sdp_prior_scanline;
1992
1993	if (!psr_interrupt_error_check(intel_dp))
1994		return;
1995
1996	if (intel_dp->psr.panel_replay_enabled) {
1997		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
1998	} else {
1999		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
2000			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2001
2002		/*
2003		 * Panel replay has to be enabled before link training: doing it
2004		 * only for PSR here.
2005		 */
2006		intel_psr_enable_sink(intel_dp, crtc_state);
2007	}
2008
2009	if (intel_dp_is_edp(intel_dp))
2010		intel_snps_phy_update_psr_power_state(&dig_port->base, true);
2011
2012	intel_psr_enable_source(intel_dp, crtc_state);
2013	intel_dp->psr.enabled = true;
2014	intel_dp->psr.paused = false;
2015
2016	/*
2017	 * Link_ok is sticky and set here on PSR enable. We can assume link
2018	 * training is complete as we never continue to PSR enable with an
2019	 * untrained link. Link_ok is kept set until the first short pulse
2020	 * interrupt. This is targeted to work around panels stating bad link
2021	 * after PSR is enabled.
2022	 */
2023	intel_dp->psr.link_ok = true;
2024
2025	intel_psr_activate(intel_dp);
2026}
2027
2028static void intel_psr_exit(struct intel_dp *intel_dp)
2029{
2030	struct intel_display *display = to_intel_display(intel_dp);
2031	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2032	u32 val;
2033
2034	if (!intel_dp->psr.active) {
2035		if (transcoder_has_psr2(display, cpu_transcoder)) {
2036			val = intel_de_read(display,
2037					    EDP_PSR2_CTL(display, cpu_transcoder));
2038			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
2039		}
2040
2041		val = intel_de_read(display,
2042				    psr_ctl_reg(display, cpu_transcoder));
2043		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
2044
2045		return;
2046	}
2047
2048	if (intel_dp->psr.panel_replay_enabled) {
2049		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2050			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2051	} else if (intel_dp->psr.sel_update_enabled) {
2052		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2053
2054		val = intel_de_rmw(display,
2055				   EDP_PSR2_CTL(display, cpu_transcoder),
2056				   EDP_PSR2_ENABLE, 0);
2057
2058		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
2059	} else {
2060		val = intel_de_rmw(display,
2061				   psr_ctl_reg(display, cpu_transcoder),
2062				   EDP_PSR_ENABLE, 0);
2063
2064		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
2065	}
2066	intel_dp->psr.active = false;
2067}
2068
2069static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2070{
2071	struct intel_display *display = to_intel_display(intel_dp);
2072	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2073	i915_reg_t psr_status;
2074	u32 psr_status_mask;
2075
2076	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2077					  intel_dp->psr.panel_replay_enabled)) {
2078		psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2079		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2080	} else {
2081		psr_status = psr_status_reg(display, cpu_transcoder);
2082		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2083	}
2084
2085	/* Wait till PSR is idle */
2086	if (intel_de_wait_for_clear(display, psr_status,
2087				    psr_status_mask, 2000))
2088		drm_err(display->drm, "Timed out waiting PSR idle state\n");
2089}
2090
2091static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2092{
2093	struct intel_display *display = to_intel_display(intel_dp);
2094	struct drm_i915_private *dev_priv = to_i915(display->drm);
2095	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2096
2097	lockdep_assert_held(&intel_dp->psr.lock);
2098
2099	if (!intel_dp->psr.enabled)
2100		return;
2101
2102	if (intel_dp->psr.panel_replay_enabled)
2103		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
2104	else
2105		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
2106			    intel_dp->psr.sel_update_enabled ? "2" : "1");
2107
2108	intel_psr_exit(intel_dp);
2109	intel_psr_wait_exit_locked(intel_dp);
2110
2111	/*
2112	 * Wa_16013835468
2113	 * Wa_14015648006
2114	 */
2115	if (DISPLAY_VER(display) >= 11)
2116		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2117			     wa_16013835468_bit_get(intel_dp), 0);
2118
2119	if (intel_dp->psr.sel_update_enabled) {
2120		/* Wa_16012604467:adlp,mtl[a0,b0] */
2121		if (!intel_dp->psr.panel_replay_enabled &&
2122		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
2123			intel_de_rmw(display,
2124				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
2125				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2126		else if (IS_ALDERLAKE_P(dev_priv))
2127			intel_de_rmw(display, CLKGATE_DIS_MISC,
2128				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2129	}
2130
2131	if (intel_dp_is_edp(intel_dp))
2132		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2133
2134	/* Panel Replay on eDP is always using ALPM aux less. */
2135	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
2136		intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
2137			     ALPM_CTL_ALPM_ENABLE |
2138			     ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2139
2140		intel_de_rmw(display,
2141			     PORT_ALPM_CTL(cpu_transcoder),
2142			     PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2143	}
2144
2145	/* Disable PSR on Sink */
2146	if (!intel_dp->psr.panel_replay_enabled) {
2147		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
2148
2149		if (intel_dp->psr.sel_update_enabled)
2150			drm_dp_dpcd_writeb(&intel_dp->aux,
2151					   DP_RECEIVER_ALPM_CONFIG, 0);
2152	}
2153
2154	intel_dp->psr.enabled = false;
2155	intel_dp->psr.panel_replay_enabled = false;
2156	intel_dp->psr.sel_update_enabled = false;
2157	intel_dp->psr.psr2_sel_fetch_enabled = false;
2158	intel_dp->psr.su_region_et_enabled = false;
2159	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2160}
2161
2162/**
2163 * intel_psr_disable - Disable PSR
2164 * @intel_dp: Intel DP
2165 * @old_crtc_state: old CRTC state
2166 *
2167 * This function needs to be called before disabling pipe.
2168 */
2169void intel_psr_disable(struct intel_dp *intel_dp,
2170		       const struct intel_crtc_state *old_crtc_state)
2171{
2172	struct intel_display *display = to_intel_display(intel_dp);
2173
2174	if (!old_crtc_state->has_psr)
2175		return;
2176
2177	if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp)))
2178		return;
2179
2180	mutex_lock(&intel_dp->psr.lock);
2181
2182	intel_psr_disable_locked(intel_dp);
2183
2184	intel_dp->psr.link_ok = false;
2185
2186	mutex_unlock(&intel_dp->psr.lock);
2187	cancel_work_sync(&intel_dp->psr.work);
2188	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2189}
2190
2191/**
2192 * intel_psr_pause - Pause PSR
2193 * @intel_dp: Intel DP
2194 *
2195 * This function needs to be called after enabling psr.
2196 */
2197void intel_psr_pause(struct intel_dp *intel_dp)
2198{
2199	struct intel_display *display = to_intel_display(intel_dp);
2200	struct intel_psr *psr = &intel_dp->psr;
2201
2202	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2203		return;
2204
2205	mutex_lock(&psr->lock);
2206
2207	if (!psr->enabled) {
2208		mutex_unlock(&psr->lock);
2209		return;
2210	}
2211
2212	/* If we ever hit this, we will need to add refcount to pause/resume */
2213	drm_WARN_ON(display->drm, psr->paused);
2214
2215	intel_psr_exit(intel_dp);
2216	intel_psr_wait_exit_locked(intel_dp);
2217	psr->paused = true;
2218
2219	mutex_unlock(&psr->lock);
2220
2221	cancel_work_sync(&psr->work);
2222	cancel_delayed_work_sync(&psr->dc3co_work);
2223}
2224
2225/**
2226 * intel_psr_resume - Resume PSR
2227 * @intel_dp: Intel DP
2228 *
2229 * This function needs to be called after pausing psr.
2230 */
2231void intel_psr_resume(struct intel_dp *intel_dp)
2232{
2233	struct intel_psr *psr = &intel_dp->psr;
2234
2235	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2236		return;
2237
2238	mutex_lock(&psr->lock);
2239
2240	if (!psr->paused)
2241		goto unlock;
2242
2243	psr->paused = false;
2244	intel_psr_activate(intel_dp);
2245
2246unlock:
2247	mutex_unlock(&psr->lock);
2248}
2249
2250/**
2251 * intel_psr_needs_block_dc_vblank - Check if block dc entry is needed
2252 * @crtc_state: CRTC status
2253 *
2254 * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
2255 * prevent it. Panel Replay switches the main link off on DC entry, which
2256 * means vblank interrupts are not fired and is a problem if user-space is
2257 * polling for vblank events.
2258 */
2259bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
2260{
2261	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2262	struct intel_encoder *encoder;
2263
2264	for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2265		struct intel_dp *intel_dp;
2266
2267		if (!intel_encoder_is_dp(encoder))
2268			continue;
2269
2270		intel_dp = enc_to_intel_dp(encoder);
2271
2272		if (intel_dp_is_edp(intel_dp) &&
2273		    CAN_PANEL_REPLAY(intel_dp))
2274			return true;
2275	}
2276
2277	return false;
2278}
2279
2280static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2281{
2282	struct drm_i915_private *dev_priv = to_i915(display->drm);
2283
2284	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
2285		PSR2_MAN_TRK_CTL_ENABLE;
2286}
2287
2288static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2289{
2290	struct drm_i915_private *dev_priv = to_i915(display->drm);
2291
2292	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
2293	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2294	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2295}
2296
2297static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2298{
2299	struct drm_i915_private *dev_priv = to_i915(display->drm);
2300
2301	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
2302	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2303	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2304}
2305
2306static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2307{
2308	struct drm_i915_private *dev_priv = to_i915(display->drm);
2309
2310	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
2311	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2312	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2313}
2314
2315static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
2316{
2317	struct intel_display *display = to_intel_display(intel_dp);
2318	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2319
2320	if (intel_dp->psr.psr2_sel_fetch_enabled)
2321		intel_de_write(display,
2322			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2323			       man_trk_ctl_enable_bit_get(display) |
2324			       man_trk_ctl_partial_frame_bit_get(display) |
2325			       man_trk_ctl_single_full_frame_bit_get(display) |
2326			       man_trk_ctl_continuos_full_frame(display));
2327
2328	/*
2329	 * Display WA #0884: skl+
2330	 * This documented WA for bxt can be safely applied
2331	 * broadly so we can force HW tracking to exit PSR
2332	 * instead of disabling and re-enabling.
2333	 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
2334	 * but it makes more sense to write to the current active
2335	 * pipe.
2336	 *
2337	 * This workaround does not exist for platforms with display 10 or
2338	 * newer but testing proved that it works up to display 13; for newer
2339	 * than that, testing will be needed.
2340	 */
2341	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
2342}
2343
2344void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
2345{
2346	struct intel_display *display = to_intel_display(crtc_state);
2347	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2348	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2349	struct intel_encoder *encoder;
2350
2351	if (!crtc_state->enable_psr2_sel_fetch)
2352		return;
2353
2354	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2355					     crtc_state->uapi.encoder_mask) {
2356		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2357
2358		lockdep_assert_held(&intel_dp->psr.lock);
2359		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2360			return;
2361		break;
2362	}
2363
2364	intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder),
2365		       crtc_state->psr2_man_track_ctl);
2366
2367	if (!crtc_state->enable_psr2_su_region_et)
2368		return;
2369
2370	intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2371		       crtc_state->pipe_srcsz_early_tpt);
2372}
2373
2374static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2375				  bool full_update)
2376{
2377	struct intel_display *display = to_intel_display(crtc_state);
2378	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2379	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2380	u32 val = man_trk_ctl_enable_bit_get(display);
2381
2382	/* SF partial frame enable has to be set even on full update */
2383	val |= man_trk_ctl_partial_frame_bit_get(display);
2384
2385	if (full_update) {
2386		val |= man_trk_ctl_single_full_frame_bit_get(display);
2387		val |= man_trk_ctl_continuos_full_frame(display);
2388		goto exit;
2389	}
2390
2391	if (crtc_state->psr2_su_area.y1 == -1)
2392		goto exit;
2393
2394	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
2395		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2396		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2397	} else {
2398		drm_WARN_ON(crtc_state->uapi.crtc->dev,
2399			    crtc_state->psr2_su_area.y1 % 4 ||
2400			    crtc_state->psr2_su_area.y2 % 4);
2401
2402		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2403			crtc_state->psr2_su_area.y1 / 4 + 1);
2404		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2405			crtc_state->psr2_su_area.y2 / 4 + 1);
2406	}
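	/*
	 * E.g. in the non-ADL-P branch above, a hypothetical SU area spanning
	 * lines 8..24 is programmed as start 8 / 4 + 1 = 3 and
	 * end 24 / 4 + 1 = 7 in the manual tracking register.
	 */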
2407exit:
2408	crtc_state->psr2_man_track_ctl = val;
2409}
2410
2411static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2412					  bool full_update)
2413{
2414	int width, height;
2415
2416	if (!crtc_state->enable_psr2_su_region_et || full_update)
2417		return 0;
2418
2419	width = drm_rect_width(&crtc_state->psr2_su_area);
2420	height = drm_rect_height(&crtc_state->psr2_su_area);
2421
2422	return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2423}
2424
2425static void clip_area_update(struct drm_rect *overlap_damage_area,
2426			     struct drm_rect *damage_area,
2427			     struct drm_rect *pipe_src)
2428{
2429	if (!drm_rect_intersect(damage_area, pipe_src))
2430		return;
2431
2432	if (overlap_damage_area->y1 == -1) {
2433		overlap_damage_area->y1 = damage_area->y1;
2434		overlap_damage_area->y2 = damage_area->y2;
2435		return;
2436	}
2437
2438	if (damage_area->y1 < overlap_damage_area->y1)
2439		overlap_damage_area->y1 = damage_area->y1;
2440
2441	if (damage_area->y2 > overlap_damage_area->y2)
2442		overlap_damage_area->y2 = damage_area->y2;
2443}
2444
2445static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2446{
2447	struct intel_display *display = to_intel_display(crtc_state);
2448	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2449	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2450	u16 y_alignment;
2451
2452	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2453	if (crtc_state->dsc.compression_enable &&
2454	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
2455		y_alignment = vdsc_cfg->slice_height;
2456	else
2457		y_alignment = crtc_state->su_y_granularity;
2458
2459	crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2460	if (crtc_state->psr2_su_area.y2 % y_alignment)
2461		crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2462						y_alignment) + 1) * y_alignment;
2463}
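/*
 * E.g. with a hypothetical 4 line y_alignment an SU area of y1 = 5, y2 = 10
 * is widened to y1 = 4, y2 = 12 by the rounding above.
 */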
2464
2465/*
2466 * When early transport is in use we need to extend SU area to cover
2467 * cursor fully when cursor is in SU area.
2468 */
2469static void
2470intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2471				  struct intel_crtc *crtc,
2472				  bool *cursor_in_su_area)
2473{
2474	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2475	struct intel_plane_state *new_plane_state;
2476	struct intel_plane *plane;
2477	int i;
2478
2479	if (!crtc_state->enable_psr2_su_region_et)
2480		return;
2481
2482	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2483		struct drm_rect inter;
2484
2485		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2486			continue;
2487
2488		if (plane->id != PLANE_CURSOR)
2489			continue;
2490
2491		if (!new_plane_state->uapi.visible)
2492			continue;
2493
2494		inter = crtc_state->psr2_su_area;
2495		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2496			continue;
2497
2498		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2499				 &crtc_state->pipe_src);
2500		*cursor_in_su_area = true;
2501	}
2502}
2503
2504/*
2505 * TODO: Not clear how to handle planes with negative position,
2506 * also planes are not updated if they have a negative X
2507 * position, so for now do a full update in these cases.
2508 *
2509 * Plane scaling and rotation is not supported by selective fetch and both
2510 * properties can change without a modeset, so need to be check at every
2511 * atomic commit.
2512 */
2513static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2514{
2515	if (plane_state->uapi.dst.y1 < 0 ||
2516	    plane_state->uapi.dst.x1 < 0 ||
2517	    plane_state->scaler_id >= 0 ||
2518	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2519		return false;
2520
2521	return true;
2522}
2523
2524/*
2525 * Check for pipe properties that are not supported by selective fetch.
2526 *
2527 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2528 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2529 * enabled and going to the full update path.
2530 */
2531static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2532{
2533	if (crtc_state->scaler_state.scaler_id >= 0)
2534		return false;
2535
2536	return true;
2537}
2538
2539/* Wa 14019834836 */
2540static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
2541{
2542	struct intel_display *display = to_intel_display(crtc_state);
2543	struct intel_encoder *encoder;
2544	int hactive_limit;
2545
2546	if (crtc_state->psr2_su_area.y1 != 0 ||
2547	    crtc_state->psr2_su_area.y2 != 0)
2548		return;
2549
2550	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2551		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
2552	else
2553		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
2554
2555	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
2556		return;
2557
2558	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2559					     crtc_state->uapi.encoder_mask) {
2560		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2561
2562		if (!intel_dp_is_edp(intel_dp) &&
2563		    intel_dp->psr.panel_replay_enabled &&
2564		    intel_dp->psr.sel_update_enabled) {
2565			crtc_state->psr2_su_area.y2++;
2566			return;
2567		}
2568	}
2569}
2570
2571static void
2572intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2573{
2574	struct intel_display *display = to_intel_display(crtc_state);
2575	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2576
2577	/* Wa_14014971492 */
2578	if (!crtc_state->has_panel_replay &&
2579	    ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2580	      IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) &&
2581	    crtc_state->splitter.enable)
2582		crtc_state->psr2_su_area.y1 = 0;
2583
2584	/* Wa 14019834836 */
2585	if (DISPLAY_VER(display) == 30)
2586		intel_psr_apply_pr_link_on_su_wa(crtc_state);
2587}
2588
2589int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2590				struct intel_crtc *crtc)
2591{
2592	struct intel_display *display = to_intel_display(state);
2593	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2594	struct intel_plane_state *new_plane_state, *old_plane_state;
2595	struct intel_plane *plane;
2596	bool full_update = false, cursor_in_su_area = false;
2597	int i, ret;
2598
2599	if (!crtc_state->enable_psr2_sel_fetch)
2600		return 0;
2601
2602	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2603		full_update = true;
2604		goto skip_sel_fetch_set_loop;
2605	}
2606
2607	crtc_state->psr2_su_area.x1 = 0;
2608	crtc_state->psr2_su_area.y1 = -1;
2609	crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
2610	crtc_state->psr2_su_area.y2 = -1;
2611
2612	/*
2613	 * Calculate minimal selective fetch area of each plane and calculate
2614	 * the pipe damaged area.
2615	 * In the next loop the plane selective fetch area will actually be set
2616	 * using the whole pipe damaged area.
2617	 */
2618	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2619					     new_plane_state, i) {
2620		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2621						      .x2 = INT_MAX };
2622
2623		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2624			continue;
2625
2626		if (!new_plane_state->uapi.visible &&
2627		    !old_plane_state->uapi.visible)
2628			continue;
2629
2630		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2631			full_update = true;
2632			break;
2633		}
2634
2635		/*
2636		 * If visibility changed or the plane moved, mark the whole plane
2637		 * area as damaged as it needs a complete redraw in the new and
2638		 * old position.
2639		 */
2640		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2641		    !drm_rect_equals(&new_plane_state->uapi.dst,
2642				     &old_plane_state->uapi.dst)) {
2643			if (old_plane_state->uapi.visible) {
2644				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2645				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2646				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2647						 &crtc_state->pipe_src);
2648			}
2649
2650			if (new_plane_state->uapi.visible) {
2651				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2652				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2653				clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2654						 &crtc_state->pipe_src);
2655			}
2656			continue;
2657		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2658			/* If alpha changed mark the whole plane area as damaged */
2659			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2660			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2661			clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2662					 &crtc_state->pipe_src);
2663			continue;
2664		}
2665
2666		src = drm_plane_state_src(&new_plane_state->uapi);
2667		drm_rect_fp_to_int(&src, &src);
2668
2669		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2670						     &new_plane_state->uapi, &damaged_area))
2671			continue;
2672
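		/*
		 * The merged damage is in plane source coordinates; translate
		 * it to pipe coordinates using the dst - src offset before
		 * merging it into the SU area.
		 */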
2673		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2674		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2675		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2676		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2677
2678		clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2679	}
2680
2681	/*
2682	 * TODO: For now we are just using full update in case
2683	 * selective fetch area calculation fails. To optimize this we
2684	 * should identify cases where this happens and fix the area
2685	 * calculation for those.
2686	 */
2687	if (crtc_state->psr2_su_area.y1 == -1) {
2688		drm_info_once(display->drm,
2689			      "Selective fetch area calculation failed in pipe %c\n",
2690			      pipe_name(crtc->pipe));
2691		full_update = true;
2692	}
2693
2694	if (full_update)
2695		goto skip_sel_fetch_set_loop;
2696
2697	intel_psr_apply_su_area_workarounds(crtc_state);
2698
2699	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2700	if (ret)
2701		return ret;
2702
2703	/*
2704	 * Adjust the SU area to fully cover the cursor when necessary (early
2705	 * transport). This needs to be done after
2706	 * drm_atomic_add_affected_planes() to ensure a visible cursor is added
2707	 * to the affected planes even when the cursor itself is not updated.
2708	 */
2709	intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2710
2711	intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2712
2713	/*
2714	 * Now that we have the pipe damaged area, check if it intersects with
2715	 * each plane; if it does, set that plane's selective fetch area.
2716	 */
2717	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2718					     new_plane_state, i) {
2719		struct drm_rect *sel_fetch_area, inter;
2720		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2721
2722		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2723		    !new_plane_state->uapi.visible)
2724			continue;
2725
2726		inter = crtc_state->psr2_su_area;
2727		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2728		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2729			sel_fetch_area->y1 = -1;
2730			sel_fetch_area->y2 = -1;
2731			/*
2732			 * if plane sel fetch was previously enabled ->
2733			 * disable it
2734			 */
2735			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2736				crtc_state->update_planes |= BIT(plane->id);
2737
2738			continue;
2739		}
2740
2741		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2742			full_update = true;
2743			break;
2744		}
2745
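		/*
		 * Store the intersection relative to the plane's own top-left
		 * corner, i.e. in plane-local coordinates.
		 */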
2746		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2747		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2748		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2749		crtc_state->update_planes |= BIT(plane->id);
2750
2751		/*
2752		 * The sel_fetch_area is calculated for the UV plane. Use the
2753		 * same area for the Y plane as well.
2754		 */
2755		if (linked) {
2756			struct intel_plane_state *linked_new_plane_state;
2757			struct drm_rect *linked_sel_fetch_area;
2758
2759			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2760			if (IS_ERR(linked_new_plane_state))
2761				return PTR_ERR(linked_new_plane_state);
2762
2763			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2764			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2765			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2766			crtc_state->update_planes |= BIT(linked->id);
2767		}
2768	}
2769
2770skip_sel_fetch_set_loop:
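	/*
	 * Compute the manual tracking control value and the early transport
	 * pipe source size; full_update requests a full-frame fetch instead
	 * of a selective one.
	 */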
2771	psr2_man_trk_ctl_calc(crtc_state, full_update);
2772	crtc_state->pipe_srcsz_early_tpt =
2773		psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2774	return 0;
2775}
2776
2777void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2778				struct intel_crtc *crtc)
2779{
2780	struct intel_display *display = to_intel_display(state);
2781	struct drm_i915_private *i915 = to_i915(state->base.dev);
2782	const struct intel_crtc_state *old_crtc_state =
2783		intel_atomic_get_old_crtc_state(state, crtc);
2784	const struct intel_crtc_state *new_crtc_state =
2785		intel_atomic_get_new_crtc_state(state, crtc);
2786	struct intel_encoder *encoder;
2787
2788	if (!HAS_PSR(display))
2789		return;
2790
2791	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2792					     old_crtc_state->uapi.encoder_mask) {
2793		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2794		struct intel_psr *psr = &intel_dp->psr;
2795		bool needs_to_disable = false;
2796
2797		mutex_lock(&psr->lock);
2798
2799		/*
2800		 * Reasons to disable:
2801		 * - PSR disabled in new state
2802		 * - All planes will go inactive
2803		 * - Changing between PSR versions
2804		 * - Region Early Transport changing
2805		 * - Display WA #1136: skl, bxt
2806		 */
2807		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2808		needs_to_disable |= !new_crtc_state->has_psr;
2809		needs_to_disable |= !new_crtc_state->active_planes;
2810		needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
2811		needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
2812			psr->su_region_et_enabled;
2813		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2814			new_crtc_state->wm_level_disabled;
2815
2816		if (psr->enabled && needs_to_disable)
2817			intel_psr_disable_locked(intel_dp);
2818		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2819			/* Wa_14015648006 */
2820			wm_optimization_wa(intel_dp, new_crtc_state);
2821
2822		mutex_unlock(&psr->lock);
2823	}
2824}
2825
2826void intel_psr_post_plane_update(struct intel_atomic_state *state,
2827				 struct intel_crtc *crtc)
2828{
2829	struct intel_display *display = to_intel_display(state);
2830	const struct intel_crtc_state *crtc_state =
2831		intel_atomic_get_new_crtc_state(state, crtc);
2832	struct intel_encoder *encoder;
2833
2834	if (!crtc_state->has_psr)
2835		return;
2836
2837	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2838					     crtc_state->uapi.encoder_mask) {
2839		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2840		struct intel_psr *psr = &intel_dp->psr;
2841		bool keep_disabled = false;
2842
2843		mutex_lock(&psr->lock);
2844
2845		drm_WARN_ON(display->drm,
2846			    psr->enabled && !crtc_state->active_planes);
2847
2848		keep_disabled |= psr->sink_not_reliable;
2849		keep_disabled |= !crtc_state->active_planes;
2850
2851		/* Display WA #1136: skl, bxt */
2852		keep_disabled |= DISPLAY_VER(display) < 11 &&
2853			crtc_state->wm_level_disabled;
2854
2855		if (!psr->enabled && !keep_disabled)
2856			intel_psr_enable_locked(intel_dp, crtc_state);
2857		else if (psr->enabled && !crtc_state->wm_level_disabled)
2858			/* Wa_14015648006 */
2859			wm_optimization_wa(intel_dp, crtc_state);
2860
2861		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2862		if (crtc_state->crc_enabled && psr->enabled)
2863			psr_force_hw_tracking_exit(intel_dp);
2864
2865		/*
2866		 * Clear possible busy bits in case we have an
2867		 * invalidate -> flip -> flush sequence.
2868		 */
2869		intel_dp->psr.busy_frontbuffer_bits = 0;
2870
2871		mutex_unlock(&psr->lock);
2872	}
2873}
2874
2875static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2876{
2877	struct intel_display *display = to_intel_display(intel_dp);
2878	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2879
2880	/*
2881	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2882	 * As all higher states have bit 4 of the PSR2 state set, we can just
2883	 * wait for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2884	 */
2885	return intel_de_wait_for_clear(display,
2886				       EDP_PSR2_STATUS(display, cpu_transcoder),
2887				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2888}
2889
2890static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2891{
2892	struct intel_display *display = to_intel_display(intel_dp);
2893	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2894
2895	/*
2896	 * From bspec: Panel Self Refresh (BDW+)
2897	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2898	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2899	 * defensive enough to cover everything.
2900	 */
2901	return intel_de_wait_for_clear(display,
2902				       psr_status_reg(display, cpu_transcoder),
2903				       EDP_PSR_STATUS_STATE_MASK, 50);
2904}
2905
2906/**
2907 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2908 * @new_crtc_state: new CRTC state
2909 *
2910 * This function is expected to be called from pipe_update_start() where it is
2911 * not expected to race with PSR enable or disable.
2912 */
2913void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2914{
2915	struct intel_display *display = to_intel_display(new_crtc_state);
2916	struct intel_encoder *encoder;
2917
2918	if (!new_crtc_state->has_psr)
2919		return;
2920
2921	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
2922					     new_crtc_state->uapi.encoder_mask) {
2923		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2924		int ret;
2925
2926		lockdep_assert_held(&intel_dp->psr.lock);
2927
2928		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
2929			continue;
2930
2931		if (intel_dp->psr.sel_update_enabled)
2932			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2933		else
2934			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2935
2936		if (ret)
2937			drm_err(display->drm,
2938				"PSR wait timed out, atomic update may fail\n");
2939	}
2940}
2941
2942static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2943{
2944	struct intel_display *display = to_intel_display(intel_dp);
2945	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2946	i915_reg_t reg;
2947	u32 mask;
2948	int err;
2949
2950	if (!intel_dp->psr.enabled)
2951		return false;
2952
2953	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2954					  intel_dp->psr.panel_replay_enabled)) {
2955		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
2956		mask = EDP_PSR2_STATUS_STATE_MASK;
2957	} else {
2958		reg = psr_status_reg(display, cpu_transcoder);
2959		mask = EDP_PSR_STATUS_STATE_MASK;
2960	}
2961
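	/*
	 * Poll for idle without holding the PSR lock; the re-check below
	 * catches a PSR disable that happened during the wait.
	 */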
2962	mutex_unlock(&intel_dp->psr.lock);
2963
2964	err = intel_de_wait_for_clear(display, reg, mask, 50);
2965	if (err)
2966		drm_err(display->drm,
2967			"Timed out waiting for PSR Idle for re-enable\n");
2968
2969	/* After the unlocked wait, verify that PSR is still wanted! */
2970	mutex_lock(&intel_dp->psr.lock);
2971	return err == 0 && intel_dp->psr.enabled;
2972}
2973
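/*
 * Force a modeset on every eDP connector so that a changed PSR debug mode is
 * re-evaluated by a fresh compute config/commit cycle.
 */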
2974static int intel_psr_fastset_force(struct intel_display *display)
2975{
2976	struct drm_connector_list_iter conn_iter;
2977	struct drm_modeset_acquire_ctx ctx;
2978	struct drm_atomic_state *state;
2979	struct drm_connector *conn;
2980	int err = 0;
2981
2982	state = drm_atomic_state_alloc(display->drm);
2983	if (!state)
2984		return -ENOMEM;
2985
2986	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2987
2988	state->acquire_ctx = &ctx;
2989	to_intel_atomic_state(state)->internal = true;
2990
2991retry:
2992	drm_connector_list_iter_begin(display->drm, &conn_iter);
2993	drm_for_each_connector_iter(conn, &conn_iter) {
2994		struct drm_connector_state *conn_state;
2995		struct drm_crtc_state *crtc_state;
2996
2997		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2998			continue;
2999
3000		conn_state = drm_atomic_get_connector_state(state, conn);
3001		if (IS_ERR(conn_state)) {
3002			err = PTR_ERR(conn_state);
3003			break;
3004		}
3005
3006		if (!conn_state->crtc)
3007			continue;
3008
3009		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
3010		if (IS_ERR(crtc_state)) {
3011			err = PTR_ERR(crtc_state);
3012			break;
3013		}
3014
3015		/* Mark mode as changed to trigger a pipe->update() */
3016		crtc_state->mode_changed = true;
3017	}
3018	drm_connector_list_iter_end(&conn_iter);
3019
3020	if (err == 0)
3021		err = drm_atomic_commit(state);
3022
3023	if (err == -EDEADLK) {
3024		drm_atomic_state_clear(state);
3025		err = drm_modeset_backoff(&ctx);
3026		if (!err)
3027			goto retry;
3028	}
3029
3030	drm_modeset_drop_locks(&ctx);
3031	drm_modeset_acquire_fini(&ctx);
3032	drm_atomic_state_put(state);
3033
3034	return err;
3035}
3036
3037int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
3038{
3039	struct intel_display *display = to_intel_display(intel_dp);
3040	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
3041	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3042					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3043	u32 old_mode, old_disable_bits;
3044	int ret;
3045
3046	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3047		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
3048		    I915_PSR_DEBUG_MODE_MASK) ||
3049	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
3050		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
3051		return -EINVAL;
3052	}
3053
3054	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
3055	if (ret)
3056		return ret;
3057
3058	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
3059	old_disable_bits = intel_dp->psr.debug &
3060		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
3061		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
3062
3063	intel_dp->psr.debug = val;
3064
3065	/*
3066	 * Do it right away if it's already enabled, otherwise it will be done
3067	 * when enabling the source.
3068	 */
3069	if (intel_dp->psr.enabled)
3070		psr_irq_control(intel_dp);
3071
3072	mutex_unlock(&intel_dp->psr.lock);
3073
3074	if (old_mode != mode || old_disable_bits != disable_bits)
3075		ret = intel_psr_fastset_force(display);
3076
3077	return ret;
3078}
3079
3080static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3081{
3082	struct intel_psr *psr = &intel_dp->psr;
3083
3084	intel_psr_disable_locked(intel_dp);
3085	psr->sink_not_reliable = true;
3086	/* let's make sure that the sink is awake */
3087	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3088}
3089
3090static void intel_psr_work(struct work_struct *work)
3091{
3092	struct intel_dp *intel_dp =
3093		container_of(work, typeof(*intel_dp), psr.work);
3094
3095	mutex_lock(&intel_dp->psr.lock);
3096
3097	if (!intel_dp->psr.enabled)
3098		goto unlock;
3099
3100	if (READ_ONCE(intel_dp->psr.irq_aux_error))
3101		intel_psr_handle_irq(intel_dp);
3102
3103	/*
3104	 * We have to make sure PSR is ready for re-enable, otherwise it stays
3105	 * disabled until the next full enable/disable cycle.
3106	 * PSR might take some time to get fully disabled
3107	 * and be ready for re-enable.
3108	 */
3109	if (!__psr_wait_for_idle_locked(intel_dp))
3110		goto unlock;
3111
3112	/*
3113	 * The delayed work can race with an invalidate hence we need to
3114	 * recheck. Since psr_flush first clears this and then reschedules we
3115	 * won't ever miss a flush when bailing out here.
3116	 */
3117	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
3118		goto unlock;
3119
3120	intel_psr_activate(intel_dp);
3121unlock:
3122	mutex_unlock(&intel_dp->psr.lock);
3123}
3124
3125static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3126{
3127	struct intel_display *display = to_intel_display(intel_dp);
3128	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3129
3130	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3131		u32 val;
3132
3133		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3134			/* Send one update, otherwise lag is observed on screen */
3135			intel_de_write(display,
3136				       CURSURFLIVE(display, intel_dp->psr.pipe),
3137				       0);
3138			return;
3139		}
3140
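		/*
		 * The frontbuffer is about to be dirtied: switch manual
		 * tracking to continuous full frame (CFF) fetching and kick one
		 * frame update via CURSURFLIVE so the new setting takes effect.
		 */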
3141		val = man_trk_ctl_enable_bit_get(display) |
3142		      man_trk_ctl_partial_frame_bit_get(display) |
3143		      man_trk_ctl_continuos_full_frame(display);
3144		intel_de_write(display,
3145			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3146			       val);
3147		intel_de_write(display,
3148			       CURSURFLIVE(display, intel_dp->psr.pipe), 0);
3149		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3150	} else {
3151		intel_psr_exit(intel_dp);
3152	}
3153}
3154
3155/**
3156 * intel_psr_invalidate - Invalidate PSR
3157 * @display: display device
3158 * @frontbuffer_bits: frontbuffer plane tracking bits
3159 * @origin: which operation caused the invalidate
3160 *
3161 * Since the hardware frontbuffer tracking has gaps we need to integrate
3162 * with the software frontbuffer tracking. This function gets called every
3163 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3164 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3165 *
3166 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3167 */
3168void intel_psr_invalidate(struct intel_display *display,
3169			  unsigned frontbuffer_bits, enum fb_op_origin origin)
3170{
3171	struct intel_encoder *encoder;
3172
3173	if (origin == ORIGIN_FLIP)
3174		return;
3175
3176	for_each_intel_encoder_with_psr(display->drm, encoder) {
3177		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3178		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3179
3180		mutex_lock(&intel_dp->psr.lock);
3181		if (!intel_dp->psr.enabled) {
3182			mutex_unlock(&intel_dp->psr.lock);
3183			continue;
3184		}
3185
3186		pipe_frontbuffer_bits &=
3187			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3188		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3189
3190		if (pipe_frontbuffer_bits)
3191			_psr_invalidate_handle(intel_dp);
3192
3193		mutex_unlock(&intel_dp->psr.lock);
3194	}
3195}
3196/*
3197 * Once we completely rely on PSR2 S/W tracking in the future,
3198 * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
3199 * event as well, therefore tgl_dc3co_flush_locked() will need to be changed
3200 * accordingly.
3201 */
3202static void
3203tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3204		       enum fb_op_origin origin)
3205{
3206	struct intel_display *display = to_intel_display(intel_dp);
3207	struct drm_i915_private *i915 = to_i915(display->drm);
3208
3209	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
3210	    !intel_dp->psr.active)
3211		return;
3212
3213	/*
3214	 * Every frontbuffer flush/flip event modifies the delay of the delayed
3215	 * work; when the delayed work finally runs, the display has been idle.
3216	 */
3217	if (!(frontbuffer_bits &
3218	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3219		return;
3220
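	/*
	 * Enable DC3CO and (re)arm the disable work; every new flush pushes
	 * the disable further out.
	 */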
3221	tgl_psr2_enable_dc3co(intel_dp);
3222	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3223			 intel_dp->psr.dc3co_exit_delay);
3224}
3225
3226static void _psr_flush_handle(struct intel_dp *intel_dp)
3227{
3228	struct intel_display *display = to_intel_display(intel_dp);
3229	struct drm_i915_private *dev_priv = to_i915(display->drm);
3230	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3231
3232	if (intel_dp->psr.psr2_sel_fetch_enabled) {
3233		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3234			/* can we turn CFF off? */
3235			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
3236				u32 val = man_trk_ctl_enable_bit_get(display) |
3237					man_trk_ctl_partial_frame_bit_get(display) |
3238					man_trk_ctl_single_full_frame_bit_get(display) |
3239					man_trk_ctl_continuos_full_frame(display);
3240
3241				/*
3242				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
3243				 * updates. Still keep the CFF bit enabled as we don't have a proper
3244				 * SU configuration in case an update is sent for any reason after
3245				 * the SFF bit gets cleared by the HW on the next vblank.
3246				 */
3247				intel_de_write(display,
3248					       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3249					       val);
3250				intel_de_write(display,
3251					       CURSURFLIVE(display, intel_dp->psr.pipe),
3252					       0);
3253				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3254			}
3255		} else {
3256			/*
3257			 * continuous full frame is disabled, only a single full
3258			 * frame is required
3259			 */
3260			psr_force_hw_tracking_exit(intel_dp);
3261		}
3262	} else {
3263		psr_force_hw_tracking_exit(intel_dp);
3264
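		/*
		 * If PSR is currently inactive and nothing is busy, kick the
		 * PSR work so it re-activates PSR once the hardware is idle.
		 */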
3265		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3266			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3267	}
3268}
3269
3270/**
3271 * intel_psr_flush - Flush PSR
3272 * @display: display device
3273 * @frontbuffer_bits: frontbuffer plane tracking bits
3274 * @origin: which operation caused the flush
3275 *
3276 * Since the hardware frontbuffer tracking has gaps we need to integrate
3277 * with the software frontbuffer tracking. This function gets called every
3278 * time frontbuffer rendering has completed and flushed out to memory. PSR
3279 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3280 *
3281 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3282 */
3283void intel_psr_flush(struct intel_display *display,
3284		     unsigned frontbuffer_bits, enum fb_op_origin origin)
3285{
3286	struct intel_encoder *encoder;
3287
3288	for_each_intel_encoder_with_psr(display->drm, encoder) {
3289		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3290		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3291
3292		mutex_lock(&intel_dp->psr.lock);
3293		if (!intel_dp->psr.enabled) {
3294			mutex_unlock(&intel_dp->psr.lock);
3295			continue;
3296		}
3297
3298		pipe_frontbuffer_bits &=
3299			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3300		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3301
3302		/*
3303		 * If the PSR is paused by an explicit intel_psr_paused() call,
3304		 * we have to ensure that the PSR is not activated until
3305		 * intel_psr_resume() is called.
3306		 */
3307		if (intel_dp->psr.paused)
3308			goto unlock;
3309
3310		if (origin == ORIGIN_FLIP ||
3311		    (origin == ORIGIN_CURSOR_UPDATE &&
3312		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
3313			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3314			goto unlock;
3315		}
3316
3317		if (pipe_frontbuffer_bits == 0)
3318			goto unlock;
3319
3320		/* By definition flush = invalidate + flush */
3321		_psr_flush_handle(intel_dp);
3322unlock:
3323		mutex_unlock(&intel_dp->psr.lock);
3324	}
3325}
3326
3327/**
3328 * intel_psr_init - Init basic PSR work and mutex.
3329 * @intel_dp: Intel DP
3330 *
3331 * This function is called after connector initialization (which handles the
3332 * connector capabilities) and initializes the basic PSR state for each DP
3333 * encoder.
3334 */
3335void intel_psr_init(struct intel_dp *intel_dp)
3336{
3337	struct intel_display *display = to_intel_display(intel_dp);
3338	struct drm_i915_private *dev_priv = to_i915(display->drm);
3339	struct intel_connector *connector = intel_dp->attached_connector;
3340	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3341
3342	if (!(HAS_PSR(display) || HAS_DP20(dev_priv)))
3343		return;
3344
3345	/*
3346	 * HSW spec explicitly says PSR is tied to port A.
3347	 * BDW+ platforms have an instance of PSR registers per transcoder but
3348	 * BDW, GEN9 and GEN11 are not validated by the HW team in transcoders
3349	 * other than the eDP one.
3350	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
3351	 * so let's keep it hardcoded to PORT_A for those.
3352	 * GEN12, however, supports an instance of PSR registers per transcoder.
3353	 */
3354	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3355		drm_dbg_kms(display->drm,
3356			    "PSR condition failed: Port not supported\n");
3357		return;
3358	}
3359
3360	if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
3361	    DISPLAY_VER(display) >= 20)
3362		intel_dp->psr.source_panel_replay_support = true;
3363
3364	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3365		intel_dp->psr.source_support = true;
3366
3367	/* Set link_standby vs. link_off defaults */
3368	if (DISPLAY_VER(display) < 12)
3369		/* For new platforms up to TGL let's respect the VBT again */
3370		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3371
3372	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3373	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3374	mutex_init(&intel_dp->psr.lock);
3375}
3376
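/*
 * Read the sink status and error status DPCD registers, using the Panel Replay
 * offsets when Panel Replay is enabled and the PSR offsets otherwise.
 */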
3377static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3378					   u8 *status, u8 *error_status)
3379{
3380	struct drm_dp_aux *aux = &intel_dp->aux;
3381	int ret;
3382	unsigned int offset;
3383
3384	offset = intel_dp->psr.panel_replay_enabled ?
3385		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3386
3387	ret = drm_dp_dpcd_readb(aux, offset, status);
3388	if (ret != 1)
3389		return ret;
3390
3391	offset = intel_dp->psr.panel_replay_enabled ?
3392		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3393
3394	ret = drm_dp_dpcd_readb(aux, offset, error_status);
3395	if (ret != 1)
3396		return ret;
3397
3398	*status = *status & DP_PSR_SINK_STATE_MASK;
3399
3400	return 0;
3401}
3402
3403static void psr_alpm_check(struct intel_dp *intel_dp)
3404{
3405	struct intel_display *display = to_intel_display(intel_dp);
3406	struct drm_dp_aux *aux = &intel_dp->aux;
3407	struct intel_psr *psr = &intel_dp->psr;
3408	u8 val;
3409	int r;
3410
3411	if (!psr->sel_update_enabled)
3412		return;
3413
3414	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
3415	if (r != 1) {
3416		drm_err(display->drm, "Error reading ALPM status\n");
3417		return;
3418	}
3419
3420	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
3421		intel_psr_disable_locked(intel_dp);
3422		psr->sink_not_reliable = true;
3423		drm_dbg_kms(display->drm,
3424			    "ALPM lock timeout error, disabling PSR\n");
3425
3426		/* Clearing error */
3427		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3428	}
3429}
3430
3431static void psr_capability_changed_check(struct intel_dp *intel_dp)
3432{
3433	struct intel_display *display = to_intel_display(intel_dp);
3434	struct intel_psr *psr = &intel_dp->psr;
3435	u8 val;
3436	int r;
3437
3438	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3439	if (r != 1) {
3440		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3441		return;
3442	}
3443
3444	if (val & DP_PSR_CAPS_CHANGE) {
3445		intel_psr_disable_locked(intel_dp);
3446		psr->sink_not_reliable = true;
3447		drm_dbg_kms(display->drm,
3448			    "Sink PSR capability changed, disabling PSR\n");
3449
3450		/* Clearing it */
3451		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3452	}
3453}
3454
3455/*
3456 * On common bits:
3457 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3458 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3459 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3460 * this function relies on the PSR definitions
3461 */
3462void intel_psr_short_pulse(struct intel_dp *intel_dp)
3463{
3464	struct intel_display *display = to_intel_display(intel_dp);
3465	struct intel_psr *psr = &intel_dp->psr;
3466	u8 status, error_status;
3467	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3468			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3469			  DP_PSR_LINK_CRC_ERROR;
3470
3471	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3472		return;
3473
3474	mutex_lock(&psr->lock);
3475
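	/*
	 * A short pulse may indicate a link problem, so stop reporting the
	 * link as known-good.
	 */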
3476	psr->link_ok = false;
3477
3478	if (!psr->enabled)
3479		goto exit;
3480
3481	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3482		drm_err(display->drm,
3483			"Error reading PSR status or error status\n");
3484		goto exit;
3485	}
3486
3487	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3488	    (error_status & errors)) {
3489		intel_psr_disable_locked(intel_dp);
3490		psr->sink_not_reliable = true;
3491	}
3492
3493	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3494	    !error_status)
3495		drm_dbg_kms(display->drm,
3496			    "PSR sink internal error, disabling PSR\n");
3497	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3498		drm_dbg_kms(display->drm,
3499			    "PSR RFB storage error, disabling PSR\n");
3500	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3501		drm_dbg_kms(display->drm,
3502			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
3503	if (error_status & DP_PSR_LINK_CRC_ERROR)
3504		drm_dbg_kms(display->drm,
3505			    "PSR Link CRC error, disabling PSR\n");
3506
3507	if (error_status & ~errors)
3508		drm_err(display->drm,
3509			"PSR_ERROR_STATUS unhandled errors %x\n",
3510			error_status & ~errors);
3511	/* clear status register */
3512	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3513
3514	if (!psr->panel_replay_enabled) {
3515		psr_alpm_check(intel_dp);
3516		psr_capability_changed_check(intel_dp);
3517	}
3518
3519exit:
3520	mutex_unlock(&psr->lock);
3521}
3522
3523bool intel_psr_enabled(struct intel_dp *intel_dp)
3524{
3525	bool ret;
3526
3527	if (!CAN_PSR(intel_dp))
3528		return false;
3529
3530	mutex_lock(&intel_dp->psr.lock);
3531	ret = intel_dp->psr.enabled;
3532	mutex_unlock(&intel_dp->psr.lock);
3533
3534	return ret;
3535}
3536
3537/**
3538 * intel_psr_link_ok - return psr->link_ok
3539 * @intel_dp: struct intel_dp
3540 *
3541 * We are seeing unexpected link re-trainings with some panels. This is caused
3542 * by the panel reporting a bad link status after PSR is enabled. Code checking
3543 * the link status can call this to decide whether it can ignore a bad link
3544 * status reported by the panel, i.e. if the panel reports a bad link but
3545 * intel_psr_link_ok() says the link is ok, the caller should rely on the latter.
3546 *
3547 * Return value of link_ok
3548 */
3549bool intel_psr_link_ok(struct intel_dp *intel_dp)
3550{
3551	bool ret;
3552
3553	if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3554	    !intel_dp_is_edp(intel_dp))
3555		return false;
3556
3557	mutex_lock(&intel_dp->psr.lock);
3558	ret = intel_dp->psr.link_ok;
3559	mutex_unlock(&intel_dp->psr.lock);
3560
3561	return ret;
3562}
3563
3564/**
3565 * intel_psr_lock - grab PSR lock
3566 * @crtc_state: the crtc state
3567 *
3568 * This is initially meant to be used around CRTC updates, when
3569 * vblank-sensitive registers are updated and we need to grab the lock
3570 * before that to avoid vblank evasion.
3571 */
3572void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3573{
3574	struct intel_display *display = to_intel_display(crtc_state);
3575	struct intel_encoder *encoder;
3576
3577	if (!crtc_state->has_psr)
3578		return;
3579
3580	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3581					     crtc_state->uapi.encoder_mask) {
3582		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3583
3584		mutex_lock(&intel_dp->psr.lock);
3585		break;
3586	}
3587}
3588
3589/**
3590 * intel_psr_unlock - release PSR lock
3591 * @crtc_state: the crtc state
3592 *
3593 * Release the PSR lock that was held during pipe update.
3594 */
3595void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3596{
3597	struct intel_display *display = to_intel_display(crtc_state);
3598	struct intel_encoder *encoder;
3599
3600	if (!crtc_state->has_psr)
3601		return;
3602
3603	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
3604					     crtc_state->uapi.encoder_mask) {
3605		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3606
3607		mutex_unlock(&intel_dp->psr.lock);
3608		break;
3609	}
3610}
3611
3612static void
3613psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3614{
3615	struct intel_display *display = to_intel_display(intel_dp);
3616	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3617	const char *status = "unknown";
3618	u32 val, status_val;
3619
3620	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
3621					  intel_dp->psr.panel_replay_enabled)) {
3622		static const char * const live_status[] = {
3623			"IDLE",
3624			"CAPTURE",
3625			"CAPTURE_FS",
3626			"SLEEP",
3627			"BUFON_FW",
3628			"ML_UP",
3629			"SU_STANDBY",
3630			"FAST_SLEEP",
3631			"DEEP_SLEEP",
3632			"BUF_ON",
3633			"TG_ON"
3634		};
3635		val = intel_de_read(display,
3636				    EDP_PSR2_STATUS(display, cpu_transcoder));
3637		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3638		if (status_val < ARRAY_SIZE(live_status))
3639			status = live_status[status_val];
3640	} else {
3641		static const char * const live_status[] = {
3642			"IDLE",
3643			"SRDONACK",
3644			"SRDENT",
3645			"BUFOFF",
3646			"BUFON",
3647			"AUXACK",
3648			"SRDOFFACK",
3649			"SRDENT_ON",
3650		};
3651		val = intel_de_read(display,
3652				    psr_status_reg(display, cpu_transcoder));
3653		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3654		if (status_val < ARRAY_SIZE(live_status))
3655			status = live_status[status_val];
3656	}
3657
3658	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3659}
3660
3661static void intel_psr_sink_capability(struct intel_dp *intel_dp,
3662				      struct seq_file *m)
3663{
3664	struct intel_psr *psr = &intel_dp->psr;
3665
3666	seq_printf(m, "Sink support: PSR = %s",
3667		   str_yes_no(psr->sink_support));
3668
3669	if (psr->sink_support)
3670		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3671	if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
3672		seq_printf(m, " (Early Transport)");
3673	seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
3674	seq_printf(m, ", Panel Replay Selective Update = %s",
3675		   str_yes_no(psr->sink_panel_replay_su_support));
3676	if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
3677		seq_printf(m, " (Early Transport)");
3678	seq_printf(m, "\n");
3679}
3680
3681static void intel_psr_print_mode(struct intel_dp *intel_dp,
3682				 struct seq_file *m)
3683{
3684	struct intel_psr *psr = &intel_dp->psr;
3685	const char *status, *mode, *region_et;
3686
3687	if (psr->enabled)
3688		status = " enabled";
3689	else
3690		status = "disabled";
3691
3692	if (psr->panel_replay_enabled && psr->sel_update_enabled)
3693		mode = "Panel Replay Selective Update";
3694	else if (psr->panel_replay_enabled)
3695		mode = "Panel Replay";
3696	else if (psr->sel_update_enabled)
3697		mode = "PSR2";
3698	else if (psr->enabled)
3699		mode = "PSR1";
3700	else
3701		mode = "";
3702
3703	if (psr->su_region_et_enabled)
3704		region_et = " (Early Transport)";
3705	else
3706		region_et = "";
3707
3708	seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
3709}
3710
3711static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3712{
3713	struct intel_display *display = to_intel_display(intel_dp);
3714	struct drm_i915_private *dev_priv = to_i915(display->drm);
3715	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3716	struct intel_psr *psr = &intel_dp->psr;
3717	intel_wakeref_t wakeref;
3718	bool enabled;
3719	u32 val, psr2_ctl;
3720
3721	intel_psr_sink_capability(intel_dp, m);
3722
3723	if (!(psr->sink_support || psr->sink_panel_replay_support))
3724		return 0;
3725
3726	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3727	mutex_lock(&psr->lock);
3728
3729	intel_psr_print_mode(intel_dp, m);
3730
3731	if (!psr->enabled) {
3732		seq_printf(m, "PSR sink not reliable: %s\n",
3733			   str_yes_no(psr->sink_not_reliable));
3734
3735		goto unlock;
3736	}
3737
3738	if (psr->panel_replay_enabled) {
3739		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
3740
3741		if (intel_dp_is_edp(intel_dp))
3742			psr2_ctl = intel_de_read(display,
3743						 EDP_PSR2_CTL(display,
3744							      cpu_transcoder));
3745
3746		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3747	} else if (psr->sel_update_enabled) {
3748		val = intel_de_read(display,
3749				    EDP_PSR2_CTL(display, cpu_transcoder));
3750		enabled = val & EDP_PSR2_ENABLE;
3751	} else {
3752		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
3753		enabled = val & EDP_PSR_ENABLE;
3754	}
3755	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3756		   str_enabled_disabled(enabled), val);
3757	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
3758		seq_printf(m, "PSR2_CTL: 0x%08x\n",
3759			   psr2_ctl);
3760	psr_source_status(intel_dp, m);
3761	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3762		   psr->busy_frontbuffer_bits);
3763
3764	/*
3765	 * The SKL+ perf counter is reset to 0 every time a DC state is entered
3766	 */
3767	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
3768	seq_printf(m, "Performance counter: %u\n",
3769		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3770
3771	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3772		seq_printf(m, "Last attempted entry at: %lld\n",
3773			   psr->last_entry_attempt);
3774		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3775	}
3776
3777	if (psr->sel_update_enabled) {
3778		u32 su_frames_val[3];
3779		int frame;
3780
3781		/*
3782		 * Read all 3 registers beforehand to minimize crossing a
3783		 * frame boundary between register reads
3784		 */
3785		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3786			val = intel_de_read(display,
3787					    PSR2_SU_STATUS(display, cpu_transcoder, frame));
3788			su_frames_val[frame / 3] = val;
3789		}
3790
3791		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3792
3793		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3794			u32 su_blocks;
3795
3796			su_blocks = su_frames_val[frame / 3] &
3797				    PSR2_SU_STATUS_MASK(frame);
3798			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3799			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3800		}
3801
3802		seq_printf(m, "PSR2 selective fetch: %s\n",
3803			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3804	}
3805
3806unlock:
3807	mutex_unlock(&psr->lock);
3808	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3809
3810	return 0;
3811}
3812
3813static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3814{
3815	struct intel_display *display = m->private;
3816	struct intel_dp *intel_dp = NULL;
3817	struct intel_encoder *encoder;
3818
3819	if (!HAS_PSR(display))
3820		return -ENODEV;
3821
3822	/* Find the first EDP which supports PSR */
3823	for_each_intel_encoder_with_psr(display->drm, encoder) {
3824		intel_dp = enc_to_intel_dp(encoder);
3825		break;
3826	}
3827
3828	if (!intel_dp)
3829		return -ENODEV;
3830
3831	return intel_psr_status(m, intel_dp);
3832}
3833DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3834
3835static int
3836i915_edp_psr_debug_set(void *data, u64 val)
3837{
3838	struct intel_display *display = data;
3839	struct drm_i915_private *dev_priv = to_i915(display->drm);
3840	struct intel_encoder *encoder;
3841	intel_wakeref_t wakeref;
3842	int ret = -ENODEV;
3843
3844	if (!HAS_PSR(display))
3845		return ret;
3846
3847	for_each_intel_encoder_with_psr(display->drm, encoder) {
3848		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3849
3850		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
3851
3852		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3853
3854		// TODO: split to each transcoder's PSR debug state
3855		ret = intel_psr_debug_set(intel_dp, val);
3856
3857		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3858	}
3859
3860	return ret;
3861}
3862
3863static int
3864i915_edp_psr_debug_get(void *data, u64 *val)
3865{
3866	struct intel_display *display = data;
3867	struct intel_encoder *encoder;
3868
3869	if (!HAS_PSR(display))
3870		return -ENODEV;
3871
3872	for_each_intel_encoder_with_psr(display->drm, encoder) {
3873		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3874
3875		// TODO: split to each transcoder's PSR debug state
3876		*val = READ_ONCE(intel_dp->psr.debug);
3877		return 0;
3878	}
3879
3880	return -ENODEV;
3881}
3882
3883DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3884			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3885			"%llu\n");
3886
3887void intel_psr_debugfs_register(struct intel_display *display)
3888{
3889	struct drm_minor *minor = display->drm->primary;
3890
3891	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3892			    display, &i915_edp_psr_debug_fops);
3893
3894	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3895			    display, &i915_edp_psr_status_fops);
3896}
3897
3898static const char *psr_mode_str(struct intel_dp *intel_dp)
3899{
3900	if (intel_dp->psr.panel_replay_enabled)
3901		return "PANEL-REPLAY";
3902	else if (intel_dp->psr.enabled)
3903		return "PSR";
3904
3905	return "unknown";
3906}
3907
3908static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3909{
3910	struct intel_connector *connector = m->private;
3911	struct intel_dp *intel_dp = intel_attached_dp(connector);
3912	static const char * const sink_status[] = {
3913		"inactive",
3914		"transition to active, capture and display",
3915		"active, display from RFB",
3916		"active, capture and display on sink device timings",
3917		"transition to inactive, capture and display, timing re-sync",
3918		"reserved",
3919		"reserved",
3920		"sink internal error",
3921	};
3922	const char *str;
3923	int ret;
3924	u8 status, error_status;
3925
3926	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3927		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3928		return -ENODEV;
3929	}
3930
3931	if (connector->base.status != connector_status_connected)
3932		return -ENODEV;
3933
3934	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3935	if (ret)
3936		return ret;
3937
3938	status &= DP_PSR_SINK_STATE_MASK;
3939	if (status < ARRAY_SIZE(sink_status))
3940		str = sink_status[status];
3941	else
3942		str = "unknown";
3943
3944	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3945
3946	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3947
3948	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3949			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3950			    DP_PSR_LINK_CRC_ERROR))
3951		seq_puts(m, ":\n");
3952	else
3953		seq_puts(m, "\n");
3954	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3955		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3956	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3957		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3958	if (error_status & DP_PSR_LINK_CRC_ERROR)
3959		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3960
3961	return ret;
3962}
3963DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3964
3965static int i915_psr_status_show(struct seq_file *m, void *data)
3966{
3967	struct intel_connector *connector = m->private;
3968	struct intel_dp *intel_dp = intel_attached_dp(connector);
3969
3970	return intel_psr_status(m, intel_dp);
3971}
3972DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3973
3974void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3975{
3976	struct intel_display *display = to_intel_display(connector);
3977	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3978	struct dentry *root = connector->base.debugfs_entry;
3979
3980	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3981	    connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3982		return;
3983
3984	debugfs_create_file("i915_psr_sink_status", 0444, root,
3985			    connector, &i915_psr_sink_status_fops);
3986
3987	if (HAS_PSR(display) || HAS_DP20(i915))
3988		debugfs_create_file("i915_psr_status", 0444, root,
3989				    connector, &i915_psr_status_fops);
3990}