v4.6
  1/*
  2 * Copyright © 2014 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 21 * DEALINGS IN THE SOFTWARE.
 22 */
 23
 24/**
 25 * DOC: Panel Self Refresh (PSR/SRD)
 26 *
 27 * Since Haswell, the display controller supports Panel Self-Refresh on
 28 * display panels which have a remote frame buffer (RFB) implemented
 29 * according to the PSR spec in eDP 1.3. The PSR feature allows the display
 30 * to go to lower standby states when the system is idle but the display is
 31 * on, as it eliminates display refresh requests to DDR memory completely as
 32 * long as the frame buffer for that display is unchanged.
 33 *
 34 * Panel Self Refresh must be supported by both Hardware (source) and
 35 * Panel (sink).
 36 *
 37 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 38 * to power down the link and memory controller. For DSI panels the same idea
 39 * is called "manual mode".
 40 *
 41 * The implementation uses the hardware-based PSR support which automatically
 42 * enters/exits self-refresh mode. The hardware takes care of sending the
 43 * required DP aux message and could even retrain the link (that part isn't
 44 * enabled yet though). The hardware also keeps track of any frontbuffer
 45 * changes to know when to exit self-refresh mode again. Unfortunately that
 46 * part doesn't work too well, which is why the i915 PSR support uses the
 47 * software frontbuffer tracking to make sure it doesn't miss a screen
 48 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 49 * get called by the frontbuffer tracking code. Note that because of locking
 50 * issues the self-refresh re-enable code is done from a work queue, which
 51 * must be correctly synchronized/cancelled when shutting down the pipe.
 52 */
 53
 54#include <drm/drmP.h>
 55
 56#include "intel_drv.h"
 57#include "i915_drv.h"
 58
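/*
 * Editorial sketch, not part of the original file: the frontbuffer
 * tracking code mentioned in the DOC comment above is the caller of
 * intel_psr_invalidate()/intel_psr_flush(). Roughly, with hypothetical
 * helper names modelled on intel_frontbuffer.c:
 */
static inline void psr_render_start_sketch(struct drm_device *dev,
					   unsigned frontbuffer_bits)
{
	/* rendering is dirtying the frontbuffer: PSR must exit now */
	intel_psr_invalidate(dev, frontbuffer_bits);
}

static inline void psr_render_done_sketch(struct drm_device *dev,
					  unsigned frontbuffer_bits)
{
	/* writes have reached memory: PSR may be re-enabled later */
	intel_psr_flush(dev, frontbuffer_bits, ORIGIN_CPU);
}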
 59static bool is_edp_psr(struct intel_dp *intel_dp)
 60{
 61	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
 62}
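/*
 * Editorial sketch, not part of the original file: psr_dpcd[] is cached
 * during eDP detection by reading the sink's PSR capability registers
 * over AUX, along these lines (cf. intel_psr_init_dpcd() in the v4.17
 * listing below; the helper name here is hypothetical):
 */
static void psr_read_sink_caps_sketch(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));
}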
 63
 64static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
 65{
 66	struct drm_i915_private *dev_priv = dev->dev_private;
 67	uint32_t val;
 68
 69	val = I915_READ(VLV_PSRSTAT(pipe)) &
 70	      VLV_EDP_PSR_CURR_STATE_MASK;
 71	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
 72	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
 73}
 74
 75static void intel_psr_write_vsc(struct intel_dp *intel_dp,
 76				const struct edp_vsc_psr *vsc_psr)
 77{
 78	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 79	struct drm_device *dev = dig_port->base.base.dev;
 80	struct drm_i915_private *dev_priv = dev->dev_private;
 81	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
 82	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 83	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
 84	uint32_t *data = (uint32_t *) vsc_psr;
 85	unsigned int i;
 86
 87	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
 88	   the video DIP before programming the video DIP data buffer
 89	   registers for the DIP being updated. */
 90	I915_WRITE(ctl_reg, 0);
 91	POSTING_READ(ctl_reg);
 92
 93	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
 94		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
 95						   i >> 2), *data);
 96		data++;
 97	}
 98	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
 99		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
100						   i >> 2), 0);
101
102	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
103	POSTING_READ(ctl_reg);
104}
105
106static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
107{
108	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109	struct drm_device *dev = intel_dig_port->base.base.dev;
110	struct drm_i915_private *dev_priv = dev->dev_private;
111	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
112	enum pipe pipe = to_intel_crtc(crtc)->pipe;
113	uint32_t val;
114
115	/* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
116	val  = I915_READ(VLV_VSCSDP(pipe));
117	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
118	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
119	I915_WRITE(VLV_VSCSDP(pipe), val);
120}
121
122static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
123{
124	struct edp_vsc_psr psr_vsc;
125
126	/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
127	memset(&psr_vsc, 0, sizeof(psr_vsc));
128	psr_vsc.sdp_header.HB0 = 0;
129	psr_vsc.sdp_header.HB1 = 0x7;
130	psr_vsc.sdp_header.HB2 = 0x3;
131	psr_vsc.sdp_header.HB3 = 0xb;
132	intel_psr_write_vsc(intel_dp, &psr_vsc);
133}
134
135static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
136{
137	struct edp_vsc_psr psr_vsc;
138
139	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
140	memset(&psr_vsc, 0, sizeof(psr_vsc));
141	psr_vsc.sdp_header.HB0 = 0;
142	psr_vsc.sdp_header.HB1 = 0x7;
143	psr_vsc.sdp_header.HB2 = 0x2;
144	psr_vsc.sdp_header.HB3 = 0x8;
145	intel_psr_write_vsc(intel_dp, &psr_vsc);
146}
147
148static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
149{
150	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
151			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
152}
153
154static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
155				       enum port port)
156{
157	if (INTEL_INFO(dev_priv)->gen >= 9)
158		return DP_AUX_CH_CTL(port);
159	else
160		return EDP_PSR_AUX_CTL;
161}
162
163static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
164					enum port port, int index)
165{
166	if (INTEL_INFO(dev_priv)->gen >= 9)
167		return DP_AUX_CH_DATA(port, index);
168	else
169		return EDP_PSR_AUX_DATA(index);
170}
171
172static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
173{
174	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
175	struct drm_device *dev = dig_port->base.base.dev;
176	struct drm_i915_private *dev_priv = dev->dev_private;
177	uint32_t aux_clock_divider;
178	i915_reg_t aux_ctl_reg;
179	int precharge = 0x3;
180	static const uint8_t aux_msg[] = {
181		[0] = DP_AUX_NATIVE_WRITE << 4,
182		[1] = DP_SET_POWER >> 8,
183		[2] = DP_SET_POWER & 0xff,
184		[3] = 1 - 1,
185		[4] = DP_SET_POWER_D0,
186	};
187	enum port port = dig_port->port;
188	int i;
189
190	BUILD_BUG_ON(sizeof(aux_msg) > 20);
191
192	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
193
194	/* Enable AUX frame sync at sink */
195	if (dev_priv->psr.aux_frame_sync)
196		drm_dp_dpcd_writeb(&intel_dp->aux,
197				DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
198				DP_AUX_FRAME_SYNC_ENABLE);
199
200	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
201
202	/* Setup AUX registers */
203	for (i = 0; i < sizeof(aux_msg); i += 4)
204		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
205			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
206
207	if (INTEL_INFO(dev)->gen >= 9) {
208		uint32_t val;
209
210		val = I915_READ(aux_ctl_reg);
211		val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
212		val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
213		val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
214		val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
215		/* Use hardcoded data values for PSR, frame sync and GTC */
216		val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
217		val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
218		val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
219		I915_WRITE(aux_ctl_reg, val);
220	} else {
221		I915_WRITE(aux_ctl_reg,
222		   DP_AUX_CH_CTL_TIME_OUT_400us |
223		   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
224		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
225		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
226	}
227
228	if (dev_priv->psr.link_standby)
229		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
230				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
231	else
232		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
233				   DP_PSR_ENABLE);
234}
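/*
 * Editorial sketch, not part of the original file: aux_msg[] above
 * encodes a native AUX write of DP_SET_POWER_D0 to DPCD address
 * DP_SET_POWER (0x600), which the hardware replays on PSR exit.
 * intel_dp_pack_aux() packs up to four message bytes MSB-first into
 * each 32-bit AUX data register, roughly:
 */
static u32 pack_aux_sketch(const u8 *src, int bytes)
{
	u32 v = 0;
	int i;

	if (bytes > 4)
		bytes = 4;
	for (i = 0; i < bytes; i++)
		v |= (u32)src[i] << ((3 - i) * 8);
	return v;
}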
235
236static void vlv_psr_enable_source(struct intel_dp *intel_dp)
237{
238	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
239	struct drm_device *dev = dig_port->base.base.dev;
240	struct drm_i915_private *dev_priv = dev->dev_private;
241	struct drm_crtc *crtc = dig_port->base.base.crtc;
242	enum pipe pipe = to_intel_crtc(crtc)->pipe;
243
244	/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
245	I915_WRITE(VLV_PSRCTL(pipe),
246		   VLV_EDP_PSR_MODE_SW_TIMER |
247		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
248		   VLV_EDP_PSR_ENABLE);
249}
250
251static void vlv_psr_activate(struct intel_dp *intel_dp)
252{
253	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
254	struct drm_device *dev = dig_port->base.base.dev;
255	struct drm_i915_private *dev_priv = dev->dev_private;
256	struct drm_crtc *crtc = dig_port->base.base.crtc;
257	enum pipe pipe = to_intel_crtc(crtc)->pipe;
258
259	/* Let's do the transition from PSR_state 1 to PSR_state 2
260	 * that is PSR transition to active - static frame transmission.
261	 * Then Hardware is responsible for the transition to PSR_state 3
262	 * that is PSR active - no Remote Frame Buffer (RFB) update.
263	 */
264	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
265		   VLV_EDP_PSR_ACTIVE_ENTRY);
266}
267
268static void hsw_psr_enable_source(struct intel_dp *intel_dp)
269{
270	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
271	struct drm_device *dev = dig_port->base.base.dev;
272	struct drm_i915_private *dev_priv = dev->dev_private;
273
274	uint32_t max_sleep_time = 0x1f;
275	/*
276	 * Let's respect the VBT in case it asks for a higher idle_frame value.
277	 * Let's use 6 as the minimum to cover all known cases including
278	 * the off-by-one issue that HW has in some cases. Also there are
279	 * cases where the sink should be able to train
280	 * with the 5 or 6 idle patterns.
281	 */
282	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
283	uint32_t val = 0x0;
284
285	if (IS_HASWELL(dev))
286		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
287
288	if (dev_priv->psr.link_standby)
289		val |= EDP_PSR_LINK_STANDBY;
290
291	I915_WRITE(EDP_PSR_CTL, val |
292		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
293		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
294		   EDP_PSR_ENABLE);
295
296	if (dev_priv->psr.psr2_support)
297		I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
298				EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
299}
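/*
 * Editorial worked example, not part of the original file: on HSW with
 * link_standby set and the default idle_frames of 6, the EDP_PSR_CTL
 * write above composes to
 *
 *	EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | EDP_PSR_LINK_STANDBY |
 *	(0x1f << EDP_PSR_MAX_SLEEP_TIME_SHIFT) |
 *	(6 << EDP_PSR_IDLE_FRAME_SHIFT) | EDP_PSR_ENABLE
 *
 * i.e. the hardware enters self-refresh on its own once it has seen
 * six consecutive idle frames.
 */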
300
301static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
302{
303	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
304	struct drm_device *dev = dig_port->base.base.dev;
305	struct drm_i915_private *dev_priv = dev->dev_private;
306	struct drm_crtc *crtc = dig_port->base.base.crtc;
307	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
308
309	lockdep_assert_held(&dev_priv->psr.lock);
310	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
311	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
312
313	dev_priv->psr.source_ok = false;
314
315	/*
316	 * HSW spec explicitly says PSR is tied to port A.
317	 * BDW+ platforms with DDI implementation of PSR have different
318	 * PSR registers per transcoder and we only implement transcoder EDP
319	 * ones. Since by Display design transcoder EDP is tied to port A,
320	 * we can safely bail out based on port A.
321	 */
322	if (HAS_DDI(dev) && dig_port->port != PORT_A) {
323		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
324		return false;
325	}
326
327	if (!i915.enable_psr) {
328		DRM_DEBUG_KMS("PSR disable by flag\n");
329		return false;
330	}
331
332	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
333	    !dev_priv->psr.link_standby) {
334		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
335		return false;
336	}
337
338	if (IS_HASWELL(dev) &&
339	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
340		      S3D_ENABLE) {
341		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
342		return false;
343	}
344
345	if (IS_HASWELL(dev) &&
346	    intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
347		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
348		return false;
349	}
350
351	dev_priv->psr.source_ok = true;
352	return true;
353}
354
355static void intel_psr_activate(struct intel_dp *intel_dp)
356{
357	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
358	struct drm_device *dev = intel_dig_port->base.base.dev;
359	struct drm_i915_private *dev_priv = dev->dev_private;
360
361	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
362	WARN_ON(dev_priv->psr.active);
363	lockdep_assert_held(&dev_priv->psr.lock);
364
365	/* Enable/Re-enable PSR on the host */
366	if (HAS_DDI(dev))
367		/* On HSW+, after we enable PSR on the source it will activate
368		 * as soon as it matches the configured idle_frame count. So
369		 * we just actually enable it here at activation time.
370		 */
371		hsw_psr_enable_source(intel_dp);
372	else
373		vlv_psr_activate(intel_dp);
374
375	dev_priv->psr.active = true;
376}
377
378/**
379 * intel_psr_enable - Enable PSR
380 * @intel_dp: Intel DP
381 *
382 * This function can only be called after the pipe is fully trained and enabled.
383 */
384void intel_psr_enable(struct intel_dp *intel_dp)
385{
386	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
387	struct drm_device *dev = intel_dig_port->base.base.dev;
388	struct drm_i915_private *dev_priv = dev->dev_private;
389	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
390
391	if (!HAS_PSR(dev)) {
392		DRM_DEBUG_KMS("PSR not supported on this platform\n");
393		return;
394	}
395
396	if (!is_edp_psr(intel_dp)) {
397		DRM_DEBUG_KMS("PSR not supported by this panel\n");
398		return;
399	}
400
401	mutex_lock(&dev_priv->psr.lock);
402	if (dev_priv->psr.enabled) {
403		DRM_DEBUG_KMS("PSR already in use\n");
404		goto unlock;
405	}
406
407	if (!intel_psr_match_conditions(intel_dp))
408		goto unlock;
409
410	dev_priv->psr.busy_frontbuffer_bits = 0;
411
412	if (HAS_DDI(dev)) {
413		hsw_psr_setup_vsc(intel_dp);
414
415		if (dev_priv->psr.psr2_support) {
416			/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
417			if (crtc->config->pipe_src_w > 3200 ||
418				crtc->config->pipe_src_h > 2000)
419				dev_priv->psr.psr2_support = false;
420			else
421				skl_psr_setup_su_vsc(intel_dp);
422		}
423
424		/*
425		 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
426		 * Also mask LPSP to avoid dependency on other drivers that
427		 * might block runtime_pm besides preventing other hw tracking
428		 * issues, now that we can rely on frontbuffer tracking.
429		 */
430		I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
431			   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
432
433		/* Enable PSR on the panel */
434		hsw_psr_enable_sink(intel_dp);
435
436		if (INTEL_INFO(dev)->gen >= 9)
437			intel_psr_activate(intel_dp);
438	} else {
439		vlv_psr_setup_vsc(intel_dp);
440
441		/* Enable PSR on the panel */
442		vlv_psr_enable_sink(intel_dp);
443
444		/* On HSW+ enable_source also means going to the PSR
445		 * entry/active state as soon as idle_frame is achieved, which
446		 * would be too soon here. However on VLV enable_source just
447		 * enables PSR but leaves it in the inactive state. So we may
448		 * do this prior to the active transition, i.e. here.
449		 */
450		vlv_psr_enable_source(intel_dp);
451	}
452
453	/*
454	 * FIXME: Activation should happen immediately since this function
455	 * is just called after pipe is fully trained and enabled.
456	 * However on every platform we face issues when first activation
457	 * follows a modeset so quickly.
458	 *     - On VLV/CHV we get a blank screen on first activation
459	 *     - On HSW/BDW we get a recoverable frozen screen until next
460	 *       exit-activate sequence.
461	 */
462	if (INTEL_INFO(dev)->gen < 9)
463		schedule_delayed_work(&dev_priv->psr.work,
464				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
465
466	dev_priv->psr.enabled = intel_dp;
467unlock:
468	mutex_unlock(&dev_priv->psr.lock);
469}
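/*
 * Editorial sketch, not part of the original file: intel_psr_enable()
 * is meant to run at the end of the encoder enable sequence, once the
 * link is trained and the pipe is up; a hypothetical call-site shape:
 */
static void edp_post_enable_sketch(struct intel_dp *intel_dp)
{
	/* backlight first, PSR last: it needs a fully enabled pipe */
	intel_edp_backlight_on(intel_dp);
	intel_psr_enable(intel_dp);
}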
470
471static void vlv_psr_disable(struct intel_dp *intel_dp)
472{
473	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
474	struct drm_device *dev = intel_dig_port->base.base.dev;
475	struct drm_i915_private *dev_priv = dev->dev_private;
476	struct intel_crtc *intel_crtc =
477		to_intel_crtc(intel_dig_port->base.base.crtc);
478	uint32_t val;
479
480	if (dev_priv->psr.active) {
481		/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
482		if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
483			      VLV_EDP_PSR_IN_TRANS) == 0, 1))
484			WARN(1, "PSR transition took longer than expected\n");
485
486		val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
487		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
488		val &= ~VLV_EDP_PSR_ENABLE;
489		val &= ~VLV_EDP_PSR_MODE_MASK;
490		I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
491
492		dev_priv->psr.active = false;
493	} else {
494		WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
495	}
496}
497
498static void hsw_psr_disable(struct intel_dp *intel_dp)
499{
500	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
501	struct drm_device *dev = intel_dig_port->base.base.dev;
502	struct drm_i915_private *dev_priv = dev->dev_private;
503
504	if (dev_priv->psr.active) {
505		I915_WRITE(EDP_PSR_CTL,
506			   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
507
508		/* Wait till PSR is idle */
509		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
510			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
511			DRM_ERROR("Timed out waiting for PSR Idle State\n");
512
513		dev_priv->psr.active = false;
514	} else {
515		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
516	}
517}
518
519/**
520 * intel_psr_disable - Disable PSR
521 * @intel_dp: Intel DP
522 *
523 * This function needs to be called before disabling pipe.
524 */
525void intel_psr_disable(struct intel_dp *intel_dp)
526{
527	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
528	struct drm_device *dev = intel_dig_port->base.base.dev;
529	struct drm_i915_private *dev_priv = dev->dev_private;
530
531	mutex_lock(&dev_priv->psr.lock);
532	if (!dev_priv->psr.enabled) {
533		mutex_unlock(&dev_priv->psr.lock);
534		return;
535	}
536
537	/* Disable PSR on Source */
538	if (HAS_DDI(dev))
539		hsw_psr_disable(intel_dp);
540	else
541		vlv_psr_disable(intel_dp);
542
543	/* Disable PSR on Sink */
544	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
545
546	dev_priv->psr.enabled = NULL;
547	mutex_unlock(&dev_priv->psr.lock);
548
549	cancel_delayed_work_sync(&dev_priv->psr.work);
550}
551
552static void intel_psr_work(struct work_struct *work)
553{
554	struct drm_i915_private *dev_priv =
555		container_of(work, typeof(*dev_priv), psr.work.work);
556	struct intel_dp *intel_dp = dev_priv->psr.enabled;
557	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
558	enum pipe pipe = to_intel_crtc(crtc)->pipe;
559
560	/* We have to make sure PSR is ready for re-enable,
561	 * otherwise it stays disabled until the next full enable/disable cycle.
562	 * PSR might take some time to get fully disabled
563	 * and be ready for re-enable.
564	 */
565	if (HAS_DDI(dev_priv->dev)) {
566		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
567			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
568			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
569			return;
570		}
571	} else {
572		if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
573			      VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
574			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
575			return;
576		}
577	}
578	mutex_lock(&dev_priv->psr.lock);
579	intel_dp = dev_priv->psr.enabled;
580
581	if (!intel_dp)
582		goto unlock;
583
584	/*
585	 * The delayed work can race with an invalidate hence we need to
586	 * recheck. Since psr_flush first clears this and then reschedules we
587	 * won't ever miss a flush when bailing out here.
588	 */
589	if (dev_priv->psr.busy_frontbuffer_bits)
590		goto unlock;
591
592	intel_psr_activate(intel_dp);
593unlock:
594	mutex_unlock(&dev_priv->psr.lock);
595}
596
597static void intel_psr_exit(struct drm_device *dev)
598{
599	struct drm_i915_private *dev_priv = dev->dev_private;
600	struct intel_dp *intel_dp = dev_priv->psr.enabled;
601	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
602	enum pipe pipe = to_intel_crtc(crtc)->pipe;
603	u32 val;
604
605	if (!dev_priv->psr.active)
606		return;
607
608	if (HAS_DDI(dev)) {
609		val = I915_READ(EDP_PSR_CTL);
610
611		WARN_ON(!(val & EDP_PSR_ENABLE));
612
613		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
614	} else {
615		val = I915_READ(VLV_PSRCTL(pipe));
616
617		/* Here we do the transition from PSR_state 3 to PSR_state 5
618		 * directly, since PSR_state 4 (active with single frame
619		 * update) can be skipped. After PSR_state 5 (PSR exit) the
620		 * hardware is responsible for transitioning back to
621		 * PSR_state 1 (PSR inactive), the same state as after
622		 * vlv_psr_enable_source.
623		 */
624		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
625		I915_WRITE(VLV_PSRCTL(pipe), val);
626
627		/* Send AUX wake up - Spec says after transitioning to PSR
628		 * active we have to send AUX wake up by writing 01h in DPCD
629		 * 600h of sink device.
630		 * XXX: This might slow down the transition, but without this
631		 * HW doesn't complete the transition to PSR_state 1 and we
632		 * never get the screen updated.
633		 */
634		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
635				   DP_SET_POWER_D0);
636	}
637
638	dev_priv->psr.active = false;
639}
640
641/**
642 * intel_psr_single_frame_update - Single Frame Update
643 * @dev: DRM device
644 * @frontbuffer_bits: frontbuffer plane tracking bits
645 *
646 * Some platforms support a single frame update feature that is used to
647 * send and update only one frame on the Remote Frame Buffer.
648 * So far it is only implemented for Valleyview and Cherryview because
649 * the hardware requires this to be done before a page flip.
650 */
651void intel_psr_single_frame_update(struct drm_device *dev,
652				   unsigned frontbuffer_bits)
653{
654	struct drm_i915_private *dev_priv = dev->dev_private;
655	struct drm_crtc *crtc;
656	enum pipe pipe;
657	u32 val;
658
659	/*
660	 * Single frame update is already supported on BDW+ but it requires
661	 * many W/A and it isn't really needed.
662	 */
663	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
664		return;
665
666	mutex_lock(&dev_priv->psr.lock);
667	if (!dev_priv->psr.enabled) {
668		mutex_unlock(&dev_priv->psr.lock);
669		return;
670	}
671
672	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
673	pipe = to_intel_crtc(crtc)->pipe;
674
675	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
676		val = I915_READ(VLV_PSRCTL(pipe));
677
678		/*
679		 * We need to set this bit before writing registers for a flip.
680		 * This bit will self-clear when it gets to the PSR active state.
681		 */
682		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
683	}
684	mutex_unlock(&dev_priv->psr.lock);
685}
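/*
 * Editorial sketch, not part of the original file: the flip path is
 * expected to arm single frame update before the flip registers are
 * written, roughly (hypothetical helper name):
 */
static void flip_prepare_sketch(struct drm_device *dev,
				unsigned frontbuffer_bits)
{
	/* must be armed before the flip so the sink takes one frame */
	intel_psr_single_frame_update(dev, frontbuffer_bits);
	/* ... then program the flip registers ... */
}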
686
687/**
688 * intel_psr_invalidate - Invalidate PSR
689 * @dev: DRM device
690 * @frontbuffer_bits: frontbuffer plane tracking bits
691 *
692 * Since the hardware frontbuffer tracking has gaps we need to integrate
693 * with the software frontbuffer tracking. This function gets called every
694 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
695 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
696 *
697 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
698 */
699void intel_psr_invalidate(struct drm_device *dev,
700			  unsigned frontbuffer_bits)
701{
702	struct drm_i915_private *dev_priv = dev->dev_private;
703	struct drm_crtc *crtc;
704	enum pipe pipe;
705
706	mutex_lock(&dev_priv->psr.lock);
707	if (!dev_priv->psr.enabled) {
708		mutex_unlock(&dev_priv->psr.lock);
709		return;
710	}
711
712	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
713	pipe = to_intel_crtc(crtc)->pipe;
714
715	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
716	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
717
718	if (frontbuffer_bits)
719		intel_psr_exit(dev);
720
721	mutex_unlock(&dev_priv->psr.lock);
722}
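/*
 * Editorial note, not part of the original file: frontbuffer bits are
 * grouped per pipe, so the masking above keeps only the planes scanned
 * out by the PSR pipe. Assuming the usual layout of
 * INTEL_FRONTBUFFER_BITS_PER_PIPE consecutive bits per pipe, the mask
 * is shaped like
 *
 *	((1 << INTEL_FRONTBUFFER_BITS_PER_PIPE) - 1) <<
 *		(INTEL_FRONTBUFFER_BITS_PER_PIPE * pipe)
 *
 * so a dirty plane on another pipe never forces a PSR exit here.
 */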
723
724/**
725 * intel_psr_flush - Flush PSR
726 * @dev: DRM device
727 * @frontbuffer_bits: frontbuffer plane tracking bits
728 * @origin: which operation caused the flush
729 *
730 * Since the hardware frontbuffer tracking has gaps we need to integrate
731 * with the software frontbuffer tracking. This function gets called every
732 * time frontbuffer rendering has completed and flushed out to memory. PSR
733 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
734 *
735 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
736 */
737void intel_psr_flush(struct drm_device *dev,
738		     unsigned frontbuffer_bits, enum fb_op_origin origin)
739{
740	struct drm_i915_private *dev_priv = dev->dev_private;
741	struct drm_crtc *crtc;
742	enum pipe pipe;
743
744	mutex_lock(&dev_priv->psr.lock);
745	if (!dev_priv->psr.enabled) {
746		mutex_unlock(&dev_priv->psr.lock);
747		return;
748	}
749
750	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
751	pipe = to_intel_crtc(crtc)->pipe;
752
753	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
754	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
755
756	/* By definition flush = invalidate + flush */
757	if (frontbuffer_bits)
758		intel_psr_exit(dev);
759
760	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
761		if (!work_busy(&dev_priv->psr.work.work))
762			schedule_delayed_work(&dev_priv->psr.work,
763					      msecs_to_jiffies(100));
764	mutex_unlock(&dev_priv->psr.lock);
765}
766
767/**
768 * intel_psr_init - Init basic PSR work and mutex.
769 * @dev: DRM device
770 *
771 * This function is called only once at driver load to initialize basic
772 * PSR stuff.
773 */
774void intel_psr_init(struct drm_device *dev)
775{
776	struct drm_i915_private *dev_priv = dev->dev_private;
777
778	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
779		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
780
781	/* Per platform default */
782	if (i915.enable_psr == -1) {
783		if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
784		    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
785			i915.enable_psr = 1;
786		else
787			i915.enable_psr = 0;
788	}
789
790	/* Set link_standby vs. link_off defaults */
791	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
792		/* HSW and BDW require workarounds that we don't implement. */
793		dev_priv->psr.link_standby = false;
794	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
795		/* On VLV and CHV only standby mode is supported. */
796		dev_priv->psr.link_standby = true;
797	else
798		/* For new platforms let's respect VBT back again */
799		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
800
801	/* Override link_standby vs. link_off defaults */
802	if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
803		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
804		dev_priv->psr.link_standby = true;
805	}
806	if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
807		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
808		dev_priv->psr.link_standby = false;
809	}
810
811	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
812	mutex_init(&dev_priv->psr.lock);
813}
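/*
 * Editorial summary, not part of the original file: the i915.enable_psr
 * module parameter values handled above are
 *
 *	-1	per-platform default (the ladder above)
 *	 0	PSR disabled
 *	 1	PSR enabled, link mode from platform/VBT defaults
 *	 2	PSR enabled, main link forced to standby
 *	 3	PSR enabled, main link forced off
 *
 * e.g. booting with i915.enable_psr=2 keeps the main link in standby
 * even on platforms whose default is link off.
 */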
v4.17
   1/*
   2 * Copyright © 2014 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 */
  23
  24/**
  25 * DOC: Panel Self Refresh (PSR/SRD)
  26 *
  27 * Since Haswell, the display controller supports Panel Self-Refresh on
  28 * display panels which have a remote frame buffer (RFB) implemented
  29 * according to the PSR spec in eDP 1.3. The PSR feature allows the display
  30 * to go to lower standby states when the system is idle but the display is
  31 * on, as it eliminates display refresh requests to DDR memory completely as
  32 * long as the frame buffer for that display is unchanged.
  33 *
  34 * Panel Self Refresh must be supported by both Hardware (source) and
  35 * Panel (sink).
  36 *
  37 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
  38 * to power down the link and memory controller. For DSI panels the same idea
  39 * is called "manual mode".
  40 *
  41 * The implementation uses the hardware-based PSR support which automatically
  42 * enters/exits self-refresh mode. The hardware takes care of sending the
  43 * required DP aux message and could even retrain the link (that part isn't
  44 * enabled yet though). The hardware also keeps track of any frontbuffer
  45 * changes to know when to exit self-refresh mode again. Unfortunately that
  46 * part doesn't work too well, which is why the i915 PSR support uses the
  47 * software frontbuffer tracking to make sure it doesn't miss a screen
  48 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
  49 * get called by the frontbuffer tracking code. Note that because of locking
  50 * issues the self-refresh re-enable code is done from a work queue, which
  51 * must be correctly synchronized/cancelled when shutting down the pipe.
  52 */
  53
  54#include <drm/drmP.h>
  55
  56#include "intel_drv.h"
  57#include "i915_drv.h"
  58
  59static inline enum intel_display_power_domain
  60psr_aux_domain(struct intel_dp *intel_dp)
  61{
  62	/* CNL HW requires corresponding AUX IOs to be powered up for PSR.
  63	 * However, for non-A AUX ports the corresponding non-EDP transcoders
  64	 * would have already enabled power well 2 and DC_OFF. This means we can
  65	 * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
  66	 * specific AUX_IO reference without powering up any extra wells.
  67	 * Note that PSR is enabled only on Port A even though this function
  68	 * returns the correct domain for other ports too.
  69	 */
  70	return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
  71					      intel_dp->aux_power_domain;
  72}
  73
  74static void psr_aux_io_power_get(struct intel_dp *intel_dp)
  75{
  76	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  77	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
  78
  79	if (INTEL_GEN(dev_priv) < 10)
  80		return;
  81
  82	intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
  83}
  84
  85static void psr_aux_io_power_put(struct intel_dp *intel_dp)
  86{
  87	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  88	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
  89
  90	if (INTEL_GEN(dev_priv) < 10)
  91		return;
  92
  93	intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
  94}
  95
  96static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
  97{
  98	uint8_t psr_caps = 0;
  99
 100	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
 101		return false;
 102	return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
 103}
 104
 105static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
 106{
 107	uint8_t dprx = 0;
 108
 109	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
 110			      &dprx) != 1)
 111		return false;
 112	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
 113}
 114
 115static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
 116{
 117	uint8_t alpm_caps = 0;
 118
 119	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
 120			      &alpm_caps) != 1)
 121		return false;
 122	return alpm_caps & DP_ALPM_CAP;
 123}
 124
 125void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 126{
 127	struct drm_i915_private *dev_priv =
 128		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
 129
 130	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
 131			 sizeof(intel_dp->psr_dpcd));
 132
 133	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
 134		dev_priv->psr.sink_support = true;
 135		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
 136	}
 137
 138	if (INTEL_GEN(dev_priv) >= 9 &&
 139	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
 140		uint8_t frame_sync_cap;
 141
 142		dev_priv->psr.sink_support = true;
 143		if (drm_dp_dpcd_readb(&intel_dp->aux,
 144				      DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
 145				      &frame_sync_cap) != 1)
 146			frame_sync_cap = 0;
 147		dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
 148		/* PSR2 needs frame sync as well */
 149		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
 150		DRM_DEBUG_KMS("PSR2 %s on sink",
 151			      dev_priv->psr.psr2_support ? "supported" : "not supported");
 152
 153		if (dev_priv->psr.psr2_support) {
 154			dev_priv->psr.y_cord_support =
 155				intel_dp_get_y_cord_status(intel_dp);
 156			dev_priv->psr.colorimetry_support =
 157				intel_dp_get_colorimetry_status(intel_dp);
 158			dev_priv->psr.alpm =
 159				intel_dp_get_alpm_status(intel_dp);
 160		}
 161	}
 162}
 163
 164static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
 165{
 166	struct drm_i915_private *dev_priv = to_i915(dev);
 167	uint32_t val;
 168
 169	val = I915_READ(VLV_PSRSTAT(pipe)) &
 170	      VLV_EDP_PSR_CURR_STATE_MASK;
 171	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
 172	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
 173}
 174
 175static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
 176			      const struct intel_crtc_state *crtc_state)
 177{
 178	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 179	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 180	uint32_t val;
 181
 182	/* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
 183	val  = I915_READ(VLV_VSCSDP(crtc->pipe));
 184	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
 185	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
 186	I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
 187}
 188
 189static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
 190			      const struct intel_crtc_state *crtc_state)
 191{
 192	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 193	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 194	struct edp_vsc_psr psr_vsc;
 195
 196	if (dev_priv->psr.psr2_support) {
 197		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
 198		memset(&psr_vsc, 0, sizeof(psr_vsc));
 199		psr_vsc.sdp_header.HB0 = 0;
 200		psr_vsc.sdp_header.HB1 = 0x7;
 201		if (dev_priv->psr.colorimetry_support &&
 202		    dev_priv->psr.y_cord_support) {
 203			psr_vsc.sdp_header.HB2 = 0x5;
 204			psr_vsc.sdp_header.HB3 = 0x13;
 205		} else if (dev_priv->psr.y_cord_support) {
 206			psr_vsc.sdp_header.HB2 = 0x4;
 207			psr_vsc.sdp_header.HB3 = 0xe;
 208		} else {
 209			psr_vsc.sdp_header.HB2 = 0x3;
 210			psr_vsc.sdp_header.HB3 = 0xc;
 211		}
 212	} else {
 213		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
 214		memset(&psr_vsc, 0, sizeof(psr_vsc));
 215		psr_vsc.sdp_header.HB0 = 0;
 216		psr_vsc.sdp_header.HB1 = 0x7;
 217		psr_vsc.sdp_header.HB2 = 0x2;
 218		psr_vsc.sdp_header.HB3 = 0x8;
 219	}
 220
 221	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
 222					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
 223}
 224
 225static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
 226{
 227	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
 228			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 229}
 230
 231static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
 232				       enum port port)
 233{
 234	if (INTEL_GEN(dev_priv) >= 9)
 235		return DP_AUX_CH_CTL(port);
 236	else
 237		return EDP_PSR_AUX_CTL;
 238}
 239
 240static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
 241					enum port port, int index)
 242{
 243	if (INTEL_GEN(dev_priv) >= 9)
 244		return DP_AUX_CH_DATA(port, index);
 245	else
 246		return EDP_PSR_AUX_DATA(index);
 247}
 248
 249static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 250{
 251	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 252	struct drm_device *dev = dig_port->base.base.dev;
 253	struct drm_i915_private *dev_priv = to_i915(dev);
 254	uint32_t aux_clock_divider;
 255	i915_reg_t aux_ctl_reg;
 256	static const uint8_t aux_msg[] = {
 257		[0] = DP_AUX_NATIVE_WRITE << 4,
 258		[1] = DP_SET_POWER >> 8,
 259		[2] = DP_SET_POWER & 0xff,
 260		[3] = 1 - 1,
 261		[4] = DP_SET_POWER_D0,
 262	};
 263	enum port port = dig_port->base.port;
 264	u32 aux_ctl;
 265	int i;
 266
 267	BUILD_BUG_ON(sizeof(aux_msg) > 20);
 268
 269	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 270
 271	/* Enable AUX frame sync at sink */
 272	if (dev_priv->psr.aux_frame_sync)
 273		drm_dp_dpcd_writeb(&intel_dp->aux,
 274				DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
 275				DP_AUX_FRAME_SYNC_ENABLE);
 276	/* Enable ALPM at sink for psr2 */
 277	if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
 278		drm_dp_dpcd_writeb(&intel_dp->aux,
 279				DP_RECEIVER_ALPM_CONFIG,
 280				DP_ALPM_ENABLE);
 281	if (dev_priv->psr.link_standby)
 282		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
 283				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 284	else
 285		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
 286				   DP_PSR_ENABLE);
 287
 288	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
 289
 290	/* Setup AUX registers */
 291	for (i = 0; i < sizeof(aux_msg); i += 4)
 292		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
 293			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 294
 295	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
 296					     aux_clock_divider);
 297	I915_WRITE(aux_ctl_reg, aux_ctl);
 298}
 299
 300static void vlv_psr_enable_source(struct intel_dp *intel_dp,
 301				  const struct intel_crtc_state *crtc_state)
 302{
 303	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 304	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 305	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 306
 307	/* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
 308	I915_WRITE(VLV_PSRCTL(crtc->pipe),
 309		   VLV_EDP_PSR_MODE_SW_TIMER |
 310		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
 311		   VLV_EDP_PSR_ENABLE);
 312}
 313
 314static void vlv_psr_activate(struct intel_dp *intel_dp)
 315{
 316	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 317	struct drm_device *dev = dig_port->base.base.dev;
 318	struct drm_i915_private *dev_priv = to_i915(dev);
 319	struct drm_crtc *crtc = dig_port->base.base.crtc;
 320	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 321
 322	/*
 323	 * Let's do the transition from PSR_state 1 (inactive) to
 324	 * PSR_state 2 (transition to active - static frame transmission).
 325	 * Then Hardware is responsible for the transition to
 326	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
 327	 */
 328	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
 329		   VLV_EDP_PSR_ACTIVE_ENTRY);
 330}
 331
 332static void hsw_activate_psr1(struct intel_dp *intel_dp)
 333{
 334	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 335	struct drm_device *dev = dig_port->base.base.dev;
 336	struct drm_i915_private *dev_priv = to_i915(dev);
 337
 338	uint32_t max_sleep_time = 0x1f;
 339	/*
 340	 * Let's respect the VBT in case it asks for a higher idle_frame value.
 341	 * Let's use 6 as the minimum to cover all known cases including
 342	 * the off-by-one issue that HW has in some cases. Also there are
 343	 * cases where the sink should be able to train
 344	 * with the 5 or 6 idle patterns.
 345	 */
 346	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
 347	uint32_t val = EDP_PSR_ENABLE;
 348
 349	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
 350	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
 351
 352	if (IS_HASWELL(dev_priv))
 353		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 354
 355	if (dev_priv->psr.link_standby)
 356		val |= EDP_PSR_LINK_STANDBY;
 357
 358	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
 359		val |= EDP_PSR_TP1_TIME_2500us;
 360	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
 361		val |= EDP_PSR_TP1_TIME_500us;
 362	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
 363		val |= EDP_PSR_TP1_TIME_100us;
 364	else
 365		val |= EDP_PSR_TP1_TIME_0us;
 366
 367	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
 368		val |= EDP_PSR_TP2_TP3_TIME_2500us;
 369	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
 370		val |= EDP_PSR_TP2_TP3_TIME_500us;
 371	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
 372		val |= EDP_PSR_TP2_TP3_TIME_100us;
 373	else
 374		val |= EDP_PSR_TP2_TP3_TIME_0us;
 375
 376	if (intel_dp_source_supports_hbr2(intel_dp) &&
 377	    drm_dp_tps3_supported(intel_dp->dpcd))
 378		val |= EDP_PSR_TP1_TP3_SEL;
 379	else
 380		val |= EDP_PSR_TP1_TP2_SEL;
 381
 382	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
 383	I915_WRITE(EDP_PSR_CTL, val);
 384}
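/*
 * Editorial summary, not part of the original file: the two if-ladders
 * above round the VBT-provided TP1 and TP2/TP3 wakeup times up to the
 * nearest encoding the hardware supports:
 *
 *	VBT value:	> 5	> 1	> 0	otherwise
 *	encoding:	2500us	500us	100us	0us
 *
 * so the VBT can only lengthen the training-pattern wakeup time, never
 * request a value the register cannot express.
 */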
 385
 386static void hsw_activate_psr2(struct intel_dp *intel_dp)
 387{
 388	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 389	struct drm_device *dev = dig_port->base.base.dev;
 390	struct drm_i915_private *dev_priv = to_i915(dev);
 391	/*
 392	 * Let's respect the VBT in case it asks for a higher idle_frame value.
 393	 * Let's use 6 as the minimum to cover all known cases including
 394	 * the off-by-one issue that HW has in some cases. Also there are
 395	 * cases where the sink should be able to train
 396	 * with the 5 or 6 idle patterns.
 397	 */
 398	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
 399	uint32_t val;
 400	uint8_t sink_latency;
 401
 402	val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
 403
 404	/* FIXME: selective update is probably totally broken because it doesn't
 405	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
 406	 * good enough. */
 407	val |= EDP_PSR2_ENABLE |
 408		EDP_SU_TRACK_ENABLE;
 409
 410	if (drm_dp_dpcd_readb(&intel_dp->aux,
 411				DP_SYNCHRONIZATION_LATENCY_IN_SINK,
 412				&sink_latency) == 1) {
 413		sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
 414	} else {
 415		sink_latency = 0;
 416	}
 417	val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);
 418
 419	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
 420		val |= EDP_PSR2_TP2_TIME_2500;
 421	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
 422		val |= EDP_PSR2_TP2_TIME_500;
 423	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
 424		val |= EDP_PSR2_TP2_TIME_100;
 425	else
 426		val |= EDP_PSR2_TP2_TIME_50;
 427
 428	I915_WRITE(EDP_PSR2_CTL, val);
 429}
 430
 431static void hsw_psr_activate(struct intel_dp *intel_dp)
 432{
 433	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 434	struct drm_device *dev = dig_port->base.base.dev;
 435	struct drm_i915_private *dev_priv = to_i915(dev);
 436
 437	/* On HSW+, after we enable PSR on the source it will activate
 438	 * as soon as it matches the configured idle_frame count. So
 439	 * we just actually enable it here at activation time.
 440	 */
 441
 442	/* PSR1 and PSR2 are mutually exclusive. */
 443	if (dev_priv->psr.psr2_support)
 444		hsw_activate_psr2(intel_dp);
 445	else
 446		hsw_activate_psr1(intel_dp);
 447}
 448
 449static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 450				    struct intel_crtc_state *crtc_state)
 451{
 452	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 453	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 454	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
 455	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
 456	int psr_max_h = 0, psr_max_v = 0;
 457
 458	/*
 459	 * FIXME psr2_support is messed up. It's both computed
 460	 * dynamically during PSR enable, and extracted from sink
 461	 * caps during eDP detection.
 462	 */
 463	if (!dev_priv->psr.psr2_support)
 464		return false;
 465
 466	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
 467		psr_max_h = 4096;
 468		psr_max_v = 2304;
 469	} else if (IS_GEN9(dev_priv)) {
 470		psr_max_h = 3640;
 471		psr_max_v = 2304;
 472	}
 473
 474	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
 475		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
 476			      crtc_hdisplay, crtc_vdisplay,
 477			      psr_max_h, psr_max_v);
 478		return false;
 479	}
 480
 481	/*
 482	 * FIXME: enable PSR2 only for y-coordinate PSR2 panels.
 483	 * After GTC implementation, remove this restriction.
 484	 */
 485	if (!dev_priv->psr.y_cord_support) {
 486		DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
 487		return false;
 488	}
 489
 490	return true;
 491}
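/*
 * Editorial worked example, not part of the original file: with the
 * limits above, a 3840x2160 eDP panel is eligible for PSR2 on GLK and
 * GEN10+ (within 4096x2304) but rejected on other GEN9 parts
 * (3840 > 3640), in which case intel_psr_compute_config() below leaves
 * has_psr2 false and the driver falls back to PSR1.
 */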
 492
 493void intel_psr_compute_config(struct intel_dp *intel_dp,
 494			      struct intel_crtc_state *crtc_state)
 495{
 496	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 497	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 498	const struct drm_display_mode *adjusted_mode =
 499		&crtc_state->base.adjusted_mode;
 500	int psr_setup_time;
 501
 502	if (!CAN_PSR(dev_priv))
 503		return;
 504
 505	if (!i915_modparams.enable_psr) {
 506		DRM_DEBUG_KMS("PSR disable by flag\n");
 507		return;
 508	}
 509
 510	/*
 511	 * HSW spec explicitly says PSR is tied to port A.
 512	 * BDW+ platforms with DDI implementation of PSR have different
 513	 * PSR registers per transcoder and we only implement transcoder EDP
 514	 * ones. Since by Display design transcoder EDP is tied to port A,
 515	 * we can safely bail out based on port A.
 516	 */
 517	if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
 518		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
 519		return;
 520	}
 521
 522	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 523	    !dev_priv->psr.link_standby) {
 524		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
 525		return;
 526	}
 527
 528	if (IS_HASWELL(dev_priv) &&
 529	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
 530		      S3D_ENABLE) {
 531		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
 532		return;
 533	}
 534
 535	if (IS_HASWELL(dev_priv) &&
 536	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 537		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
 538		return;
 539	}
 540
 541	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
 542	if (psr_setup_time < 0) {
 543		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
 544			      intel_dp->psr_dpcd[1]);
 545		return;
 546	}
 547
 548	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
 549	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
 550		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
 551			      psr_setup_time);
 552		return;
 553	}
 554
 555	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
 556		DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
 557		return;
 558	}
 559
 560	crtc_state->has_psr = true;
 561	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
 562	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
 563}
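/*
 * Editorial worked example, not part of the original file: the
 * setup-time check above converts the sink's PSR setup time into
 * scanlines and compares it against the vertical blanking length. For
 * a 1920x1080 mode with crtc_htotal = 2200, crtc_vtotal = 1125 and
 * crtc_clock = 148500 (kHz), a worst-case setup time of 330 us costs
 *
 *	DIV_ROUND_UP(330 * 148500, 1000 * 2200) = 23 scanlines
 *
 * while vblank provides 1125 - 1080 - 1 = 44 lines, so PSR is allowed.
 */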
 564
 565static void intel_psr_activate(struct intel_dp *intel_dp)
 566{
 567	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 568	struct drm_device *dev = intel_dig_port->base.base.dev;
 569	struct drm_i915_private *dev_priv = to_i915(dev);
 570
 571	if (dev_priv->psr.psr2_support)
 572		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
 573	else
 574		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 575	WARN_ON(dev_priv->psr.active);
 576	lockdep_assert_held(&dev_priv->psr.lock);
 577
 578	dev_priv->psr.activate(intel_dp);
 579	dev_priv->psr.active = true;
 580}
 581
 582static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 583				  const struct intel_crtc_state *crtc_state)
 584{
 585	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 586	struct drm_device *dev = dig_port->base.base.dev;
 587	struct drm_i915_private *dev_priv = to_i915(dev);
 588	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 589	u32 chicken;
 590
 591	psr_aux_io_power_get(intel_dp);
 592
 593	if (dev_priv->psr.psr2_support) {
 594		chicken = PSR2_VSC_ENABLE_PROG_HEADER;
 595		if (dev_priv->psr.y_cord_support)
 596			chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
 597		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
 598
 599		I915_WRITE(EDP_PSR_DEBUG,
 600			   EDP_PSR_DEBUG_MASK_MEMUP |
 601			   EDP_PSR_DEBUG_MASK_HPD |
 602			   EDP_PSR_DEBUG_MASK_LPSP |
 603			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
 604			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
 605	} else {
 606		/*
 607		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
 608		 * and HPD. Also mask LPSP to avoid dependency on other
 609		 * drivers that might block runtime_pm besides
 610		 * preventing other hw tracking issues, now that we can rely
 611		 * on frontbuffer tracking.
 612		 */
 613		I915_WRITE(EDP_PSR_DEBUG,
 614			   EDP_PSR_DEBUG_MASK_MEMUP |
 615			   EDP_PSR_DEBUG_MASK_HPD |
 616			   EDP_PSR_DEBUG_MASK_LPSP);
 617	}
 618}
 619
 620/**
 621 * intel_psr_enable - Enable PSR
 622 * @intel_dp: Intel DP
 623 * @crtc_state: new CRTC state
 624 *
 625 * This function can only be called after the pipe is fully trained and enabled.
 626 */
 627void intel_psr_enable(struct intel_dp *intel_dp,
 628		      const struct intel_crtc_state *crtc_state)
 629{
 630	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 631	struct drm_device *dev = intel_dig_port->base.base.dev;
 632	struct drm_i915_private *dev_priv = to_i915(dev);
 633
 634	if (!crtc_state->has_psr)
 635		return;
 636
 637	if (WARN_ON(!CAN_PSR(dev_priv)))
 638		return;
 639
 640	WARN_ON(dev_priv->drrs.dp);
 641	mutex_lock(&dev_priv->psr.lock);
 642	if (dev_priv->psr.enabled) {
 643		DRM_DEBUG_KMS("PSR already in use\n");
 644		goto unlock;
 645	}
 646
 647	dev_priv->psr.psr2_support = crtc_state->has_psr2;
 648	dev_priv->psr.busy_frontbuffer_bits = 0;
 649
 650	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
 651	dev_priv->psr.enable_sink(intel_dp);
 652	dev_priv->psr.enable_source(intel_dp, crtc_state);
 653	dev_priv->psr.enabled = intel_dp;
 654
 655	if (INTEL_GEN(dev_priv) >= 9) {
 656		intel_psr_activate(intel_dp);
 657	} else {
 658		/*
 659		 * FIXME: Activation should happen immediately since this
 660		 * function is just called after pipe is fully trained and
 661		 * enabled.
 662		 * However on some platforms we face issues when first
 663		 * activation follows a modeset so quickly.
 664	 *     - On VLV/CHV we get a blank screen on first activation
 665		 *     - On HSW/BDW we get a recoverable frozen screen until
 666		 *       next exit-activate sequence.
 667		 */
 668		schedule_delayed_work(&dev_priv->psr.work,
 669				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
 670	}
 671
 672unlock:
 673	mutex_unlock(&dev_priv->psr.lock);
 674}
 675
 676static void vlv_psr_disable(struct intel_dp *intel_dp,
 677			    const struct intel_crtc_state *old_crtc_state)
 678{
 679	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 680	struct drm_device *dev = intel_dig_port->base.base.dev;
 681	struct drm_i915_private *dev_priv = to_i915(dev);
 682	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
 683	uint32_t val;
 684
 685	if (dev_priv->psr.active) {
 686		/* Put VLV PSR back to PSR_state 0 (disabled). */
 687		if (intel_wait_for_register(dev_priv,
 688					    VLV_PSRSTAT(crtc->pipe),
 689					    VLV_EDP_PSR_IN_TRANS,
 690					    0,
 691					    1))
 692			WARN(1, "PSR transition took longer than expected\n");
 693
 694		val = I915_READ(VLV_PSRCTL(crtc->pipe));
 695		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
 696		val &= ~VLV_EDP_PSR_ENABLE;
 697		val &= ~VLV_EDP_PSR_MODE_MASK;
 698		I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
 699
 700		dev_priv->psr.active = false;
 701	} else {
 702		WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
 703	}
 704}
 705
 706static void hsw_psr_disable(struct intel_dp *intel_dp,
 707			    const struct intel_crtc_state *old_crtc_state)
 708{
 709	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 710	struct drm_device *dev = intel_dig_port->base.base.dev;
 711	struct drm_i915_private *dev_priv = to_i915(dev);
 712
 713	if (dev_priv->psr.active) {
 714		i915_reg_t psr_status;
 715		u32 psr_status_mask;
 716
 717		if (dev_priv->psr.aux_frame_sync)
 718			drm_dp_dpcd_writeb(&intel_dp->aux,
 719					DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
 720					0);
 721
 722		if (dev_priv->psr.psr2_support) {
 723			psr_status = EDP_PSR2_STATUS;
 724			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
 725
 726			I915_WRITE(EDP_PSR2_CTL,
 727				   I915_READ(EDP_PSR2_CTL) &
 728				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
 729
 730		} else {
 731			psr_status = EDP_PSR_STATUS;
 732			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
 733
 734			I915_WRITE(EDP_PSR_CTL,
 735				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 736		}
 737
 738		/* Wait till PSR is idle */
 739		if (intel_wait_for_register(dev_priv,
 740					    psr_status, psr_status_mask, 0,
 741					    2000))
 742			DRM_ERROR("Timed out waiting for PSR Idle State\n");
 743
 744		dev_priv->psr.active = false;
 745	} else {
 746		if (dev_priv->psr.psr2_support)
 747			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
 748		else
 749			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 750	}
 751
 752	psr_aux_io_power_put(intel_dp);
 753}
 754
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	dev_priv->psr.disable_source(intel_dp, old_crtc_state);

	/* Disable PSR on the sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it
	 * stays disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled and be ready
	 * for re-enable.
	 */
	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR2_STATUS,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
				return;
			}
		} else {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR_STATUS,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
				return;
			}
		}
	} else {
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
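	/*
	 * The idle wait above ran without psr.lock held, so re-read
	 * psr.enabled under the lock before acting on it; PSR may have
	 * been disabled in the meantime.
	 */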
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate, hence we need to
	 * recheck. Since intel_psr_flush() first clears the busy bits and
	 * then reschedules the work, we won't ever miss a flush when
	 * bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					0);
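		/*
		 * On DDI platforms clearing the enable bit is enough to
		 * trigger the exit; the WARNs below catch software state
		 * getting out of sync with the hardware.
		 */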
		if (dev_priv->psr.psr2_support) {
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * Here we transition directly from PSR_state 3 (active, no
		 * Remote Frame Buffer (RFB) update) to PSR_state 5 (exit).
		 * PSR_state 4 (active with single frame update) can be
		 * skipped. In PSR_state 5 (exit) the hardware is responsible
		 * for transitioning back to PSR_state 1 (inactive).
		 * This leaves us in the same state as after
		 * vlv_psr_enable_source().
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/*
		 * Send AUX wake up - the spec says that after transitioning
		 * to PSR active we have to send an AUX wake up by writing
		 * 01h to DPCD 600h of the sink device.
		 * XXX: This might slow down the transition, but without it
		 * the HW doesn't complete the transition to PSR_state 1 and
		 * we never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only a single frame in the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview, where the
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
				   unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	if (!CAN_PSR(dev_priv))
		return;

	/*
	 * Single frame update is already supported on BDW+, but it requires
	 * many workarounds and isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

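	/*
	 * Only poke the hardware when the flip actually involves the PSR
	 * pipe; other pipes are not covered by PSR.
	 */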
	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing the registers for a
		 * flip. The bit self-clears once the hardware reaches the
		 * PSR active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps, we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

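	/* Any dirty frontbuffer on the PSR pipe forces an immediate exit. */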
	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps, we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and been flushed out to memory.
 * PSR can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
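 *
 * A minimal sketch of the expected call pattern (the real call sites live
 * in the frontbuffer tracking code; "bits" is a hypothetical mask):
 *
 *	intel_psr_invalidate(dev_priv, bits);
 *	... CPU rendering that dirties the frontbuffer ...
 *	intel_psr_flush(dev_priv, bits, ORIGIN_CPU);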
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition a flush also implies an invalidate, so force an exit. */
	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

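	/*
	 * If everything is idle again, re-arm PSR lazily from the work
	 * item; the 100 ms delay debounces back-to-back flushes.
	 */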
	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR state.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	/* Per platform default: all disabled. */
	if (i915_modparams.enable_psr == -1)
		i915_modparams.enable_psr = 0;

	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		/* On VLV and CHV only standby mode is supported. */
		dev_priv->psr.link_standby = true;
	else
		/* For newer platforms, respect the VBT again. */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

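	/*
	 * enable_psr == 2 forces the main link to stay up (standby) and
	 * enable_psr == 3 forces it off, overriding the per-platform
	 * defaults chosen above.
	 */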
	/* Override link_standby vs. link_off defaults */
	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);

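	/*
	 * Hook up the platform implementation: VLV/CHV use the per-pipe
	 * PSR registers, everything else takes the HSW+ DDI path.
	 */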
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->psr.enable_source = vlv_psr_enable_source;
		dev_priv->psr.disable_source = vlv_psr_disable;
		dev_priv->psr.enable_sink = vlv_psr_enable_sink;
		dev_priv->psr.activate = vlv_psr_activate;
		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
	} else {
		dev_priv->psr.enable_source = hsw_psr_enable_source;
		dev_priv->psr.disable_source = hsw_psr_disable;
		dev_priv->psr.enable_sink = hsw_psr_enable_sink;
		dev_priv->psr.activate = hsw_psr_activate;
		dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
	}
}