1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include <drm/drm_atomic_helper.h>
25
26#include "display/intel_dp.h"
27
28#include "i915_drv.h"
29#include "intel_atomic.h"
30#include "intel_de.h"
31#include "intel_display_types.h"
32#include "intel_dp_aux.h"
33#include "intel_hdmi.h"
34#include "intel_psr.h"
35#include "intel_sprite.h"
36#include "skl_universal_plane.h"
37
38/**
39 * DOC: Panel Self Refresh (PSR/SRD)
40 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go into lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as
 * the frame buffer for that display is unchanged.
47 *
48 * Panel Self Refresh must be supported by both Hardware (source) and
49 * Panel (sink).
50 *
51 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
52 * to power down the link and memory controller. For DSI panels the same idea
53 * is called "manual mode".
54 *
55 * The implementation uses the hardware-based PSR support which automatically
56 * enters/exits self-refresh mode. The hardware takes care of sending the
57 * required DP aux message and could even retrain the link (that part isn't
58 * enabled yet though). The hardware also keeps track of any frontbuffer
59 * changes to know when to exit self-refresh mode again. Unfortunately that
60 * part doesn't work too well, hence why the i915 PSR support uses the
61 * software frontbuffer tracking to make sure it doesn't miss a screen
62 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
63 * get called by the frontbuffer tracking code. Note that because of locking
64 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
66 *
67 * DC3CO (DC3 clock off)
68 *
 * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
 * the clock off automatically during the PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance in a 30fps video playback scenario).
74 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
 * it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and that work function executes,
 * DC3CO is disabled and PSR2 is configured to enter deep sleep again, and the
 * cycle restarts on the next flip.
 * Front buffer modifications deliberately do not trigger DC3CO activation, as
 * that would add a lot of complexity and most modern systems will only use
 * page flips.
83 */
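
/*
 * A minimal sketch of the frontbuffer tracking integration described above,
 * assuming the usual callers in intel_frontbuffer.c (illustrative only):
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU rendering to the frontbuffer ...
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
 *
 * The flush side then schedules psr.work, which re-activates PSR once the
 * hardware reports idle again.
 */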
84
85static bool psr_global_enabled(struct intel_dp *intel_dp)
86{
87 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
88
89 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
90 case I915_PSR_DEBUG_DEFAULT:
91 return i915->params.enable_psr;
92 case I915_PSR_DEBUG_DISABLE:
93 return false;
94 default:
95 return true;
96 }
97}
98
99static bool psr2_global_enabled(struct intel_dp *intel_dp)
100{
101 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
102 case I915_PSR_DEBUG_DISABLE:
103 case I915_PSR_DEBUG_FORCE_PSR1:
104 return false;
105 default:
106 return true;
107 }
108}
109
110static void psr_irq_control(struct intel_dp *intel_dp)
111{
112 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
113 enum transcoder trans_shift;
114 i915_reg_t imr_reg;
115 u32 mask, val;
116
	/*
	 * gen12+ has registers relative to the transcoder and one register per
	 * transcoder using the same bit definitions: handle it as TRANSCODER_EDP
	 * to force a 0 shift in the bit definitions.
	 */
122 if (DISPLAY_VER(dev_priv) >= 12) {
123 trans_shift = 0;
124 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
125 } else {
126 trans_shift = intel_dp->psr.transcoder;
127 imr_reg = EDP_PSR_IMR;
128 }
129
130 mask = EDP_PSR_ERROR(trans_shift);
131 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
132 mask |= EDP_PSR_POST_EXIT(trans_shift) |
133 EDP_PSR_PRE_ENTRY(trans_shift);
134
135 /* Warning: it is masking/setting reserved bits too */
136 val = intel_de_read(dev_priv, imr_reg);
137 val &= ~EDP_PSR_TRANS_MASK(trans_shift);
138 val |= ~mask;
139 intel_de_write(dev_priv, imr_reg, val);
140}
141
142static void psr_event_print(struct drm_i915_private *i915,
143 u32 val, bool psr2_enabled)
144{
145 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
146 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
147 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
148 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
149 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
150 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
151 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
152 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
153 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
154 if (val & PSR_EVENT_GRAPHICS_RESET)
155 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
156 if (val & PSR_EVENT_PCH_INTERRUPT)
157 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
158 if (val & PSR_EVENT_MEMORY_UP)
159 drm_dbg_kms(&i915->drm, "\tMemory up\n");
160 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
161 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
162 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
163 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
164 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
165 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
166 if (val & PSR_EVENT_REGISTER_UPDATE)
167 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
168 if (val & PSR_EVENT_HDCP_ENABLE)
169 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
170 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
171 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
172 if (val & PSR_EVENT_VBI_ENABLE)
173 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
174 if (val & PSR_EVENT_LPSP_MODE_EXIT)
175 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
176 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
177 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
178}
179
180void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
181{
182 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
183 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
184 ktime_t time_ns = ktime_get();
185 enum transcoder trans_shift;
186 i915_reg_t imr_reg;
187
188 if (DISPLAY_VER(dev_priv) >= 12) {
189 trans_shift = 0;
190 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
191 } else {
192 trans_shift = intel_dp->psr.transcoder;
193 imr_reg = EDP_PSR_IMR;
194 }
195
196 if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
197 intel_dp->psr.last_entry_attempt = time_ns;
198 drm_dbg_kms(&dev_priv->drm,
199 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
200 transcoder_name(cpu_transcoder));
201 }
202
203 if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
204 intel_dp->psr.last_exit = time_ns;
205 drm_dbg_kms(&dev_priv->drm,
206 "[transcoder %s] PSR exit completed\n",
207 transcoder_name(cpu_transcoder));
208
209 if (DISPLAY_VER(dev_priv) >= 9) {
210 u32 val = intel_de_read(dev_priv,
211 PSR_EVENT(cpu_transcoder));
212 bool psr2_enabled = intel_dp->psr.psr2_enabled;
213
214 intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
215 val);
216 psr_event_print(dev_priv, val, psr2_enabled);
217 }
218 }
219
220 if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
221 u32 val;
222
223 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
224 transcoder_name(cpu_transcoder));
225
226 intel_dp->psr.irq_aux_error = true;
227
		/*
		 * If this interrupt is not masked it will keep firing so
		 * fast that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again,
		 * so we don't care about unmasking the interrupt or
		 * clearing irq_aux_error.
		 */
236 val = intel_de_read(dev_priv, imr_reg);
237 val |= EDP_PSR_ERROR(trans_shift);
238 intel_de_write(dev_priv, imr_reg, val);
239
240 schedule_work(&intel_dp->psr.work);
241 }
242}
243
244static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
245{
246 u8 alpm_caps = 0;
247
248 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
249 &alpm_caps) != 1)
250 return false;
251 return alpm_caps & DP_ALPM_CAP;
252}
253
254static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
255{
256 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
257 u8 val = 8; /* assume the worst if we can't read the value */
258
259 if (drm_dp_dpcd_readb(&intel_dp->aux,
260 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
261 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
262 else
263 drm_dbg_kms(&i915->drm,
264 "Unable to get sink synchronization latency, assuming 8 frames\n");
265 return val;
266}
267
268static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
269{
270 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
271 u16 val;
272 ssize_t r;
273
	/*
	 * Return the default X granularity if granularity is not required or
	 * if the DPCD read fails.
	 */
278 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
279 return 4;
280
281 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
282 if (r != 2)
283 drm_dbg_kms(&i915->drm,
284 "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
285
286 /*
287 * Spec says that if the value read is 0 the default granularity should
288 * be used instead.
289 */
290 if (r != 2 || val == 0)
291 val = 4;
292
293 return val;
294}
295
296void intel_psr_init_dpcd(struct intel_dp *intel_dp)
297{
298 struct drm_i915_private *dev_priv =
299 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
300
301 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
302 sizeof(intel_dp->psr_dpcd));
303
304 if (!intel_dp->psr_dpcd[0])
305 return;
306 drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
307 intel_dp->psr_dpcd[0]);
308
309 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
310 drm_dbg_kms(&dev_priv->drm,
311 "PSR support not currently available for this panel\n");
312 return;
313 }
314
315 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
316 drm_dbg_kms(&dev_priv->drm,
317 "Panel lacks power state control, PSR cannot be enabled\n");
318 return;
319 }
320
321 intel_dp->psr.sink_support = true;
322 intel_dp->psr.sink_sync_latency =
323 intel_dp_get_sink_sync_latency(intel_dp);
324
325 if (DISPLAY_VER(dev_priv) >= 9 &&
326 (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
327 bool y_req = intel_dp->psr_dpcd[1] &
328 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
329 bool alpm = intel_dp_get_alpm_status(intel_dp);
330
		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support panels with PSR version 02h, or PSR version 03h
		 * without the Y-coordinate requirement, we would need to
		 * enable GTC first.
		 */
342 intel_dp->psr.sink_psr2_support = y_req && alpm;
343 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
344 intel_dp->psr.sink_psr2_support ? "" : "not ");
345
346 if (intel_dp->psr.sink_psr2_support) {
347 intel_dp->psr.colorimetry_support =
348 intel_dp_get_colorimetry_status(intel_dp);
349 intel_dp->psr.su_x_granularity =
350 intel_dp_get_su_x_granulartiy(intel_dp);
351 }
352 }
353}
354
355static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
356{
357 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
358 u32 aux_clock_divider, aux_ctl;
359 int i;
360 static const u8 aux_msg[] = {
361 [0] = DP_AUX_NATIVE_WRITE << 4,
362 [1] = DP_SET_POWER >> 8,
363 [2] = DP_SET_POWER & 0xff,
364 [3] = 1 - 1,
365 [4] = DP_SET_POWER_D0,
366 };
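	/*
	 * The message above encodes a native AUX write of a single byte
	 * (the length field is 1 - 1 = 0, meaning one byte of payload)
	 * setting DP_SET_POWER to D0, which the PSR hardware can send on
	 * its own when exiting self-refresh.
	 */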
367 u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
368 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
369 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
370 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
371
372 BUILD_BUG_ON(sizeof(aux_msg) > 20);
373 for (i = 0; i < sizeof(aux_msg); i += 4)
374 intel_de_write(dev_priv,
375 EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
376 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
377
378 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
379
380 /* Start with bits set for DDI_AUX_CTL register */
381 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
382 aux_clock_divider);
383
384 /* Select only valid bits for SRD_AUX_CTL */
385 aux_ctl &= psr_aux_mask;
386 intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
387 aux_ctl);
388}
389
390static void intel_psr_enable_sink(struct intel_dp *intel_dp)
391{
392 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
393 u8 dpcd_val = DP_PSR_ENABLE;
394
395 /* Enable ALPM at sink for psr2 */
396 if (intel_dp->psr.psr2_enabled) {
397 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
398 DP_ALPM_ENABLE |
399 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
400
401 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
402 } else {
403 if (intel_dp->psr.link_standby)
404 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
405
406 if (DISPLAY_VER(dev_priv) >= 8)
407 dpcd_val |= DP_PSR_CRC_VERIFICATION;
408 }
409
410 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
411
412 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
413}
414
415static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
416{
417 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
418 u32 val = 0;
419
420 if (DISPLAY_VER(dev_priv) >= 11)
421 val |= EDP_PSR_TP4_TIME_0US;
422
423 if (dev_priv->params.psr_safest_params) {
424 val |= EDP_PSR_TP1_TIME_2500us;
425 val |= EDP_PSR_TP2_TP3_TIME_2500us;
426 goto check_tp3_sel;
427 }
428
429 if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
430 val |= EDP_PSR_TP1_TIME_0us;
431 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
432 val |= EDP_PSR_TP1_TIME_100us;
433 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
434 val |= EDP_PSR_TP1_TIME_500us;
435 else
436 val |= EDP_PSR_TP1_TIME_2500us;
437
438 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
439 val |= EDP_PSR_TP2_TP3_TIME_0us;
440 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
441 val |= EDP_PSR_TP2_TP3_TIME_100us;
442 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
443 val |= EDP_PSR_TP2_TP3_TIME_500us;
444 else
445 val |= EDP_PSR_TP2_TP3_TIME_2500us;
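	/*
	 * For instance (hypothetical VBT values): a tp1_wakeup_time_us of 80
	 * selects EDP_PSR_TP1_TIME_100us above, and a tp2_tp3_wakeup_time_us
	 * of 600 falls through to EDP_PSR_TP2_TP3_TIME_2500us, since each
	 * value rounds up to the next supported bucket.
	 */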
446
447check_tp3_sel:
448 if (intel_dp_source_supports_hbr2(intel_dp) &&
449 drm_dp_tps3_supported(intel_dp->dpcd))
450 val |= EDP_PSR_TP1_TP3_SEL;
451 else
452 val |= EDP_PSR_TP1_TP2_SEL;
453
454 return val;
455}
456
457static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
458{
459 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
460 int idle_frames;
461
462 /* Let's use 6 as the minimum to cover all known cases including the
463 * off-by-one issue that HW has in some cases.
464 */
465 idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
466 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
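	/*
	 * Example (hypothetical values): with a VBT idle_frames of 2 and a
	 * sink sync latency of 8 frames, this resolves to
	 * max(max(6, 2), 8 + 1) = 9 idle frames.
	 */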
467
468 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
469 idle_frames = 0xf;
470
471 return idle_frames;
472}
473
474static void hsw_activate_psr1(struct intel_dp *intel_dp)
475{
476 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
477 u32 max_sleep_time = 0x1f;
478 u32 val = EDP_PSR_ENABLE;
479
480 val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
481
482 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
483 if (IS_HASWELL(dev_priv))
484 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
485
486 if (intel_dp->psr.link_standby)
487 val |= EDP_PSR_LINK_STANDBY;
488
489 val |= intel_psr1_get_tp_time(intel_dp);
490
491 if (DISPLAY_VER(dev_priv) >= 8)
492 val |= EDP_PSR_CRC_ENABLE;
493
494 val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
495 EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
496 intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
497}
498
499static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
500{
501 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
502 u32 val = 0;
503
504 if (dev_priv->params.psr_safest_params)
505 return EDP_PSR2_TP2_TIME_2500us;
506
507 if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
508 dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
509 val |= EDP_PSR2_TP2_TIME_50us;
510 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
511 val |= EDP_PSR2_TP2_TIME_100us;
512 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
513 val |= EDP_PSR2_TP2_TIME_500us;
514 else
515 val |= EDP_PSR2_TP2_TIME_2500us;
516
517 return val;
518}
519
520static void hsw_activate_psr2(struct intel_dp *intel_dp)
521{
522 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
523 u32 val;
524
525 val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
526
527 val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
528 if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
529 val |= EDP_Y_COORDINATE_ENABLE;
530
531 val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
532 val |= intel_psr2_get_tp_time(intel_dp);
533
534 if (DISPLAY_VER(dev_priv) >= 12) {
		/*
		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the
		 * default values from BSpec. For optimal power consumption,
		 * modes below 4k resolution should decrease IO_BUFFER_WAKE
		 * and FAST_WAKE, and modes above 4k resolution should
		 * increase them.
		 */
542 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
543 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
544 val |= TGL_EDP_PSR2_FAST_WAKE(7);
545 } else if (DISPLAY_VER(dev_priv) >= 9) {
546 val |= EDP_PSR2_IO_BUFFER_WAKE(7);
547 val |= EDP_PSR2_FAST_WAKE(7);
548 }
549
550 if (intel_dp->psr.psr2_sel_fetch_enabled) {
551 /* WA 1408330847 */
552 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
553 IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
554 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
555 DIS_RAM_BYPASS_PSR2_MAN_TRACK,
556 DIS_RAM_BYPASS_PSR2_MAN_TRACK);
557
558 intel_de_write(dev_priv,
559 PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
560 PSR2_MAN_TRK_CTL_ENABLE);
561 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
562 intel_de_write(dev_priv,
563 PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
564 }
565
	/*
	 * PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
570 intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
571
572 intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
573}
574
575static bool
576transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
577{
578 if (DISPLAY_VER(dev_priv) < 9)
579 return false;
580 else if (DISPLAY_VER(dev_priv) >= 12)
581 return trans == TRANSCODER_A;
582 else
583 return trans == TRANSCODER_EDP;
584}
585
586static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
587{
588 if (!cstate || !cstate->hw.active)
589 return 0;
590
591 return DIV_ROUND_UP(1000 * 1000,
592 drm_mode_vrefresh(&cstate->hw.adjusted_mode));
593}
594
595static void psr2_program_idle_frames(struct intel_dp *intel_dp,
596 u32 idle_frames)
597{
598 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
599 u32 val;
600
601 idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
602 val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
603 val &= ~EDP_PSR2_IDLE_FRAME_MASK;
604 val |= idle_frames;
605 intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
606}
607
608static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
609{
610 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
611
612 psr2_program_idle_frames(intel_dp, 0);
613 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
614}
615
616static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
617{
618 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
619
620 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
621 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
622}
623
624static void tgl_dc3co_disable_work(struct work_struct *work)
625{
626 struct intel_dp *intel_dp =
627 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
628
629 mutex_lock(&intel_dp->psr.lock);
630 /* If delayed work is pending, it is not idle */
631 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
632 goto unlock;
633
634 tgl_psr2_disable_dc3co(intel_dp);
635unlock:
636 mutex_unlock(&intel_dp->psr.lock);
637}
638
639static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
640{
641 if (!intel_dp->psr.dc3co_exitline)
642 return;
643
644 cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
646 tgl_psr2_disable_dc3co(intel_dp);
647}
648
649static bool
650dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
651 struct intel_crtc_state *crtc_state)
652{
653 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
654 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
655 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
656 enum port port = dig_port->base.port;
657
658 if (IS_ALDERLAKE_P(dev_priv))
659 return pipe <= PIPE_B && port <= PORT_B;
660 else
661 return pipe == PIPE_A && port == PORT_A;
662}
663
664static void
665tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
666 struct intel_crtc_state *crtc_state)
667{
668 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
669 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
670 u32 exit_scanlines;
671
	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * keep DC3CO disabled until the new activation/deactivation sequence
	 * is implemented. BSpec: 49196
	 */
677 return;
678
	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
683 if (crtc_state->enable_psr2_sel_fetch)
684 return;
685
686 if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
687 return;
688
689 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
690 return;
691
	/*
	 * DC3CO exit time is 200us per BSpec 49196:
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
696 exit_scanlines =
697 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
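	/*
	 * For example (hypothetical timings): with a line time of roughly
	 * 14.8us (1080p@60), 200us of exit latency rounds up to 14 lines,
	 * giving exit_scanlines = 15 and an exit line of vdisplay - 15.
	 */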
698
699 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
700 return;
701
702 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
703}
704
705static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
706 struct intel_crtc_state *crtc_state)
707{
708 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
709 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
710 struct intel_plane_state *plane_state;
711 struct intel_plane *plane;
712 int i;
713
714 if (!dev_priv->params.enable_psr2_sel_fetch &&
715 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
716 drm_dbg_kms(&dev_priv->drm,
717 "PSR2 sel fetch not enabled, disabled by parameter\n");
718 return false;
719 }
720
721 if (crtc_state->uapi.async_flip) {
722 drm_dbg_kms(&dev_priv->drm,
723 "PSR2 sel fetch not enabled, async flip enabled\n");
724 return false;
725 }
726
727 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
728 if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
729 drm_dbg_kms(&dev_priv->drm,
730 "PSR2 sel fetch not enabled, plane rotated\n");
731 return false;
732 }
733 }
734
735 /* Wa_14010254185 Wa_14010103792 */
736 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
737 drm_dbg_kms(&dev_priv->drm,
738 "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
739 return false;
740 }
741
742 return crtc_state->enable_psr2_sel_fetch = true;
743}
744
745static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
746 struct intel_crtc_state *crtc_state)
747{
748 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
749 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
750 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
751 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
752
753 if (!intel_dp->psr.sink_psr2_support)
754 return false;
755
	/* JSL and EHL only support eDP 1.3 */
757 if (IS_JSL_EHL(dev_priv)) {
758 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
759 return false;
760 }
761
762 /* Wa_16011181250 */
763 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv)) {
764 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
765 return false;
766 }
767
	/*
	 * We are missing the implementation of some workarounds needed to
	 * enable PSR2 on Alderlake_P, so keep PSR2 disabled until they are
	 * ready.
	 */
772 if (IS_ALDERLAKE_P(dev_priv)) {
773 drm_dbg_kms(&dev_priv->drm, "PSR2 is missing the implementation of workarounds\n");
774 return false;
775 }
776
777 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
778 drm_dbg_kms(&dev_priv->drm,
779 "PSR2 not supported in transcoder %s\n",
780 transcoder_name(crtc_state->cpu_transcoder));
781 return false;
782 }
783
784 if (!psr2_global_enabled(intel_dp)) {
785 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
786 return false;
787 }
788
789 /*
790 * DSC and PSR2 cannot be enabled simultaneously. If a requested
791 * resolution requires DSC to be enabled, priority is given to DSC
792 * over PSR2.
793 */
794 if (crtc_state->dsc.compression_enable) {
795 drm_dbg_kms(&dev_priv->drm,
796 "PSR2 cannot be enabled since DSC is enabled\n");
797 return false;
798 }
799
800 if (crtc_state->crc_enabled) {
801 drm_dbg_kms(&dev_priv->drm,
802 "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
803 return false;
804 }
805
806 if (DISPLAY_VER(dev_priv) >= 12) {
807 psr_max_h = 5120;
808 psr_max_v = 3200;
809 max_bpp = 30;
810 } else if (DISPLAY_VER(dev_priv) >= 10) {
811 psr_max_h = 4096;
812 psr_max_v = 2304;
813 max_bpp = 24;
814 } else if (DISPLAY_VER(dev_priv) == 9) {
815 psr_max_h = 3640;
816 psr_max_v = 2304;
817 max_bpp = 24;
818 }
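	/*
	 * As an illustration: a 3840x2160 mode at 30 bpp fits within the
	 * gen12 limits above (5120x3200, 30 bpp) but would fail the bpp
	 * check on gen10/11, where only 24 bpp is supported.
	 */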
819
820 if (crtc_state->pipe_bpp > max_bpp) {
821 drm_dbg_kms(&dev_priv->drm,
822 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
823 crtc_state->pipe_bpp, max_bpp);
824 return false;
825 }
826
	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate that the SU block width is a multiple of
	 * the X granularity.
	 */
833 if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
834 drm_dbg_kms(&dev_priv->drm,
835 "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
836 crtc_hdisplay, intel_dp->psr.su_x_granularity);
837 return false;
838 }
839
840 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
841 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
842 !HAS_PSR_HW_TRACKING(dev_priv)) {
843 drm_dbg_kms(&dev_priv->drm,
844 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
845 return false;
846 }
847 }
848
849 /* Wa_2209313811 */
850 if (!crtc_state->enable_psr2_sel_fetch &&
851 IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported on this Display stepping\n");
853 return false;
854 }
855
856 if (!crtc_state->enable_psr2_sel_fetch &&
857 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
858 drm_dbg_kms(&dev_priv->drm,
859 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
860 crtc_hdisplay, crtc_vdisplay,
861 psr_max_h, psr_max_v);
862 return false;
863 }
864
865 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
866 return true;
867}
868
869void intel_psr_compute_config(struct intel_dp *intel_dp,
870 struct intel_crtc_state *crtc_state)
871{
872 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
873 const struct drm_display_mode *adjusted_mode =
874 &crtc_state->hw.adjusted_mode;
875 int psr_setup_time;
876
	/*
	 * Current PSR panels don't work reliably with VRR enabled, so if
	 * VRR is enabled do not enable PSR.
	 */
881 if (crtc_state->vrr.enable)
882 return;
883
884 if (!CAN_PSR(intel_dp))
885 return;
886
887 if (!psr_global_enabled(intel_dp)) {
888 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
889 return;
890 }
891
892 if (intel_dp->psr.sink_not_reliable) {
893 drm_dbg_kms(&dev_priv->drm,
894 "PSR sink implementation is not reliable\n");
895 return;
896 }
897
898 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
899 drm_dbg_kms(&dev_priv->drm,
900 "PSR condition failed: Interlaced mode enabled\n");
901 return;
902 }
903
904 psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
905 if (psr_setup_time < 0) {
906 drm_dbg_kms(&dev_priv->drm,
907 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
908 intel_dp->psr_dpcd[1]);
909 return;
910 }
911
912 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
913 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
914 drm_dbg_kms(&dev_priv->drm,
915 "PSR condition failed: PSR setup time (%d us) too long\n",
916 psr_setup_time);
917 return;
918 }
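	/*
	 * As an illustration (hypothetical timings): a panel reporting a
	 * 330 us setup time on a mode with ~14.8 us per line needs about 23
	 * scanlines of vblank headroom, so modes with a very small vblank
	 * can fail this check even though the panel supports PSR.
	 */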
919
920 crtc_state->has_psr = true;
921 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
922 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
923}
924
925void intel_psr_get_config(struct intel_encoder *encoder,
926 struct intel_crtc_state *pipe_config)
927{
928 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
929 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
930 struct intel_dp *intel_dp;
931 u32 val;
932
933 if (!dig_port)
934 return;
935
936 intel_dp = &dig_port->dp;
937 if (!CAN_PSR(intel_dp))
938 return;
939
940 mutex_lock(&intel_dp->psr.lock);
941 if (!intel_dp->psr.enabled)
942 goto unlock;
943
	/*
	 * Reading the EDP_PSR/PSR2_CTL registers is not reliable here since
	 * PSR gets enabled/disabled dynamically by frontbuffer tracking and
	 * other paths.
	 */
948 pipe_config->has_psr = true;
949 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
950 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
951
952 if (!intel_dp->psr.psr2_enabled)
953 goto unlock;
954
955 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
956 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
957 if (val & PSR2_MAN_TRK_CTL_ENABLE)
958 pipe_config->enable_psr2_sel_fetch = true;
959 }
960
961 if (DISPLAY_VER(dev_priv) >= 12) {
962 val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
963 val &= EXITLINE_MASK;
964 pipe_config->dc3co_exitline = val;
965 }
966unlock:
967 mutex_unlock(&intel_dp->psr.lock);
968}
969
970static void intel_psr_activate(struct intel_dp *intel_dp)
971{
972 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
973 enum transcoder transcoder = intel_dp->psr.transcoder;
974
975 if (transcoder_has_psr2(dev_priv, transcoder))
976 drm_WARN_ON(&dev_priv->drm,
977 intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
978
979 drm_WARN_ON(&dev_priv->drm,
980 intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
981 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
982 lockdep_assert_held(&intel_dp->psr.lock);
983
	/* psr1 and psr2 are mutually exclusive. */
985 if (intel_dp->psr.psr2_enabled)
986 hsw_activate_psr2(intel_dp);
987 else
988 hsw_activate_psr1(intel_dp);
989
990 intel_dp->psr.active = true;
991}
992
993static void intel_psr_enable_source(struct intel_dp *intel_dp)
994{
995 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
996 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
997 u32 mask;
998
	/*
	 * Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
1002 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1003 hsw_psr_setup_aux(intel_dp);
1004
1005 if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) {
1006 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
1007 u32 chicken = intel_de_read(dev_priv, reg);
1008
1009 chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
1010 PSR2_ADD_VERTICAL_LINE_COUNT;
1011 intel_de_write(dev_priv, reg, chicken);
1012 }
1013
	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD.
	 * Also mask LPSP to avoid a dependency on other drivers that might
	 * block runtime_pm, and to prevent other HW tracking issues, now
	 * that we can rely on frontbuffer tracking.
	 */
1020 mask = EDP_PSR_DEBUG_MASK_MEMUP |
1021 EDP_PSR_DEBUG_MASK_HPD |
1022 EDP_PSR_DEBUG_MASK_LPSP |
1023 EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1024
1025 if (DISPLAY_VER(dev_priv) < 11)
1026 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1027
1028 intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
1029 mask);
1030
1031 psr_irq_control(intel_dp);
1032
1033 if (intel_dp->psr.dc3co_exitline) {
1034 u32 val;
1035
		/*
		 * TODO: if future platforms support DC3CO in more than one
		 * transcoder, EXITLINE will need to be unset when disabling PSR
		 */
1040 val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
1041 val &= ~EXITLINE_MASK;
1042 val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
1043 val |= EXITLINE_ENABLE;
1044 intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
1045 }
1046
1047 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1048 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1049 intel_dp->psr.psr2_sel_fetch_enabled ?
1050 IGNORE_PSR2_HW_TRACKING : 0);
1051}
1052
1053static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1054{
1055 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1056 u32 val;
1057
	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time the PSR HW tries to activate, so keep PSR disabled to
	 * avoid any rendering problems.
	 */
1066 if (DISPLAY_VER(dev_priv) >= 12) {
1067 val = intel_de_read(dev_priv,
1068 TRANS_PSR_IIR(intel_dp->psr.transcoder));
1069 val &= EDP_PSR_ERROR(0);
1070 } else {
1071 val = intel_de_read(dev_priv, EDP_PSR_IIR);
1072 val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
1073 }
1074 if (val) {
1075 intel_dp->psr.sink_not_reliable = true;
1076 drm_dbg_kms(&dev_priv->drm,
1077 "PSR interruption error set, not enabling PSR\n");
1078 return false;
1079 }
1080
1081 return true;
1082}
1083
1084static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1085 const struct intel_crtc_state *crtc_state,
1086 const struct drm_connector_state *conn_state)
1087{
1088 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1089 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1090 struct intel_encoder *encoder = &dig_port->base;
1091 u32 val;
1092
1093 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1094
1095 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1096 intel_dp->psr.busy_frontbuffer_bits = 0;
1097 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1098 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1099 /* DC5/DC6 requires at least 6 idle frames */
1100 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1101 intel_dp->psr.dc3co_exit_delay = val;
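	/*
	 * For example (assuming a 60 Hz mode): the frame time is roughly
	 * 16667 us, so the 6-frame requirement above translates to about
	 * 100 ms worth of jiffies for the DC3CO exit delay.
	 */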
1102 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1103 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1104
1105 if (!psr_interrupt_error_check(intel_dp))
1106 return;
1107
1108 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1109 intel_dp->psr.psr2_enabled ? "2" : "1");
1110 intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1111 &intel_dp->psr.vsc);
1112 intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
1113 intel_psr_enable_sink(intel_dp);
1114 intel_psr_enable_source(intel_dp);
1115 intel_dp->psr.enabled = true;
1116 intel_dp->psr.paused = false;
1117
1118 intel_psr_activate(intel_dp);
1119}
1120
1121/**
1122 * intel_psr_enable - Enable PSR
1123 * @intel_dp: Intel DP
1124 * @crtc_state: new CRTC state
1125 * @conn_state: new CONNECTOR state
1126 *
1127 * This function can only be called after the pipe is fully trained and enabled.
1128 */
1129void intel_psr_enable(struct intel_dp *intel_dp,
1130 const struct intel_crtc_state *crtc_state,
1131 const struct drm_connector_state *conn_state)
1132{
1133 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1134
1135 if (!CAN_PSR(intel_dp))
1136 return;
1137
1138 if (!crtc_state->has_psr)
1139 return;
1140
1141 drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
1142
1143 mutex_lock(&intel_dp->psr.lock);
1144 intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
1145 mutex_unlock(&intel_dp->psr.lock);
1146}
1147
1148static void intel_psr_exit(struct intel_dp *intel_dp)
1149{
1150 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1151 u32 val;
1152
1153 if (!intel_dp->psr.active) {
1154 if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1155 val = intel_de_read(dev_priv,
1156 EDP_PSR2_CTL(intel_dp->psr.transcoder));
1157 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1158 }
1159
1160 val = intel_de_read(dev_priv,
1161 EDP_PSR_CTL(intel_dp->psr.transcoder));
1162 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1163
1164 return;
1165 }
1166
1167 if (intel_dp->psr.psr2_enabled) {
1168 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1169 val = intel_de_read(dev_priv,
1170 EDP_PSR2_CTL(intel_dp->psr.transcoder));
1171 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1172 val &= ~EDP_PSR2_ENABLE;
1173 intel_de_write(dev_priv,
1174 EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1175 } else {
1176 val = intel_de_read(dev_priv,
1177 EDP_PSR_CTL(intel_dp->psr.transcoder));
1178 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1179 val &= ~EDP_PSR_ENABLE;
1180 intel_de_write(dev_priv,
1181 EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1182 }
1183 intel_dp->psr.active = false;
1184}
1185
1186static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1187{
1188 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1189 i915_reg_t psr_status;
1190 u32 psr_status_mask;
1191
1192 if (intel_dp->psr.psr2_enabled) {
1193 psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1194 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1195 } else {
1196 psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1197 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1198 }
1199
1200 /* Wait till PSR is idle */
1201 if (intel_de_wait_for_clear(dev_priv, psr_status,
1202 psr_status_mask, 2000))
1203 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1204}
1205
1206static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1207{
1208 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1209
1210 lockdep_assert_held(&intel_dp->psr.lock);
1211
1212 if (!intel_dp->psr.enabled)
1213 return;
1214
1215 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1216 intel_dp->psr.psr2_enabled ? "2" : "1");
1217
1218 intel_psr_exit(intel_dp);
1219 intel_psr_wait_exit_locked(intel_dp);
1220
1221 /* WA 1408330847 */
1222 if (intel_dp->psr.psr2_sel_fetch_enabled &&
1223 (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
1224 IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
1225 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
1226 DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
1227
1228 /* Disable PSR on Sink */
1229 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1230
1231 if (intel_dp->psr.psr2_enabled)
1232 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1233
1234 intel_dp->psr.enabled = false;
1235}
1236
1237/**
1238 * intel_psr_disable - Disable PSR
1239 * @intel_dp: Intel DP
1240 * @old_crtc_state: old CRTC state
1241 *
1242 * This function needs to be called before disabling pipe.
1243 */
1244void intel_psr_disable(struct intel_dp *intel_dp,
1245 const struct intel_crtc_state *old_crtc_state)
1246{
1247 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1248
1249 if (!old_crtc_state->has_psr)
1250 return;
1251
1252 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1253 return;
1254
1255 mutex_lock(&intel_dp->psr.lock);
1256
1257 intel_psr_disable_locked(intel_dp);
1258
1259 mutex_unlock(&intel_dp->psr.lock);
1260 cancel_work_sync(&intel_dp->psr.work);
1261 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1262}
1263
1264/**
1265 * intel_psr_pause - Pause PSR
1266 * @intel_dp: Intel DP
1267 *
 * This function needs to be called after enabling PSR.
1269 */
1270void intel_psr_pause(struct intel_dp *intel_dp)
1271{
1272 struct intel_psr *psr = &intel_dp->psr;
1273
1274 if (!CAN_PSR(intel_dp))
1275 return;
1276
1277 mutex_lock(&psr->lock);
1278
1279 if (!psr->enabled) {
1280 mutex_unlock(&psr->lock);
1281 return;
1282 }
1283
1284 intel_psr_exit(intel_dp);
1285 intel_psr_wait_exit_locked(intel_dp);
1286 psr->paused = true;
1287
1288 mutex_unlock(&psr->lock);
1289
1290 cancel_work_sync(&psr->work);
1291 cancel_delayed_work_sync(&psr->dc3co_work);
1292}
1293
1294/**
1295 * intel_psr_resume - Resume PSR
1296 * @intel_dp: Intel DP
1297 *
 * This function needs to be called after pausing PSR.
1299 */
1300void intel_psr_resume(struct intel_dp *intel_dp)
1301{
1302 struct intel_psr *psr = &intel_dp->psr;
1303
1304 if (!CAN_PSR(intel_dp))
1305 return;
1306
1307 mutex_lock(&psr->lock);
1308
1309 if (!psr->paused)
1310 goto unlock;
1311
1312 psr->paused = false;
1313 intel_psr_activate(intel_dp);
1314
1315unlock:
1316 mutex_unlock(&psr->lock);
1317}
1318
1319static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1320{
1321 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1322
1323 if (DISPLAY_VER(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently
		 * active pipe.
		 */
1333 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1334 else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
		 */
1339 intel_psr_exit(intel_dp);
1340}
1341
1342void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
1343 const struct intel_crtc_state *crtc_state,
1344 const struct intel_plane_state *plane_state,
1345 int color_plane)
1346{
1347 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1348 enum pipe pipe = plane->pipe;
1349 const struct drm_rect *clip;
1350 u32 val, offset;
1351 int ret, x, y;
1352
1353 if (!crtc_state->enable_psr2_sel_fetch)
1354 return;
1355
1356 val = plane_state ? plane_state->ctl : 0;
1357 val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
1358 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
1359 if (!val || plane->id == PLANE_CURSOR)
1360 return;
1361
1362 clip = &plane_state->psr2_sel_fetch_area;
1363
1364 val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1365 val |= plane_state->uapi.dst.x1;
1366 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1367
1368 /* TODO: consider auxiliary surfaces */
1369 x = plane_state->uapi.src.x1 >> 16;
1370 y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
1371 ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
1372 if (ret)
1373 drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
1374 ret);
1375 val = y << 16 | x;
1376 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1377 val);
1378
1379 /* Sizes are 0 based */
1380 val = (drm_rect_height(clip) - 1) << 16;
1381 val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1382 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1383}
1384
1385void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1386{
1387 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1388
1389 if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
1390 !crtc_state->enable_psr2_sel_fetch)
1391 return;
1392
1393 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
1394 crtc_state->psr2_man_track_ctl);
1395}
1396
1397static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1398 struct drm_rect *clip, bool full_update)
1399{
1400 u32 val = PSR2_MAN_TRK_CTL_ENABLE;
1401
1402 if (full_update) {
1403 val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1404 goto exit;
1405 }
1406
1407 if (clip->y1 == -1)
1408 goto exit;
1409
1410 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1411
1412 val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1413 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1414 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
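	/*
	 * Example: a damaged area already aligned to lines 8..24 programs a
	 * start block of 8 / 4 + 1 = 3 and an end block of 24 / 4 + 1 = 7,
	 * i.e. the SU region addresses are expressed in 4-line blocks.
	 */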
1415exit:
1416 crtc_state->psr2_man_track_ctl = val;
1417}
1418
1419static void clip_area_update(struct drm_rect *overlap_damage_area,
1420 struct drm_rect *damage_area)
1421{
1422 if (overlap_damage_area->y1 == -1) {
1423 overlap_damage_area->y1 = damage_area->y1;
1424 overlap_damage_area->y2 = damage_area->y2;
1425 return;
1426 }
1427
1428 if (damage_area->y1 < overlap_damage_area->y1)
1429 overlap_damage_area->y1 = damage_area->y1;
1430
1431 if (damage_area->y2 > overlap_damage_area->y2)
1432 overlap_damage_area->y2 = damage_area->y2;
1433}
1434
1435int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1436 struct intel_crtc *crtc)
1437{
1438 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1439 struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1440 struct intel_plane_state *new_plane_state, *old_plane_state;
1441 struct intel_plane *plane;
1442 bool full_update = false;
1443 int i, ret;
1444
1445 if (!crtc_state->enable_psr2_sel_fetch)
1446 return 0;
1447
1448 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
1449 if (ret)
1450 return ret;
1451
	/*
	 * Calculate the minimal selective fetch area of each plane and
	 * accumulate the pipe damaged area.
	 * In the next loop the plane selective fetch area will actually be
	 * set using the whole pipe damaged area.
	 */
1458 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1459 new_plane_state, i) {
1460 struct drm_rect src, damaged_area = { .y1 = -1 };
1461 struct drm_mode_rect *damaged_clips;
1462 u32 num_clips, j;
1463
1464 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1465 continue;
1466
1467 if (!new_plane_state->uapi.visible &&
1468 !old_plane_state->uapi.visible)
1469 continue;
1470
		/*
		 * TODO: It is not clear how to handle planes with a negative
		 * position; planes are also not updated if they have a
		 * negative X position, so for now do a full update in these
		 * cases.
		 */
1476 if (new_plane_state->uapi.dst.y1 < 0 ||
1477 new_plane_state->uapi.dst.x1 < 0) {
1478 full_update = true;
1479 break;
1480 }
1481
1482 num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
1483
		/*
		 * If the visibility changed or the plane moved, mark the whole
		 * plane area as damaged as it needs to be completely redrawn
		 * in both the new and old positions.
		 */
1489 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
1490 !drm_rect_equals(&new_plane_state->uapi.dst,
1491 &old_plane_state->uapi.dst)) {
1492 if (old_plane_state->uapi.visible) {
1493 damaged_area.y1 = old_plane_state->uapi.dst.y1;
1494 damaged_area.y2 = old_plane_state->uapi.dst.y2;
1495 clip_area_update(&pipe_clip, &damaged_area);
1496 }
1497
1498 if (new_plane_state->uapi.visible) {
1499 damaged_area.y1 = new_plane_state->uapi.dst.y1;
1500 damaged_area.y2 = new_plane_state->uapi.dst.y2;
1501 clip_area_update(&pipe_clip, &damaged_area);
1502 }
1503 continue;
1504 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
1505 (!num_clips &&
1506 new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
			/*
			 * If the plane doesn't have damaged areas but the
			 * framebuffer or alpha changed, mark the whole plane
			 * area as damaged.
			 */
1512 damaged_area.y1 = new_plane_state->uapi.dst.y1;
1513 damaged_area.y2 = new_plane_state->uapi.dst.y2;
1514 clip_area_update(&pipe_clip, &damaged_area);
1515 continue;
1516 }
1517
1518 drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
1519 damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
1520
1521 for (j = 0; j < num_clips; j++) {
1522 struct drm_rect clip;
1523
1524 clip.x1 = damaged_clips[j].x1;
1525 clip.y1 = damaged_clips[j].y1;
1526 clip.x2 = damaged_clips[j].x2;
1527 clip.y2 = damaged_clips[j].y2;
1528 if (drm_rect_intersect(&clip, &src))
1529 clip_area_update(&damaged_area, &clip);
1530 }
1531
1532 if (damaged_area.y1 == -1)
1533 continue;
1534
1535 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
1536 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
1537 clip_area_update(&pipe_clip, &damaged_area);
1538 }
1539
1540 if (full_update)
1541 goto skip_sel_fetch_set_loop;
1542
1543 /* It must be aligned to 4 lines */
1544 pipe_clip.y1 -= pipe_clip.y1 % 4;
1545 if (pipe_clip.y2 % 4)
1546 pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
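	/*
	 * For example, a damaged area of y1 = 10, y2 = 33 becomes y1 = 8,
	 * y2 = 36 after this alignment, matching the 4-line SU block
	 * granularity of the hardware.
	 */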
1547
	/*
	 * Now that we have the pipe damaged area, check if it intersects
	 * with each plane; if it does, set the plane selective fetch area.
	 */
1552 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1553 new_plane_state, i) {
1554 struct drm_rect *sel_fetch_area, inter;
1555
1556 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
1557 !new_plane_state->uapi.visible)
1558 continue;
1559
1560 inter = pipe_clip;
1561 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
1562 continue;
1563
1564 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
1565 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
1566 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
1567 }
1568
1569skip_sel_fetch_set_loop:
1570 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
1571 return 0;
1572}
1573
1574/**
1575 * intel_psr_update - Update PSR state
1576 * @intel_dp: Intel DP
1577 * @crtc_state: new CRTC state
1578 * @conn_state: new CONNECTOR state
1579 *
 * This function will update the PSR state, disabling, enabling or switching
 * PSR version when executing fastsets. For full modesets, intel_psr_disable()
 * and intel_psr_enable() should be called instead.
1583 */
1584void intel_psr_update(struct intel_dp *intel_dp,
1585 const struct intel_crtc_state *crtc_state,
1586 const struct drm_connector_state *conn_state)
1587{
1588 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1589 struct intel_psr *psr = &intel_dp->psr;
1590 bool enable, psr2_enable;
1591
1592 if (!CAN_PSR(intel_dp))
1593 return;
1594
1595 mutex_lock(&intel_dp->psr.lock);
1596
1597 enable = crtc_state->has_psr;
1598 psr2_enable = crtc_state->has_psr2;
1599
1600 if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
1601 crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
1602 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
1603 if (crtc_state->crc_enabled && psr->enabled)
1604 psr_force_hw_tracking_exit(intel_dp);
1605 else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) {
1606 /*
1607 * Activate PSR again after a force exit when enabling
1608 * CRC in older gens
1609 */
1610 if (!intel_dp->psr.active &&
1611 !intel_dp->psr.busy_frontbuffer_bits)
1612 schedule_work(&intel_dp->psr.work);
1613 }
1614
1615 goto unlock;
1616 }
1617
1618 if (psr->enabled)
1619 intel_psr_disable_locked(intel_dp);
1620
1621 if (enable)
1622 intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
1623
1624unlock:
1625 mutex_unlock(&intel_dp->psr.lock);
1626}
1627
1628/**
1629 * psr_wait_for_idle - wait for PSR1 to idle
1630 * @intel_dp: Intel DP
1631 * @out_value: PSR status in case of failure
1632 *
 * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
1634 *
1635 */
1636static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
1637{
1638 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1639
1640 /*
1641 * From bspec: Panel Self Refresh (BDW+)
1642 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
1643 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
1644 * defensive enough to cover everything.
1645 */
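	/*
	 * For a 60 Hz panel, for instance, that works out to roughly
	 * 16.7 + 6 + 1.5 = ~24 ms, comfortably within the 50 ms timeout below.
	 */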
1646 return __intel_wait_for_register(&dev_priv->uncore,
1647 EDP_PSR_STATUS(intel_dp->psr.transcoder),
1648 EDP_PSR_STATUS_STATE_MASK,
1649 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
1650 out_value);
1651}
1652
1653/**
1654 * intel_psr_wait_for_idle - wait for PSR1 to idle
1655 * @new_crtc_state: new CRTC state
1656 *
1657 * This function is expected to be called from pipe_update_start() where it is
1658 * not expected to race with PSR enable or disable.
1659 */
1660void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
1661{
1662 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
1663 struct intel_encoder *encoder;
1664
1665 if (!new_crtc_state->has_psr)
1666 return;
1667
1668 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1669 new_crtc_state->uapi.encoder_mask) {
1670 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1671 u32 psr_status;
1672
1673 mutex_lock(&intel_dp->psr.lock);
1674 if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
1675 mutex_unlock(&intel_dp->psr.lock);
1676 continue;
1677 }
1678
		/* when PSR1 is enabled */
1680 if (psr_wait_for_idle(intel_dp, &psr_status))
1681 drm_err(&dev_priv->drm,
1682 "PSR idle timed out 0x%x, atomic update may fail\n",
1683 psr_status);
1684 mutex_unlock(&intel_dp->psr.lock);
1685 }
1686}
1687
1688static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
1689{
1690 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1691 i915_reg_t reg;
1692 u32 mask;
1693 int err;
1694
1695 if (!intel_dp->psr.enabled)
1696 return false;
1697
1698 if (intel_dp->psr.psr2_enabled) {
1699 reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1700 mask = EDP_PSR2_STATUS_STATE_MASK;
1701 } else {
1702 reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1703 mask = EDP_PSR_STATUS_STATE_MASK;
1704 }
1705
1706 mutex_unlock(&intel_dp->psr.lock);
1707
1708 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
1709 if (err)
1710 drm_err(&dev_priv->drm,
1711 "Timed out waiting for PSR Idle for re-enable\n");
1712
1713 /* After the unlocked wait, verify that PSR is still wanted! */
1714 mutex_lock(&intel_dp->psr.lock);
1715 return err == 0 && intel_dp->psr.enabled;
1716}
1717
1718static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
1719{
1720 struct drm_connector_list_iter conn_iter;
1721 struct drm_device *dev = &dev_priv->drm;
1722 struct drm_modeset_acquire_ctx ctx;
1723 struct drm_atomic_state *state;
1724 struct drm_connector *conn;
1725 int err = 0;
1726
1727 state = drm_atomic_state_alloc(dev);
1728 if (!state)
1729 return -ENOMEM;
1730
1731 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1732 state->acquire_ctx = &ctx;
1733
1734retry:
1735
1736 drm_connector_list_iter_begin(dev, &conn_iter);
1737 drm_for_each_connector_iter(conn, &conn_iter) {
1738 struct drm_connector_state *conn_state;
1739 struct drm_crtc_state *crtc_state;
1740
1741 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
1742 continue;
1743
1744 conn_state = drm_atomic_get_connector_state(state, conn);
1745 if (IS_ERR(conn_state)) {
1746 err = PTR_ERR(conn_state);
1747 break;
1748 }
1749
1750 if (!conn_state->crtc)
1751 continue;
1752
1753 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
1754 if (IS_ERR(crtc_state)) {
1755 err = PTR_ERR(crtc_state);
1756 break;
1757 }
1758
1759 /* Mark mode as changed to trigger a pipe->update() */
1760 crtc_state->mode_changed = true;
1761 }
1762 drm_connector_list_iter_end(&conn_iter);
1763
1764 if (err == 0)
1765 err = drm_atomic_commit(state);
1766
1767 if (err == -EDEADLK) {
1768 drm_atomic_state_clear(state);
1769 err = drm_modeset_backoff(&ctx);
1770 if (!err)
1771 goto retry;
1772 }
1773
1774 drm_modeset_drop_locks(&ctx);
1775 drm_modeset_acquire_fini(&ctx);
1776 drm_atomic_state_put(state);
1777
1778 return err;
1779}
1780
1781int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
1782{
1783 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1784 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
1785 u32 old_mode;
1786 int ret;
1787
1788 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
1789 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1790 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
1791 return -EINVAL;
1792 }
1793
1794 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
1795 if (ret)
1796 return ret;
1797
1798 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
1799 intel_dp->psr.debug = val;
1800
1801 /*
1802 * Do it right away if it's already enabled, otherwise it will be done
1803 * when enabling the source.
1804 */
1805 if (intel_dp->psr.enabled)
1806 psr_irq_control(intel_dp);
1807
1808 mutex_unlock(&intel_dp->psr.lock);
1809
1810 if (old_mode != mode)
1811 ret = intel_psr_fastset_force(dev_priv);
1812
1813 return ret;
1814}
1815
1816static void intel_psr_handle_irq(struct intel_dp *intel_dp)
1817{
1818 struct intel_psr *psr = &intel_dp->psr;
1819
1820 intel_psr_disable_locked(intel_dp);
1821 psr->sink_not_reliable = true;
1822 /* let's make sure that the sink is awake */
1823 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
1824}
1825
1826static void intel_psr_work(struct work_struct *work)
1827{
1828 struct intel_dp *intel_dp =
1829 container_of(work, typeof(*intel_dp), psr.work);
1830
1831 mutex_lock(&intel_dp->psr.lock);
1832
1833 if (!intel_dp->psr.enabled)
1834 goto unlock;
1835
1836 if (READ_ONCE(intel_dp->psr.irq_aux_error))
1837 intel_psr_handle_irq(intel_dp);
1838
1839 /*
1840 * We have to make sure PSR is ready for re-enable,
1841 * otherwise it stays disabled until the next full enable/disable cycle.
1842 * PSR might take some time to get fully disabled
1843 * and become ready for re-enable again.
1844 */
1845 if (!__psr_wait_for_idle_locked(intel_dp))
1846 goto unlock;
1847
1848 /*
1849 * The delayed work can race with an invalidate hence we need to
1850 * recheck. Since psr_flush first clears this and then reschedules we
1851 * won't ever miss a flush when bailing out here.
1852 */
1853 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
1854 goto unlock;
1855
1856 intel_psr_activate(intel_dp);
1857unlock:
1858 mutex_unlock(&intel_dp->psr.lock);
1859}
1860
1861/**
1862 * intel_psr_invalidate - Invalidate PSR
1863 * @dev_priv: i915 device
1864 * @frontbuffer_bits: frontbuffer plane tracking bits
1865 * @origin: which operation caused the invalidate
1866 *
1867 * Since the hardware frontbuffer tracking has gaps we need to integrate
1868 * with the software frontbuffer tracking. This function gets called every
1869 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
1870 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
1871 *
1872 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1873 */
1874void intel_psr_invalidate(struct drm_i915_private *dev_priv,
1875 unsigned frontbuffer_bits, enum fb_op_origin origin)
1876{
1877 struct intel_encoder *encoder;
1878
1879 if (origin == ORIGIN_FLIP)
1880 return;
1881
1882 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1883 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
1884 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1885
1886 mutex_lock(&intel_dp->psr.lock);
1887 if (!intel_dp->psr.enabled) {
1888 mutex_unlock(&intel_dp->psr.lock);
1889 continue;
1890 }
1891
1892 pipe_frontbuffer_bits &=
1893 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
1894 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
1895
1896 if (pipe_frontbuffer_bits)
1897 intel_psr_exit(intel_dp);
1898
1899 mutex_unlock(&intel_dp->psr.lock);
1900 }
1901}
1902/*
1903 * Once we completely rely on PSR2 S/W tracking in the future,
1904 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
1905 * events as well, so tgl_dc3co_flush() will need to be changed
1906 * accordingly.
1907 */
1908static void
1909tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
1910 enum fb_op_origin origin)
1911{
1912 mutex_lock(&intel_dp->psr.lock);
1913
1914 if (!intel_dp->psr.dc3co_exitline)
1915 goto unlock;
1916
1917 if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
1918 goto unlock;
1919
1920 /*
1921 * At every frontbuffer flush (flip) event, push back the delayed work;
1922 * when the delayed work finally runs it means the display has been idle.
1923 */
1924 if (!(frontbuffer_bits &
1925 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
1926 goto unlock;
1927
1928 tgl_psr2_enable_dc3co(intel_dp);
1929 mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
1930 intel_dp->psr.dc3co_exit_delay);
1931
1932unlock:
1933 mutex_unlock(&intel_dp->psr.lock);
1934}
1935
1936/**
1937 * intel_psr_flush - Flush PSR
1938 * @dev_priv: i915 device
1939 * @frontbuffer_bits: frontbuffer plane tracking bits
1940 * @origin: which operation caused the flush
1941 *
1942 * Since the hardware frontbuffer tracking has gaps we need to integrate
1943 * with the software frontbuffer tracking. This function gets called every
1944 * time frontbuffer rendering has completed and flushed out to memory. PSR
1945 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
1946 *
1947 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1948 */
1949void intel_psr_flush(struct drm_i915_private *dev_priv,
1950 unsigned frontbuffer_bits, enum fb_op_origin origin)
1951{
1952 struct intel_encoder *encoder;
1953
1954 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1955 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
1956 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1957
1958 if (origin == ORIGIN_FLIP) {
1959 tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
1960 continue;
1961 }
1962
1963 mutex_lock(&intel_dp->psr.lock);
1964 if (!intel_dp->psr.enabled) {
1965 mutex_unlock(&intel_dp->psr.lock);
1966 continue;
1967 }
1968
1969 pipe_frontbuffer_bits &=
1970 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
1971 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
1972
1973 /*
1974 * If the PSR is paused by an explicit intel_psr_paused() call,
1975 * we have to ensure that the PSR is not activated until
1976 * intel_psr_resume() is called.
1977 */
1978 if (intel_dp->psr.paused) {
1979 mutex_unlock(&intel_dp->psr.lock);
1980 continue;
1981 }
1982
1983 /* By definition flush = invalidate + flush */
1984 if (pipe_frontbuffer_bits)
1985 psr_force_hw_tracking_exit(intel_dp);
1986
1987 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
1988 schedule_work(&intel_dp->psr.work);
1989 mutex_unlock(&intel_dp->psr.lock);
1990 }
1991}
1992
1993/**
1994 * intel_psr_init - Init basic PSR work and mutex.
1995 * @intel_dp: Intel DP
1996 *
1997 * This function is called after the connector has been initialized
1998 * (connector initialization takes care of reading the connector capabilities)
1999 * and it initializes the basic PSR state for each DP encoder.
2000 */
2001void intel_psr_init(struct intel_dp *intel_dp)
2002{
2003 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2004 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2005
2006 if (!HAS_PSR(dev_priv))
2007 return;
2008
2009 /*
2010 * HSW spec explicitly says PSR is tied to port A.
2011 * BDW+ platforms have an instance of the PSR registers per transcoder,
2012 * but BDW, GEN9 and GEN11 are not validated by the HW team on any
2013 * transcoder other than the eDP one.
2014 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2015 * so let's keep it hardcoded to PORT_A there.
2016 * GEN12, however, supports an instance of the PSR registers per transcoder.
2017 */
2018 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2019 drm_dbg_kms(&dev_priv->drm,
2020 "PSR condition failed: Port not supported\n");
2021 return;
2022 }
2023
2024 intel_dp->psr.source_support = true;
2025
2026 if (IS_HASWELL(dev_priv))
2027 /*
2028 * HSW doesn't have its PSR registers in the same space as the
2029 * transcoder, so set this to a value that, when subtracted from the
2030 * register offset in transcoder space, results in the right HSW offset.
2031 */
2032 dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
2033
2034 if (dev_priv->params.enable_psr == -1)
2035 if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
2036 dev_priv->params.enable_psr = 0;
2037
2038 /* Set link_standby vs. link_off defaults */
2039 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2040 /* HSW and BDW require workarounds that we don't implement. */
2041 intel_dp->psr.link_standby = false;
2042 else if (DISPLAY_VER(dev_priv) < 12)
2043 /* For newer platforms up to TGL, respect the VBT again */
2044 intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
2045
2046 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2047 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2048 mutex_init(&intel_dp->psr.lock);
2049}
2050
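/*
 * Read the sink's PSR status and PSR error status DPCD registers over AUX,
 * masking the status down to the sink device state field.
 */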
2051static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2052 u8 *status, u8 *error_status)
2053{
2054 struct drm_dp_aux *aux = &intel_dp->aux;
2055 int ret;
2056
2057 ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2058 if (ret != 1)
2059 return ret;
2060
2061 ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2062 if (ret != 1)
2063 return ret;
2064
2065 *status = *status & DP_PSR_SINK_STATE_MASK;
2066
2067 return 0;
2068}
2069
2070static void psr_alpm_check(struct intel_dp *intel_dp)
2071{
2072 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2073 struct drm_dp_aux *aux = &intel_dp->aux;
2074 struct intel_psr *psr = &intel_dp->psr;
2075 u8 val;
2076 int r;
2077
2078 if (!psr->psr2_enabled)
2079 return;
2080
2081 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2082 if (r != 1) {
2083 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2084 return;
2085 }
2086
2087 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2088 intel_psr_disable_locked(intel_dp);
2089 psr->sink_not_reliable = true;
2090 drm_dbg_kms(&dev_priv->drm,
2091 "ALPM lock timeout error, disabling PSR\n");
2092
2093 /* Clearing error */
2094 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2095 }
2096}
2097
2098static void psr_capability_changed_check(struct intel_dp *intel_dp)
2099{
2100 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2101 struct intel_psr *psr = &intel_dp->psr;
2102 u8 val;
2103 int r;
2104
2105 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2106 if (r != 1) {
2107 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2108 return;
2109 }
2110
2111 if (val & DP_PSR_CAPS_CHANGE) {
2112 intel_psr_disable_locked(intel_dp);
2113 psr->sink_not_reliable = true;
2114 drm_dbg_kms(&dev_priv->drm,
2115 "Sink PSR capability changed, disabling PSR\n");
2116
2117 /* Clearing it */
2118 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2119 }
2120}
2121
2122void intel_psr_short_pulse(struct intel_dp *intel_dp)
2123{
2124 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2125 struct intel_psr *psr = &intel_dp->psr;
2126 u8 status, error_status;
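	/* sink error conditions that make PSR unreliable and require disabling it */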
2127 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2128 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2129 DP_PSR_LINK_CRC_ERROR;
2130
2131 if (!CAN_PSR(intel_dp))
2132 return;
2133
2134 mutex_lock(&psr->lock);
2135
2136 if (!psr->enabled)
2137 goto exit;
2138
2139 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2140 drm_err(&dev_priv->drm,
2141 "Error reading PSR status or error status\n");
2142 goto exit;
2143 }
2144
2145 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2146 intel_psr_disable_locked(intel_dp);
2147 psr->sink_not_reliable = true;
2148 }
2149
2150 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2151 drm_dbg_kms(&dev_priv->drm,
2152 "PSR sink internal error, disabling PSR\n");
2153 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2154 drm_dbg_kms(&dev_priv->drm,
2155 "PSR RFB storage error, disabling PSR\n");
2156 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2157 drm_dbg_kms(&dev_priv->drm,
2158 "PSR VSC SDP uncorrectable error, disabling PSR\n");
2159 if (error_status & DP_PSR_LINK_CRC_ERROR)
2160 drm_dbg_kms(&dev_priv->drm,
2161 "PSR Link CRC error, disabling PSR\n");
2162
2163 if (error_status & ~errors)
2164 drm_err(&dev_priv->drm,
2165 "PSR_ERROR_STATUS unhandled errors %x\n",
2166 error_status & ~errors);
2167 /* clear status register */
2168 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2169
2170 psr_alpm_check(intel_dp);
2171 psr_capability_changed_check(intel_dp);
2172
2173exit:
2174 mutex_unlock(&psr->lock);
2175}
2176
2177bool intel_psr_enabled(struct intel_dp *intel_dp)
2178{
2179 bool ret;
2180
2181 if (!CAN_PSR(intel_dp))
2182 return false;
2183
2184 mutex_lock(&intel_dp->psr.lock);
2185 ret = intel_dp->psr.enabled;
2186 mutex_unlock(&intel_dp->psr.lock);
2187
2188 return ret;
2189}
23
24#include <drm/drm_atomic_helper.h>
25#include <drm/drm_damage_helper.h>
26#include <drm/drm_debugfs.h>
27
28#include "i915_drv.h"
29#include "i915_reg.h"
30#include "intel_atomic.h"
31#include "intel_crtc.h"
32#include "intel_ddi.h"
33#include "intel_de.h"
34#include "intel_display_types.h"
35#include "intel_dp.h"
36#include "intel_dp_aux.h"
37#include "intel_frontbuffer.h"
38#include "intel_hdmi.h"
39#include "intel_psr.h"
40#include "intel_psr_regs.h"
41#include "intel_snps_phy.h"
42#include "skl_universal_plane.h"
43
90
91/*
92 * Description of PSR mask bits:
93 *
94 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
95 *
96 * When unmasked (nearly) all display register writes (eg. even
97 * SWF) trigger a PSR exit. Some registers are excluded from this
98 * and they have a more specific mask (described below). On icl+
99 * this bit no longer exists and is effectively always set.
100 *
101 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
102 *
103 * When unmasked (nearly) all pipe/plane register writes
104 * trigger a PSR exit. Some plane registers are excluded from this
105 * and they have a more specific mask (described below).
106 *
107 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
110 *
111 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112 * SPR_SURF/CURBASE are not included in this and instead are
113 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
115 *
116 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
118 *
119 * When unmasked PSR is blocked as long as the sprite
120 * plane is enabled. skl+ with their universal planes no
121 * longer have a mask bit like this, and no plane being
122 * enabled blocks PSR.
123 *
124 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
126 *
127 * When unmasked CURPOS writes trigger a PSR exit. On skl+
128 * this doesn't exist but CURPOS is included in the
129 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
130 *
131 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
133 *
134 * When unmasked PSR is blocked as long as vblank and/or vsync
135 * interrupt is unmasked in IMR *and* enabled in IER.
136 *
137 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
139 *
140 * Selects whether PSR exit generates an extra vblank before
141 * the first frame is transmitted. Also note the opposite polarity
142 * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
143 * unmasked==do not generate the extra vblank).
144 *
145 * With DC states enabled the extra vblank happens after link training,
146 * with DC states disabled it happens immediately upon the PSR exit trigger.
147 * No idea as of now why there is a difference. HSW/BDW (which don't
148 * even have DMC) always generate it after link training. Go figure.
149 *
150 * Unfortunately CHICKEN_TRANS itself seems to be double buffered
151 * and thus won't latch until the first vblank. So with DC states
152 * enabled the register effectively uses the reset value during DC5
153 * exit+PSR exit sequence, and thus the bit does nothing until
154 * latched by the vblank that it was trying to prevent from being
155 * generated in the first place. So we should probably call this
156 * one a chicken/egg bit instead on skl+.
157 *
158 * In standby mode (as opposed to link-off) this makes no difference
159 * as the timing generator keeps running the whole time generating
160 * normal periodic vblanks.
161 *
162 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163 * and doing so makes the behaviour match the skl+ reset value.
164 *
165 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
167 *
168 * On BDW, without this bit, no vblanks whatsoever are
169 * generated after PSR exit. On HSW this has no apparent effect.
170 * WaPsrDPRSUnmaskVBlankInSRD says to set this.
171 *
172 * The rest of the bits are more self-explanatory and/or
173 * irrelevant for normal operation.
174 */
175
176bool intel_encoder_can_psr(struct intel_encoder *encoder)
177{
178 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
179 return CAN_PSR(enc_to_intel_dp(encoder)) ||
180 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
181 else
182 return false;
183}
184
185static bool psr_global_enabled(struct intel_dp *intel_dp)
186{
187 struct intel_connector *connector = intel_dp->attached_connector;
188 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
189
190 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
191 case I915_PSR_DEBUG_DEFAULT:
192 if (i915->display.params.enable_psr == -1)
193 return connector->panel.vbt.psr.enable;
194 return i915->display.params.enable_psr;
195 case I915_PSR_DEBUG_DISABLE:
196 return false;
197 default:
198 return true;
199 }
200}
201
202static bool psr2_global_enabled(struct intel_dp *intel_dp)
203{
204 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
205
206 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
207 case I915_PSR_DEBUG_DISABLE:
208 case I915_PSR_DEBUG_FORCE_PSR1:
209 return false;
210 default:
211 if (i915->display.params.enable_psr == 1)
212 return false;
213 return true;
214 }
215}
216
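/*
 * TGL+ has per-transcoder PSR interrupt registers with fixed bit positions,
 * while older platforms pack the per-transcoder bits into the shared
 * EDP_PSR_IIR/IMR, hence the version checks in the helpers below.
 */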
217static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
218{
219 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
220
221 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
222 EDP_PSR_ERROR(intel_dp->psr.transcoder);
223}
224
225static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
226{
227 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
228
229 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
230 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
231}
232
233static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
234{
235 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
236
237 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
238 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
239}
240
241static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
242{
243 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
244
245 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
246 EDP_PSR_MASK(intel_dp->psr.transcoder);
247}
248
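/*
 * HSW keeps the PSR (SRD) registers in a fixed MMIO block instead of the
 * per-transcoder space used on BDW+, hence these small register helpers.
 */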
249static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
250 enum transcoder cpu_transcoder)
251{
252 if (DISPLAY_VER(dev_priv) >= 8)
253 return EDP_PSR_CTL(cpu_transcoder);
254 else
255 return HSW_SRD_CTL;
256}
257
258static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
259 enum transcoder cpu_transcoder)
260{
261 if (DISPLAY_VER(dev_priv) >= 8)
262 return EDP_PSR_DEBUG(cpu_transcoder);
263 else
264 return HSW_SRD_DEBUG;
265}
266
267static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
268 enum transcoder cpu_transcoder)
269{
270 if (DISPLAY_VER(dev_priv) >= 8)
271 return EDP_PSR_PERF_CNT(cpu_transcoder);
272 else
273 return HSW_SRD_PERF_CNT;
274}
275
276static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
277 enum transcoder cpu_transcoder)
278{
279 if (DISPLAY_VER(dev_priv) >= 8)
280 return EDP_PSR_STATUS(cpu_transcoder);
281 else
282 return HSW_SRD_STATUS;
283}
284
285static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
286 enum transcoder cpu_transcoder)
287{
288 if (DISPLAY_VER(dev_priv) >= 12)
289 return TRANS_PSR_IMR(cpu_transcoder);
290 else
291 return EDP_PSR_IMR;
292}
293
294static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
295 enum transcoder cpu_transcoder)
296{
297 if (DISPLAY_VER(dev_priv) >= 12)
298 return TRANS_PSR_IIR(cpu_transcoder);
299 else
300 return EDP_PSR_IIR;
301}
302
303static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
304 enum transcoder cpu_transcoder)
305{
306 if (DISPLAY_VER(dev_priv) >= 8)
307 return EDP_PSR_AUX_CTL(cpu_transcoder);
308 else
309 return HSW_SRD_AUX_CTL;
310}
311
312static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
313 enum transcoder cpu_transcoder, int i)
314{
315 if (DISPLAY_VER(dev_priv) >= 8)
316 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
317 else
318 return HSW_SRD_AUX_DATA(i);
319}
320
321static void psr_irq_control(struct intel_dp *intel_dp)
322{
323 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
324 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
325 u32 mask;
326
327 mask = psr_irq_psr_error_bit_get(intel_dp);
328 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
329 mask |= psr_irq_post_exit_bit_get(intel_dp) |
330 psr_irq_pre_entry_bit_get(intel_dp);
331
332 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
333 psr_irq_mask_get(intel_dp), ~mask);
334}
335
336static void psr_event_print(struct drm_i915_private *i915,
337 u32 val, bool psr2_enabled)
338{
339 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
340 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
341 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
342 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
343 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
344 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
345 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
346 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
347 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
348 if (val & PSR_EVENT_GRAPHICS_RESET)
349 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
350 if (val & PSR_EVENT_PCH_INTERRUPT)
351 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
352 if (val & PSR_EVENT_MEMORY_UP)
353 drm_dbg_kms(&i915->drm, "\tMemory up\n");
354 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
355 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
356 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
357 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
358 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
359 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
360 if (val & PSR_EVENT_REGISTER_UPDATE)
361 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
362 if (val & PSR_EVENT_HDCP_ENABLE)
363 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
364 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
365 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
366 if (val & PSR_EVENT_VBI_ENABLE)
367 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
368 if (val & PSR_EVENT_LPSP_MODE_EXIT)
369 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
370 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
371 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
372}
373
374void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
375{
376 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
377 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
378 ktime_t time_ns = ktime_get();
379
380 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
381 intel_dp->psr.last_entry_attempt = time_ns;
382 drm_dbg_kms(&dev_priv->drm,
383 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
384 transcoder_name(cpu_transcoder));
385 }
386
387 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
388 intel_dp->psr.last_exit = time_ns;
389 drm_dbg_kms(&dev_priv->drm,
390 "[transcoder %s] PSR exit completed\n",
391 transcoder_name(cpu_transcoder));
392
393 if (DISPLAY_VER(dev_priv) >= 9) {
394 u32 val;
395
396 val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
397
398 psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
399 }
400 }
401
402 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
403 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
404 transcoder_name(cpu_transcoder));
405
406 intel_dp->psr.irq_aux_error = true;
407
408 /*
409 * If this interrupt is not masked it will keep
410 * firing so fast that it prevents the scheduled
411 * work from running.
412 * Also, after a PSR error we don't want to arm PSR
413 * again, so we don't care about unmasking the interrupt
414 * or clearing irq_aux_error.
415 */
416 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
417 0, psr_irq_psr_error_bit_get(intel_dp));
418
419 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
420 }
421}
422
423static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
424{
425 u8 alpm_caps = 0;
426
427 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
428 &alpm_caps) != 1)
429 return false;
430 return alpm_caps & DP_ALPM_CAP;
431}
432
433static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
434{
435 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
436 u8 val = 8; /* assume the worst if we can't read the value */
437
438 if (drm_dp_dpcd_readb(&intel_dp->aux,
439 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
440 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
441 else
442 drm_dbg_kms(&i915->drm,
443 "Unable to get sink synchronization latency, assuming 8 frames\n");
444 return val;
445}
446
447static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
448{
449 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
450 ssize_t r;
451 u16 w;
452 u8 y;
453
454 /* If the sink doesn't have specific granularity requirements, set legacy ones */
455 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
456 /* As PSR2 HW sends full lines, we do not care about x granularity */
457 w = 4;
458 y = 4;
459 goto exit;
460 }
461
462 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
463 if (r != 2)
464 drm_dbg_kms(&i915->drm,
465 "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
466 /*
467 * Spec says that if the value read is 0 the default granularity should
468 * be used instead.
469 */
470 if (r != 2 || w == 0)
471 w = 4;
472
473 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
474 if (r != 1) {
475 drm_dbg_kms(&i915->drm,
476 "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
477 y = 4;
478 }
479 if (y == 0)
480 y = 1;
481
482exit:
483 intel_dp->psr.su_w_granularity = w;
484 intel_dp->psr.su_y_granularity = y;
485}
486
487static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
488{
489 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
490 u8 pr_dpcd = 0;
491
492 intel_dp->psr.sink_panel_replay_support = false;
493 drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
494
495 if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
496 drm_dbg_kms(&i915->drm,
497 "Panel replay is not supported by panel\n");
498 return;
499 }
500
501 drm_dbg_kms(&i915->drm,
502 "Panel replay is supported by panel\n");
503 intel_dp->psr.sink_panel_replay_support = true;
504}
505
506static void _psr_init_dpcd(struct intel_dp *intel_dp)
507{
508 struct drm_i915_private *i915 =
509 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
510
511 drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
512 intel_dp->psr_dpcd[0]);
513
514 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
515 drm_dbg_kms(&i915->drm,
516 "PSR support not currently available for this panel\n");
517 return;
518 }
519
520 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
521 drm_dbg_kms(&i915->drm,
522 "Panel lacks power state control, PSR cannot be enabled\n");
523 return;
524 }
525
526 intel_dp->psr.sink_support = true;
527 intel_dp->psr.sink_sync_latency =
528 intel_dp_get_sink_sync_latency(intel_dp);
529
530 if (DISPLAY_VER(i915) >= 9 &&
531 intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
532 bool y_req = intel_dp->psr_dpcd[1] &
533 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
534 bool alpm = intel_dp_get_alpm_status(intel_dp);
535
536 /*
537 * All panels that support PSR version 03h (PSR2 +
538 * Y-coordinate) can handle Y-coordinates in the VSC, but we are
539 * only sure that it is going to be used when required by the
540 * panel. This way the panel is capable of doing selective updates
541 * without an AUX frame sync.
542 *
543 * To support panels with PSR version 02h, or PSR version 03h
544 * without the Y-coordinate requirement, we would need to enable
545 * GTC first.
546 */
547 intel_dp->psr.sink_psr2_support = y_req && alpm;
548 drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
549 intel_dp->psr.sink_psr2_support ? "" : "not ");
550 }
551}
552
553void intel_psr_init_dpcd(struct intel_dp *intel_dp)
554{
555 _panel_replay_init_dpcd(intel_dp);
556
557 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
558 sizeof(intel_dp->psr_dpcd));
559
560 if (intel_dp->psr_dpcd[0])
561 _psr_init_dpcd(intel_dp);
562
563 if (intel_dp->psr.sink_psr2_support) {
564 intel_dp->psr.colorimetry_support =
565 intel_dp_get_colorimetry_status(intel_dp);
566 intel_dp_get_su_granularity(intel_dp);
567 }
568}
569
570static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
571{
572 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
573 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
574 u32 aux_clock_divider, aux_ctl;
575 /* write DP_SET_POWER=D0 */
576 static const u8 aux_msg[] = {
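		/* native AUX write header: command + 20-bit address, then length - 1, then the payload */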
577 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
578 [1] = (DP_SET_POWER >> 8) & 0xff,
579 [2] = DP_SET_POWER & 0xff,
580 [3] = 1 - 1,
581 [4] = DP_SET_POWER_D0,
582 };
583 int i;
584
585 BUILD_BUG_ON(sizeof(aux_msg) > 20);
586 for (i = 0; i < sizeof(aux_msg); i += 4)
587 intel_de_write(dev_priv,
588 psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
589 intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
590
591 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
592
593 /* Start with bits set for DDI_AUX_CTL register */
594 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
595 aux_clock_divider);
596
597 /* Select only valid bits for SRD_AUX_CTL */
598 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
599 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
600 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
601 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
602
603 intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
604 aux_ctl);
605}
606
607static void intel_psr_enable_sink(struct intel_dp *intel_dp)
608{
609 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
610 u8 dpcd_val = DP_PSR_ENABLE;
611
612 if (intel_dp->psr.panel_replay_enabled)
613 return;
614
615 if (intel_dp->psr.psr2_enabled) {
616 /* Enable ALPM at sink for psr2 */
617 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
618 DP_ALPM_ENABLE |
619 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
620
621 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
622 } else {
623 if (intel_dp->psr.link_standby)
624 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
625
626 if (DISPLAY_VER(dev_priv) >= 8)
627 dpcd_val |= DP_PSR_CRC_VERIFICATION;
628 }
629
630 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
631 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
632
633 if (intel_dp->psr.entry_setup_frames > 0)
634 dpcd_val |= DP_PSR_FRAME_CAPTURE;
635
636 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
637
638 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
639}
640
641static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
642{
643 struct intel_connector *connector = intel_dp->attached_connector;
644 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
645 u32 val = 0;
646
647 if (DISPLAY_VER(dev_priv) >= 11)
648 val |= EDP_PSR_TP4_TIME_0us;
649
650 if (dev_priv->display.params.psr_safest_params) {
651 val |= EDP_PSR_TP1_TIME_2500us;
652 val |= EDP_PSR_TP2_TP3_TIME_2500us;
653 goto check_tp3_sel;
654 }
655
656 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
657 val |= EDP_PSR_TP1_TIME_0us;
658 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
659 val |= EDP_PSR_TP1_TIME_100us;
660 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
661 val |= EDP_PSR_TP1_TIME_500us;
662 else
663 val |= EDP_PSR_TP1_TIME_2500us;
664
665 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
666 val |= EDP_PSR_TP2_TP3_TIME_0us;
667 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
668 val |= EDP_PSR_TP2_TP3_TIME_100us;
669 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
670 val |= EDP_PSR_TP2_TP3_TIME_500us;
671 else
672 val |= EDP_PSR_TP2_TP3_TIME_2500us;
673
674 /*
675 * WA 0479: hsw,bdw
676 * "Do not skip both TP1 and TP2/TP3"
677 */
678 if (DISPLAY_VER(dev_priv) < 9 &&
679 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
680 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
681 val |= EDP_PSR_TP2_TP3_TIME_100us;
682
683check_tp3_sel:
684 if (intel_dp_source_supports_tps3(dev_priv) &&
685 drm_dp_tps3_supported(intel_dp->dpcd))
686 val |= EDP_PSR_TP_TP1_TP3;
687 else
688 val |= EDP_PSR_TP_TP1_TP2;
689
690 return val;
691}
692
693static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
694{
695 struct intel_connector *connector = intel_dp->attached_connector;
696 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
697 int idle_frames;
698
699 /* Let's use 6 as the minimum to cover all known cases including the
700 * off-by-one issue that HW has in some cases.
701 */
702 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
703 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
704
705 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
706 idle_frames = 0xf;
707
708 return idle_frames;
709}
710
711static void hsw_activate_psr1(struct intel_dp *intel_dp)
712{
713 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
714 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
715 u32 max_sleep_time = 0x1f;
716 u32 val = EDP_PSR_ENABLE;
717
718 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
719
720 if (DISPLAY_VER(dev_priv) < 20)
721 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
722
723 if (IS_HASWELL(dev_priv))
724 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
725
726 if (intel_dp->psr.link_standby)
727 val |= EDP_PSR_LINK_STANDBY;
728
729 val |= intel_psr1_get_tp_time(intel_dp);
730
731 if (DISPLAY_VER(dev_priv) >= 8)
732 val |= EDP_PSR_CRC_ENABLE;
733
734 if (DISPLAY_VER(dev_priv) >= 20)
735 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
736
737 intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
738 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
739}
740
741static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
742{
743 struct intel_connector *connector = intel_dp->attached_connector;
744 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
745 u32 val = 0;
746
747 if (dev_priv->display.params.psr_safest_params)
748 return EDP_PSR2_TP2_TIME_2500us;
749
750 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
751 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
752 val |= EDP_PSR2_TP2_TIME_50us;
753 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
754 val |= EDP_PSR2_TP2_TIME_100us;
755 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
756 val |= EDP_PSR2_TP2_TIME_500us;
757 else
758 val |= EDP_PSR2_TP2_TIME_2500us;
759
760 return val;
761}
762
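/*
 * Minimum PSR2 block count in lines: 8 is enough when both the IO and fast
 * wake line counts stay below 9, otherwise 12 lines are needed.
 */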
763static int psr2_block_count_lines(struct intel_dp *intel_dp)
764{
765 return intel_dp->psr.io_wake_lines < 9 &&
766 intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
767}
768
769static int psr2_block_count(struct intel_dp *intel_dp)
770{
771 return psr2_block_count_lines(intel_dp) / 4;
772}
773
774static u8 frames_before_su_entry(struct intel_dp *intel_dp)
775{
776 u8 frames_before_su_entry;
777
778 frames_before_su_entry = max_t(u8,
779 intel_dp->psr.sink_sync_latency + 1,
780 2);
781
782 /* Entry setup frames must be at least 1 less than frames before SU entry */
783 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
784 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
785
786 return frames_before_su_entry;
787}
788
789static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
790{
791 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
792
793 intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
794 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
795
796 intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
797 TRANS_DP2_PANEL_REPLAY_ENABLE);
798}
799
800static void hsw_activate_psr2(struct intel_dp *intel_dp)
801{
802 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
803 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
804 u32 val = EDP_PSR2_ENABLE;
805 u32 psr_val = 0;
806
807 val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
808
809 if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
810 val |= EDP_SU_TRACK_ENABLE;
811
812 if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
813 val |= EDP_Y_COORDINATE_ENABLE;
814
815 val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
816
817 val |= intel_psr2_get_tp_time(intel_dp);
818
819 if (DISPLAY_VER(dev_priv) >= 12) {
820 if (psr2_block_count(intel_dp) > 2)
821 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
822 else
823 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
824 }
825
826 /* Wa_22012278275:adl-p */
827 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
828 static const u8 map[] = {
829 2, /* 5 lines */
830 1, /* 6 lines */
831 0, /* 7 lines */
832 3, /* 8 lines */
833 6, /* 9 lines */
834 5, /* 10 lines */
835 4, /* 11 lines */
836 7, /* 12 lines */
837 };
838 /*
839 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
840 * comments below for more information
841 */
842 int tmp;
843
844 tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
845 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
846
847 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
848 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
849 } else if (DISPLAY_VER(dev_priv) >= 12) {
850 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
851 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
852 } else if (DISPLAY_VER(dev_priv) >= 9) {
853 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
854 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
855 }
856
857 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
858 val |= EDP_PSR2_SU_SDP_SCANLINE;
859
860 if (DISPLAY_VER(dev_priv) >= 20)
861 psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
862
863 if (intel_dp->psr.psr2_sel_fetch_enabled) {
864 u32 tmp;
865
866 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
867 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
868 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
869 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
870 }
871
872 /*
873 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
874 * recommending to keep this bit unset while PSR2 is enabled.
875 */
876 intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
877
878 intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
879}
880
881static bool
882transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
883{
884 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
885 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
886 else if (DISPLAY_VER(dev_priv) >= 12)
887 return cpu_transcoder == TRANSCODER_A;
888 else if (DISPLAY_VER(dev_priv) >= 9)
889 return cpu_transcoder == TRANSCODER_EDP;
890 else
891 return false;
892}
893
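/* Frame time in microseconds: 1,000,000 divided by the mode's refresh rate. */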
894static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
895{
896 if (!crtc_state->hw.active)
897 return 0;
898
899 return DIV_ROUND_UP(1000 * 1000,
900 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
901}
902
903static void psr2_program_idle_frames(struct intel_dp *intel_dp,
904 u32 idle_frames)
905{
906 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
907 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
908
909 intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
910 EDP_PSR2_IDLE_FRAMES_MASK,
911 EDP_PSR2_IDLE_FRAMES(idle_frames));
912}
913
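/* Allow DC3CO: program zero PSR2 idle frames and make DC3CO the target DC state. */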
914static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
915{
916 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
917
918 psr2_program_idle_frames(intel_dp, 0);
919 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
920}
921
922static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
923{
924 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
925
926 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
927 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
928}
929
930static void tgl_dc3co_disable_work(struct work_struct *work)
931{
932 struct intel_dp *intel_dp =
933 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
934
935 mutex_lock(&intel_dp->psr.lock);
936 /* If the delayed work is still pending, the display is not idle yet */
937 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
938 goto unlock;
939
940 tgl_psr2_disable_dc3co(intel_dp);
941unlock:
942 mutex_unlock(&intel_dp->psr.lock);
943}
944
945static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
946{
947 if (!intel_dp->psr.dc3co_exitline)
948 return;
949
950 cancel_delayed_work(&intel_dp->psr.dc3co_work);
951 /* Before PSR2 exit, disallow DC3CO */
952 tgl_psr2_disable_dc3co(intel_dp);
953}
954
955static bool
956dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
957 struct intel_crtc_state *crtc_state)
958{
959 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
960 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
961 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
962 enum port port = dig_port->base.port;
963
964 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
965 return pipe <= PIPE_B && port <= PORT_B;
966 else
967 return pipe == PIPE_A && port == PORT_A;
968}
969
970static void
971tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
972 struct intel_crtc_state *crtc_state)
973{
974 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
975 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
976 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
977 u32 exit_scanlines;
978
979 /*
980 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
981 * disable DC3CO until the changed dc3co activating/deactivating sequence
982 * is applied. B.Specs:49196
983 */
984 return;
985
986 /*
987 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
988 * TODO: when the issue is addressed, this restriction should be removed.
989 */
990 if (crtc_state->enable_psr2_sel_fetch)
991 return;
992
993 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
994 return;
995
996 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
997 return;
998
999 /* Wa_16011303918:adl-p */
1000 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1001 return;
1002
1003 /*
1004 * DC3CO Exit time 200us B.Spec 49196
1005 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1006 */
1007 exit_scanlines =
1008 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1009
1010 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1011 return;
1012
1013 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1014}
1015
1016static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1017 struct intel_crtc_state *crtc_state)
1018{
1019 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1020
1021 if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1022 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1023 drm_dbg_kms(&dev_priv->drm,
1024 "PSR2 sel fetch not enabled, disabled by parameter\n");
1025 return false;
1026 }
1027
1028 if (crtc_state->uapi.async_flip) {
1029 drm_dbg_kms(&dev_priv->drm,
1030 "PSR2 sel fetch not enabled, async flip enabled\n");
1031 return false;
1032 }
1033
1034 return crtc_state->enable_psr2_sel_fetch = true;
1035}
1036
1037static bool psr2_granularity_check(struct intel_dp *intel_dp,
1038 struct intel_crtc_state *crtc_state)
1039{
1040 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1041 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1042 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1043 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1044 u16 y_granularity = 0;
1045
1046 /* PSR2 HW only sends full lines, so we only need to validate the width */
1047 if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1048 return false;
1049
1050 if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1051 return false;
1052
1053 /* HW tracking is only aligned to 4 lines */
1054 if (!crtc_state->enable_psr2_sel_fetch)
1055 return intel_dp->psr.su_y_granularity == 4;
1056
1057 /*
1058 * adl_p and mtl platforms have 1 line granularity.
1059 * For other platforms with SW tracking we can adjust the y coordinates
1060 * to match the sink requirement if it is a multiple of 4.
1061 */
1062 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1063 y_granularity = intel_dp->psr.su_y_granularity;
1064 else if (intel_dp->psr.su_y_granularity <= 2)
1065 y_granularity = 4;
1066 else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1067 y_granularity = intel_dp->psr.su_y_granularity;
1068
1069 if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1070 return false;
1071
1072 if (crtc_state->dsc.compression_enable &&
1073 vdsc_cfg->slice_height % y_granularity)
1074 return false;
1075
1076 crtc_state->su_y_granularity = y_granularity;
1077 return true;
1078}
1079
1080static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1081 struct intel_crtc_state *crtc_state)
1082{
1083 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1084 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1085 u32 hblank_total, hblank_ns, req_ns;
1086
1087 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1088 hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1089
1090 /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1091 req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1092
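	/* enough hblank time left to transmit the SDP, no early scanline indication needed */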
1093 if ((hblank_ns - req_ns) > 100)
1094 return true;
1095
1096 /* Not supported <13 / Wa_22012279113:adl-p */
1097 if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1098 return false;
1099
1100 crtc_state->req_psr2_sdp_prior_scanline = true;
1101 return true;
1102}
1103
1104static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1105 struct intel_crtc_state *crtc_state)
1106{
1107 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1108 int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1109 u8 max_wake_lines;
1110
1111 if (DISPLAY_VER(i915) >= 12) {
1112 io_wake_time = 42;
1113 /*
1114 * According to Bspec it's 42us, but based on testing
1115 * it is not enough -> use 45 us.
1116 */
1117 fast_wake_time = 45;
1118 max_wake_lines = 12;
1119 } else {
1120 io_wake_time = 50;
1121 fast_wake_time = 32;
1122 max_wake_lines = 8;
1123 }
1124
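	/* convert the required wake times from microseconds to scanlines of this mode */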
1125 io_wake_lines = intel_usecs_to_scanlines(
1126 &crtc_state->hw.adjusted_mode, io_wake_time);
1127 fast_wake_lines = intel_usecs_to_scanlines(
1128 &crtc_state->hw.adjusted_mode, fast_wake_time);
1129
1130 if (io_wake_lines > max_wake_lines ||
1131 fast_wake_lines > max_wake_lines)
1132 return false;
1133
1134 if (i915->display.params.psr_safest_params)
1135 io_wake_lines = fast_wake_lines = max_wake_lines;
1136
1137 /* According to Bspec the lower limit should be set to 7 lines. */
1138 intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1139 intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
1140
1141 return true;
1142}
1143
1144static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1145 const struct drm_display_mode *adjusted_mode)
1146{
1147 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1148 int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1149 int entry_setup_frames = 0;
1150
1151 if (psr_setup_time < 0) {
1152 drm_dbg_kms(&i915->drm,
1153 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1154 intel_dp->psr_dpcd[1]);
1155 return -ETIME;
1156 }
1157
1158 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1159 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1160 if (DISPLAY_VER(i915) >= 20) {
1161 /* setup entry frames can be up to 3 frames */
1162 entry_setup_frames = 1;
1163 drm_dbg_kms(&i915->drm,
1164 "PSR setup entry frames %d\n",
1165 entry_setup_frames);
1166 } else {
1167 drm_dbg_kms(&i915->drm,
1168 "PSR condition failed: PSR setup time (%d us) too long\n",
1169 psr_setup_time);
1170 return -ETIME;
1171 }
1172 }
1173
1174 return entry_setup_frames;
1175}
1176
1177static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1178 struct intel_crtc_state *crtc_state)
1179{
1180 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1181 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1182 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1183 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1184
1185 if (!intel_dp->psr.sink_psr2_support)
1186 return false;
1187
1188 /* JSL and EHL only supports eDP 1.3 */
1189 if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1190 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1191 return false;
1192 }
1193
1194 /* Wa_16011181250 */
1195 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1196 IS_DG2(dev_priv)) {
1197 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1198 return false;
1199 }
1200
1201 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1202 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1203 return false;
1204 }
1205
1206 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1207 drm_dbg_kms(&dev_priv->drm,
1208 "PSR2 not supported in transcoder %s\n",
1209 transcoder_name(crtc_state->cpu_transcoder));
1210 return false;
1211 }
1212
1213 if (!psr2_global_enabled(intel_dp)) {
1214 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1215 return false;
1216 }
1217
1218 /*
1219 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1220 * resolution requires DSC to be enabled, priority is given to DSC
1221 * over PSR2.
1222 */
1223 if (crtc_state->dsc.compression_enable &&
1224 (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1225 drm_dbg_kms(&dev_priv->drm,
1226 "PSR2 cannot be enabled since DSC is enabled\n");
1227 return false;
1228 }
1229
1230 if (crtc_state->crc_enabled) {
1231 drm_dbg_kms(&dev_priv->drm,
1232 "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1233 return false;
1234 }
1235
1236 if (DISPLAY_VER(dev_priv) >= 12) {
1237 psr_max_h = 5120;
1238 psr_max_v = 3200;
1239 max_bpp = 30;
1240 } else if (DISPLAY_VER(dev_priv) >= 10) {
1241 psr_max_h = 4096;
1242 psr_max_v = 2304;
1243 max_bpp = 24;
1244 } else if (DISPLAY_VER(dev_priv) == 9) {
1245 psr_max_h = 3640;
1246 psr_max_v = 2304;
1247 max_bpp = 24;
1248 }
1249
1250 if (crtc_state->pipe_bpp > max_bpp) {
1251 drm_dbg_kms(&dev_priv->drm,
1252 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1253 crtc_state->pipe_bpp, max_bpp);
1254 return false;
1255 }
1256
1257 /* Wa_16011303918:adl-p */
1258 if (crtc_state->vrr.enable &&
1259 IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1260 drm_dbg_kms(&dev_priv->drm,
1261 "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1262 return false;
1263 }
1264
1265 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1266 drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
1268 return false;
1269 }
1270
1271 if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1272 drm_dbg_kms(&dev_priv->drm,
1273 "PSR2 not enabled, Unable to use long enough wake times\n");
1274 return false;
1275 }
1276
1277 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1278 if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1279 crtc_state->hw.adjusted_mode.crtc_vblank_start <
1280 psr2_block_count_lines(intel_dp)) {
1281 drm_dbg_kms(&dev_priv->drm,
1282 "PSR2 not enabled, too short vblank time\n");
1283 return false;
1284 }
1285
1286 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1287 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1288 !HAS_PSR_HW_TRACKING(dev_priv)) {
1289 drm_dbg_kms(&dev_priv->drm,
1290 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1291 return false;
1292 }
1293 }
1294
1295 if (!psr2_granularity_check(intel_dp, crtc_state)) {
1296 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1297 goto unsupported;
1298 }
1299
1300 if (!crtc_state->enable_psr2_sel_fetch &&
1301 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1302 drm_dbg_kms(&dev_priv->drm,
1303 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1304 crtc_hdisplay, crtc_vdisplay,
1305 psr_max_h, psr_max_v);
1306 goto unsupported;
1307 }
1308
1309 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1310 return true;
1311
1312unsupported:
1313 crtc_state->enable_psr2_sel_fetch = false;
1314 return false;
1315}
1316
1317static bool _psr_compute_config(struct intel_dp *intel_dp,
1318 struct intel_crtc_state *crtc_state)
1319{
1320 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1321 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1322 int entry_setup_frames;
1323
1324 /*
	 * Current PSR panels don't work reliably with VRR enabled,
	 * so if VRR is enabled, do not enable PSR.
1327 */
1328 if (crtc_state->vrr.enable)
1329 return false;
1330
1331 if (!CAN_PSR(intel_dp))
1332 return false;
1333
1334 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1335
1336 if (entry_setup_frames >= 0) {
1337 intel_dp->psr.entry_setup_frames = entry_setup_frames;
1338 } else {
1339 drm_dbg_kms(&dev_priv->drm,
1340 "PSR condition failed: PSR setup timing not met\n");
1341 return false;
1342 }
1343
1344 return true;
1345}
1346
1347void intel_psr_compute_config(struct intel_dp *intel_dp,
1348 struct intel_crtc_state *crtc_state,
1349 struct drm_connector_state *conn_state)
1350{
1351 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1352 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1353
1354 if (!psr_global_enabled(intel_dp)) {
1355 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1356 return;
1357 }
1358
1359 if (intel_dp->psr.sink_not_reliable) {
1360 drm_dbg_kms(&dev_priv->drm,
1361 "PSR sink implementation is not reliable\n");
1362 return;
1363 }
1364
1365 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1366 drm_dbg_kms(&dev_priv->drm,
1367 "PSR condition failed: Interlaced mode enabled\n");
1368 return;
1369 }
1370
1371 if (CAN_PANEL_REPLAY(intel_dp))
1372 crtc_state->has_panel_replay = true;
1373 else
1374 crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
1375
1376 if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
1377 return;
1378
1379 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1380
1381 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1382 intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1383 &crtc_state->psr_vsc);
1384}
1385
1386void intel_psr_get_config(struct intel_encoder *encoder,
1387 struct intel_crtc_state *pipe_config)
1388{
1389 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1390 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1391 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1392 struct intel_dp *intel_dp;
1393 u32 val;
1394
1395 if (!dig_port)
1396 return;
1397
1398 intel_dp = &dig_port->dp;
1399 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1400 return;
1401
1402 mutex_lock(&intel_dp->psr.lock);
1403 if (!intel_dp->psr.enabled)
1404 goto unlock;
1405
1406 if (intel_dp->psr.panel_replay_enabled) {
1407 pipe_config->has_panel_replay = true;
1408 } else {
1409 /*
		 * Not possible to rely on the EDP_PSR/PSR2_CTL registers here,
		 * as they get toggled by frontbuffer tracking and other code.
1412 */
1413 pipe_config->has_psr = true;
1414 }
1415
1416 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1417 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1418
1419 if (!intel_dp->psr.psr2_enabled)
1420 goto unlock;
1421
1422 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1423 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1424 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1425 pipe_config->enable_psr2_sel_fetch = true;
1426 }
1427
1428 if (DISPLAY_VER(dev_priv) >= 12) {
1429 val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1430 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1431 }
1432unlock:
1433 mutex_unlock(&intel_dp->psr.lock);
1434}
1435
1436static void intel_psr_activate(struct intel_dp *intel_dp)
1437{
1438 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1439 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1440
1441 drm_WARN_ON(&dev_priv->drm,
1442 transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1443 intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1444
1445 drm_WARN_ON(&dev_priv->drm,
1446 intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1447
1448 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1449
1450 lockdep_assert_held(&intel_dp->psr.lock);
1451
	/* psr1, psr2 and panel-replay are mutually exclusive. */
1453 if (intel_dp->psr.panel_replay_enabled)
1454 dg2_activate_panel_replay(intel_dp);
1455 else if (intel_dp->psr.psr2_enabled)
1456 hsw_activate_psr2(intel_dp);
1457 else
1458 hsw_activate_psr1(intel_dp);
1459
1460 intel_dp->psr.active = true;
1461}
1462
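/* Per-pipe LATENCY_REPORTING_REMOVED bit used by Wa_16013835468 / Wa_14015648006 */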
1463static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1464{
1465 switch (intel_dp->psr.pipe) {
1466 case PIPE_A:
1467 return LATENCY_REPORTING_REMOVED_PIPE_A;
1468 case PIPE_B:
1469 return LATENCY_REPORTING_REMOVED_PIPE_B;
1470 case PIPE_C:
1471 return LATENCY_REPORTING_REMOVED_PIPE_C;
1472 case PIPE_D:
1473 return LATENCY_REPORTING_REMOVED_PIPE_D;
1474 default:
1475 MISSING_CASE(intel_dp->psr.pipe);
1476 return 0;
1477 }
1478}
1479
1480/*
1481 * Wa_16013835468
1482 * Wa_14015648006
1483 */
1484static void wm_optimization_wa(struct intel_dp *intel_dp,
1485 const struct intel_crtc_state *crtc_state)
1486{
1487 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1488 bool set_wa_bit = false;
1489
1490 /* Wa_14015648006 */
1491 if (IS_DISPLAY_VER(dev_priv, 11, 14))
1492 set_wa_bit |= crtc_state->wm_level_disabled;
1493
1494 /* Wa_16013835468 */
1495 if (DISPLAY_VER(dev_priv) == 12)
1496 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1497 crtc_state->hw.adjusted_mode.crtc_vdisplay;
1498
1499 if (set_wa_bit)
1500 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1501 0, wa_16013835468_bit_get(intel_dp));
1502 else
1503 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1504 wa_16013835468_bit_get(intel_dp), 0);
1505}
1506
1507static void intel_psr_enable_source(struct intel_dp *intel_dp,
1508 const struct intel_crtc_state *crtc_state)
1509{
1510 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1511 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1512 u32 mask;
1513
1514 /*
	 * Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for PSR AUX transactions.
1517 */
1518 if (DISPLAY_VER(dev_priv) < 9)
1519 hsw_psr_setup_aux(intel_dp);
1520
1521 /*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD.
	 * Also mask LPSP to avoid a dependency on other drivers that might
	 * block runtime_pm, besides preventing other HW tracking issues,
	 * now that we can rely on frontbuffer tracking.
1526 */
1527 mask = EDP_PSR_DEBUG_MASK_MEMUP |
1528 EDP_PSR_DEBUG_MASK_HPD;
1529
1530 /*
1531 * For some unknown reason on HSW non-ULT (or at least on
1532 * Dell Latitude E6540) external displays start to flicker
1533 * when PSR is enabled on the eDP. SR/PC6 residency is much
1534 * higher than should be possible with an external display.
1535 * As a workaround leave LPSP unmasked to prevent PSR entry
1536 * when external displays are active.
1537 */
1538 if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1539 mask |= EDP_PSR_DEBUG_MASK_LPSP;
1540
1541 if (DISPLAY_VER(dev_priv) < 20)
1542 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1543
1544 /*
1545 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1546 * registers in order to keep the CURSURFLIVE tricks working :(
1547 */
1548 if (IS_DISPLAY_VER(dev_priv, 9, 10))
1549 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1550
1551 /* allow PSR with sprite enabled */
1552 if (IS_HASWELL(dev_priv))
1553 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1554
1555 intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1556
1557 psr_irq_control(intel_dp);
1558
1559 /*
1560 * TODO: if future platforms supports DC3CO in more than one
1561 * transcoder, EXITLINE will need to be unset when disabling PSR
1562 */
1563 if (intel_dp->psr.dc3co_exitline)
1564 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1565 intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1566
1567 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1568 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1569 intel_dp->psr.psr2_sel_fetch_enabled ?
1570 IGNORE_PSR2_HW_TRACKING : 0);
1571
1572 /*
1573 * Wa_16013835468
1574 * Wa_14015648006
1575 */
1576 wm_optimization_wa(intel_dp, crtc_state);
1577
1578 if (intel_dp->psr.psr2_enabled) {
1579 if (DISPLAY_VER(dev_priv) == 9)
1580 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1581 PSR2_VSC_ENABLE_PROG_HEADER |
1582 PSR2_ADD_VERTICAL_LINE_COUNT);
1583
1584 /*
1585 * Wa_16014451276:adlp,mtl[a0,b0]
		 * All supported adlp panels have 1-based X granularity; this may
		 * cause issues if unsupported panels are used.
1588 */
1589 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1590 IS_ALDERLAKE_P(dev_priv))
1591 intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1592 0, ADLP_1_BASED_X_GRANULARITY);
1593
1594 /* Wa_16012604467:adlp,mtl[a0,b0] */
1595 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1596 intel_de_rmw(dev_priv,
1597 MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1598 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1599 else if (IS_ALDERLAKE_P(dev_priv))
1600 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1601 CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1602 }
1603}
1604
1605static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1606{
1607 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1608 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1609 u32 val;
1610
1611 /*
1612 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1613 * will still keep the error set even after the reset done in the
1614 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time that the PSR HW tries to activate, so let's keep PSR
	 * disabled to avoid any rendering problems.
1618 */
1619 val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1620 val &= psr_irq_psr_error_bit_get(intel_dp);
1621 if (val) {
1622 intel_dp->psr.sink_not_reliable = true;
1623 drm_dbg_kms(&dev_priv->drm,
1624 "PSR interruption error set, not enabling PSR\n");
1625 return false;
1626 }
1627
1628 return true;
1629}
1630
1631static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1632 const struct intel_crtc_state *crtc_state)
1633{
1634 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1635 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1636 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1637 struct intel_encoder *encoder = &dig_port->base;
1638 u32 val;
1639
1640 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1641
1642 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1643 intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1644 intel_dp->psr.busy_frontbuffer_bits = 0;
1645 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1646 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1647 /* DC5/DC6 requires at least 6 idle frames */
1648 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1649 intel_dp->psr.dc3co_exit_delay = val;
1650 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1651 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1652 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1653 intel_dp->psr.req_psr2_sdp_prior_scanline =
1654 crtc_state->req_psr2_sdp_prior_scanline;
1655
1656 if (!psr_interrupt_error_check(intel_dp))
1657 return;
1658
1659 if (intel_dp->psr.panel_replay_enabled)
1660 drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1661 else
1662 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1663 intel_dp->psr.psr2_enabled ? "2" : "1");
1664
1665 intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1666 intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1667 intel_psr_enable_sink(intel_dp);
1668 intel_psr_enable_source(intel_dp, crtc_state);
1669 intel_dp->psr.enabled = true;
1670 intel_dp->psr.paused = false;
1671
1672 intel_psr_activate(intel_dp);
1673}
1674
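/* Deactivate PSR1/PSR2/Panel Replay on the source side */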
1675static void intel_psr_exit(struct intel_dp *intel_dp)
1676{
1677 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1678 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1679 u32 val;
1680
1681 if (!intel_dp->psr.active) {
1682 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1683 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1684 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1685 }
1686
1687 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1688 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1689
1690 return;
1691 }
1692
1693 if (intel_dp->psr.panel_replay_enabled) {
1694 intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1695 TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1696 } else if (intel_dp->psr.psr2_enabled) {
1697 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1698
1699 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1700 EDP_PSR2_ENABLE, 0);
1701
1702 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1703 } else {
1704 val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1705 EDP_PSR_ENABLE, 0);
1706
1707 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1708 }
1709 intel_dp->psr.active = false;
1710}
1711
1712static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1713{
1714 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1715 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1716 i915_reg_t psr_status;
1717 u32 psr_status_mask;
1718
1719 if (intel_dp->psr.psr2_enabled) {
1720 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1721 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1722 } else {
1723 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1724 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1725 }
1726
1727 /* Wait till PSR is idle */
1728 if (intel_de_wait_for_clear(dev_priv, psr_status,
1729 psr_status_mask, 2000))
		drm_err(&dev_priv->drm, "Timed out waiting for PSR idle state\n");
1731}
1732
1733static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1734{
1735 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1736 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1737 enum phy phy = intel_port_to_phy(dev_priv,
1738 dp_to_dig_port(intel_dp)->base.port);
1739
1740 lockdep_assert_held(&intel_dp->psr.lock);
1741
1742 if (!intel_dp->psr.enabled)
1743 return;
1744
1745 if (intel_dp->psr.panel_replay_enabled)
1746 drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1747 else
1748 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1749 intel_dp->psr.psr2_enabled ? "2" : "1");
1750
1751 intel_psr_exit(intel_dp);
1752 intel_psr_wait_exit_locked(intel_dp);
1753
1754 /*
1755 * Wa_16013835468
1756 * Wa_14015648006
1757 */
1758 if (DISPLAY_VER(dev_priv) >= 11)
1759 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1760 wa_16013835468_bit_get(intel_dp), 0);
1761
1762 if (intel_dp->psr.psr2_enabled) {
1763 /* Wa_16012604467:adlp,mtl[a0,b0] */
1764 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1765 intel_de_rmw(dev_priv,
1766 MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1767 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1768 else if (IS_ALDERLAKE_P(dev_priv))
1769 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1770 CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1771 }
1772
1773 intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1774
1775 /* Disable PSR on Sink */
1776 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1777
1778 if (intel_dp->psr.psr2_enabled)
1779 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1780
1781 intel_dp->psr.enabled = false;
1782 intel_dp->psr.panel_replay_enabled = false;
1783 intel_dp->psr.psr2_enabled = false;
1784 intel_dp->psr.psr2_sel_fetch_enabled = false;
1785 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1786}
1787
1788/**
1789 * intel_psr_disable - Disable PSR
1790 * @intel_dp: Intel DP
1791 * @old_crtc_state: old CRTC state
1792 *
1793 * This function needs to be called before disabling pipe.
1794 */
1795void intel_psr_disable(struct intel_dp *intel_dp,
1796 const struct intel_crtc_state *old_crtc_state)
1797{
1798 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1799
1800 if (!old_crtc_state->has_psr)
1801 return;
1802
1803 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1804 return;
1805
1806 mutex_lock(&intel_dp->psr.lock);
1807
1808 intel_psr_disable_locked(intel_dp);
1809
1810 mutex_unlock(&intel_dp->psr.lock);
1811 cancel_work_sync(&intel_dp->psr.work);
1812 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1813}
1814
1815/**
1816 * intel_psr_pause - Pause PSR
1817 * @intel_dp: Intel DP
1818 *
 * This function needs to be called after enabling PSR.
1820 */
1821void intel_psr_pause(struct intel_dp *intel_dp)
1822{
1823 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1824 struct intel_psr *psr = &intel_dp->psr;
1825
1826 if (!CAN_PSR(intel_dp))
1827 return;
1828
1829 mutex_lock(&psr->lock);
1830
1831 if (!psr->enabled) {
1832 mutex_unlock(&psr->lock);
1833 return;
1834 }
1835
1836 /* If we ever hit this, we will need to add refcount to pause/resume */
1837 drm_WARN_ON(&dev_priv->drm, psr->paused);
1838
1839 intel_psr_exit(intel_dp);
1840 intel_psr_wait_exit_locked(intel_dp);
1841 psr->paused = true;
1842
1843 mutex_unlock(&psr->lock);
1844
1845 cancel_work_sync(&psr->work);
1846 cancel_delayed_work_sync(&psr->dc3co_work);
1847}
1848
1849/**
1850 * intel_psr_resume - Resume PSR
1851 * @intel_dp: Intel DP
1852 *
 * This function needs to be called after pausing PSR.
1854 */
1855void intel_psr_resume(struct intel_dp *intel_dp)
1856{
1857 struct intel_psr *psr = &intel_dp->psr;
1858
1859 if (!CAN_PSR(intel_dp))
1860 return;
1861
1862 mutex_lock(&psr->lock);
1863
1864 if (!psr->paused)
1865 goto unlock;
1866
1867 psr->paused = false;
1868 intel_psr_activate(intel_dp);
1869
1870unlock:
1871 mutex_unlock(&psr->lock);
1872}
1873
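/*
 * PSR2 manual tracking (selective fetch) register helpers: ADL-P and
 * display 14+ use the ADLP_* bit layout of PSR2_MAN_TRK_CTL (with no
 * separate enable bit), older platforms use the original layout.
 */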
1874static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1875{
1876 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1877 PSR2_MAN_TRK_CTL_ENABLE;
1878}
1879
1880static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1881{
1882 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1883 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1884 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1885}
1886
1887static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1888{
1889 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1890 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1891 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1892}
1893
1894static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1895{
1896 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1897 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1898 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1899}
1900
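/*
 * Force the HW frontbuffer tracking to exit PSR via the Display WA #0884
 * CURSURFLIVE write, requesting a full frame update first when selective
 * fetch is enabled.
 */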
1901static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1902{
1903 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1904 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1905
1906 if (intel_dp->psr.psr2_sel_fetch_enabled)
1907 intel_de_write(dev_priv,
1908 PSR2_MAN_TRK_CTL(cpu_transcoder),
1909 man_trk_ctl_enable_bit_get(dev_priv) |
1910 man_trk_ctl_partial_frame_bit_get(dev_priv) |
1911 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1912 man_trk_ctl_continuos_full_frame(dev_priv));
1913
1914 /*
1915 * Display WA #0884: skl+
1916 * This documented WA for bxt can be safely applied
1917 * broadly so we can force HW tracking to exit PSR
1918 * instead of disabling and re-enabling.
	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the currently active
	 * pipe.
	 *
	 * This workaround does not exist for platforms with display 10 or
	 * newer, but testing proved that it works up to display 13; for
	 * anything newer, additional testing will be needed.
1926 */
1927 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1928}
1929
1930void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1931{
1932 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1933 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1934 struct intel_encoder *encoder;
1935
1936 if (!crtc_state->enable_psr2_sel_fetch)
1937 return;
1938
1939 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1940 crtc_state->uapi.encoder_mask) {
1941 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1942
1943 lockdep_assert_held(&intel_dp->psr.lock);
1944 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1945 return;
1946 break;
1947 }
1948
1949 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1950 crtc_state->psr2_man_track_ctl);
1951}
1952
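/* Compute the PSR2_MAN_TRK_CTL value for the given selective update region */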
1953static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1954 struct drm_rect *clip, bool full_update)
1955{
1956 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1957 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1958 u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1959
1960 /* SF partial frame enable has to be set even on full update */
1961 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1962
1963 if (full_update) {
1964 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1965 val |= man_trk_ctl_continuos_full_frame(dev_priv);
1966 goto exit;
1967 }
1968
1969 if (clip->y1 == -1)
1970 goto exit;
1971
1972 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1973 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1974 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1975 } else {
1976 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1977
1978 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1979 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1980 }
1981exit:
1982 crtc_state->psr2_man_track_ctl = val;
1983}
1984
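/*
 * Grow the overlapping damage area to also cover @damage_area, after
 * clipping the latter to the pipe source area.
 */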
1985static void clip_area_update(struct drm_rect *overlap_damage_area,
1986 struct drm_rect *damage_area,
1987 struct drm_rect *pipe_src)
1988{
1989 if (!drm_rect_intersect(damage_area, pipe_src))
1990 return;
1991
1992 if (overlap_damage_area->y1 == -1) {
1993 overlap_damage_area->y1 = damage_area->y1;
1994 overlap_damage_area->y2 = damage_area->y2;
1995 return;
1996 }
1997
1998 if (damage_area->y1 < overlap_damage_area->y1)
1999 overlap_damage_area->y1 = damage_area->y1;
2000
2001 if (damage_area->y2 > overlap_damage_area->y2)
2002 overlap_damage_area->y2 = damage_area->y2;
2003}
2004
2005static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
2006 struct drm_rect *pipe_clip)
2007{
2008 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2009 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2010 u16 y_alignment;
2011
2012 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2013 if (crtc_state->dsc.compression_enable &&
2014 (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2015 y_alignment = vdsc_cfg->slice_height;
2016 else
2017 y_alignment = crtc_state->su_y_granularity;
2018
2019 pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
2020 if (pipe_clip->y2 % y_alignment)
2021 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
2022}
2023
2024/*
 * TODO: Not clear how to handle planes with negative position;
 * also planes are not updated if they have a negative X
 * position, so for now do a full update in these cases.
 *
 * Plane scaling and rotation are not supported by selective fetch and both
 * properties can change without a modeset, so they need to be checked at
 * every atomic commit.
2032 */
2033static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2034{
2035 if (plane_state->uapi.dst.y1 < 0 ||
2036 plane_state->uapi.dst.x1 < 0 ||
2037 plane_state->scaler_id >= 0 ||
2038 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2039 return false;
2040
2041 return true;
2042}
2043
2044/*
 * Check for pipe properties that are not supported by selective fetch.
2046 *
2047 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2048 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2049 * enabled and going to the full update path.
2050 */
2051static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2052{
2053 if (crtc_state->scaler_state.scaler_id >= 0)
2054 return false;
2055
2056 return true;
2057}
2058
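/*
 * Compute the pipe damaged area from all planes in the atomic state and set
 * each plane's selective fetch area accordingly; fall back to a full frame
 * update when the damaged area cannot be computed.
 */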
2059int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2060 struct intel_crtc *crtc)
2061{
2062 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2063 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2064 struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
2065 struct intel_plane_state *new_plane_state, *old_plane_state;
2066 struct intel_plane *plane;
2067 bool full_update = false;
2068 int i, ret;
2069
2070 if (!crtc_state->enable_psr2_sel_fetch)
2071 return 0;
2072
2073 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2074 full_update = true;
2075 goto skip_sel_fetch_set_loop;
2076 }
2077
2078 /*
2079 * Calculate minimal selective fetch area of each plane and calculate
2080 * the pipe damaged area.
2081 * In the next loop the plane selective fetch area will actually be set
2082 * using whole pipe damaged area.
2083 */
2084 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2085 new_plane_state, i) {
2086 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2087 .x2 = INT_MAX };
2088
2089 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2090 continue;
2091
2092 if (!new_plane_state->uapi.visible &&
2093 !old_plane_state->uapi.visible)
2094 continue;
2095
2096 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2097 full_update = true;
2098 break;
2099 }
2100
2101 /*
		 * If the visibility changed or the plane moved, mark the whole
		 * plane area as damaged, as it needs to be completely redrawn
		 * in both the old and the new position.
2105 */
2106 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2107 !drm_rect_equals(&new_plane_state->uapi.dst,
2108 &old_plane_state->uapi.dst)) {
2109 if (old_plane_state->uapi.visible) {
2110 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2111 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2112 clip_area_update(&pipe_clip, &damaged_area,
2113 &crtc_state->pipe_src);
2114 }
2115
2116 if (new_plane_state->uapi.visible) {
2117 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2118 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2119 clip_area_update(&pipe_clip, &damaged_area,
2120 &crtc_state->pipe_src);
2121 }
2122 continue;
2123 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2124 /* If alpha changed mark the whole plane area as damaged */
2125 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2126 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2127 clip_area_update(&pipe_clip, &damaged_area,
2128 &crtc_state->pipe_src);
2129 continue;
2130 }
2131
2132 src = drm_plane_state_src(&new_plane_state->uapi);
2133 drm_rect_fp_to_int(&src, &src);
2134
2135 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2136 &new_plane_state->uapi, &damaged_area))
2137 continue;
2138
2139 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2140 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2141 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2142 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2143
2144 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2145 }
2146
2147 /*
2148 * TODO: For now we are just using full update in case
2149 * selective fetch area calculation fails. To optimize this we
2150 * should identify cases where this happens and fix the area
2151 * calculation for those.
2152 */
2153 if (pipe_clip.y1 == -1) {
2154 drm_info_once(&dev_priv->drm,
2155 "Selective fetch area calculation failed in pipe %c\n",
2156 pipe_name(crtc->pipe));
2157 full_update = true;
2158 }
2159
2160 if (full_update)
2161 goto skip_sel_fetch_set_loop;
2162
2163 /* Wa_14014971492 */
2164 if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2165 IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2166 crtc_state->splitter.enable)
2167 pipe_clip.y1 = 0;
2168
2169 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2170 if (ret)
2171 return ret;
2172
2173 intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2174
2175 /*
	 * Now that we have the pipe damaged area, check if it intersects with
	 * every plane; if it does, set the plane selective fetch area.
2178 */
2179 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2180 new_plane_state, i) {
2181 struct drm_rect *sel_fetch_area, inter;
2182 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2183
2184 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2185 !new_plane_state->uapi.visible)
2186 continue;
2187
2188 inter = pipe_clip;
2189 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2190 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2191 sel_fetch_area->y1 = -1;
2192 sel_fetch_area->y2 = -1;
2193 /*
2194 * if plane sel fetch was previously enabled ->
2195 * disable it
2196 */
2197 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2198 crtc_state->update_planes |= BIT(plane->id);
2199
2200 continue;
2201 }
2202
2203 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2204 full_update = true;
2205 break;
2206 }
2207
2208 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2209 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2210 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2211 crtc_state->update_planes |= BIT(plane->id);
2212
2213 /*
2214 * Sel_fetch_area is calculated for UV plane. Use
2215 * same area for Y plane as well.
2216 */
2217 if (linked) {
2218 struct intel_plane_state *linked_new_plane_state;
2219 struct drm_rect *linked_sel_fetch_area;
2220
2221 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2222 if (IS_ERR(linked_new_plane_state))
2223 return PTR_ERR(linked_new_plane_state);
2224
2225 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2226 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2227 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2228 crtc_state->update_planes |= BIT(linked->id);
2229 }
2230 }
2231
2232skip_sel_fetch_set_loop:
2233 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2234 return 0;
2235}
2236
2237void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2238 struct intel_crtc *crtc)
2239{
2240 struct drm_i915_private *i915 = to_i915(state->base.dev);
2241 const struct intel_crtc_state *old_crtc_state =
2242 intel_atomic_get_old_crtc_state(state, crtc);
2243 const struct intel_crtc_state *new_crtc_state =
2244 intel_atomic_get_new_crtc_state(state, crtc);
2245 struct intel_encoder *encoder;
2246
2247 if (!HAS_PSR(i915))
2248 return;
2249
2250 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2251 old_crtc_state->uapi.encoder_mask) {
2252 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2253 struct intel_psr *psr = &intel_dp->psr;
2254 bool needs_to_disable = false;
2255
2256 mutex_lock(&psr->lock);
2257
2258 /*
2259 * Reasons to disable:
2260 * - PSR disabled in new state
2261 * - All planes will go inactive
2262 * - Changing between PSR versions
2263 * - Display WA #1136: skl, bxt
2264 */
2265 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2266 needs_to_disable |= !new_crtc_state->has_psr;
2267 needs_to_disable |= !new_crtc_state->active_planes;
2268 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2269 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2270 new_crtc_state->wm_level_disabled;
2271
2272 if (psr->enabled && needs_to_disable)
2273 intel_psr_disable_locked(intel_dp);
2274 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2275 /* Wa_14015648006 */
2276 wm_optimization_wa(intel_dp, new_crtc_state);
2277
2278 mutex_unlock(&psr->lock);
2279 }
2280}
2281
2282void intel_psr_post_plane_update(struct intel_atomic_state *state,
2283 struct intel_crtc *crtc)
2284{
2285 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2286 const struct intel_crtc_state *crtc_state =
2287 intel_atomic_get_new_crtc_state(state, crtc);
2288 struct intel_encoder *encoder;
2289
2290 if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2291 return;
2292
2293 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2294 crtc_state->uapi.encoder_mask) {
2295 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2296 struct intel_psr *psr = &intel_dp->psr;
2297 bool keep_disabled = false;
2298
2299 mutex_lock(&psr->lock);
2300
2301 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2302
2303 keep_disabled |= psr->sink_not_reliable;
2304 keep_disabled |= !crtc_state->active_planes;
2305
2306 /* Display WA #1136: skl, bxt */
2307 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2308 crtc_state->wm_level_disabled;
2309
2310 if (!psr->enabled && !keep_disabled)
2311 intel_psr_enable_locked(intel_dp, crtc_state);
2312 else if (psr->enabled && !crtc_state->wm_level_disabled)
2313 /* Wa_14015648006 */
2314 wm_optimization_wa(intel_dp, crtc_state);
2315
2316 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2317 if (crtc_state->crc_enabled && psr->enabled)
2318 psr_force_hw_tracking_exit(intel_dp);
2319
2320 /*
2321 * Clear possible busy bits in case we have
2322 * invalidate -> flip -> flush sequence.
2323 */
2324 intel_dp->psr.busy_frontbuffer_bits = 0;
2325
2326 mutex_unlock(&psr->lock);
2327 }
2328}
2329
2330static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2331{
2332 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2333 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2334
2335 /*
2336 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2338 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2339 */
2340 return intel_de_wait_for_clear(dev_priv,
2341 EDP_PSR2_STATUS(cpu_transcoder),
2342 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2343}
2344
2345static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2346{
2347 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2348 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2349
2350 /*
2351 * From bspec: Panel Self Refresh (BDW+)
2352 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2353 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2354 * defensive enough to cover everything.
2355 */
2356 return intel_de_wait_for_clear(dev_priv,
2357 psr_status_reg(dev_priv, cpu_transcoder),
2358 EDP_PSR_STATUS_STATE_MASK, 50);
2359}
2360
2361/**
 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2363 * @new_crtc_state: new CRTC state
2364 *
2365 * This function is expected to be called from pipe_update_start() where it is
2366 * not expected to race with PSR enable or disable.
2367 */
2368void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2369{
2370 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2371 struct intel_encoder *encoder;
2372
2373 if (!new_crtc_state->has_psr)
2374 return;
2375
2376 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2377 new_crtc_state->uapi.encoder_mask) {
2378 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2379 int ret;
2380
2381 lockdep_assert_held(&intel_dp->psr.lock);
2382
2383 if (!intel_dp->psr.enabled)
2384 continue;
2385
2386 if (intel_dp->psr.psr2_enabled)
2387 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2388 else
2389 ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2390
2391 if (ret)
2392 drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2393 }
2394}
2395
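/*
 * Wait for PSR to become idle before re-activation. The psr.lock is dropped
 * and re-acquired around the register wait, so this returns true only if the
 * wait succeeded and PSR is still enabled afterwards.
 */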
2396static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2397{
2398 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2399 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2400 i915_reg_t reg;
2401 u32 mask;
2402 int err;
2403
2404 if (!intel_dp->psr.enabled)
2405 return false;
2406
2407 if (intel_dp->psr.psr2_enabled) {
2408 reg = EDP_PSR2_STATUS(cpu_transcoder);
2409 mask = EDP_PSR2_STATUS_STATE_MASK;
2410 } else {
2411 reg = psr_status_reg(dev_priv, cpu_transcoder);
2412 mask = EDP_PSR_STATUS_STATE_MASK;
2413 }
2414
2415 mutex_unlock(&intel_dp->psr.lock);
2416
2417 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2418 if (err)
2419 drm_err(&dev_priv->drm,
2420 "Timed out waiting for PSR Idle for re-enable\n");
2421
2422 /* After the unlocked wait, verify that PSR is still wanted! */
2423 mutex_lock(&intel_dp->psr.lock);
2424 return err == 0 && intel_dp->psr.enabled;
2425}
2426
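/*
 * Force a state recomputation on all eDP connectors by marking their CRTC
 * mode as changed, so that a new PSR debug mode takes effect.
 */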
2427static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2428{
2429 struct drm_connector_list_iter conn_iter;
2430 struct drm_modeset_acquire_ctx ctx;
2431 struct drm_atomic_state *state;
2432 struct drm_connector *conn;
2433 int err = 0;
2434
2435 state = drm_atomic_state_alloc(&dev_priv->drm);
2436 if (!state)
2437 return -ENOMEM;
2438
2439 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2440
2441 state->acquire_ctx = &ctx;
2442 to_intel_atomic_state(state)->internal = true;
2443
2444retry:
2445 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2446 drm_for_each_connector_iter(conn, &conn_iter) {
2447 struct drm_connector_state *conn_state;
2448 struct drm_crtc_state *crtc_state;
2449
2450 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2451 continue;
2452
2453 conn_state = drm_atomic_get_connector_state(state, conn);
2454 if (IS_ERR(conn_state)) {
2455 err = PTR_ERR(conn_state);
2456 break;
2457 }
2458
2459 if (!conn_state->crtc)
2460 continue;
2461
2462 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2463 if (IS_ERR(crtc_state)) {
2464 err = PTR_ERR(crtc_state);
2465 break;
2466 }
2467
2468 /* Mark mode as changed to trigger a pipe->update() */
2469 crtc_state->mode_changed = true;
2470 }
2471 drm_connector_list_iter_end(&conn_iter);
2472
2473 if (err == 0)
2474 err = drm_atomic_commit(state);
2475
2476 if (err == -EDEADLK) {
2477 drm_atomic_state_clear(state);
2478 err = drm_modeset_backoff(&ctx);
2479 if (!err)
2480 goto retry;
2481 }
2482
2483 drm_modeset_drop_locks(&ctx);
2484 drm_modeset_acquire_fini(&ctx);
2485 drm_atomic_state_put(state);
2486
2487 return err;
2488}
2489
2490int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2491{
2492 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2493 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2494 u32 old_mode;
2495 int ret;
2496
2497 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2498 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2499 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2500 return -EINVAL;
2501 }
2502
2503 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2504 if (ret)
2505 return ret;
2506
2507 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2508 intel_dp->psr.debug = val;
2509
2510 /*
2511 * Do it right away if it's already enabled, otherwise it will be done
2512 * when enabling the source.
2513 */
2514 if (intel_dp->psr.enabled)
2515 psr_irq_control(intel_dp);
2516
2517 mutex_unlock(&intel_dp->psr.lock);
2518
2519 if (old_mode != mode)
2520 ret = intel_psr_fastset_force(dev_priv);
2521
2522 return ret;
2523}
2524
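/*
 * Handle an AUX error reported via the PSR interrupt: disable PSR, mark the
 * sink as not reliable and make sure the sink is powered up again.
 */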
2525static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2526{
2527 struct intel_psr *psr = &intel_dp->psr;
2528
2529 intel_psr_disable_locked(intel_dp);
2530 psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
2532 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2533}
2534
2535static void intel_psr_work(struct work_struct *work)
2536{
2537 struct intel_dp *intel_dp =
2538 container_of(work, typeof(*intel_dp), psr.work);
2539
2540 mutex_lock(&intel_dp->psr.lock);
2541
2542 if (!intel_dp->psr.enabled)
2543 goto unlock;
2544
2545 if (READ_ONCE(intel_dp->psr.irq_aux_error))
2546 intel_psr_handle_irq(intel_dp);
2547
2548 /*
	 * We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
2553 */
2554 if (!__psr_wait_for_idle_locked(intel_dp))
2555 goto unlock;
2556
2557 /*
2558 * The delayed work can race with an invalidate hence we need to
2559 * recheck. Since psr_flush first clears this and then reschedules we
2560 * won't ever miss a flush when bailing out here.
2561 */
2562 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2563 goto unlock;
2564
2565 intel_psr_activate(intel_dp);
2566unlock:
2567 mutex_unlock(&intel_dp->psr.lock);
2568}
2569
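/*
 * On invalidate: with selective fetch enabled switch to continuous full
 * frame (CFF) updates, otherwise exit PSR until the next flush.
 */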
2570static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2571{
2572 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2573 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2574
2575 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2576 u32 val;
2577
2578 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2579 /* Send one update otherwise lag is observed in screen */
2580 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2581 return;
2582 }
2583
2584 val = man_trk_ctl_enable_bit_get(dev_priv) |
2585 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2586 man_trk_ctl_continuos_full_frame(dev_priv);
2587 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2588 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2589 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2590 } else {
2591 intel_psr_exit(intel_dp);
2592 }
2593}
2594
2595/**
2596 * intel_psr_invalidate - Invalidate PSR
2597 * @dev_priv: i915 device
2598 * @frontbuffer_bits: frontbuffer plane tracking bits
2599 * @origin: which operation caused the invalidate
2600 *
2601 * Since the hardware frontbuffer tracking has gaps we need to integrate
2602 * with the software frontbuffer tracking. This function gets called every
2603 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2604 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2605 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2607 */
2608void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2609 unsigned frontbuffer_bits, enum fb_op_origin origin)
2610{
2611 struct intel_encoder *encoder;
2612
2613 if (origin == ORIGIN_FLIP)
2614 return;
2615
2616 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2617 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2618 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2619
2620 mutex_lock(&intel_dp->psr.lock);
2621 if (!intel_dp->psr.enabled) {
2622 mutex_unlock(&intel_dp->psr.lock);
2623 continue;
2624 }
2625
2626 pipe_frontbuffer_bits &=
2627 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2628 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2629
2630 if (pipe_frontbuffer_bits)
2631 _psr_invalidate_handle(intel_dp);
2632
2633 mutex_unlock(&intel_dp->psr.lock);
2634 }
2635}

/*
 * Once we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will also invalidate and flush the PSR for the
 * ORIGIN_FLIP event, therefore tgl_dc3co_flush_locked() will need to
 * be changed accordingly.
2641 */
2642static void
2643tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2644 enum fb_op_origin origin)
2645{
2646 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2647
2648 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2649 !intel_dp->psr.active)
2650 return;
2651
2652 /*
	 * At every frontbuffer flush/flip event, modify the delay of the
	 * delayed work; when the delayed work finally runs, it means the
	 * display has been idle.
2655 */
2656 if (!(frontbuffer_bits &
2657 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2658 return;
2659
2660 tgl_psr2_enable_dc3co(intel_dp);
2661 mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2662 intel_dp->psr.dc3co_exit_delay);
2663}
2664
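/*
 * On flush: with selective fetch enabled transition back from continuous
 * full frame mode to selective updates once nothing is busy, otherwise force
 * a HW tracking exit and schedule PSR re-activation if needed.
 */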
2665static void _psr_flush_handle(struct intel_dp *intel_dp)
2666{
2667 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2668 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2669
2670 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2671 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2672 /* can we turn CFF off? */
2673 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2674 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2675 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2676 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2677 man_trk_ctl_continuos_full_frame(dev_priv);
2678
2679 /*
				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
				 * updates. Still keep the CFF bit enabled, as we don't have a
				 * proper SU configuration in case an update is sent for any
				 * reason after the SFF bit gets cleared by the HW on the next
				 * vblank.
2684 */
2685 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2686 val);
2687 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2688 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2689 }
2690 } else {
2691 /*
2692 * continuous full frame is disabled, only a single full
2693 * frame is required
2694 */
2695 psr_force_hw_tracking_exit(intel_dp);
2696 }
2697 } else {
2698 psr_force_hw_tracking_exit(intel_dp);
2699
2700 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2701 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2702 }
2703}
2704
2705/**
2706 * intel_psr_flush - Flush PSR
2707 * @dev_priv: i915 device
2708 * @frontbuffer_bits: frontbuffer plane tracking bits
2709 * @origin: which operation caused the flush
2710 *
2711 * Since the hardware frontbuffer tracking has gaps we need to integrate
2712 * with the software frontbuffer tracking. This function gets called every
2713 * time frontbuffer rendering has completed and flushed out to memory. PSR
2714 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2715 *
2716 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2717 */
2718void intel_psr_flush(struct drm_i915_private *dev_priv,
2719 unsigned frontbuffer_bits, enum fb_op_origin origin)
2720{
2721 struct intel_encoder *encoder;
2722
2723 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2724 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2725 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2726
2727 mutex_lock(&intel_dp->psr.lock);
2728 if (!intel_dp->psr.enabled) {
2729 mutex_unlock(&intel_dp->psr.lock);
2730 continue;
2731 }
2732
2733 pipe_frontbuffer_bits &=
2734 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2735 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2736
2737 /*
		 * If the PSR is paused by an explicit intel_psr_pause() call,
2739 * we have to ensure that the PSR is not activated until
2740 * intel_psr_resume() is called.
2741 */
2742 if (intel_dp->psr.paused)
2743 goto unlock;
2744
2745 if (origin == ORIGIN_FLIP ||
2746 (origin == ORIGIN_CURSOR_UPDATE &&
2747 !intel_dp->psr.psr2_sel_fetch_enabled)) {
2748 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2749 goto unlock;
2750 }
2751
2752 if (pipe_frontbuffer_bits == 0)
2753 goto unlock;
2754
2755 /* By definition flush = invalidate + flush */
2756 _psr_flush_handle(intel_dp);
2757unlock:
2758 mutex_unlock(&intel_dp->psr.lock);
2759 }
2760}
2761
2762/**
2763 * intel_psr_init - Init basic PSR work and mutex.
2764 * @intel_dp: Intel DP
2765 *
 * This function is called after initializing the connector
 * (connector initialization handles the connector capabilities),
 * and it initializes the basic PSR state for each DP encoder.
2769 */
2770void intel_psr_init(struct intel_dp *intel_dp)
2771{
2772 struct intel_connector *connector = intel_dp->attached_connector;
2773 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2774 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2775
2776 if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
2777 return;
2778
2779 /*
2780 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder,
	 * but BDW, GEN9 and GEN11 are not validated by the HW team in any
	 * transcoder other than the eDP one.
	 * For now we only support one instance of PSR for BDW, GEN9 and GEN11,
	 * so let's keep it hardcoded to PORT_A for those.
	 * GEN12, however, supports an instance of PSR registers per transcoder.
2787 */
2788 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2789 drm_dbg_kms(&dev_priv->drm,
2790 "PSR condition failed: Port not supported\n");
2791 return;
2792 }
2793
2794 if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2795 intel_dp->psr.source_panel_replay_support = true;
2796 else
2797 intel_dp->psr.source_support = true;
2798
	/* Set link_standby vs. link_off defaults */
2800 if (DISPLAY_VER(dev_priv) < 12)
		/* For platforms older than TGL let's respect the VBT again */
2802 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2803
2804 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2805 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2806 mutex_init(&intel_dp->psr.lock);
2807}
2808
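/*
 * Read the sink status and error status DPCD registers, using the Panel
 * Replay offsets when Panel Replay is enabled.
 */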
2809static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2810 u8 *status, u8 *error_status)
2811{
2812 struct drm_dp_aux *aux = &intel_dp->aux;
2813 int ret;
2814 unsigned int offset;
2815
2816 offset = intel_dp->psr.panel_replay_enabled ?
2817 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2818
2819 ret = drm_dp_dpcd_readb(aux, offset, status);
2820 if (ret != 1)
2821 return ret;
2822
2823 offset = intel_dp->psr.panel_replay_enabled ?
2824 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2825
2826 ret = drm_dp_dpcd_readb(aux, offset, error_status);
2827 if (ret != 1)
2828 return ret;
2829
2830 *status = *status & DP_PSR_SINK_STATE_MASK;
2831
2832 return 0;
2833}
2834
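/* Check for ALPM errors reported by the sink; disable PSR on a lock timeout */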
2835static void psr_alpm_check(struct intel_dp *intel_dp)
2836{
2837 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2838 struct drm_dp_aux *aux = &intel_dp->aux;
2839 struct intel_psr *psr = &intel_dp->psr;
2840 u8 val;
2841 int r;
2842
2843 if (!psr->psr2_enabled)
2844 return;
2845
2846 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2847 if (r != 1) {
2848 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2849 return;
2850 }
2851
2852 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2853 intel_psr_disable_locked(intel_dp);
2854 psr->sink_not_reliable = true;
2855 drm_dbg_kms(&dev_priv->drm,
2856 "ALPM lock timeout error, disabling PSR\n");
2857
2858 /* Clearing error */
2859 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2860 }
2861}
2862
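/* Disable PSR if the sink reports a PSR capability change via DP_PSR_ESI */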
2863static void psr_capability_changed_check(struct intel_dp *intel_dp)
2864{
2865 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2866 struct intel_psr *psr = &intel_dp->psr;
2867 u8 val;
2868 int r;
2869
2870 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2871 if (r != 1) {
2872 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2873 return;
2874 }
2875
2876 if (val & DP_PSR_CAPS_CHANGE) {
2877 intel_psr_disable_locked(intel_dp);
2878 psr->sink_not_reliable = true;
2879 drm_dbg_kms(&dev_priv->drm,
2880 "Sink PSR capability changed, disabling PSR\n");
2881
2882 /* Clearing it */
2883 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2884 }
2885}
2886
2887void intel_psr_short_pulse(struct intel_dp *intel_dp)
2888{
2889 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2890 struct intel_psr *psr = &intel_dp->psr;
2891 u8 status, error_status;
2892 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2893 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2894 DP_PSR_LINK_CRC_ERROR;
2895
2896 if (!CAN_PSR(intel_dp))
2897 return;
2898
2899 mutex_lock(&psr->lock);
2900
2901 if (!psr->enabled)
2902 goto exit;
2903
2904 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2905 drm_err(&dev_priv->drm,
2906 "Error reading PSR status or error status\n");
2907 goto exit;
2908 }
2909
2910 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2911 intel_psr_disable_locked(intel_dp);
2912 psr->sink_not_reliable = true;
2913 }
2914
2915 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2916 drm_dbg_kms(&dev_priv->drm,
2917 "PSR sink internal error, disabling PSR\n");
2918 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2919 drm_dbg_kms(&dev_priv->drm,
2920 "PSR RFB storage error, disabling PSR\n");
2921 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2922 drm_dbg_kms(&dev_priv->drm,
2923 "PSR VSC SDP uncorrectable error, disabling PSR\n");
2924 if (error_status & DP_PSR_LINK_CRC_ERROR)
2925 drm_dbg_kms(&dev_priv->drm,
2926 "PSR Link CRC error, disabling PSR\n");
2927
2928 if (error_status & ~errors)
2929 drm_err(&dev_priv->drm,
2930 "PSR_ERROR_STATUS unhandled errors %x\n",
2931 error_status & ~errors);
2932 /* clear status register */
2933 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2934
2935 psr_alpm_check(intel_dp);
2936 psr_capability_changed_check(intel_dp);
2937
2938exit:
2939 mutex_unlock(&psr->lock);
2940}
2941
2942bool intel_psr_enabled(struct intel_dp *intel_dp)
2943{
2944 bool ret;
2945
2946 if (!CAN_PSR(intel_dp))
2947 return false;
2948
2949 mutex_lock(&intel_dp->psr.lock);
2950 ret = intel_dp->psr.enabled;
2951 mutex_unlock(&intel_dp->psr.lock);
2952
2953 return ret;
2954}
2955
2956/**
2957 * intel_psr_lock - grab PSR lock
2958 * @crtc_state: the crtc state
2959 *
 * This is initially meant to be used around the CRTC update, when
 * vblank-sensitive registers are updated and we need to grab the lock
 * before that to avoid vblank evasion.
2963 */
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		break;
	}
}

/**
 * intel_psr_unlock - release PSR lock
 * @crtc_state: the crtc state
 *
 * Release the PSR lock that was held during pipe update.
 */
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_unlock(&intel_dp->psr.lock);
		break;
	}
}

static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	const char *status = "unknown";
	u32 val, status_val;

	if (intel_dp->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}

static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct intel_psr *psr = &intel_dp->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	seq_printf(m, "Sink support: PSR = %s",
		   str_yes_no(psr->sink_support));

	if (psr->sink_support)
		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
	seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));

	if (!(psr->sink_support || psr->sink_panel_replay_support))
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->panel_replay_enabled)
		status = "Panel Replay Enabled";
	else if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   str_yes_no(psr->sink_not_reliable));

		goto unlock;
	}

	if (psr->panel_replay_enabled) {
		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
	} else if (psr->psr2_enabled) {
		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
		   str_enabled_disabled(enabled), val);
	psr_source_status(intel_dp, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);
	/*
	 * SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
	seq_printf(m, "Performance counter: %u\n",
		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers beforehand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
			su_frames_val[frame / 3] = val;
		}

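		/*
		 * Each PSR2_SU_STATUS register packs the selective-update
		 * block counts of three consecutive frames; the per-frame
		 * count is extracted below with PSR2_SU_STATUS_MASK() and
		 * PSR2_SU_STATUS_SHIFT().
		 */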
		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_edp_psr_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct intel_dp *intel_dp = NULL;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Find the first eDP which supports PSR */
	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		intel_dp = enc_to_intel_dp(encoder);
		break;
	}

	if (!intel_dp)
		return -ENODEV;

	return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);

static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;
	intel_wakeref_t wakeref;
	int ret = -ENODEV;

	if (!HAS_PSR(dev_priv))
		return ret;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);

		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

		// TODO: split to each transcoder's PSR debug state
		ret = intel_psr_debug_set(intel_dp, val);

		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	}

	return ret;
}

static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		// TODO: split to each transcoder's PSR debug state
		*val = READ_ONCE(intel_dp->psr.debug);
		return 0;
	}

	return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");

void intel_psr_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
			    i915, &i915_edp_psr_debug_fops);

	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
			    i915, &i915_edp_psr_status_fops);
}

static const char *psr_mode_str(struct intel_dp *intel_dp)
{
	if (intel_dp->psr.panel_replay_enabled)
		return "PANEL-REPLAY";
	else if (intel_dp->psr.enabled)
		return "PSR";

	return "unknown";
}

static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	static const char * const panel_replay_status[] = {
		"Sink device frame is locked to the Source device",
		"Sink device is coasting, using the VTotal target",
		"Sink device is governing the frame rate (frame rate unlock is granted)",
		"Sink device in the process of re-locking with the Source device",
	};
	const char *str;
	int ret;
	u8 status, error_status;
	u32 idx;

	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
		return -ENODEV;
	}

	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
	if (ret)
		return ret;

	str = "unknown";
	if (intel_dp->psr.panel_replay_enabled) {
		idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
		if (idx < ARRAY_SIZE(panel_replay_status))
			str = panel_replay_status[idx];
	} else if (intel_dp->psr.enabled) {
		idx = status & DP_PSR_SINK_STATE_MASK;
		if (idx < ARRAY_SIZE(sink_status))
			str = sink_status[idx];
	}

	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);

	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);

	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			    DP_PSR_LINK_CRC_ERROR))
		seq_puts(m, ":\n");
	else
		seq_puts(m, "\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

static int i915_psr_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);

void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct dentry *root = connector->base.debugfs_entry;

	/* TODO: Add support for MST connectors as well. */
	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
	    connector->mst_port)
		return;

	debugfs_create_file("i915_psr_sink_status", 0444, root,
			    connector, &i915_psr_sink_status_fops);

	if (HAS_PSR(i915) || HAS_DP20(i915))
		debugfs_create_file("i915_psr_status", 0444, root,
				    connector, &i915_psr_status_fops);
}