// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 *
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vrr.h"

bool intel_vrr_is_capable(struct intel_connector *connector)
{
	const struct drm_display_info *info = &connector->base.display_info;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp;

	/*
	 * The DP sink is capable of VRR video timings if the
	 * Ignore MSA bit is set in the DPCD.
	 * The EDID monitor range should also span at least 10 Hz for a
	 * reasonable Adaptive Sync or Variable Refresh Rate end user
	 * experience.
	 */
	switch (connector->base.connector_type) {
	case DRM_MODE_CONNECTOR_eDP:
		if (!connector->panel.vbt.vrr)
			return false;
		fallthrough;
	case DRM_MODE_CONNECTOR_DisplayPort:
		intel_dp = intel_attached_dp(connector);

		if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
			return false;

		break;
	default:
		return false;
	}

	return HAS_VRR(i915) &&
		info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
}
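
/*
 * Worked example (hypothetical panel, numbers are illustrative only):
 * a sink advertising a 48-144 Hz monitor range gives
 * max_vfreq - min_vfreq = 144 - 48 = 96 > 10, so it passes the range
 * check above, provided HAS_VRR() and the Ignore MSA DPCD check are
 * also satisfied.
 */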

bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh)
{
	const struct drm_display_info *info = &connector->base.display_info;

	return intel_vrr_is_capable(connector) &&
		vrefresh >= info->monitor_range.min_vfreq &&
		vrefresh <= info->monitor_range.max_vfreq;
}

void
intel_vrr_check_modeset(struct intel_atomic_state *state)
{
	int i;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->uapi.vrr_enabled !=
		    old_crtc_state->uapi.vrr_enabled)
			new_crtc_state->uapi.mode_changed = true;
	}
}
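
/*
 * A change to the crtc's VRR_ENABLED property between the old and new
 * atomic state is thus promoted to a full modeset by flagging
 * uapi.mode_changed.
 */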

/*
 * Without VRR, registers get latched at:
 *  vblank_start
 *
 * With VRR, the earliest point registers can get latched is:
 *  intel_vrr_vmin_vblank_start(), which, if we want to maintain
 *  the correct min vtotal, is >= vblank_start + 1
 *
 * The latest point registers can get latched is the vmax decision boundary:
 *  intel_vrr_vmax_vblank_start()
 *
 * Between those two points the vblank exit starts (and hence registers get
 * latched) ASAP after a push is sent.
 *
 * framestart_delay is programmable from 1 to 4.
 */
static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (DISPLAY_VER(i915) >= 13)
		return crtc_state->vrr.guardband;
	else
		/* The hw imposes the extra scanline before frame start */
		return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
}

int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
{
	/* Min vblank actually determined by flipline that is always >= vmin + 1 */
	return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state);
}

int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state);
}
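
/*
 * Worked example for the latch window described above (hypothetical
 * values, illustrative only): on a DISPLAY_VER >= 13 platform with
 * vrr.vmin = 1124, vrr.vmax = 1406 and vrr.guardband = 45,
 * intel_vrr_vmin_vblank_start() = 1124 + 1 - 45 = 1080 and
 * intel_vrr_vmax_vblank_start() = 1406 - 45 = 1361, i.e. the registers
 * can be latched anywhere between scanlines 1080 and 1361 depending on
 * when the push arrives.
 */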

void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
			 struct drm_connector_state *conn_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	const struct drm_display_info *info = &connector->base.display_info;
	int vmin, vmax;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return;

	crtc_state->vrr.in_range =
		intel_vrr_is_in_range(connector, drm_mode_vrefresh(adjusted_mode));
	if (!crtc_state->vrr.in_range)
		return;

	if (HAS_LRR(i915))
		crtc_state->update_lrr = true;

	vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
			    adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
	vmax = adjusted_mode->crtc_clock * 1000 /
		(adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);

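	/*
	 * Worked example (hypothetical 1920x1080 mode with a 48-60 Hz
	 * monitor range, numbers are illustrative only):
	 * crtc_clock = 148500 kHz and crtc_htotal = 2200 give
	 *   vmin = DIV_ROUND_UP(148500000, 2200 * 60) = 1125
	 *   vmax = 148500000 / (2200 * 48) = 1406
	 * i.e. the range of vtotals the hardware may pick from to stay
	 * within the sink's refresh rate limits.
	 */
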
	vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
	vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);

	if (vmin >= vmax)
		return;

	/*
	 * flipline determines the min vblank length the hardware will
	 * generate, and flipline >= vmin + 1, hence we reduce vmin by one
	 * to make sure we can get the actual min vblank length.
	 */
	crtc_state->vrr.vmin = vmin - 1;
	crtc_state->vrr.vmax = vmax;

	crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;

	/*
	 * For XE_LPD+, we use the guardband; the pipeline override
	 * is deprecated.
	 */
	if (DISPLAY_VER(i915) >= 13) {
		crtc_state->vrr.guardband =
			crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
	} else {
		crtc_state->vrr.pipeline_full =
			min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
			    crtc_state->framestart_delay - 1);
	}
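
	/*
	 * Continuing the hypothetical 1080p example: with
	 * crtc_vblank_start = 1080 and vrr.vmin = 1125 - 1 = 1124, the
	 * DISPLAY_VER >= 13 path gives
	 * vrr.guardband = 1124 + 1 - 1080 = 45, while older platforms
	 * would use pipeline_full = min(255, 1124 - 1080 - framestart_delay - 1).
	 */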

	if (crtc_state->uapi.vrr_enabled) {
		crtc_state->vrr.enable = true;
		crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
	}
}

static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (DISPLAY_VER(i915) >= 13)
		return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
			XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
	else
		return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
			VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
			VRR_CTL_PIPELINE_FULL_OVERRIDE;
}

void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/*
	 * TRANS_SET_CONTEXT_LATENCY with VRR enabled
	 * requires this chicken bit on ADL/DG2.
	 */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     0, PIPE_VBLANK_WITH_DELAY);

	if (!crtc_state->vrr.flipline) {
		intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
		return;
	}

	intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
	intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
	intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl(crtc_state));
	intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
}
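
/*
 * Note that VMIN, VMAX and FLIPLINE are programmed as the computed
 * value minus one and decoded as value plus one in
 * intel_vrr_get_config() below, i.e. the driver treats these register
 * fields as zero-based.
 */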

void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->vrr.enable)
		return;

	intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder),
		       TRANS_PUSH_EN | TRANS_PUSH_SEND);
}

bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->vrr.enable)
		return false;

	return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND;
}

void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->vrr.enable)
		return;

	intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
	intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
		       VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}

void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	if (!old_crtc_state->vrr.enable)
		return;

	intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
		       trans_vrr_ctl(old_crtc_state));
	intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder),
				VRR_STATUS_VRR_EN_LIVE, 1000);
	intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
}
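
/*
 * Disable mirrors intel_vrr_enable() in reverse: VRR_CTL_VRR_ENABLE is
 * dropped while the rest of the TRANS_VRR_CTL configuration is kept,
 * then the code waits for VRR_STATUS_VRR_EN_LIVE to clear before
 * clearing TRANS_PUSH.
 */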

void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 trans_vrr_ctl;

	trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));

	crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 13)
		crtc_state->vrr.guardband =
			REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
	else
		if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
			crtc_state->vrr.pipeline_full =
				REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);

	if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) {
		crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
		crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
		crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
	}

	if (crtc_state->vrr.enable)
		crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}