// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_panel.h"

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates dynamically,
 * based on the usage scenario. This feature is applicable for internal
 * panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing the refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in the dock-undock scenario.
 * Seamless DRRS involves changing the RR without any visual effect to the
 * user and can be used during normal system usage. This is done by
 * programming certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to a low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking. When there is a
 * disturbance on the screen triggered by user activity or a periodic system
 * activity, DRRS is disabled (RR is changed to high RR). When there is no
 * movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_drrs_invalidate()
 * and intel_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */

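/*
 * Illustrative sketch only (not upstream code): roughly how a frontbuffer
 * tracking path is expected to drive DRRS around a rendering burst, using
 * the intel_drrs_invalidate()/intel_drrs_flush() entry points defined below.
 * The helper name example_frontbuffer_render() is hypothetical; the real
 * callers live in the frontbuffer tracking code.
 *
 *	static void example_frontbuffer_render(struct drm_i915_private *i915,
 *					       unsigned int frontbuffer_bits)
 *	{
 *		// Rendering starts: force the high refresh rate.
 *		intel_drrs_invalidate(i915, frontbuffer_bits);
 *
 *		// ... CPU/GPU writes to the frontbuffer ...
 *
 *		// Rendering done: restart the 1 second idleness timer,
 *		// which downclocks again once all tracked bits are idle.
 *		intel_drrs_flush(i915, frontbuffer_bits);
 *	}
 */
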
const char *intel_drrs_type_str(enum drrs_type drrs_type)
{
	static const char * const str[] = {
		[DRRS_TYPE_NONE] = "none",
		[DRRS_TYPE_STATIC] = "static",
		[DRRS_TYPE_SEAMLESS] = "seamless",
	};

	if (drrs_type >= ARRAY_SIZE(str))
		return "<invalid>";

	return str[drrs_type];
}

static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
				     enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
	u32 val, bit;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		bit = PIPECONF_REFRESH_RATE_ALT_VLV;
	else
		bit = PIPECONF_REFRESH_RATE_ALT_ILK;

	val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if (refresh_rate == DRRS_REFRESH_RATE_LOW)
		val |= bit;
	else
		val &= ~bit;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
}

static void
intel_drrs_set_refresh_rate_m_n(struct intel_crtc *crtc,
				enum drrs_refresh_rate refresh_rate)
{
	intel_cpu_transcoder_set_m1_n1(crtc, crtc->drrs.cpu_transcoder,
				       refresh_rate == DRRS_REFRESH_RATE_LOW ?
				       &crtc->drrs.m2_n2 : &crtc->drrs.m_n);
}

bool intel_drrs_is_active(struct intel_crtc *crtc)
{
	return crtc->drrs.cpu_transcoder != INVALID_TRANSCODER;
}

static void intel_drrs_set_state(struct intel_crtc *crtc,
				 enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (refresh_rate == crtc->drrs.refresh_rate)
		return;

	if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder))
		intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate);
	else
		intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate);

	crtc->drrs.refresh_rate = refresh_rate;
}

static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
	mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
}

static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned int frontbuffer_bits;

	frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
					 crtc_state->bigjoiner_pipes)
		frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	return frontbuffer_bits;
}

/**
 * intel_drrs_activate - activate DRRS
 * @crtc_state: the crtc state
 *
 * Activates DRRS on the crtc.
 */
void intel_drrs_activate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->has_drrs)
		return;

	if (!crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	crtc->drrs.cpu_transcoder = crtc_state->cpu_transcoder;
	crtc->drrs.m_n = crtc_state->dp_m_n;
	crtc->drrs.m2_n2 = crtc_state->dp_m2_n2;
	crtc->drrs.frontbuffer_bits = intel_drrs_frontbuffer_bits(crtc_state);
	crtc->drrs.busy_frontbuffer_bits = 0;

	intel_drrs_schedule_work(crtc);

	mutex_unlock(&crtc->drrs.mutex);
}

/**
 * intel_drrs_deactivate - deactivate DRRS
 * @old_crtc_state: the old crtc state
 *
 * Deactivates DRRS on the crtc.
 */
void intel_drrs_deactivate(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);

	if (!old_crtc_state->has_drrs)
		return;

	if (!old_crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc))
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
	crtc->drrs.frontbuffer_bits = 0;
	crtc->drrs.busy_frontbuffer_bits = 0;

	mutex_unlock(&crtc->drrs.mutex);

	cancel_delayed_work_sync(&crtc->drrs.work);
}

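/*
 * Illustrative sketch only (not upstream code): roughly how an atomic
 * commit path would bracket a crtc update with intel_drrs_deactivate()
 * and intel_drrs_activate() defined above. The function name
 * example_commit_crtc_update() is hypothetical; the real call sites are
 * in the modeset/commit code.
 *
 *	static void example_commit_crtc_update(const struct intel_crtc_state *old_crtc_state,
 *					       const struct intel_crtc_state *new_crtc_state)
 *	{
 *		// Stop DRRS (and force high RR) before reprogramming the crtc.
 *		intel_drrs_deactivate(old_crtc_state);
 *
 *		// ... program the new crtc configuration ...
 *
 *		// Re-arm DRRS for the new state; the delayed work will
 *		// downclock after 1 second of idleness.
 *		intel_drrs_activate(new_crtc_state);
 *	}
 */
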
static void intel_drrs_downclock_work(struct work_struct *work)
{
	struct intel_crtc *crtc = container_of(work, typeof(*crtc), drrs.work.work);

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc) && !crtc->drrs.busy_frontbuffer_bits)
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_LOW);

	mutex_unlock(&crtc->drrs.mutex);
}

static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
					  unsigned int all_frontbuffer_bits,
					  bool invalidate)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		unsigned int frontbuffer_bits;

		mutex_lock(&crtc->drrs.mutex);

		frontbuffer_bits = all_frontbuffer_bits & crtc->drrs.frontbuffer_bits;
		if (!frontbuffer_bits) {
			mutex_unlock(&crtc->drrs.mutex);
			continue;
		}

		if (invalidate)
			crtc->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
		else
			crtc->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

		/* flush/invalidate means busy screen hence upclock */
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

		/*
		 * flush also means no more activity hence schedule downclock, if all
		 * other fbs are quiescent too
		 */
		if (!crtc->drrs.busy_frontbuffer_bits)
			intel_drrs_schedule_work(crtc);
		else
			cancel_delayed_work(&crtc->drrs.work);

		mutex_unlock(&crtc->drrs.mutex);
	}
}

/**
 * intel_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
			   unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
}

/**
 * intel_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc has completed, so DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). Idleness detection is also restarted if no other
 * planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_flush(struct drm_i915_private *dev_priv,
		      unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
}

/**
 * intel_drrs_crtc_init - Init DRRS for CRTC
 * @crtc: crtc
 *
 * This function is called only once at driver load to initialize basic
 * DRRS state.
 */
void intel_drrs_crtc_init(struct intel_crtc *crtc)
{
	INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work);
	mutex_init(&crtc->drrs.mutex);
	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
}

static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_crtc *crtc = m->private;
	const struct intel_crtc_state *crtc_state;
	int ret;

	ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	mutex_lock(&crtc->drrs.mutex);

	seq_printf(m, "DRRS enabled: %s\n",
		   str_yes_no(crtc_state->has_drrs));

	seq_printf(m, "DRRS active: %s\n",
		   str_yes_no(intel_drrs_is_active(crtc)));

	seq_printf(m, "DRRS refresh rate: %s\n",
		   crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ?
		   "low" : "high");

	seq_printf(m, "DRRS busy frontbuffer bits: 0x%x\n",
		   crtc->drrs.busy_frontbuffer_bits);

	mutex_unlock(&crtc->drrs.mutex);

	drm_modeset_unlock(&crtc->base.mutex);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_status);

static int intel_drrs_debugfs_ctl_set(void *data, u64 val)
{
	struct intel_crtc *crtc = data;
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_crtc_commit *commit;
	int ret;

	ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	if (!crtc_state->hw.active ||
	    !crtc_state->has_drrs)
		goto out;

	commit = crtc_state->uapi.commit;
	if (commit) {
		ret = wait_for_completion_interruptible(&commit->hw_done);
		if (ret)
			goto out;
	}

	drm_dbg(&i915->drm,
		"Manually %sactivating DRRS\n", val ? "" : "de");

	if (val)
		intel_drrs_activate(crtc_state);
	else
		intel_drrs_deactivate(crtc_state);

out:
	drm_modeset_unlock(&crtc->base.mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
			NULL, intel_drrs_debugfs_ctl_set, "%llu\n");

void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc)
{
	debugfs_create_file("i915_drrs_status", 0444, crtc->base.debugfs_entry,
			    crtc, &intel_drrs_debugfs_status_fops);

	debugfs_create_file("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
			    crtc, &intel_drrs_debugfs_ctl_fops);
}

static int intel_drrs_debugfs_type_show(struct seq_file *m, void *unused)
{
	struct intel_connector *connector = m->private;

	seq_printf(m, "DRRS type: %s\n",
		   intel_drrs_type_str(intel_panel_drrs_type(connector)));

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_type);

void intel_drrs_connector_debugfs_add(struct intel_connector *connector)
{
	if (intel_panel_drrs_type(connector) == DRRS_TYPE_NONE)
		return;

	debugfs_create_file("i915_drrs_type", 0444, connector->base.debugfs_entry,
			    connector, &intel_drrs_debugfs_type_fops);
}