// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/debugfs.h>

#include "i915_drv.h"
#include "i9xx_wm.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @i915: i915 device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_i915_private *i915)
{
	if (i915->display.funcs.wm->update_wm)
		i915->display.funcs.wm->update_wm(i915);
}
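
/*
 * Purely illustrative sketch, not used anywhere in the driver, and the
 * function/parameter names and units are assumptions: a literal
 * transcription of the "normal" (non-SR) watermark formula documented
 * above, assuming dotclock in kHz, latency in usecs and FIFO entry size
 * in bytes. The real per-platform implementations live in i9xx_wm.c and
 * skl_watermark.c.
 */
static inline unsigned int intel_wm_example_entries(unsigned int dotclock_khz,
						    unsigned int cpp,
						    unsigned int latency_us,
						    unsigned int entry_size)
{
	/* bytes fetched from memory while covering the latency window */
	unsigned int bytes = dotclock_khz / 1000 * cpp * latency_us;

	/* round up to FIFO entries and add 2 entries for clock crossings */
	return DIV_ROUND_UP(bytes, entry_size) + 2;
}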

int intel_wm_compute(struct intel_atomic_state *state,
		     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);

	if (!display->funcs.wm->compute_watermarks)
		return 0;

	return display->funcs.wm->compute_watermarks(state, crtc);
}

bool intel_initial_watermarks(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->initial_watermarks) {
		i915->display.funcs.wm->initial_watermarks(state, crtc);
		return true;
	}

	return false;
}

void intel_atomic_update_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->atomic_update_watermarks)
		i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
}

void intel_optimize_watermarks(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->optimize_watermarks)
		i915->display.funcs.wm->optimize_watermarks(state, crtc);
}

int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->compute_global_watermarks)
		return i915->display.funcs.wm->compute_global_watermarks(state);

	return 0;
}

void intel_wm_get_hw_state(struct drm_i915_private *i915)
{
	if (i915->display.funcs.wm->get_hw_state)
		return i915->display.funcs.wm->get_hw_state(i915);
}

bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

void intel_print_wm_latency(struct drm_i915_private *dev_priv,
			    const char *name, const u16 wm[])
{
	int level;

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "%s WM%d latency not provided\n",
				    name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		drm_dbg_kms(&dev_priv->drm,
			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
			    wm[level], latency / 10, latency % 10);
	}
}
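
/*
 * Worked example of the scaling above (the raw values are hypothetical):
 * on a pre-gen9 platform a raw WM1 latency of 5 is in 0.5us units, so it
 * is scaled to 25 tenths of a usec and printed as "2.5 usec"; on gen9+
 * the raw values are already in usecs, so 5 becomes 50 and prints as
 * "5.0 usec".
 */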

void intel_wm_init(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		skl_wm_init(i915);
	else
		i9xx_wm_init(i915);
}

static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	int level;

	drm_modeset_lock_all(&dev_priv->drm);

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (DISPLAY_VER(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(&dev_priv->drm);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 new[8] = {};
	int level;
	int ret;
	char tmp[32];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != dev_priv->display.wm.num_levels)
		return -EINVAL;

	drm_modeset_lock_all(&dev_priv->drm);

	for (level = 0; level < dev_priv->display.wm.num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(&dev_priv->drm);

	return len;
}
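
/*
 * Write format accepted above (example values only): a single line of
 * space separated decimal values, one per enabled watermark level, in
 * the same raw units as the underlying latency table, e.g.
 *
 *   echo "2 4 10 15 20 25 30 35" > i915_pri_wm_latency
 *
 * sscanf() must match exactly num_levels values, otherwise the write is
 * rejected with -EINVAL.
 */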

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

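/*
 * Registers the watermark latency debugfs files (i915_pri_wm_latency,
 * i915_spr_wm_latency, i915_cur_wm_latency) under the DRM primary minor,
 * typically reachable as /sys/kernel/debug/dri/<minor>/ (path illustrative,
 * it depends on where debugfs is mounted). On gen9+ all three expose the
 * shared skl_latency table. A quick round trip might look like:
 *
 *   cat /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *   echo "2 4 10 15 20 25 30 35" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */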
void intel_wm_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_pri_wm_latency_fops);

	debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_spr_wm_latency_fops);

	debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_cur_wm_latency_fops);

	skl_watermark_debugfs_register(i915);
}