/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

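/*
 * Derive the rate (in kHz) of a CCK-sourced clock from its divider
 * register and the given reference frequency.
 */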
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

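/*
 * Same as vlv_get_cck_clock(), but uses the HPLL VCO as the reference
 * frequency, reading it from the SKU fuses on first use.
 */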
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

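/*
 * A pipe is in "HDR mode" when only HDR-capable planes (and possibly
 * the cursor) are active on it.
 */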
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

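/* The lowest pipe number in bigjoiner_pipes acts as the bigjoiner master. */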
static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}

u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_pipes)
		return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe != bigjoiner_master_pipe(crtc_state);
}

bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe == bigjoiner_master_pipe(crtc_state);
}

static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->bigjoiner_pipes);
}

struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

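/*
 * Assert that the given CPU transcoder is in the expected on/off state.
 * The hardware is only read if its power domain is already enabled;
 * otherwise the transcoder is assumed to be off.
 */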
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));

		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(dev_priv, cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder), str_on_off(state),
			str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

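/*
 * Wait for the PHY to report the given digital port as ready,
 * warning on a timeout.
 */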
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));

	/*
	 * Until the pipe starts, PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on().
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

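/* Combined size of all colour planes in a rotated GTT view. */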
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

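/*
 * Scanout always requires a fence on pre-gen4 hardware; on newer platforms
 * only FBC-capable planes using a normal GTT view need one.
 */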
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A happens to be disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2(dev_priv))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

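/*
 * Returns true if any CRTC still has a commit whose cleanup (and hence
 * its framebuffer unpin) is pending, after waiting one vblank on the
 * first such CRTC.
 */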
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *master_crtc;
	int num_encoders = 0;
	int i;

	master_crtc = intel_master_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &master_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(master_crtc->pipe));

	return encoder;
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/*
	 * Force use of hard-coded filter coefficients as some pre-programmed
	 * values are broken, e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/*
	 * Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}

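/*
 * is_enabling()/is_disabling() report whether a crtc_state feature is
 * being turned on/off across the old -> new state transition; a full
 * modeset counts as a transition even if the feature was already on.
 */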
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
}

static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
			 const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
			  const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

#undef is_disabling
#undef is_enabling

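/*
 * Finish a plane update: flush frontbuffer state, update watermarks,
 * and undo the clock gating workarounds the new state no longer needs.
 */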
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
				       ~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

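/*
 * Prepare for a plane update: tear down the features that must go
 * first (VRR, audio, DRRS, PSR), arm the required workarounds, and
 * do the pre-vblank watermark programming.
 */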
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (vrr_disabling(old_crtc_state, new_crtc_state)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

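/*
 * Disarm the primary plane; on pre-skl platforms this is also how the
 * gamma/csc configuration for the pipe bottom color gets updated, via
 * DSPCNTR (see the callers below).
 */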
static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	plane->disable_arm(plane, crtc_state);
}

static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}

static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
					    enum pipe pipe, bool apply)
{
	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	if (apply)
		val |= mask;
	else
		val &= ~mask;

	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}

static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
		     HSW_FRAME_START_DELAY_MASK,
		     HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}

static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);

	/*
	 * Enable sequence steps 1-7 on bigjoiner master
	 */
	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_pll_enable(state, master_crtc);

	if (crtc_state->shared_dpll)
		intel_enable_shared_dpll(crtc_state);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_enable(state, master_crtc);
}

static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);
	if (HAS_VRR(dev_priv))
		intel_vrr_set_transcoder_timings(crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}

static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	intel_dmc_enable_pipe(dev_priv, crtc->pipe);

	if (!new_crtc_state->bigjoiner_pipes) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_dsc_enable(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipe_misc(new_crtc_state);

	if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
	    !transcoder_is_dsi(cpu_transcoder))
		hsw_configure_cpu_transcoder(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/*
	 * If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround.
	 */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		struct intel_crtc *wa_crtc;

		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

		intel_crtc_wait_for_next_vblank(wa_crtc);
		intel_crtc_wait_for_next_vblank(wa_crtc);
	}
}

void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

1732	/* To avoid upsetting the power well on Haswell, only disable the pfit if
1733	 * it's in use. The hw state code will make sure we get this right. */
1734 if (!old_crtc_state->pch_pfit.enabled)
1735 return;
1736
1737 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
1738 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
1739 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
1740}
1741
1742static void ilk_crtc_disable(struct intel_atomic_state *state,
1743 struct intel_crtc *crtc)
1744{
1745 const struct intel_crtc_state *old_crtc_state =
1746 intel_atomic_get_old_crtc_state(state, crtc);
1747 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1748 enum pipe pipe = crtc->pipe;
1749
1750 /*
1751 * Sometimes spurious CPU pipe underruns happen when the
1752 * pipe is already disabled, but FDI RX/TX is still enabled.
1753 * Happens at least with VGA+HDMI cloning. Suppress them.
1754 */
1755 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1756 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1757
1758 intel_encoders_disable(state, crtc);
1759
1760 intel_crtc_vblank_off(old_crtc_state);
1761
1762 intel_disable_transcoder(old_crtc_state);
1763
1764 ilk_pfit_disable(old_crtc_state);
1765
1766 if (old_crtc_state->has_pch_encoder)
1767 ilk_pch_disable(state, crtc);
1768
1769 intel_encoders_post_disable(state, crtc);
1770
1771 if (old_crtc_state->has_pch_encoder)
1772 ilk_pch_post_disable(state, crtc);
1773
1774 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1775 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1776
1777 intel_disable_shared_dpll(old_crtc_state);
1778}
1779
1780static void hsw_crtc_disable(struct intel_atomic_state *state,
1781 struct intel_crtc *crtc)
1782{
1783 const struct intel_crtc_state *old_crtc_state =
1784 intel_atomic_get_old_crtc_state(state, crtc);
1785 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1786
1787 /*
1788 * FIXME collapse everything to one hook.
1789 * Need care with mst->ddi interactions.
1790 */
1791 if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
1792 intel_encoders_disable(state, crtc);
1793 intel_encoders_post_disable(state, crtc);
1794 }
1795
1796 intel_disable_shared_dpll(old_crtc_state);
1797
1798 if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
1799 struct intel_crtc *slave_crtc;
1800
1801 intel_encoders_post_pll_disable(state, crtc);
1802
1803 intel_dmc_disable_pipe(i915, crtc->pipe);
1804
1805 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
1806 intel_crtc_bigjoiner_slave_pipes(old_crtc_state))
1807 intel_dmc_disable_pipe(i915, slave_crtc->pipe);
1808 }
1809}
1810
1811static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
1812{
1813 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1814 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1815
1816 if (!crtc_state->gmch_pfit.control)
1817 return;
1818
1819 /*
1820 * The panel fitter should only be adjusted whilst the pipe is disabled,
1821	 * according to the register description and the PRM.
1822 */
1823 drm_WARN_ON(&dev_priv->drm,
1824 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
1825 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1826
1827 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
1828 crtc_state->gmch_pfit.pgm_ratios);
1829 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
1830
1831 /* Border color in case we don't scale up to the full screen. Black by
1832 * default, change to something else for debugging. */
1833 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
1834}
1835
1836bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
1837{
1838 if (phy == PHY_NONE)
1839 return false;
1840 else if (IS_ALDERLAKE_S(dev_priv))
1841 return phy <= PHY_E;
1842 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
1843 return phy <= PHY_D;
1844 else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
1845 return phy <= PHY_C;
1846 else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
1847 return phy <= PHY_B;
1848 else
1849 /*
1850 * DG2 outputs labelled as "combo PHY" in the bspec use
1851 * SNPS PHYs with completely different programming,
1852 * hence we always return false here.
1853 */
1854 return false;
1855}
1856
1857bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
1858{
1859 /*
1860	 * DG2's "TC1", although a TC-capable output, doesn't share the same flow
1861	 * as other platforms on the display engine side and instead relies on
1862	 * the SNPS PHY, which is programmed separately
1863 */
1864 if (IS_DG2(dev_priv))
1865 return false;
1866
1867 if (DISPLAY_VER(dev_priv) >= 13)
1868 return phy >= PHY_F && phy <= PHY_I;
1869 else if (IS_TIGERLAKE(dev_priv))
1870 return phy >= PHY_D && phy <= PHY_I;
1871 else if (IS_ICELAKE(dev_priv))
1872 return phy >= PHY_C && phy <= PHY_F;
1873
1874 return false;
1875}
1876
1877bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
1878{
1879 /*
1880 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
1881 * (PHY E) use Synopsis PHYs. See intel_phy_is_tc().
1882	 * (PHY E) use Synopsys PHYs. See intel_phy_is_tc().
1883 return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
1884}
1885
1886enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
1887{
1888 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
1889 return PHY_D + port - PORT_D_XELPD;
1890 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
1891 return PHY_F + port - PORT_TC1;
1892 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
1893 return PHY_B + port - PORT_TC1;
1894 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
1895 return PHY_C + port - PORT_TC1;
1896 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
1897 port == PORT_D)
1898 return PHY_A;
1899
1900 return PHY_A + port - PORT_A;
1901}
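
/*
 * Illustrative mapping, assuming the usual enum port layout (PORT_TC1
 * aliasing PORT_D): on TGL none of the branches above match, so
 * PORT_TC1..PORT_TC6 fall through to the final return and map to
 * PHY_D..PHY_I, while on ADL-S PORT_TC1..PORT_TC4 map to PHY_B..PHY_E.
 */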
1902
1903enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
1904{
1905 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
1906 return TC_PORT_NONE;
1907
1908 if (DISPLAY_VER(dev_priv) >= 12)
1909 return TC_PORT_1 + port - PORT_TC1;
1910 else
1911 return TC_PORT_1 + port - PORT_C;
1912}
1913
1914enum intel_display_power_domain
1915intel_aux_power_domain(struct intel_digital_port *dig_port)
1916{
1917 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1918
1919 if (intel_tc_port_in_tbt_alt_mode(dig_port))
1920 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
1921
1922 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
1923}
1924
1925static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
1926 struct intel_power_domain_mask *mask)
1927{
1928 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1929 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1930 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1931 struct drm_encoder *encoder;
1932 enum pipe pipe = crtc->pipe;
1933
1934 bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
1935
1936 if (!crtc_state->hw.active)
1937 return;
1938
1939 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
1940 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
1941 if (crtc_state->pch_pfit.enabled ||
1942 crtc_state->pch_pfit.force_thru)
1943 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
1944
1945 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
1946 crtc_state->uapi.encoder_mask) {
1947 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
1948
1949 set_bit(intel_encoder->power_domain, mask->bits);
1950 }
1951
1952 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
1953 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
1954
1955 if (crtc_state->shared_dpll)
1956 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
1957
1958 if (crtc_state->dsc.compression_enable)
1959 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
1960}
1961
1962void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
1963 struct intel_power_domain_mask *old_domains)
1964{
1965 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1966 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1967 enum intel_display_power_domain domain;
1968 struct intel_power_domain_mask domains, new_domains;
1969
1970 get_crtc_power_domains(crtc_state, &domains);
1971
1972 bitmap_andnot(new_domains.bits,
1973 domains.bits,
1974 crtc->enabled_power_domains.mask.bits,
1975 POWER_DOMAIN_NUM);
1976 bitmap_andnot(old_domains->bits,
1977 crtc->enabled_power_domains.mask.bits,
1978 domains.bits,
1979 POWER_DOMAIN_NUM);
1980
1981 for_each_power_domain(domain, &new_domains)
1982 intel_display_power_get_in_set(dev_priv,
1983 &crtc->enabled_power_domains,
1984 domain);
1985}
1986
1987void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
1988 struct intel_power_domain_mask *domains)
1989{
1990 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
1991 &crtc->enabled_power_domains,
1992 domains);
1993}
1994
1995static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1996{
1997 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1998 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1999
2000 if (intel_crtc_has_dp_encoder(crtc_state)) {
2001 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
2002 &crtc_state->dp_m_n);
2003 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
2004 &crtc_state->dp_m2_n2);
2005 }
2006
2007 intel_set_transcoder_timings(crtc_state);
2008
2009 i9xx_set_pipeconf(crtc_state);
2010}
2011
2012static void valleyview_crtc_enable(struct intel_atomic_state *state,
2013 struct intel_crtc *crtc)
2014{
2015 const struct intel_crtc_state *new_crtc_state =
2016 intel_atomic_get_new_crtc_state(state, crtc);
2017 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2018 enum pipe pipe = crtc->pipe;
2019
2020 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2021 return;
2022
2023 i9xx_configure_cpu_transcoder(new_crtc_state);
2024
2025 intel_set_pipe_src_size(new_crtc_state);
2026
2027 intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
2028
2029 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2030 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
2031 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
2032 }
2033
2034 crtc->active = true;
2035
2036 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2037
2038 intel_encoders_pre_pll_enable(state, crtc);
2039
2040 if (IS_CHERRYVIEW(dev_priv))
2041 chv_enable_pll(new_crtc_state);
2042 else
2043 vlv_enable_pll(new_crtc_state);
2044
2045 intel_encoders_pre_enable(state, crtc);
2046
2047 i9xx_pfit_enable(new_crtc_state);
2048
2049 intel_color_load_luts(new_crtc_state);
2050 intel_color_commit_noarm(new_crtc_state);
2051 intel_color_commit_arm(new_crtc_state);
2052 /* update DSPCNTR to configure gamma for pipe bottom color */
2053 intel_disable_primary_plane(new_crtc_state);
2054
2055 intel_initial_watermarks(state, crtc);
2056 intel_enable_transcoder(new_crtc_state);
2057
2058 intel_crtc_vblank_on(new_crtc_state);
2059
2060 intel_encoders_enable(state, crtc);
2061}
2062
2063static void i9xx_crtc_enable(struct intel_atomic_state *state,
2064 struct intel_crtc *crtc)
2065{
2066 const struct intel_crtc_state *new_crtc_state =
2067 intel_atomic_get_new_crtc_state(state, crtc);
2068 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2069 enum pipe pipe = crtc->pipe;
2070
2071 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2072 return;
2073
2074 i9xx_configure_cpu_transcoder(new_crtc_state);
2075
2076 intel_set_pipe_src_size(new_crtc_state);
2077
2078 crtc->active = true;
2079
2080 if (DISPLAY_VER(dev_priv) != 2)
2081 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2082
2083 intel_encoders_pre_enable(state, crtc);
2084
2085 i9xx_enable_pll(new_crtc_state);
2086
2087 i9xx_pfit_enable(new_crtc_state);
2088
2089 intel_color_load_luts(new_crtc_state);
2090 intel_color_commit_noarm(new_crtc_state);
2091 intel_color_commit_arm(new_crtc_state);
2092 /* update DSPCNTR to configure gamma for pipe bottom color */
2093 intel_disable_primary_plane(new_crtc_state);
2094
2095 if (!intel_initial_watermarks(state, crtc))
2096 intel_update_watermarks(dev_priv);
2097 intel_enable_transcoder(new_crtc_state);
2098
2099 intel_crtc_vblank_on(new_crtc_state);
2100
2101 intel_encoders_enable(state, crtc);
2102
2103 /* prevents spurious underruns */
2104 if (DISPLAY_VER(dev_priv) == 2)
2105 intel_crtc_wait_for_next_vblank(crtc);
2106}
2107
2108static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2109{
2110 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2111 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2112
2113 if (!old_crtc_state->gmch_pfit.control)
2114 return;
2115
2116 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2117
2118 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2119 intel_de_read(dev_priv, PFIT_CONTROL));
2120 intel_de_write(dev_priv, PFIT_CONTROL, 0);
2121}
2122
2123static void i9xx_crtc_disable(struct intel_atomic_state *state,
2124 struct intel_crtc *crtc)
2125{
2126 struct intel_crtc_state *old_crtc_state =
2127 intel_atomic_get_old_crtc_state(state, crtc);
2128 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2129 enum pipe pipe = crtc->pipe;
2130
2131 /*
2132 * On gen2 planes are double buffered but the pipe isn't, so we must
2133 * wait for planes to fully turn off before disabling the pipe.
2134 */
2135 if (DISPLAY_VER(dev_priv) == 2)
2136 intel_crtc_wait_for_next_vblank(crtc);
2137
2138 intel_encoders_disable(state, crtc);
2139
2140 intel_crtc_vblank_off(old_crtc_state);
2141
2142 intel_disable_transcoder(old_crtc_state);
2143
2144 i9xx_pfit_disable(old_crtc_state);
2145
2146 intel_encoders_post_disable(state, crtc);
2147
2148 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2149 if (IS_CHERRYVIEW(dev_priv))
2150 chv_disable_pll(dev_priv, pipe);
2151 else if (IS_VALLEYVIEW(dev_priv))
2152 vlv_disable_pll(dev_priv, pipe);
2153 else
2154 i9xx_disable_pll(old_crtc_state);
2155 }
2156
2157 intel_encoders_post_pll_disable(state, crtc);
2158
2159 if (DISPLAY_VER(dev_priv) != 2)
2160 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2161
2162 if (!dev_priv->display.funcs.wm->initial_watermarks)
2163 intel_update_watermarks(dev_priv);
2164
2165 /* clock the pipe down to 640x480@60 to potentially save power */
2166 if (IS_I830(dev_priv))
2167 i830_enable_pipe(dev_priv, pipe);
2168}
2169
2170void intel_encoder_destroy(struct drm_encoder *encoder)
2171{
2172 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2173
2174 drm_encoder_cleanup(encoder);
2175 kfree(intel_encoder);
2176}
2177
2178static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2179{
2180 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2181
2182 /* GDG double wide on either pipe, otherwise pipe A only */
2183 return DISPLAY_VER(dev_priv) < 4 &&
2184 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2185}
2186
2187static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2188{
2189 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2190 struct drm_rect src;
2191
2192 /*
2193 * We only use IF-ID interlacing. If we ever use
2194 * PF-ID we'll need to adjust the pixel_rate here.
2195 */
2196
2197 if (!crtc_state->pch_pfit.enabled)
2198 return pixel_rate;
2199
2200 drm_rect_init(&src, 0, 0,
2201 drm_rect_width(&crtc_state->pipe_src) << 16,
2202 drm_rect_height(&crtc_state->pipe_src) << 16);
2203
2204 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2205 pixel_rate);
2206}
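
/*
 * Worked example with illustrative numbers: a PCH panel fitter
 * squeezing a 1920 pixel wide source into a 1728 pixel wide window
 * (same height) raises a 148500 kHz dot clock to an effective
 * 148500 * 1920 / 1728 = 165000 kHz pixel rate, since the full source
 * must be fetched within the smaller window's scanout time.
 */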
2207
2208static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2209 const struct drm_display_mode *timings)
2210{
2211 mode->hdisplay = timings->crtc_hdisplay;
2212 mode->htotal = timings->crtc_htotal;
2213 mode->hsync_start = timings->crtc_hsync_start;
2214 mode->hsync_end = timings->crtc_hsync_end;
2215
2216 mode->vdisplay = timings->crtc_vdisplay;
2217 mode->vtotal = timings->crtc_vtotal;
2218 mode->vsync_start = timings->crtc_vsync_start;
2219 mode->vsync_end = timings->crtc_vsync_end;
2220
2221 mode->flags = timings->flags;
2222 mode->type = DRM_MODE_TYPE_DRIVER;
2223
2224 mode->clock = timings->crtc_clock;
2225
2226 drm_mode_set_name(mode);
2227}
2228
2229static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2230{
2231 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2232
2233 if (HAS_GMCH(dev_priv))
2234 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2235 crtc_state->pixel_rate =
2236 crtc_state->hw.pipe_mode.crtc_clock;
2237 else
2238 crtc_state->pixel_rate =
2239 ilk_pipe_pixel_rate(crtc_state);
2240}
2241
2242static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2243 struct drm_display_mode *mode)
2244{
2245 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2246
2247 if (num_pipes < 2)
2248 return;
2249
2250 mode->crtc_clock /= num_pipes;
2251 mode->crtc_hdisplay /= num_pipes;
2252 mode->crtc_hblank_start /= num_pipes;
2253 mode->crtc_hblank_end /= num_pipes;
2254 mode->crtc_hsync_start /= num_pipes;
2255 mode->crtc_hsync_end /= num_pipes;
2256 mode->crtc_htotal /= num_pipes;
2257}
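
/*
 * Illustrative mode: with two joined pipes, a 7680 pixel wide
 * transcoder mode at 1188000 kHz is halved into a per-pipe 3840 pixel
 * wide mode at 594000 kHz. Only the horizontal timings and the clock
 * are split; the vertical timings are left intact.
 */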
2258
2259static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2260 struct drm_display_mode *mode)
2261{
2262 int overlap = crtc_state->splitter.pixel_overlap;
2263 int n = crtc_state->splitter.link_count;
2264
2265 if (!crtc_state->splitter.enable)
2266 return;
2267
2268 /*
2269 * eDP MSO uses segment timings from EDID for transcoder
2270 * timings, but full mode for everything else.
2271 *
2272 * h_full = (h_segment - pixel_overlap) * link_count
2273 */
2274 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2275 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2276 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2277 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2278 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2279 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2280 mode->crtc_clock *= n;
2281}
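
/*
 * Worked example with illustrative numbers: with two MSO links and a
 * pixel_overlap of 8, a 1288 pixel wide EDID segment expands to a
 * (1288 - 8) * 2 = 2560 pixel wide full mode, and the segment clock
 * is doubled to match.
 */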
2282
2283static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2284{
2285 struct drm_display_mode *mode = &crtc_state->hw.mode;
2286 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2287 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2288
2289 /*
2290 * Start with the adjusted_mode crtc timings, which
2291 * have been filled with the transcoder timings.
2292 */
2293 drm_mode_copy(pipe_mode, adjusted_mode);
2294
2295 /* Expand MSO per-segment transcoder timings to full */
2296 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2297
2298 /*
2299 * We want the full numbers in adjusted_mode normal timings,
2300	 * We want the full numbers in the adjusted_mode normal timings;
2301	 * the adjusted_mode crtc timings are left with the raw transcoder
2302 */
2303 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2304
2305 /* Populate the "user" mode with full numbers */
2306 drm_mode_copy(mode, pipe_mode);
2307 intel_mode_from_crtc_timings(mode, mode);
2308 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
2309 (intel_bigjoiner_num_pipes(crtc_state) ?: 1);
2310 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
2311
2312 /* Derive per-pipe timings in case bigjoiner is used */
2313 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2314 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2315
2316 intel_crtc_compute_pixel_rate(crtc_state);
2317}
2318
2319void intel_encoder_get_config(struct intel_encoder *encoder,
2320 struct intel_crtc_state *crtc_state)
2321{
2322 encoder->get_config(encoder, crtc_state);
2323
2324 intel_crtc_readout_derived_state(crtc_state);
2325}
2326
2327static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2328{
2329 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2330 int width, height;
2331
2332 if (num_pipes < 2)
2333 return;
2334
2335 width = drm_rect_width(&crtc_state->pipe_src);
2336 height = drm_rect_height(&crtc_state->pipe_src);
2337
2338 drm_rect_init(&crtc_state->pipe_src, 0, 0,
2339 width / num_pipes, height);
2340}
2341
2342static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
2343{
2344 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2345 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2346
2347 intel_bigjoiner_compute_pipe_src(crtc_state);
2348
2349 /*
2350 * Pipe horizontal size must be even in:
2351 * - DVO ganged mode
2352 * - LVDS dual channel mode
2353 * - Double wide pipe
2354 */
2355 if (drm_rect_width(&crtc_state->pipe_src) & 1) {
2356 if (crtc_state->double_wide) {
2357 drm_dbg_kms(&i915->drm,
2358 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
2359 crtc->base.base.id, crtc->base.name);
2360 return -EINVAL;
2361 }
2362
2363 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
2364 intel_is_dual_link_lvds(i915)) {
2365 drm_dbg_kms(&i915->drm,
2366 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
2367 crtc->base.base.id, crtc->base.name);
2368 return -EINVAL;
2369 }
2370 }
2371
2372 return 0;
2373}
2374
2375static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
2376{
2377 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2378 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2379 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2380 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2381 int clock_limit = i915->max_dotclk_freq;
2382
2383 /*
2384 * Start with the adjusted_mode crtc timings, which
2385 * have been filled with the transcoder timings.
2386 */
2387 drm_mode_copy(pipe_mode, adjusted_mode);
2388
2389 /* Expand MSO per-segment transcoder timings to full */
2390 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2391
2392 /* Derive per-pipe timings in case bigjoiner is used */
2393 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2394 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2395
2396 if (DISPLAY_VER(i915) < 4) {
2397 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
2398
2399 /*
2400 * Enable double wide mode when the dot clock
2401 * is > 90% of the (display) core speed.
2402 */
2403 if (intel_crtc_supports_double_wide(crtc) &&
2404 pipe_mode->crtc_clock > clock_limit) {
2405 clock_limit = i915->max_dotclk_freq;
2406 crtc_state->double_wide = true;
2407 }
2408 }
2409
2410 if (pipe_mode->crtc_clock > clock_limit) {
2411 drm_dbg_kms(&i915->drm,
2412 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
2413 crtc->base.base.id, crtc->base.name,
2414 pipe_mode->crtc_clock, clock_limit,
2415 str_yes_no(crtc_state->double_wide));
2416 return -EINVAL;
2417 }
2418
2419 return 0;
2420}
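
/*
 * Example of the pre-gen4 limit above, with illustrative frequencies:
 * at a 320000 kHz max cdclk the single wide limit is
 * 320000 * 9 / 10 = 288000 kHz, so a 270000 kHz dot clock fits as-is
 * while a 297000 kHz one must enable double wide mode (pipe A only,
 * except on 915G) and is then limited by max_dotclk_freq instead.
 */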
2421
2422static int intel_crtc_compute_config(struct intel_atomic_state *state,
2423 struct intel_crtc *crtc)
2424{
2425 struct intel_crtc_state *crtc_state =
2426 intel_atomic_get_new_crtc_state(state, crtc);
2427 int ret;
2428
2429 ret = intel_dpll_crtc_compute_clock(state, crtc);
2430 if (ret)
2431 return ret;
2432
2433 ret = intel_crtc_compute_pipe_src(crtc_state);
2434 if (ret)
2435 return ret;
2436
2437 ret = intel_crtc_compute_pipe_mode(crtc_state);
2438 if (ret)
2439 return ret;
2440
2441 intel_crtc_compute_pixel_rate(crtc_state);
2442
2443 if (crtc_state->has_pch_encoder)
2444 return ilk_fdi_compute_config(crtc, crtc_state);
2445
2446 return 0;
2447}
2448
2449static void
2450intel_reduce_m_n_ratio(u32 *num, u32 *den)
2451{
2452 while (*num > DATA_LINK_M_N_MASK ||
2453 *den > DATA_LINK_M_N_MASK) {
2454 *num >>= 1;
2455 *den >>= 1;
2456 }
2457}
2458
2459static void compute_m_n(u32 *ret_m, u32 *ret_n,
2460 u32 m, u32 n, u32 constant_n)
2461{
2462 if (constant_n)
2463 *ret_n = constant_n;
2464 else
2465 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2466
2467 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2468 intel_reduce_m_n_ratio(ret_m, ret_n);
2469}
2470
2471void
2472intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
2473 int pixel_clock, int link_clock,
2474 int bw_overhead,
2475 struct intel_link_m_n *m_n)
2476{
2477 u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
2478 u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
2479 bw_overhead);
2480 u32 data_n = intel_dp_max_data_rate(link_clock, nlanes);
2481
2482 /*
2483	 * Windows/BIOS always uses fixed M/N values. Follow suit.
2484	 *
2485	 * Also, several DP dongles in particular seem to be fussy
2486	 * about too-large link M/N values. Presumably the 20 bit
2487	 * value used by Windows/BIOS is acceptable to everyone.
2488 */
2489 m_n->tu = 64;
2490 compute_m_n(&m_n->data_m, &m_n->data_n,
2491 data_m, data_n,
2492 0x8000000);
2493
2494 compute_m_n(&m_n->link_m, &m_n->link_n,
2495 pixel_clock, link_symbol_clock,
2496 0x80000);
2497}
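
/*
 * Rough worked example with illustrative link parameters: the link
 * M/N pair encodes pixel_clock / link_symbol_clock, so a 148500 kHz
 * pixel clock on a 270000 kHz symbol clock gives M/N = 0.55, stored
 * with N fixed at 0x80000 and both values shifted down by
 * intel_reduce_m_n_ratio() if they would overflow the register fields.
 */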
2498
2499void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2500{
2501 /*
2502 * There may be no VBT; and if the BIOS enabled SSC we can
2503	 * just keep using it to avoid unnecessary flicker. Conversely, if the
2504 * BIOS isn't using it, don't assume it will work even if the VBT
2505 * indicates as much.
2506 */
2507 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2508 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2509 PCH_DREF_CONTROL) &
2510 DREF_SSC1_ENABLE;
2511
2512 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2513 drm_dbg_kms(&dev_priv->drm,
2514 "SSC %s by BIOS, overriding VBT which says %s\n",
2515 str_enabled_disabled(bios_lvds_use_ssc),
2516 str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
2517 dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
2518 }
2519 }
2520}
2521
2522void intel_zero_m_n(struct intel_link_m_n *m_n)
2523{
2524 /* corresponds to 0 register value */
2525 memset(m_n, 0, sizeof(*m_n));
2526 m_n->tu = 1;
2527}
2528
2529void intel_set_m_n(struct drm_i915_private *i915,
2530 const struct intel_link_m_n *m_n,
2531 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
2532 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
2533{
2534 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
2535 intel_de_write(i915, data_n_reg, m_n->data_n);
2536 intel_de_write(i915, link_m_reg, m_n->link_m);
2537 /*
2538 * On BDW+ writing LINK_N arms the double buffered update
2539 * of all the M/N registers, so it must be written last.
2540 */
2541 intel_de_write(i915, link_n_reg, m_n->link_n);
2542}
2543
2544bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
2545 enum transcoder transcoder)
2546{
2547 if (IS_HASWELL(dev_priv))
2548 return transcoder == TRANSCODER_EDP;
2549
2550 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
2551}
2552
2553void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
2554 enum transcoder transcoder,
2555 const struct intel_link_m_n *m_n)
2556{
2557 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2558 enum pipe pipe = crtc->pipe;
2559
2560 if (DISPLAY_VER(dev_priv) >= 5)
2561 intel_set_m_n(dev_priv, m_n,
2562 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
2563 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
2564 else
2565 intel_set_m_n(dev_priv, m_n,
2566 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
2567 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
2568}
2569
2570void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
2571 enum transcoder transcoder,
2572 const struct intel_link_m_n *m_n)
2573{
2574 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2575
2576 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
2577 return;
2578
2579 intel_set_m_n(dev_priv, m_n,
2580 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
2581 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
2582}
2583
2584static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
2585{
2586 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2587 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2588 enum pipe pipe = crtc->pipe;
2589 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2590 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2591 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
2592 int vsyncshift = 0;
2593
2594	/* We need to be careful not to change the adjusted mode, otherwise
2595	 * the hw state checker will get angry at the mismatch. */
2596 crtc_vdisplay = adjusted_mode->crtc_vdisplay;
2597 crtc_vtotal = adjusted_mode->crtc_vtotal;
2598 crtc_vblank_start = adjusted_mode->crtc_vblank_start;
2599 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
2600
2601 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
2602 /* the chip adds 2 halflines automatically */
2603 crtc_vtotal -= 1;
2604 crtc_vblank_end -= 1;
2605
2606 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
2607 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
2608 else
2609 vsyncshift = adjusted_mode->crtc_hsync_start -
2610 adjusted_mode->crtc_htotal / 2;
2611 if (vsyncshift < 0)
2612 vsyncshift += adjusted_mode->crtc_htotal;
2613 }
2614
2615 /*
2616	 * VBLANK_START no longer works on ADL+; instead we must use
2617 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
2618 */
2619 if (DISPLAY_VER(dev_priv) >= 13) {
2620 intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
2621 crtc_vblank_start - crtc_vdisplay);
2622
2623 /*
2624	 * VBLANK_START is not used by the hw, so just clear it
2625 * to make it stand out in register dumps.
2626 */
2627 crtc_vblank_start = 1;
2628 }
2629
2630 if (DISPLAY_VER(dev_priv) >= 4)
2631 intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
2632 vsyncshift);
2633
2634 intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
2635 HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
2636 HTOTAL(adjusted_mode->crtc_htotal - 1));
2637 intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
2638 HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
2639 HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
2640 intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
2641 HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
2642 HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
2643
2644 intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
2645 VACTIVE(crtc_vdisplay - 1) |
2646 VTOTAL(crtc_vtotal - 1));
2647 intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
2648 VBLANK_START(crtc_vblank_start - 1) |
2649 VBLANK_END(crtc_vblank_end - 1));
2650 intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
2651 VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
2652 VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
2653
2654 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
2655 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
2656	 * documented in the DDI_FUNC_CTL register description, EDP Input Select
2657 * bits. */
2658 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
2659 (pipe == PIPE_B || pipe == PIPE_C))
2660 intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
2661 VACTIVE(crtc_vdisplay - 1) |
2662 VTOTAL(crtc_vtotal - 1));
2663}
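
/*
 * Interlace example for the halfline handling above (illustrative
 * mode): a 1125 line 1080i mode programs VTOTAL with 1123, one off
 * for the halflines the hardware adds and one more because the
 * register values are zero based.
 */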
2664
2665static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
2666{
2667 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2668 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2669 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2670 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2671 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
2672
2673 crtc_vdisplay = adjusted_mode->crtc_vdisplay;
2674 crtc_vtotal = adjusted_mode->crtc_vtotal;
2675 crtc_vblank_start = adjusted_mode->crtc_vblank_start;
2676 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
2677
2678 drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE);
2679
2680 /*
2681 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
2682 * But let's write it anyway to keep the state checker happy.
2683 */
2684 intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
2685 VBLANK_START(crtc_vblank_start - 1) |
2686 VBLANK_END(crtc_vblank_end - 1));
2687 /*
2688 * The double buffer latch point for TRANS_VTOTAL
2689 * is the transcoder's undelayed vblank.
2690 */
2691 intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
2692 VACTIVE(crtc_vdisplay - 1) |
2693 VTOTAL(crtc_vtotal - 1));
2694}
2695
2696static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
2697{
2698 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2699 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2700 int width = drm_rect_width(&crtc_state->pipe_src);
2701 int height = drm_rect_height(&crtc_state->pipe_src);
2702 enum pipe pipe = crtc->pipe;
2703
2704 /* pipesrc controls the size that is scaled from, which should
2705 * always be the user's requested size.
2706 */
2707 intel_de_write(dev_priv, PIPESRC(pipe),
2708 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
2709}
2710
2711static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
2712{
2713 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2714 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2715
2716 if (DISPLAY_VER(dev_priv) == 2)
2717 return false;
2718
2719 if (DISPLAY_VER(dev_priv) >= 9 ||
2720 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2721 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
2722 else
2723 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
2724}
2725
2726static void intel_get_transcoder_timings(struct intel_crtc *crtc,
2727 struct intel_crtc_state *pipe_config)
2728{
2729 struct drm_device *dev = crtc->base.dev;
2730 struct drm_i915_private *dev_priv = to_i915(dev);
2731 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
2732 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2733 u32 tmp;
2734
2735 tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
2736 adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
2737 adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
2738
2739 if (!transcoder_is_dsi(cpu_transcoder)) {
2740 tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
2741 adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
2742 adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
2743 }
2744
2745 tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
2746 adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
2747 adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
2748
2749 tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
2750 adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
2751 adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
2752
2753 /* FIXME TGL+ DSI transcoders have this! */
2754 if (!transcoder_is_dsi(cpu_transcoder)) {
2755 tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
2756 adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
2757 adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
2758 }
2759 tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
2760 adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
2761 adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
2762
2763 if (intel_pipe_is_interlaced(pipe_config)) {
2764 adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
2765 adjusted_mode->crtc_vtotal += 1;
2766 adjusted_mode->crtc_vblank_end += 1;
2767 }
2768
2769 if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
2770 adjusted_mode->crtc_vblank_start =
2771 adjusted_mode->crtc_vdisplay +
2772 intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
2773}
2774
2775static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
2776{
2777 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2778 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2779 enum pipe master_pipe, pipe = crtc->pipe;
2780 int width;
2781
2782 if (num_pipes < 2)
2783 return;
2784
2785 master_pipe = bigjoiner_master_pipe(crtc_state);
2786 width = drm_rect_width(&crtc_state->pipe_src);
2787
2788 drm_rect_translate_to(&crtc_state->pipe_src,
2789 (pipe - master_pipe) * width, 0);
2790}
2791
2792static void intel_get_pipe_src_size(struct intel_crtc *crtc,
2793 struct intel_crtc_state *pipe_config)
2794{
2795 struct drm_device *dev = crtc->base.dev;
2796 struct drm_i915_private *dev_priv = to_i915(dev);
2797 u32 tmp;
2798
2799 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
2800
2801 drm_rect_init(&pipe_config->pipe_src, 0, 0,
2802 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
2803 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
2804
2805 intel_bigjoiner_adjust_pipe_src(pipe_config);
2806}
2807
2808void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
2809{
2810 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2811 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2812 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2813 u32 val = 0;
2814
2815 /*
2816 * - We keep both pipes enabled on 830
2817 * - During modeset the pipe is still disabled and must remain so
2818 * - During fastset the pipe is already enabled and must remain so
2819 */
2820 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
2821 val |= TRANSCONF_ENABLE;
2822
2823 if (crtc_state->double_wide)
2824 val |= TRANSCONF_DOUBLE_WIDE;
2825
2826 /* only g4x and later have fancy bpc/dither controls */
2827 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2828 IS_CHERRYVIEW(dev_priv)) {
2829 /* Bspec claims that we can't use dithering for 30bpp pipes. */
2830 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
2831 val |= TRANSCONF_DITHER_EN |
2832 TRANSCONF_DITHER_TYPE_SP;
2833
2834 switch (crtc_state->pipe_bpp) {
2835 default:
2836 /* Case prevented by intel_choose_pipe_bpp_dither. */
2837 MISSING_CASE(crtc_state->pipe_bpp);
2838 fallthrough;
2839 case 18:
2840 val |= TRANSCONF_BPC_6;
2841 break;
2842 case 24:
2843 val |= TRANSCONF_BPC_8;
2844 break;
2845 case 30:
2846 val |= TRANSCONF_BPC_10;
2847 break;
2848 }
2849 }
2850
2851 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
2852 if (DISPLAY_VER(dev_priv) < 4 ||
2853 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
2854 val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
2855 else
2856 val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
2857 } else {
2858 val |= TRANSCONF_INTERLACE_PROGRESSIVE;
2859 }
2860
2861 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
2862 crtc_state->limited_color_range)
2863 val |= TRANSCONF_COLOR_RANGE_SELECT;
2864
2865 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
2866
2867 if (crtc_state->wgc_enable)
2868 val |= TRANSCONF_WGC_ENABLE;
2869
2870 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
2871
2872 intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
2873 intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
2874}
2875
2876static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
2877{
2878 if (IS_I830(dev_priv))
2879 return false;
2880
2881 return DISPLAY_VER(dev_priv) >= 4 ||
2882 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
2883}
2884
2885static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
2886{
2887 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2888 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2889 enum pipe pipe;
2890 u32 tmp;
2891
2892 if (!i9xx_has_pfit(dev_priv))
2893 return;
2894
2895 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
2896 if (!(tmp & PFIT_ENABLE))
2897 return;
2898
2899 /* Check whether the pfit is attached to our pipe. */
2900 if (DISPLAY_VER(dev_priv) >= 4)
2901 pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
2902 else
2903 pipe = PIPE_B;
2904
2905 if (pipe != crtc->pipe)
2906 return;
2907
2908 crtc_state->gmch_pfit.control = tmp;
2909 crtc_state->gmch_pfit.pgm_ratios =
2910 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
2911}
2912
2913static enum intel_output_format
2914bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
2915{
2916 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2917 u32 tmp;
2918
2919 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
2920
2921 if (tmp & PIPE_MISC_YUV420_ENABLE) {
2922 /* We support 4:2:0 in full blend mode only */
2923 drm_WARN_ON(&dev_priv->drm,
2924 (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
2925
2926 return INTEL_OUTPUT_FORMAT_YCBCR420;
2927 } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
2928 return INTEL_OUTPUT_FORMAT_YCBCR444;
2929 } else {
2930 return INTEL_OUTPUT_FORMAT_RGB;
2931 }
2932}
2933
2934static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
2935 struct intel_crtc_state *pipe_config)
2936{
2937 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2938 enum intel_display_power_domain power_domain;
2939 intel_wakeref_t wakeref;
2940 u32 tmp;
2941 bool ret;
2942
2943 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
2944 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
2945 if (!wakeref)
2946 return false;
2947
2948 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2949 pipe_config->sink_format = pipe_config->output_format;
2950 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
2951 pipe_config->shared_dpll = NULL;
2952
2953 ret = false;
2954
2955 tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
2956 if (!(tmp & TRANSCONF_ENABLE))
2957 goto out;
2958
2959 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2960 IS_CHERRYVIEW(dev_priv)) {
2961 switch (tmp & TRANSCONF_BPC_MASK) {
2962 case TRANSCONF_BPC_6:
2963 pipe_config->pipe_bpp = 18;
2964 break;
2965 case TRANSCONF_BPC_8:
2966 pipe_config->pipe_bpp = 24;
2967 break;
2968 case TRANSCONF_BPC_10:
2969 pipe_config->pipe_bpp = 30;
2970 break;
2971 default:
2972 MISSING_CASE(tmp);
2973 break;
2974 }
2975 }
2976
2977 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
2978 (tmp & TRANSCONF_COLOR_RANGE_SELECT))
2979 pipe_config->limited_color_range = true;
2980
2981 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);
2982
2983 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
2984
2985 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
2986 (tmp & TRANSCONF_WGC_ENABLE))
2987 pipe_config->wgc_enable = true;
2988
2989 intel_color_get_config(pipe_config);
2990
2991 if (DISPLAY_VER(dev_priv) < 4)
2992 pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
2993
2994 intel_get_transcoder_timings(crtc, pipe_config);
2995 intel_get_pipe_src_size(crtc, pipe_config);
2996
2997 i9xx_get_pfit_config(pipe_config);
2998
2999 if (DISPLAY_VER(dev_priv) >= 4) {
3000 /* No way to read it out on pipes B and C */
3001 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
3002 tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
3003 else
3004 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
3005 pipe_config->pixel_multiplier =
3006 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
3007 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
3008 pipe_config->dpll_hw_state.dpll_md = tmp;
3009 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
3010 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
3011 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
3012 pipe_config->pixel_multiplier =
3013 ((tmp & SDVO_MULTIPLIER_MASK)
3014 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
3015 } else {
3016 /* Note that on i915G/GM the pixel multiplier is in the sdvo
3017 * port and will be fixed up in the encoder->get_config
3018 * function. */
3019 pipe_config->pixel_multiplier = 1;
3020 }
3021 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
3022 DPLL(crtc->pipe));
3023 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
3024 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
3025 FP0(crtc->pipe));
3026 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
3027 FP1(crtc->pipe));
3028 } else {
3029 /* Mask out read-only status bits. */
3030 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
3031 DPLL_PORTC_READY_MASK |
3032 DPLL_PORTB_READY_MASK);
3033 }
3034
3035 if (IS_CHERRYVIEW(dev_priv))
3036 chv_crtc_clock_get(crtc, pipe_config);
3037 else if (IS_VALLEYVIEW(dev_priv))
3038 vlv_crtc_clock_get(crtc, pipe_config);
3039 else
3040 i9xx_crtc_clock_get(crtc, pipe_config);
3041
3042 /*
3043	 * Normally the dotclock is filled in by the encoder's .get_config(),
3044	 * but in case the pipe is enabled without any ports we need a sane
3045 * default.
3046 */
3047 pipe_config->hw.adjusted_mode.crtc_clock =
3048 pipe_config->port_clock / pipe_config->pixel_multiplier;
3049
3050 ret = true;
3051
3052out:
3053 intel_display_power_put(dev_priv, power_domain, wakeref);
3054
3055 return ret;
3056}
3057
3058void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
3059{
3060 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3061 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3062 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3063 u32 val = 0;
3064
3065 /*
3066 * - During modeset the pipe is still disabled and must remain so
3067 * - During fastset the pipe is already enabled and must remain so
3068 */
3069 if (!intel_crtc_needs_modeset(crtc_state))
3070 val |= TRANSCONF_ENABLE;
3071
3072 switch (crtc_state->pipe_bpp) {
3073 default:
3074 /* Case prevented by intel_choose_pipe_bpp_dither. */
3075 MISSING_CASE(crtc_state->pipe_bpp);
3076 fallthrough;
3077 case 18:
3078 val |= TRANSCONF_BPC_6;
3079 break;
3080 case 24:
3081 val |= TRANSCONF_BPC_8;
3082 break;
3083 case 30:
3084 val |= TRANSCONF_BPC_10;
3085 break;
3086 case 36:
3087 val |= TRANSCONF_BPC_12;
3088 break;
3089 }
3090
3091 if (crtc_state->dither)
3092 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
3093
3094 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3095 val |= TRANSCONF_INTERLACE_IF_ID_ILK;
3096 else
3097 val |= TRANSCONF_INTERLACE_PF_PD_ILK;
3098
3099 /*
3100	 * Limited color range with a non-RGB output format would end up with
3101	 * an odd purple hue over the entire display. Make sure we don't do it.
3102 */
3103 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
3104 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
3105
3106 if (crtc_state->limited_color_range &&
3107 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3108 val |= TRANSCONF_COLOR_RANGE_SELECT;
3109
3110 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3111 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;
3112
3113 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
3114
3115 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
3116 val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
3117
3118 intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
3119 intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
3120}
3121
3122static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3123{
3124 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3125 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3126 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3127 u32 val = 0;
3128
3129 /*
3130 * - During modeset the pipe is still disabled and must remain so
3131 * - During fastset the pipe is already enabled and must remain so
3132 */
3133 if (!intel_crtc_needs_modeset(crtc_state))
3134 val |= TRANSCONF_ENABLE;
3135
3136 if (IS_HASWELL(dev_priv) && crtc_state->dither)
3137 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
3138
3139 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3140 val |= TRANSCONF_INTERLACE_IF_ID_ILK;
3141 else
3142 val |= TRANSCONF_INTERLACE_PF_PD_ILK;
3143
3144 if (IS_HASWELL(dev_priv) &&
3145 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3146 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
3147
3148 intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
3149 intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
3150}
3151
3152static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
3153{
3154 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3155 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3156 u32 val = 0;
3157
3158 switch (crtc_state->pipe_bpp) {
3159 case 18:
3160 val |= PIPE_MISC_BPC_6;
3161 break;
3162 case 24:
3163 val |= PIPE_MISC_BPC_8;
3164 break;
3165 case 30:
3166 val |= PIPE_MISC_BPC_10;
3167 break;
3168 case 36:
3169 /* Port output 12BPC defined for ADLP+ */
3170 if (DISPLAY_VER(dev_priv) >= 13)
3171 val |= PIPE_MISC_BPC_12_ADLP;
3172 break;
3173 default:
3174 MISSING_CASE(crtc_state->pipe_bpp);
3175 break;
3176 }
3177
3178 if (crtc_state->dither)
3179 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;
3180
3181 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3182 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3183 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
3184
3185 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3186 val |= PIPE_MISC_YUV420_ENABLE |
3187 PIPE_MISC_YUV420_MODE_FULL_BLEND;
3188
3189 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3190 val |= PIPE_MISC_HDR_MODE_PRECISION;
3191
3192 if (DISPLAY_VER(dev_priv) >= 12)
3193 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
3194
3195 /* allow PSR with sprite enabled */
3196 if (IS_BROADWELL(dev_priv))
3197 val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
3198
3199 intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
3200}
3201
3202int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
3203{
3204 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3205 u32 tmp;
3206
3207 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
3208
3209 switch (tmp & PIPE_MISC_BPC_MASK) {
3210 case PIPE_MISC_BPC_6:
3211 return 18;
3212 case PIPE_MISC_BPC_8:
3213 return 24;
3214 case PIPE_MISC_BPC_10:
3215 return 30;
3216 /*
3217 * PORT OUTPUT 12 BPC defined for ADLP+.
3218 *
3219 * TODO:
3220 * For previous platforms with DSI interface, bits 5:7
3221 * are used for storing pipe_bpp irrespective of dithering.
3222 * Since the value of 12 BPC is not defined for these bits
3223	 * on older platforms, we need to find a workaround for 12 BPC
3224 * MIPI DSI HW readout.
3225 */
3226 case PIPE_MISC_BPC_12_ADLP:
3227 if (DISPLAY_VER(dev_priv) >= 13)
3228 return 36;
3229 fallthrough;
3230 default:
3231 MISSING_CASE(tmp);
3232 return 0;
3233 }
3234}
3235
3236int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3237{
3238 /*
3239 * Account for spread spectrum to avoid
3240 * oversubscribing the link. Max center spread
3241 * is 2.5%; use 5% for safety's sake.
3242 */
3243 u32 bps = target_clock * bpp * 21 / 20;
3244 return DIV_ROUND_UP(bps, link_bw * 8);
3245}
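
/*
 * Worked example with illustrative numbers: a 148500 kHz pixel clock
 * at 24 bpp needs 148500 * 24 * 21 / 20 = 3742200 kbps including the
 * 5% spread margin, so a 270000 kHz link carrying 8 bits per symbol
 * per lane needs DIV_ROUND_UP(3742200, 2160000) = 2 lanes.
 */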
3246
3247void intel_get_m_n(struct drm_i915_private *i915,
3248 struct intel_link_m_n *m_n,
3249 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
3250 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
3251{
3252 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
3253 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
3254 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
3255 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
3256 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
3257}
3258
3259void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
3260 enum transcoder transcoder,
3261 struct intel_link_m_n *m_n)
3262{
3263 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3264 enum pipe pipe = crtc->pipe;
3265
3266 if (DISPLAY_VER(dev_priv) >= 5)
3267 intel_get_m_n(dev_priv, m_n,
3268 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3269 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3270 else
3271 intel_get_m_n(dev_priv, m_n,
3272 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3273 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3274}
3275
3276void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
3277 enum transcoder transcoder,
3278 struct intel_link_m_n *m_n)
3279{
3280 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3281
3282 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3283 return;
3284
3285 intel_get_m_n(dev_priv, m_n,
3286 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3287 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
3288}
3289
3290static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3291{
3292 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3293 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3294 u32 ctl, pos, size;
3295 enum pipe pipe;
3296
3297 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3298 if ((ctl & PF_ENABLE) == 0)
3299 return;
3300
3301 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
3302 pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
3303 else
3304 pipe = crtc->pipe;
3305
3306 crtc_state->pch_pfit.enabled = true;
3307
3308 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
3309 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
3310
3311 drm_rect_init(&crtc_state->pch_pfit.dst,
3312 REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
3313 REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
3314 REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
3315 REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
3316
3317 /*
3318	 * We currently do not free assignments of panel fitters on
3319	 * ivb/hsw (since we don't use the higher upscaling modes which
3320	 * differentiate them), so just WARN about this case for now.
3321 */
3322 drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
3323}
3324
3325static bool ilk_get_pipe_config(struct intel_crtc *crtc,
3326 struct intel_crtc_state *pipe_config)
3327{
3328 struct drm_device *dev = crtc->base.dev;
3329 struct drm_i915_private *dev_priv = to_i915(dev);
3330 enum intel_display_power_domain power_domain;
3331 intel_wakeref_t wakeref;
3332 u32 tmp;
3333 bool ret;
3334
3335 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3336 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3337 if (!wakeref)
3338 return false;
3339
3340 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3341 pipe_config->shared_dpll = NULL;
3342
3343 ret = false;
3344 tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
3345 if (!(tmp & TRANSCONF_ENABLE))
3346 goto out;
3347
3348 switch (tmp & TRANSCONF_BPC_MASK) {
3349 case TRANSCONF_BPC_6:
3350 pipe_config->pipe_bpp = 18;
3351 break;
3352 case TRANSCONF_BPC_8:
3353 pipe_config->pipe_bpp = 24;
3354 break;
3355 case TRANSCONF_BPC_10:
3356 pipe_config->pipe_bpp = 30;
3357 break;
3358 case TRANSCONF_BPC_12:
3359 pipe_config->pipe_bpp = 36;
3360 break;
3361 default:
3362 break;
3363 }
3364
3365 if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
3366 pipe_config->limited_color_range = true;
3367
3368 switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
3369 case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
3370 case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
3371 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
3372 break;
3373 default:
3374 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3375 break;
3376 }
3377
3378 pipe_config->sink_format = pipe_config->output_format;
3379
3380 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);
3381
3382 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
3383
3384 pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);
3385
3386 intel_color_get_config(pipe_config);
3387
3388 pipe_config->pixel_multiplier = 1;
3389
3390 ilk_pch_get_config(pipe_config);
3391
3392 intel_get_transcoder_timings(crtc, pipe_config);
3393 intel_get_pipe_src_size(crtc, pipe_config);
3394
3395 ilk_get_pfit_config(pipe_config);
3396
3397 ret = true;
3398
3399out:
3400 intel_display_power_put(dev_priv, power_domain, wakeref);
3401
3402 return ret;
3403}
3404
3405static u8 bigjoiner_pipes(struct drm_i915_private *i915)
3406{
3407 u8 pipes;
3408
3409 if (DISPLAY_VER(i915) >= 12)
3410 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
3411 else if (DISPLAY_VER(i915) >= 11)
3412 pipes = BIT(PIPE_B) | BIT(PIPE_C);
3413 else
3414 pipes = 0;
3415
	return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask;
}

static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}

static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
				    u8 *master_pipes, u8 *slave_pipes)
{
	struct intel_crtc *crtc;

	*master_pipes = 0;
	*slave_pipes = 0;

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
					 bigjoiner_pipes(dev_priv)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				*master_pipes |= BIT(pipe);
			else
				*slave_pipes |= BIT(pipe);
		}

		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				*master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				*slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 *master_pipes, *slave_pipes);
}

static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	if ((slave_pipes & BIT(pipe)) == 0)
		return pipe;

	/* ignore everything above our pipe */
	master_pipes &= ~GENMASK(7, pipe);

	/* highest remaining bit should be our master pipe */
	return fls(master_pipes) - 1;
}

static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	enum pipe master_pipe, next_master_pipe;

	master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);

	if ((master_pipes & BIT(master_pipe)) == 0)
		return 0;

	/* ignore our master pipe and everything below it */
	master_pipes &= ~GENMASK(master_pipe, 0);
	/* make sure a high bit is set for the ffs() */
	master_pipes |= BIT(7);
	/* lowest remaining bit should be the next master pipe */
	next_master_pipe = ffs(master_pipes) - 1;

	return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
}
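
/*
 * Worked example (hypothetical readout, purely illustrative): with
 * master_pipes = BIT(PIPE_A) | BIT(PIPE_C) and
 * slave_pipes = BIT(PIPE_B) | BIT(PIPE_D),
 * get_bigjoiner_master_pipe(PIPE_B, ...) masks off everything above
 * pipe B and returns the highest remaining master, PIPE_A, while
 * get_bigjoiner_slave_pipes(PIPE_A, ...) returns BIT(PIPE_B): the
 * slaves between master A and the next master, C.
 */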

static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
{
	u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);

	if (DISPLAY_VER(i915) >= 11)
		panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	return panel_transcoder_mask;
}

static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 master_pipes, slave_pipes;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in an always-on power well).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
	if (slave_pipes & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder)
			get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}

static bool has_edp_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & BIT(TRANSCODER_EDP);
}

static bool has_dsi_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
				      BIT(TRANSCODER_DSI_1));
}

static bool has_pipe_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
				       BIT(TRANSCODER_DSI_0) |
				       BIT(TRANSCODER_DSI_1));
}

static void assert_enabled_transcoders(struct drm_i915_private *i915,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	drm_WARN_ON(&i915->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	drm_WARN_ON(&i915->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}

static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));

	return tmp & TRANSCONF_ENABLE;
}

static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u8 master_pipes, slave_pipes;
	enum pipe pipe = crtc->pipe;

	enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);

	if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
		return;

	crtc_state->bigjoiner_pipes =
		BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
		get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
}

static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);

	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	intel_bigjoiner_get_config(pipe_config);
	intel_dsc_get_config(pipe_config);

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					TRANSCONF(pipe_config->cpu_transcoder));

		if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipe_misc_output_format(crtc);
	}

	pipe_config->sink_format = pipe_config->output_format;

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_scaler_get_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	hsw_ips_get_config(pipe_config);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      TRANS_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));

		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
	} else {
		/* no idea if this is correct */
		pipe_config->framestart_delay = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);

	return active;
}

bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
		return false;

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);

	return true;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock -> pixel clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and for link freq (in 10 kbit/s units) -> pixel clock it is:
	 * link_symbol_clock = link_freq * 10 / link_symbol_size
	 * pixel_clock = (m * link_symbol_clock) / n
	 * or for more precision:
	 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
	 */

	if (!m_n->link_n)
		return 0;

	return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
				m_n->link_n * intel_dp_link_symbol_size(link_freq));
}
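
/*
 * Rough worked example (illustrative numbers, not taken from any
 * particular platform): for an 8b/10b DP link at HBR, link_freq is
 * 270000 (10 kbit/s units, i.e. 2.7 Gbps) and the link symbol size
 * is 10 bits, so the link symbol clock works out to
 * 270000 * 10 / 10 = 270000 (kHz); with link_m/link_n = 2/3 the
 * pixel clock comes out as 180000 kHz.
 */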

int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
{
	int dotclock;

	if (intel_crtc_has_dp_encoder(pipe_config))
		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
						    &pipe_config->dp_m_n);
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
		dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
					     pipe_config->pipe_bpp);
	else
		dotclock = pipe_config->port_clock;

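	/*
	 * YCbCr 4:2:0 runs the port at half the pixel rate, so scale the
	 * dotclock back up here; DP is excluded since its dotclock was
	 * already derived from the link m/n values above.
	 */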
	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
	    !intel_crtc_has_dp_encoder(pipe_config))
		dotclock *= 2;

	if (pipe_config->pixel_multiplier)
		dotclock /= pipe_config->pixel_multiplier;

	return dotclock;
}

/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_crtc_for_pipe(dev_priv, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	if (!intel_crtc_get_pipe_config(crtc_state)) {
		intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
		kfree(mode);
		return NULL;
	}

	intel_encoder_get_config(encoder, crtc_state);

	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

	intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);

	return mode;
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & BIT(b->type) &&
			  b->cloneable & BIT(a->type));
}

static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}

static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
			crtc_state->data_rate[plane->id] = 0;
			crtc_state->rel_data_rate[plane->id] = 0;
		}

		plane_state->planar_slave = false;
	}

	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		crtc_state->data_rate[linked->id] =
			crtc_state->data_rate_y[plane->id];
		crtc_state->rel_data_rate[linked->id] =
			crtc_state->rel_data_rate_y[plane->id];
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

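		/*
		 * HDR planes need the chroma upsampler (CUS) pointed at
		 * their linked Y plane; the register encoding differs
		 * between the ICL (sprites 4/5) and RKL (sprites 2/3)
		 * plane layouts.
		 */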
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}

static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

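	/* !x reduces the plane mask to a boolean: did we go from no C8 planes to some, or back? */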
	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}

static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

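	/*
	 * Line duration in 1/8 us units: crtc_htotal is in pixels and
	 * crtc_clock in kHz, so htotal * 1000 / clock gives the line
	 * time in us; the extra * 8 stores it at 0.125 us granularity
	 * before clamping to the 9-bit register field.
	 */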
	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					pipe_mode->crtc_clock);

	return min(linetime_wm, 0x1ff);
}

static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_cdclk_state *cdclk_state)
{
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					cdclk_state->logical.cdclk);

	return min(linetime_wm, 0x1ff);
}

static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	int linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
				   crtc_state->pixel_rate);

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    skl_watermark_ipc_enabled(dev_priv))
		linetime_wm /= 2;

	return min(linetime_wm, 0x1ff);
}

static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (DISPLAY_VER(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}

static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    intel_crtc_needs_modeset(crtc_state) &&
	    !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (intel_crtc_needs_modeset(crtc_state)) {
		ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (intel_crtc_needs_color_update(crtc_state)) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = intel_compute_pipe_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
		return ret;
	}

	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state. We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
		return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9) {
		if (intel_crtc_needs_modeset(crtc_state) ||
		    intel_crtc_needs_fastset(crtc_state)) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_ips_compute_config(state, crtc);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}

static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *crtc_state)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct drm_display_info *info = &connector->display_info;
	int bpp;

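	/* pipe_bpp counts all three color channels: bpp = 3 * bpc */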
	switch (conn_state->max_bpc) {
	case 6 ... 7:
		bpp = 6 * 3;
		break;
	case 8 ... 9:
		bpp = 8 * 3;
		break;
	case 10 ... 11:
		bpp = 10 * 3;
		break;
	case 12 ... 16:
		bpp = 12 * 3;
		break;
	default:
		MISSING_CASE(conn_state->max_bpc);
		return -EINVAL;
	}

	if (bpp < crtc_state->pipe_bpp) {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] Limiting display bpp to %d "
			    "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
			    connector->base.id, connector->name,
			    bpp, 3 * info->bpc,
			    3 * conn_state->max_requested_bpc,
			    crtc_state->pipe_bpp);

		crtc_state->pipe_bpp = bpp;
	}

	return 0;
}

static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	     IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (DISPLAY_VER(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;

	crtc_state->pipe_bpp = bpp;

	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		int ret;

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = compute_sink_pipe_bpp(connector_state, crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}

static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}

static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
				  crtc_state->uapi.degamma_lut);
	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
				  crtc_state->uapi.gamma_lut);
	drm_property_replace_blob(&crtc_state->hw.ctm,
				  crtc_state->uapi.ctm);
}

static void
intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	drm_mode_copy(&crtc_state->hw.mode,
		      &crtc_state->uapi.mode);
	drm_mode_copy(&crtc_state->hw.adjusted_mode,
		      &crtc_state->uapi.adjusted_mode);
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}

static void
copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
				    struct intel_crtc *slave_crtc)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);

	drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
				  master_crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
				  master_crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&slave_crtc_state->hw.ctm,
				  master_crtc_state->hw.ctm);

	slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
}

static int
copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
				  struct intel_crtc *slave_crtc)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc_state *saved_state;

	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* preserve some things from the slave's original crtc state */
	saved_state->uapi = slave_crtc_state->uapi;
	saved_state->scaler_state = slave_crtc_state->scaler_state;
	saved_state->shared_dpll = slave_crtc_state->shared_dpll;
	saved_state->crc_enabled = slave_crtc_state->crc_enabled;

	intel_crtc_free_hw_state(slave_crtc_state);
	memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
	slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
	slave_crtc_state->hw.active = master_crtc_state->hw.active;
	drm_mode_copy(&slave_crtc_state->hw.mode,
		      &master_crtc_state->hw.mode);
	drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
		      &master_crtc_state->hw.pipe_mode);
	drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
		      &master_crtc_state->hw.adjusted_mode);
	slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;

	copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);

	slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
	slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
	slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;

	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	return 0;
}

static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known not to cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->inherited = crtc_state->inherited;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);

	return 0;
}

static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  const struct intel_link_bw_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int pipe_src_w, pipe_src_h;
	int base_bpp, ret, i;

	crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;

	crtc_state->framestart_delay = 1;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive nor negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(state, crtc);
	if (ret)
		return ret;

	crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
	crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];

	if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Link bpp limited to " BPP_X16_FMT "\n",
			    crtc->base.base.id, crtc->base.name,
			    BPP_X16_ARGS(crtc_state->max_link_bpp_x16));
		crtc_state->bw_constrained = true;
	}

	base_bpp = crtc_state->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frames. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&crtc_state->hw.mode,
			       &pipe_src_w, &pipe_src_h);
	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      pipe_src_w, pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		if (!check_single_encoder_cloning(state, crtc, encoder)) {
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
				    encoder->base.base.id, encoder->base.name);
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			crtc_state->output_types |=
				BIT(encoder->compute_output_type(encoder, crtc_state,
								 connector_state));
		else
			crtc_state->output_types |= BIT(encoder->type);
	}

	/* Ensure the port clock defaults are reset when retrying. */
	crtc_state->port_clock = 0;
	crtc_state->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = encoder->compute_config(encoder, crtc_state,
					      connector_state);
		if (ret == -EDEADLK)
			return ret;
		if (ret < 0) {
			drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
				    encoder->base.base.id, encoder->base.name, ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!crtc_state->port_clock)
		crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
			* crtc_state->pixel_multiplier;

	ret = intel_crtc_compute_config(state, crtc);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
			    crtc->base.base.id, crtc->base.name, ret);
		return ret;
	}

	/* Dithering seems to not pass bits through correctly when it should, so
	 * only enable it on 6bpc panels and when it's not a compliance
	 * test requesting a 6bpc video pattern.
	 */
	crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
		!crtc_state->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    crtc->base.base.id, crtc->base.name,
		    base_bpp, crtc_state->pipe_bpp, crtc_state->dither);

	return 0;
}

static int
intel_modeset_pipe_config_late(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	intel_bigjoiner_adjust_pipe_src(crtc_state);

	for_each_new_connector_in_state(&state->base, connector,
					conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);
		int ret;

		if (conn_state->crtc != &crtc->base ||
		    !encoder->compute_config_late)
			continue;

		ret = encoder->compute_config_late(encoder, crtc_state,
						   conn_state);
		if (ret)
			return ret;
	}

	return 0;
}

bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

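	/*
	 * Accept the clocks as matching when they differ by less than
	 * 5% of their sum (~10% of their average):
	 * (diff + c1 + c2) * 100 / (c1 + c2) < 105  <=>  diff < 0.05 * (c1 + c2)
	 */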
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}

static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       const struct intel_link_m_n *m2_n2)
{
	return m_n->tu == m2_n2->tu &&
		m_n->data_m == m2_n2->data_m &&
		m_n->data_n == m2_n2->data_n &&
		m_n->link_m == m2_n2->link_m &&
		m_n->link_n == m2_n2->link_n;
}

static bool
intel_compare_infoframe(const union hdmi_infoframe *a,
			const union hdmi_infoframe *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
			 const struct drm_dp_vsc_sdp *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

static bool
intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
{
	return memcmp(a, b, len) == 0;
}

static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset requirement not met in %s infoframe\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}

static void
pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
				bool fastset, const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
{
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset requirement not met in %s dp sdp\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}

/* Returns the length up to and including the last differing byte */
static size_t
memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (a[i] != b[i])
			return i + 1;
	}

	return 0;
}

static void
pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
			    bool fastset, const char *name,
			    const u8 *a, const u8 *b, size_t len)
{
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		/* only dump up to the last difference */
		len = memcmp_diff_len(a, b, len);

		drm_dbg_kms(&dev_priv->drm,
			    "fastset requirement not met in %s buffer\n", name);
		print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
			       16, 0, a, len, false);
		print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
			       16, 0, b, len, false);
	} else {
		/* only dump up to the last difference */
		len = memcmp_diff_len(a, b, len);

		drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
		print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
			       16, 0, a, len, false);
		print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
			       16, 0, b, len, false);
	}
}

static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}

static bool fastboot_enabled(struct drm_i915_private *dev_priv)
{
	/* Enable fastboot by default on Skylake and newer */
	if (DISPLAY_VER(dev_priv) >= 9)
		return true;

	/* Enable fastboot by default on VLV and CHV */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return true;

	/* Disabled by default on all others */
	return false;
}

bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	bool fixup_inherited = fastset &&
		current_config->inherited && !pipe_config->inherited;

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm,
			    "initial modeset and fastboot not set\n");
		ret = false;
	}

#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
				 __stringify(name) " is bool"); \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)
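
/*
 * Example expansion: PIPE_CONF_CHECK_I(pixel_multiplier) compares
 * current_config->pixel_multiplier with pipe_config->pixel_multiplier,
 * logs "(expected %i, found %i)" via pipe_config_mismatch() and
 * clears ret on a mismatch.
 */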

#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
				 __stringify(name) " is not bool"); \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %s, found %s)", \
				     str_yes_no(current_config->name), \
				     str_yes_no(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i data %i/%i link %i/%i, " \
				     "found tu %i, data %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.data_m, \
				     current_config->name.data_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.data_m, \
				     pipe_config->name.data_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_TIMINGS(name) do { \
	PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
	PIPE_CONF_CHECK_I(name.crtc_htotal); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
	PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
	PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
	PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
	if (!fastset || !pipe_config->update_lrr) { \
		PIPE_CONF_CHECK_I(name.crtc_vtotal); \
		PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
	} \
} while (0)

#define PIPE_CONF_CHECK_RECT(name) do { \
	PIPE_CONF_CHECK_I(name.x1); \
	PIPE_CONF_CHECK_I(name.x2); \
	PIPE_CONF_CHECK_I(name.y1); \
	PIPE_CONF_CHECK_I(name.y2); \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!current_config->has_psr && !pipe_config->has_psr && \
	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BUFFER(name, len) do { \
	BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
	BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
	if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
		pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
					    current_config->name, \
					    pipe_config->name, \
					    (len)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
	if (current_config->gamma_mode == pipe_config->gamma_mode && \
	    !intel_color_lut_equal(current_config, \
				   current_config->lut, pipe_config->lut, \
				   is_pre_csc_lut)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(lut), \
				     "hw_state doesn't match sw_state"); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_CSC(name) do { \
	PIPE_CONF_CHECK_X(name.preoff[0]); \
	PIPE_CONF_CHECK_X(name.preoff[1]); \
	PIPE_CONF_CHECK_X(name.preoff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[0]); \
	PIPE_CONF_CHECK_X(name.coeff[1]); \
	PIPE_CONF_CHECK_X(name.coeff[2]); \
	PIPE_CONF_CHECK_X(name.coeff[3]); \
	PIPE_CONF_CHECK_X(name.coeff[4]); \
	PIPE_CONF_CHECK_X(name.coeff[5]); \
	PIPE_CONF_CHECK_X(name.coeff[6]); \
	PIPE_CONF_CHECK_X(name.coeff[7]); \
	PIPE_CONF_CHECK_X(name.coeff[8]); \
	PIPE_CONF_CHECK_X(name.postoff[0]); \
	PIPE_CONF_CHECK_X(name.postoff[1]); \
	PIPE_CONF_CHECK_X(name.postoff[2]); \
} while (0)

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_BOOL(hw.enable);
	PIPE_CONF_CHECK_BOOL(hw.active);

	PIPE_CONF_CHECK_I(cpu_transcoder);
	PIPE_CONF_CHECK_I(mst_master_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
		if (!fastset || !pipe_config->update_m_n)
			PIPE_CONF_CHECK_M_N(dp_m_n);
	} else {
		PIPE_CONF_CHECK_M_N(dp_m_n);
		PIPE_CONF_CHECK_M_N(dp_m2_n2);
	}

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(framestart_delay);
	PIPE_CONF_CHECK_I(msa_timing_delay);

	PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
	PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);

	PIPE_CONF_CHECK_I(pixel_multiplier);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(enhanced_framing);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	if (!fastset) {
		PIPE_CONF_CHECK_BOOL(has_audio);
		PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (DISPLAY_VER(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	if (!fastset) {
		PIPE_CONF_CHECK_RECT(pipe_src);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		PIPE_CONF_CHECK_RECT(pch_pfit.dst);

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_I(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);
		PIPE_CONF_CHECK_BOOL(wgc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
		PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);

		PIPE_CONF_CHECK_CSC(csc);
		PIPE_CONF_CHECK_CSC(output_csc);

		if (current_config->active_planes) {
			PIPE_CONF_CHECK_BOOL(has_psr);
			PIPE_CONF_CHECK_BOOL(has_psr2);
			PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
			PIPE_CONF_CHECK_I(dc3co_exitline);
		}
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	if (dev_priv->display.dpll.mgr) {
		PIPE_CONF_CHECK_P(shared_dpll);

		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
		PIPE_CONF_CHECK_X(dpll_hw_state.div0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
	}

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	if (!fastset || !pipe_config->update_m_n) {
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
	}
	PIPE_CONF_CHECK_I(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	if (current_config->has_psr || pipe_config->has_psr)
		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
	else
		PIPE_CONF_CHECK_X(infoframes.enable);

	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);
	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
5276
5277 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
5278 PIPE_CONF_CHECK_I(master_transcoder);
5279 PIPE_CONF_CHECK_X(bigjoiner_pipes);
5280
5281 PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable);
5282 PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb);
5283 PIPE_CONF_CHECK_BOOL(dsc.config.simple_422);
5284 PIPE_CONF_CHECK_BOOL(dsc.config.native_422);
5285 PIPE_CONF_CHECK_BOOL(dsc.config.native_420);
5286 PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable);
5287 PIPE_CONF_CHECK_I(dsc.config.line_buf_depth);
5288 PIPE_CONF_CHECK_I(dsc.config.bits_per_component);
5289 PIPE_CONF_CHECK_I(dsc.config.pic_width);
5290 PIPE_CONF_CHECK_I(dsc.config.pic_height);
5291 PIPE_CONF_CHECK_I(dsc.config.slice_width);
5292 PIPE_CONF_CHECK_I(dsc.config.slice_height);
5293 PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay);
5294 PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay);
5295 PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval);
5296 PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval);
5297 PIPE_CONF_CHECK_I(dsc.config.initial_scale_value);
5298 PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset);
5299 PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp);
5300 PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp);
5301 PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset);
5302 PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset);
5303 PIPE_CONF_CHECK_I(dsc.config.initial_offset);
5304 PIPE_CONF_CHECK_I(dsc.config.final_offset);
5305 PIPE_CONF_CHECK_I(dsc.config.rc_model_size);
5306 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0);
5307 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1);
5308 PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size);
5309 PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset);
5310 PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
5311
5312 PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
5313 PIPE_CONF_CHECK_BOOL(dsc.dsc_split);
5314 PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
5315
5316 PIPE_CONF_CHECK_BOOL(splitter.enable);
5317 PIPE_CONF_CHECK_I(splitter.link_count);
5318 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
5319
5320 if (!fastset) {
5321 PIPE_CONF_CHECK_BOOL(vrr.enable);
5322 PIPE_CONF_CHECK_I(vrr.vmin);
5323 PIPE_CONF_CHECK_I(vrr.vmax);
5324 PIPE_CONF_CHECK_I(vrr.flipline);
5325 PIPE_CONF_CHECK_I(vrr.pipeline_full);
5326 PIPE_CONF_CHECK_I(vrr.guardband);
5327 }
5328
5329#undef PIPE_CONF_CHECK_X
5330#undef PIPE_CONF_CHECK_I
5331#undef PIPE_CONF_CHECK_BOOL
5332#undef PIPE_CONF_CHECK_P
5333#undef PIPE_CONF_CHECK_FLAGS
5334#undef PIPE_CONF_CHECK_COLOR_LUT
5335#undef PIPE_CONF_CHECK_TIMINGS
5336#undef PIPE_CONF_CHECK_RECT
5337#undef PIPE_CONF_QUIRK
5338
5339 return ret;
5340}
5341
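/*
 * Sanity check the planes left in the state: each one must either be a
 * planar slave or have a visible uapi state, else assert_plane() complains.
 */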
static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}

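/*
 * Force a full modeset on @crtc_state: pull the affected connectors, the
 * DP MST topology state and all affected planes into @state, and mark
 * the uapi state as mode changed.
 */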
static int intel_modeset_pipe(struct intel_atomic_state *state,
			      struct intel_crtc_state *crtc_state,
			      const char *reason)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int ret;

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
		    crtc->base.base.id, crtc->base.name, reason);

	ret = drm_atomic_add_affected_connectors(&state->base,
						 &crtc->base);
	if (ret)
		return ret;

	ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
	if (ret)
		return ret;

	ret = intel_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	crtc_state->uapi.mode_changed = true;

	return 0;
}

/**
 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 * @mask: mask of pipes to modeset
 *
 * Add pipes in @mask to @state and force a full modeset on the enabled ones
 * due to the description in @reason.
 * This function can be called only before new plane states are computed.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
				      const char *reason, u8 mask)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;
	}

	return 0;
}

static void
intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.mode_changed = true;

	crtc_state->update_pipe = false;
	crtc_state->update_m_n = false;
	crtc_state->update_lrr = false;
}

/**
 * intel_modeset_all_pipes_late - force a full modeset on all pipes
 * @state: intel atomic state
 * @reason: the reason for the full modeset
 *
 * Add all pipes to @state and force a full modeset on the active ones due to
 * the description in @reason.
 * This function can be called only after new plane states are computed already.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
				 const char *reason)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		ret = intel_modeset_pipe(state, crtc_state, reason);
		if (ret)
			return ret;

		intel_crtc_flag_modeset(crtc_state);

		crtc_state->update_planes |= crtc_state->active_planes;
		crtc_state->async_flip_planes = 0;
		crtc_state->do_async_flip = false;
	}

	return 0;
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtcs that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtcs are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

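/*
 * Compute the updated mask of active pipes, starting from @active_pipes
 * and applying the activation changes of the crtcs in @state.
 */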
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}

static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	state->modeset = true;

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}

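/*
 * Decide whether the new crtc state still requires a full modeset or
 * qualifies for a fastset: clear uapi.mode_changed when the hw state
 * comparison allows it, and drop the seamless M/N and LRR updates that
 * turn out to be no-ops.
 */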
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev);

	/* only allow LRR when the timings stay within the VRR range */
	if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
		new_crtc_state->update_lrr = false;

	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n");
	else
		new_crtc_state->uapi.mode_changed = false;

	if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
				   &new_crtc_state->dp_m_n))
		new_crtc_state->update_m_n = false;

	if (old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
	    old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end)
		new_crtc_state->update_lrr = false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		intel_crtc_flag_modeset(new_crtc_state);
	else
		new_crtc_state->update_pipe = true;
}

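/* Add all planes of @crtc present in @plane_ids_mask to the atomic state. */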
static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
					  struct intel_crtc *crtc,
					  u8 plane_ids_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;

		if ((plane_ids_mask & BIT(plane->id)) == 0)
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	return intel_crtc_add_planes_to_state(state, crtc,
					      old_crtc_state->enabled_planes |
					      new_crtc_state->enabled_planes);
}

static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
{
	/* See {hsw,vlv,ivb}_plane_ratio() */
	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		IS_IVYBRIDGE(dev_priv);
}

static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
					   struct intel_crtc *crtc,
					   struct intel_crtc *other)
{
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	u8 plane_ids = 0;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			plane_ids |= BIT(plane->id);
	}

	return intel_crtc_add_planes_to_state(state, other, plane_ids);
}

static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *other;

		for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
						 crtc_state->bigjoiner_pipes) {
			int ret;

			if (crtc == other)
				continue;

			ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state __maybe_unused *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
		int ret;

		ret = intel_crtc_atomic_check(state, crtc);
		if (ret) {
			drm_dbg_atomic(&i915->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}

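/*
 * Check whether any enabled crtc driven by one of the given CPU
 * transcoders is undergoing a full modeset.
 */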
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
				     u8 pipes)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    pipes & BIT(crtc->pipe) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

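/*
 * Set up the bigjoiner slaves of @master_crtc: pull the slave crtc
 * states into @state, verify the pipe topology and copy the master's
 * state over to the slaves.
 */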
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *master_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc *slave_crtc;

	if (!master_crtc_state->bigjoiner_pipes)
		return 0;

	/* sanity check */
	if (drm_WARN_ON(&i915->drm,
			master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
		return -EINVAL;

	if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Cannot act as big joiner master "
			    "(need 0x%x as pipes, only 0x%x possible)\n",
			    master_crtc->base.base.id, master_crtc->base.name,
			    master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
		return -EINVAL;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
		struct intel_crtc_state *slave_crtc_state;
		int ret;

		slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
		if (IS_ERR(slave_crtc_state))
			return PTR_ERR(slave_crtc_state);

		/* master being enabled, slave was already configured? */
		if (slave_crtc_state->uapi.enable) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
				    "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
				    slave_crtc->base.base.id, slave_crtc->base.name,
				    master_crtc->base.base.id, master_crtc->base.name);
			return -EINVAL;
		}

		/*
		 * The state copy logic assumes the master crtc gets processed
		 * before the slave crtc during the main compute_config loop.
		 * This works because the crtcs are created in pipe order,
		 * and the hardware requires master pipe < slave pipe as well.
		 * Should that change we need to rethink the logic.
		 */
		if (WARN_ON(drm_crtc_index(&master_crtc->base) >
			    drm_crtc_index(&slave_crtc->base)))
			return -EINVAL;

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
			    slave_crtc->base.base.id, slave_crtc->base.name,
			    master_crtc->base.base.id, master_crtc->base.name);

		slave_crtc_state->bigjoiner_pipes =
			master_crtc_state->bigjoiner_pipes;

		ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
		if (ret)
			return ret;
	}

	return 0;
}

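/*
 * Tear down an existing bigjoiner link: clear the bigjoiner pipe masks
 * on the master and its slaves, and reset the slaves to their uapi state.
 */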
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc *master_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc *slave_crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
		struct intel_crtc_state *slave_crtc_state =
			intel_atomic_get_new_crtc_state(state, slave_crtc);

		slave_crtc_state->bigjoiner_pipes = 0;

		intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
	}

	master_crtc_state->bigjoiner_pipes = 0;
}

/**
 * DOC: asynchronous flip implementation
 *
 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
 * Correspondingly, support is currently added for the primary plane only.
 *
 * Async flip can only change the plane surface address, so anything else
 * changing is rejected from the intel_async_flip_check_hw() function.
 * Once this check is cleared, flip done interrupt is enabled using
 * the intel_crtc_enable_flip_done() function.
 *
 * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
 * handler itself. The timestamp and sequence sent during the flip done event
 * correspond to the last vblank and have no relation to the actual time when
 * the flip done event was sent.
 */
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->uapi.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	/*
	 * FIXME: Bigjoiner+async flip is busted currently.
	 * Remove this check once the issues are fixed.
	 */
	if (new_crtc_state->bigjoiner_pipes) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip
		 * IOCTL as of now, so support is currently added for the
		 * primary plane only. Support for other planes on platforms
		 * which support this (vlv/chv and icl+) should be added when
		 * async flip is enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] async flip not supported\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] no old or new framebuffer\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	int i;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->hw.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Active planes cannot be changed during async flip\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * Only async flip capable planes should be in the state
		 * if we're really about to ask the hardware to perform
		 * an async flip. We should never get this far otherwise.
		 */
		if (drm_WARN_ON(&i915->drm,
				new_crtc_state->do_async_flip && !plane->async_flip))
			return -EINVAL;

		/*
		 * Only check async flip capable planes; other planes
		 * may be involved in the initial commit due to
		 * the wm0/ddb optimization.
		 *
		 * TODO: maybe we should track which planes actually
		 * were requested to do the async flip...
		 */
		if (!plane->async_flip)
			continue;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case DRM_FORMAT_MOD_LINEAR:
			/*
			 * FIXME: Async flip on a linear buffer is supported on
			 * ICL, but additional alignment and FBC restrictions
			 * need to be taken care of. These aren't applicable
			 * for gen12+.
			 */
			if (DISPLAY_VER(i915) < 12) {
				drm_dbg_kms(&i915->drm,
					    "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n",
					    plane->base.base.id, plane->base.name,
					    new_plane_state->hw.fb->modifier, DISPLAY_VER(i915));
				return -EINVAL;
			}
			break;

		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
		case I915_FORMAT_MOD_4_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
				    plane->base.base.id, plane->base.name,
				    new_plane_state->hw.fb->modifier);
			return -EINVAL;
		}

		if (new_plane_state->hw.fb->format->num_planes > 1) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Planar formats do not support async flips\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->view.color_plane[0].mapping_stride !=
		    new_plane_state->view.color_plane[0].mapping_stride) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Size/coordinates cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Alpha value cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		/* plane decryption is allowed to change only in synchronous flips */
		if (old_plane_state->decrypt != new_plane_state->decrypt) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}

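/*
 * Pull all crtcs participating in a bigjoiner configuration into the
 * state, propagate the modeset requirement from the master to its
 * slaves, and kill stale bigjoiner links (they may get re-established
 * later during the compute config phase).
 */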
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	u8 affected_pipes = 0;
	u8 modeset_pipes = 0;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		affected_pipes |= crtc_state->bigjoiner_pipes;
		if (intel_crtc_needs_modeset(crtc_state))
			modeset_pipes |= crtc_state->bigjoiner_pipes;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
		int ret;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    intel_crtc_is_bigjoiner_master(crtc_state))
			kill_bigjoiner_slave(state, crtc);
	}

	return 0;
}

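/*
 * Compute the new configuration for all crtcs that need a modeset,
 * honouring the link bandwidth limits in @limits. On failure
 * @failed_pipe identifies the pipe whose configuration failed.
 */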
static int intel_atomic_check_config(struct intel_atomic_state *state,
				     struct intel_link_bw_limits *limits,
				     enum pipe *failed_pipe)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret;
	int i;

	*failed_pipe = INVALID_PIPE;

	ret = intel_bigjoiner_add_affected_crtcs(state);
	if (ret)
		return ret;

	ret = intel_fdi_add_affected_crtcs(state);
	if (ret)
		return ret;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
				copy_bigjoiner_crtc_state_nomodeset(state, crtc);
			else
				intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
			continue;
		}

		if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
			drm_WARN_ON(&i915->drm, new_crtc_state->uapi.enable);
			continue;
		}

		ret = intel_crtc_prepare_cleared_state(state, crtc);
		if (ret)
			break;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config(state, crtc, limits);
		if (ret)
			break;

		ret = intel_atomic_check_bigjoiner(state, crtc);
		if (ret)
			break;
	}

	if (ret)
		*failed_pipe = crtc->pipe;

	return ret;
}

static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_link_bw_limits new_limits;
	struct intel_link_bw_limits old_limits;
	int ret;

	intel_link_bw_init_limits(i915, &new_limits);
	old_limits = new_limits;

	while (true) {
		enum pipe failed_pipe;

		ret = intel_atomic_check_config(state, &new_limits,
						&failed_pipe);
		if (ret) {
			/*
			 * The bpp limit for a pipe is below the minimum it
			 * supports: set the limit to the minimum and
			 * recalculate the config.
			 */
			if (ret == -EINVAL &&
			    intel_link_bw_set_bpp_limit_for_pipe(state,
								 &old_limits,
								 &new_limits,
								 failed_pipe))
				continue;

			break;
		}

		old_limits = new_limits;

		ret = intel_link_bw_atomic_check(state, &new_limits);
		if (ret != -EAGAIN)
			break;
	}

	return ret;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @_state: state to validate
 */
int intel_atomic_check(struct drm_device *dev,
		       struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;
	bool any_ms = false;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * The crtc's state is no longer considered to be inherited
		 * after the first userspace/client initiated commit.
		 */
		if (!state->internal)
			new_crtc_state->inherited = false;

		if (new_crtc_state->inherited != old_crtc_state->inherited)
			new_crtc_state->uapi.mode_changed = true;

		if (new_crtc_state->uapi.scaling_filter !=
		    old_crtc_state->uapi.scaling_filter)
			new_crtc_state->uapi.mode_changed = true;
	}

	intel_vrr_check_modeset(state);

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = intel_async_flip_check_uapi(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_atomic_check_config_and_link(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (new_crtc_state->hw.enable) {
			ret = intel_modeset_pipe_config_late(state, crtc);
			if (ret)
				goto fail;
		}

		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
	}

	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a fullmodeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a fullmodeset, in which case all slaves also need to do a
	 * fullmodeset. Similarly, in the case of port synced crtcs, if one
	 * of the synced crtcs needs a full modeset, all the other synced
	 * crtcs are forced to do a full modeset as well.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (intel_dp_mst_crtc_needs_modeset(state, crtc))
			intel_crtc_flag_modeset(new_crtc_state);

		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
			enum transcoder master = new_crtc_state->mst_master_transcoder;

			if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
				intel_crtc_flag_modeset(new_crtc_state);
		}

		if (is_trans_port_sync_mode(new_crtc_state)) {
			u8 trans = new_crtc_state->sync_mode_slaves_mask;

			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
				trans |= BIT(new_crtc_state->master_transcoder);

			if (intel_cpu_transcoders_need_modeset(state, trans))
				intel_crtc_flag_modeset(new_crtc_state);
		}

		if (new_crtc_state->bigjoiner_pipes) {
			if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes))
				intel_crtc_flag_modeset(new_crtc_state);
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		any_ms = true;

		intel_release_shared_dplls(state, crtc);
	}

	if (any_ms && !check_digital_port_conflicts(state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "rejecting conflicting digital port configuration\n");
		ret = -EINVAL;
		goto fail;
	}

	ret = intel_atomic_check_planes(state);
	if (ret)
		goto fail;

	ret = intel_compute_global_watermarks(state);
	if (ret)
		goto fail;

	ret = intel_bw_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_cdclk_atomic_check(state, &any_ms);
	if (ret)
		goto fail;

	if (intel_any_crtc_needs_modeset(state))
		any_ms = true;

	if (any_ms) {
		ret = intel_modeset_checks(state);
		if (ret)
			goto fail;

		ret = intel_modeset_calc_cdclk(state);
		if (ret)
			return ret;
	}

	ret = intel_pmdemand_atomic_check(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check_crtcs(state);
	if (ret)
		goto fail;

	ret = intel_fbc_atomic_check(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		intel_color_assert_luts(new_crtc_state);

		ret = intel_async_flip_check_hw(state, crtc);
		if (ret)
			goto fail;

		/* Either full modeset or fastset (or neither), never both */
		drm_WARN_ON(&dev_priv->drm,
			    intel_crtc_needs_modeset(new_crtc_state) &&
			    intel_crtc_needs_fastset(new_crtc_state));

		if (!intel_crtc_needs_modeset(new_crtc_state) &&
		    !intel_crtc_needs_fastset(new_crtc_state))
			continue;

		intel_crtc_state_dump(new_crtc_state, state,
				      intel_crtc_needs_modeset(new_crtc_state) ?
				      "modeset" : "fastset");
	}

	return 0;

 fail:
	if (ret == -EDEADLK)
		return ret;

	/*
	 * FIXME: would probably be nice to know which crtc specifically
	 * caused the failure, in cases where we can pinpoint it.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_crtc_state_dump(new_crtc_state, state, "failed");

	return ret;
}

static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
	if (ret < 0)
		return ret;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (intel_crtc_needs_color_update(crtc_state))
			intel_color_prepare_commit(crtc_state);
	}

	return 0;
}

void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}

static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjusted pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (new_crtc_state->update_m_n)
		intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
					       &new_crtc_state->dp_m_n);

	if (new_crtc_state->update_lrr)
		intel_set_transcoder_timings_lrr(new_crtc_state);
}

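/*
 * Per-pipe programming that must be done before the planes are armed:
 * color management, pipe misc, fastset updates, PSR2 manual tracking
 * and watermarks.
 */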
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets the pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_arm(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipe_misc(new_crtc_state);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}

static void commit_pipe_post_planes(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/*
	 * Disable the scaler(s) after the plane(s) so that we don't
	 * get a catastrophic underrun even if the two operations
	 * end up happening in two different frames.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 &&
	    !intel_crtc_needs_modeset(new_crtc_state))
		skl_detach_scalers(new_crtc_state);

	if (vrr_enabling(old_crtc_state, new_crtc_state))
		intel_vrr_enable(new_crtc_state);
}

static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return;

	/* VRR will be enabled later, if required */
	intel_crtc_update_active_timings(new_crtc_state, false);

	dev_priv->display.funcs.display->crtc_enable(state, crtc);

	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		return;

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}

static void intel_pre_update_crtc(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (old_crtc_state->inherited ||
	    intel_crtc_needs_modeset(new_crtc_state)) {
		if (HAS_DPT(i915))
			intel_dpt_configure(crtc);
	}

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    intel_crtc_needs_color_update(new_crtc_state))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_encoders_update_pipe(state, crtc);

		if (DISPLAY_VER(i915) >= 11 &&
		    intel_crtc_needs_fastset(new_crtc_state))
			icl_set_pipe_chicken(new_crtc_state);

		if (vrr_params_changed(old_crtc_state, new_crtc_state))
			intel_vrr_set_transcoder_timings(new_crtc_state);
	}

	intel_fbc_update(state, crtc);

	drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));

	if (!modeset &&
	    intel_crtc_needs_color_update(new_crtc_state))
		intel_color_commit_noarm(new_crtc_state);

	intel_crtc_planes_update_noarm(state, crtc);
}

static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(state, crtc);

	commit_pipe_pre_planes(state, crtc);

	intel_crtc_planes_update_arm(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(state, crtc);

	/*
	 * VRR/Seamless M/N update may need to update frame timings.
	 *
	 * FIXME: Should be synchronized with the start of vblank somehow...
	 */
	if (vrr_enabling(old_crtc_state, new_crtc_state) ||
	    new_crtc_state->update_m_n || new_crtc_state->update_lrr)
		intel_crtc_update_active_timings(new_crtc_state,
						 new_crtc_state->vrr.enable);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets. But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (intel_crtc_needs_fastset(new_crtc_state) &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.funcs.display->crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);

	if (!new_crtc_state->hw.active)
		intel_initial_watermarks(state, crtc);
}

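/*
 * Disable all crtcs that need a modeset, making sure that port sync,
 * MST and bigjoiner slaves are torn down before their masters.
 */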
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);

		if (!old_crtc_state->hw.active)
			continue;

		intel_crtc_disable_planes(state, crtc);
	}

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/*
		 * In the case of Transcoder Port Sync, the master and slave
		 * CRTCs can be assigned in any order, and we need to make
		 * sure that the slave CRTCs are disabled first and the
		 * master CRTC last, since slave vblanks are masked until
		 * the master's vblank.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
		    !intel_crtc_is_bigjoiner_slave(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
	}
}

static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_enable_crtc(state, crtc);
		intel_pre_update_crtc(state, crtc);
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->hw.active)
			continue;

		intel_update_crtc(state, crtc);
	}
}

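/*
 * skl+ variant of the modeset enable sequence: order the pipe updates
 * such that the DDB allocations of any two pipes never overlap while
 * the pipes are being reprogrammed.
 */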
6837static void skl_commit_modeset_enables(struct intel_atomic_state *state)
6838{
6839 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6840 struct intel_crtc *crtc;
6841 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6842 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
6843 u8 update_pipes = 0, modeset_pipes = 0;
6844 int i;
6845
6846 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6847 enum pipe pipe = crtc->pipe;
6848
6849 if (!new_crtc_state->hw.active)
6850 continue;
6851
6852 /* ignore allocations for crtc's that have been turned off. */
6853 if (!intel_crtc_needs_modeset(new_crtc_state)) {
6854 entries[pipe] = old_crtc_state->wm.skl.ddb;
6855 update_pipes |= BIT(pipe);
6856 } else {
6857 modeset_pipes |= BIT(pipe);
6858 }
6859 }
6860
6861 /*
6862 * Whenever the number of active pipes changes, we need to make sure we
6863 * update the pipes in the right order so that their ddb allocations
6864 * never overlap with each other between CRTC updates. Otherwise we'll
6865 * cause pipe underruns and other bad stuff.
6866 *
6867 * So first lets enable all pipes that do not need a fullmodeset as
6868 * those don't have any external dependency.
6869 */
6870 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6871 enum pipe pipe = crtc->pipe;
6872
6873 if ((update_pipes & BIT(pipe)) == 0)
6874 continue;
6875
6876 intel_pre_update_crtc(state, crtc);
6877 }
6878
6879 while (update_pipes) {
6880 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6881 new_crtc_state, i) {
6882 enum pipe pipe = crtc->pipe;
6883
6884 if ((update_pipes & BIT(pipe)) == 0)
6885 continue;
6886
6887 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
6888 entries, I915_MAX_PIPES, pipe))
6889 continue;
6890
6891 entries[pipe] = new_crtc_state->wm.skl.ddb;
6892 update_pipes &= ~BIT(pipe);
6893
6894 intel_update_crtc(state, crtc);
6895
6896 /*
6897 * If this is an already active pipe, it's DDB changed,
6898 * and this isn't the last pipe that needs updating
6899 * then we need to wait for a vblank to pass for the
6900 * new ddb allocation to take effect.
6901 */
6902 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
6903 &old_crtc_state->wm.skl.ddb) &&
6904 (update_pipes | modeset_pipes))
6905 intel_crtc_wait_for_next_vblank(crtc);
6906 }
6907 }
6908
6909 update_pipes = modeset_pipes;
6910
6911 /*
6912 * Enable all pipes that needs a modeset and do not depends on other
6913 * pipes
6914 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    intel_crtc_is_bigjoiner_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves, port sync masters and big joiner masters
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		intel_pre_update_crtc(state, crtc);
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
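
/*
 * A minimal sketch (not part of the driver) of the interval test the
 * skl_ddb_allocation_overlaps() calls above rely on, assuming
 * struct skl_ddb_entry carries a half-open [start, end) block range:
 *
 *	static bool ddb_overlaps(const struct skl_ddb_entry *a,
 *				 const struct skl_ddb_entry *b)
 *	{
 *		return a->start < b->end && b->start < a->end;
 *	}
 *
 * A pipe is only committed once its new allocation no longer intersects
 * any allocation still programmed in another pipe, which is why the
 * while (update_pipes) loop may need several passes.
 */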

static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int ret, i;

	for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
		if (new_plane_state->fence) {
			ret = dma_fence_wait_timeout(new_plane_state->fence, false,
						     i915_fence_timeout(i915));
			if (ret <= 0)
				break;

			dma_fence_put(new_plane_state->fence);
			new_plane_state->fence = NULL;
		}
	}
}
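
/*
 * Note on the loop above: dma_fence_wait_timeout() returns the
 * remaining timeout (> 0) on success, 0 on timeout and a negative
 * errno on error, so the "ret <= 0" check bails out of the loop on
 * both timeout and error, leaving any remaining fences to be released
 * with the state.
 */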

static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
		intel_color_cleanup_commit(old_crtc_state);

	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);
}

static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requires this value to be located in the fb at
		 * offset 0 of the cc plane, plane #2 on previous generations or
		 * plane #1 for flat ccs):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that the FB obj is
		 * pinned and the caller made sure that the object is synced wrt.
		 * the related color clear value GPU write on it.
		 */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[cc_plane] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
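
/*
 * Illustrative layout of the clear color plane read above, following
 * the ABI described in the loop:
 *
 *	byte  0..15: 4 x 4 byte per-channel clear values (from the fb user)
 *	byte 16..23: 8 byte native clear color (GPU written, HW consumed)
 *
 * hence the read at fb->offsets[cc_plane] + 16 for
 * sizeof(plane_state->ccval) == 8 bytes.
 */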

static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);
	drm_dp_mst_atomic_wait_for_dependencies(&state->base);

	/*
	 * During full modesets we write a lot of registers, wait
	 * for PLLs, etc. Doing that while DC states are enabled
	 * is not a good idea.
	 *
	 * During fastsets and other updates we also need to
	 * disable DC states due to the following scenario:
	 * 1. DC5 exit and PSR exit happen
	 * 2. Some or all _noarm() registers are written
	 * 3. Due to some long delay PSR is re-entered
	 * 4. DC5 entry -> DMC saves the already written new
	 *    _noarm() registers and the old not yet written
	 *    _arm() registers
	 * 5. DC5 exit -> DMC restores a mixture of old and
	 *    new register values and arms the update
	 * 6. PSR exit -> hardware latches a mixture of old and
	 *    new register values -> corrupted frame, or worse
	 * 7. New _arm() registers are finally written
	 * 8. Hardware finally latches a complete set of new
	 *    register values, and subsequent frames will be OK again
	 *
	 * Also note that due to the pipe CSC hardware issues on
	 * SKL/GLK DC states must remain off until the pipe CSC
	 * state readout has happened. Otherwise we risk corrupting
	 * the CSC latched register values with the readout (see
	 * skl_read_csc() and skl_color_commit_noarm()).
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);

	intel_atomic_prepare_plane_clear_colors(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    intel_crtc_needs_fastset(new_crtc_state))
			intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	/*
	 * In XE_LPD+ Pmdemand combines many parameters such as voltage index,
	 * PLLs, cdclk frequency, QGV point selection parameter, etc. Voltage
	 * index and cdclk/ddiclk frequencies are supposed to be configured
	 * before the cdclk config is set.
	 */
	intel_pmdemand_pre_plane_update(state);

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for the now disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);
	intel_mbus_dbox_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.funcs.display->commit_modeset_enables(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		intel_color_wait_commit(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(state, crtc);

		/* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
		hsw_ips_post_update(state, crtc);

		/*
		 * Activate DRRS after state readout to avoid
		 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
		 */
		intel_drrs_activate(new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work, aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do the dsb cleanup in cleanup_work.
		 *
		 * FIXME get rid of this funny new->old swapping
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);
	intel_pmdemand_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
	}
	/*
	 * Delay re-enabling DC states by 17 ms to avoid the off->on->off
	 * toggling overhead at and above 60 FPS (one frame at 60 FPS takes
	 * ~16.7 ms, so a 17 ms delay spans consecutive flips).
	 */
	intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17);
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * is executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}

static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}

static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}

int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
			bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_color_cleanup_commit(new_crtc_state);

		drm_atomic_helper_unprepare_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	if (nonblock && state->modeset) {
		queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
	} else {
		if (state->modeset)
			flush_workqueue(dev_priv->display.wq.modeset);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
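
/*
 * Scheduling sketch for intel_atomic_commit() above: nonblocking
 * modesets are queued on the dedicated display.wq.modeset workqueue,
 * nonblocking page flips on display.wq.flip, and blocking commits run
 * intel_atomic_commit_tail() inline. The flush_workqueue() before a
 * blocking modeset presumably ensures previously queued nonblocking
 * modeset works have finished before the inline commit starts.
 */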

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
		possible_crtcs |= drm_crtc_mask(&crtc->base);

	return possible_crtcs;
}

static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return false;

	if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->display.vbt.int_crt_support)
		return false;

	return true;
}

bool assert_port_valid(struct drm_i915_private *i915, enum port port)
{
	return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)),
			 "Platform does not support port %c\n", port_name(port));
}

void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (HAS_DDI(dev_priv)) {
		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		intel_bios_for_each_encoder(dev_priv, intel_ddi_init);

		if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
			vlv_dsi_init(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB is multiplexed with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claims are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X, SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

static int max_dotclock(struct drm_i915_private *i915)
{
	int max_dotclock = i915->max_dotclk_freq;

	/* icl+ might use bigjoiner */
	if (DISPLAY_VER(i915) >= 11)
		max_dotclock *= 2;

	return max_dotclock;
}
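
/*
 * Worked example (hypothetical numbers): with max_dotclk_freq at
 * 650000 kHz, an ICL+ platform reports 1300000 kHz here, since big
 * joiner splits the mode across two pipes that each carry half of
 * the dotclock.
 */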

enum drm_mode_status intel_mode_valid(struct drm_device *dev,
				      const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/*
	 * Reject clearly excessive dotclocks early to
	 * avoid having to worry about huge integers later.
	 */
	if (mode->clock > max_dotclock(dev_priv))
		return MODE_CLOCK_HIGH;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}
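
/*
 * Example: a 10240x4320 mode is rejected as MODE_H_ILLEGAL on a
 * DISPLAY_VER() == 9 platform (hdisplay capped at 8192) but passes
 * the transcoder timing limits above on DISPLAY_VER() >= 11 hardware
 * (16384x8192), where it would typically require big joiner anyway.
 */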

enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
						     const struct drm_display_mode *mode)
{
	/*
	 * Additional transcoder timing limits,
	 * excluding BXT/GLK DSI transcoders.
	 */
	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode,
				bool bigjoiner)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(dev_priv) >= 11) {
		plane_width_max = 5120 << bigjoiner;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}
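
/*
 * Example: on DISPLAY_VER() >= 11 a plane can cover modes up to
 * 5120 pixels wide on a single pipe, or 10240 (5120 << 1) when big
 * joiner may be used; anything wider is rejected as MODE_H_ILLEGAL.
 */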

static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};

static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display.funcs.display = &skl_display_funcs;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.funcs.display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.funcs.display = &pch_split_display_funcs;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.funcs.display = &vlv_display_funcs;
	} else {
		dev_priv->display.funcs.display = &i9xx_display_funcs;
	}
}
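
/*
 * Note that the order of the checks above matters: DISPLAY_VER() >= 9
 * platforms also have DDI, and HSW/BDW have both DDI and a PCH split,
 * so the more specific vtables must be picked before the more generic
 * fallbacks.
 */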

int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
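
/*
 * Working through the numbers above (assuming the usual i9xx divider
 * formulas m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2) and
 * dot = vco / (p1 * p2)):
 *
 *	m   = 5 * 20 + 9 = 109
 *	vco = 48000 * 109 / 4 = 1308000 kHz
 *	dot = 1308000 / (13 * 4) = 25154 kHz
 *
 * which is why the drm_WARN_ON() in i830_enable_pipe() checks for
 * 25154 rather than the nominal 25175 kHz of 640x480@60Hz.
 */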

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, TRANSCONF(pipe), 0);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
}