/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_dp_helper.h"

#define DP_RECEIVER_CAP_SIZE	0xf
#define DP_LINK_STATUS_SIZE	6
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

#define DP_LINK_CONFIGURATION_SIZE	9

struct intel_dp {
	struct intel_encoder base;
	uint32_t output_reg;
	uint32_t DP;
	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
	bool has_audio;
	enum hdmi_force_audio force_audio;
	uint32_t color_range;
	int dpms_mode;
	uint8_t link_bw;
	uint8_t lane_count;
	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
	struct i2c_adapter adapter;
	struct i2c_algo_dp_aux_data algo;
	bool is_pch_edp;
	uint8_t train_set[4];
	int panel_power_up_delay;
	int panel_power_down_delay;
	int panel_power_cycle_delay;
	int backlight_on_delay;
	int backlight_off_delay;
	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
	struct delayed_work panel_vdd_work;
	bool want_panel_vdd;
	struct edid *edid; /* cached EDID for eDP */
	int edid_mode_count;
};

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	return intel_dp->base.type == INTEL_OUTPUT_EDP;
}

/**
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a PCH DP port attached
 * to an eDP panel, false otherwise.  Helpful for determining whether we
 * may need FDI resources for a given DP output or not.
 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	return intel_dp->is_pch_edp;
}

/**
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a CPU eDP port.
 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}

static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
	return container_of(encoder, struct intel_dp, base.base);
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return container_of(intel_attached_encoder(connector),
			    struct intel_dp, base);
}

/**
 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
 * @encoder: DRM encoder
 *
 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
 * by intel_display.c.
 */
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (!encoder)
		return false;

	intel_dp = enc_to_intel_dp(encoder);

	return is_pch_edp(intel_dp);
}

static void intel_dp_start_link_train(struct intel_dp *intel_dp);
static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);

void
intel_edp_link_config(struct intel_encoder *intel_encoder,
		      int *lane_num, int *link_bw)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	*lane_num = intel_dp->lane_count;
	if (intel_dp->link_bw == DP_LINK_BW_1_62)
		*link_bw = 162000;
	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
		*link_bw = 270000;
}

static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
	switch (max_lane_count) {
	case 1: case 2: case 4:
		break;
	default:
		max_lane_count = 4;
	}
	return max_lane_count;
}

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	default:
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static int
intel_dp_link_clock(uint8_t link_bw)
{
	if (link_bw == DP_LINK_BW_2_7)
		return 270000;
	else
		return 162000;
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
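
/*
 * Continuing the 1680x1050R example above: intel_dp_link_required(119000, 18)
 * == 214200 decakilobits, which a 2-lane 2.7GHz link can carry, since
 * intel_dp_max_data_rate(270000, 2) == 432000.
 */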

static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
	int max_lanes = intel_dp_max_lane_count(intel_dp);
	int max_rate, mode_rate;

	mode_rate = intel_dp_link_required(mode->clock, 24);
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);

	if (mode_rate > max_rate) {
		mode_rate = intel_dp_link_required(mode->clock, 18);
		if (mode_rate > max_rate)
			return false;

		if (adjusted_mode)
			adjusted_mode->private_flags
				|= INTEL_MODE_DP_FORCE_6BPC;

		return true;
	}

	return true;
}
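
/*
 * E.g. on a single 2.7GHz lane (max_rate == 216000), the 119000kHz mode
 * above needs 285600 at 24bpp but only 214200 at 18bpp, so it is accepted
 * with INTEL_MODE_DP_FORCE_6BPC set rather than rejected.
 */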

static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
			return MODE_PANEL;
	}

	if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
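
/*
 * The AUX data registers hold message bytes MSB-first: e.g. pack_aux()
 * on { 0x12, 0x34, 0x56 } yields 0x12345600, and unpack_aux(0x12345600,
 * dst, 3) recovers the same three bytes.
 */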

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}
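
/*
 * E.g. an 800MHz FSB reads back as CLKCFG_FSB_800, so this returns 200
 * (MHz); intel_dp_aux_ch() below then uses 200 / 2 == 100 as its divider,
 * giving the ~2MHz AUX bit clock it wants.
 */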

static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}

static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz.  So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400MHz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450MHz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = 63; /* ILK input clock fixed at 125MHz */
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}
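
/*
 * For reference, the native AUX header built above is: command in the
 * high nibble of byte 0, the 16 low address bits in bytes 1-2, then
 * length - 1.  E.g. a one-byte write to DPCD 0x600 (DP_SET_POWER) goes
 * out as { 0x80, 0x06, 0x00, 0x00, <data> }.
 */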

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						 struct intel_dp,
						 adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);

static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int lane_count, clock;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
		intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
					mode, adjusted_mode);
		/*
		 * the mode->clock is used to calculate the Data&Link M/N
		 * of the pipe. For the eDP the fixed clock should be used.
		 */
		mode->clock = intel_dp->panel_fixed_mode->clock;
	}

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
	mode_rate = intel_dp_link_required(mode->clock, bpp);

	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("DP link bw %02x lane "
					      "count %d clock %d bpp %d\n",
					      intel_dp->link_bw, intel_dp->lane_count,
					      adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}

struct intel_dp_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void
intel_dp_compute_m_n(int bpp,
		     int nlanes,
		     int pixel_clock,
		     int link_clock,
		     struct intel_dp_m_n *m_n)
{
	m_n->tu = 64;
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
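
/*
 * Worked example: 24bpp over 4 lanes at pixel clock 119000kHz on a
 * 270000kHz link gives gmch_m == 119000 * 24 >> 3 == 357000 and
 * gmch_n == 270000 * 4 == 1080000; both already fit in 24 bits, so
 * intel_reduce_ratio() leaves them alone.
 */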

void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_encoder *encoder;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_dp_m_n m_n;
	int pipe = intel_crtc->pipe;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
		struct intel_dp *intel_dp;

		if (encoder->crtc != crtc)
			continue;

		intel_dp = enc_to_intel_dp(encoder);
		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);

	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe),
			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
			   m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
			   m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}

static void ironlake_edp_pll_on(struct drm_encoder *encoder);
static void ironlake_edp_pll_off(struct drm_encoder *encoder);

static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Turn on the eDP PLL if needed */
	if (is_edp(intel_dp)) {
		if (!is_pch_edp(intel_dp))
			ironlake_edp_pll_on(encoder);
		else
			ironlake_edp_pll_off(encoder);
	}

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */

	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss the required PLL setting for eDP */
		intel_dp->DP |= DP_PLL_ENABLE;
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss the required PLL setting for eDP */
			intel_dp->DP |= DP_PLL_ENABLE;
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
964
965#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
966#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
967
968#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
969#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
970
971#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
972#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
973
974static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
975 u32 mask,
976 u32 value)
977{
978 struct drm_device *dev = intel_dp->base.base.dev;
979 struct drm_i915_private *dev_priv = dev->dev_private;
980
981 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
982 mask, value,
983 I915_READ(PCH_PP_STATUS),
984 I915_READ(PCH_PP_CONTROL));
985
986 if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
987 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
988 I915_READ(PCH_PP_STATUS),
989 I915_READ(PCH_PP_CONTROL));
990 }
991}
992
993static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
994{
995 DRM_DEBUG_KMS("Wait for panel power on\n");
996 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
997}
998
999static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
1000{
1001 DRM_DEBUG_KMS("Wait for panel power off time\n");
1002 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1003}
1004
1005static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
1006{
1007 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1008 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1009}
1010
1011
1012/* Read the current pp_control value, unlocking the register if it
1013 * is locked
1014 */
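/* PCH_PP_CONTROL is write-protected; PANEL_UNLOCK_REGS (the 0xabcd << 16
 * key from i915_reg.h) keeps subsequent writes of the value returned here
 * from being dropped.
 */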

static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32 control = I915_READ(PCH_PP_CONTROL);

	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}

static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp->base.base.dev;

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
}

static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}

static void ironlake_edp_pll_on(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

static void ironlake_edp_pll_off(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

static void intel_dp_prepare(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);
	intel_dp_link_down(intel_dp);
}

static void intel_dp_commit(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_device *dev = encoder->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);

	intel_dp->dpms_mode = DRM_MODE_DPMS_ON;

	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}

static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (mode != DRM_MODE_DPMS_ON) {
		/* Switching the panel off requires vdd. */
		ironlake_edp_panel_vdd_on(intel_dp);
		ironlake_edp_backlight_off(intel_dp);
		intel_dp_sink_dpms(intel_dp, mode);
		ironlake_edp_panel_off(intel_dp);
		intel_dp_link_down(intel_dp);

		if (is_cpu_edp(intel_dp))
			ironlake_edp_pll_off(encoder);
	} else {
		if (is_cpu_edp(intel_dp))
			ironlake_edp_pll_on(encoder);

		ironlake_edp_panel_vdd_on(intel_dp);
		intel_dp_sink_dpms(intel_dp, mode);
		if (!(dp_reg & DP_PORT_EN)) {
			intel_dp_start_link_train(intel_dp);
			ironlake_edp_panel_on(intel_dp);
			ironlake_edp_panel_vdd_off(intel_dp, true);
			intel_dp_complete_link_train(intel_dp);
		} else
			ironlake_edp_panel_vdd_off(intel_dp, false);
		ironlake_edp_backlight_on(intel_dp);
	}
	intel_dp->dpms_mode = mode;
}

/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)
{
	int ret, i;

	/*
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but we're also supposed to retry 3 times per the spec.
	 */
	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

	return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}

static uint8_t
intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
		     int r)
{
	return link_status[r - DP_LANE0_1_STATUS];
}

static uint8_t
intel_get_adjust_request_voltage(uint8_t adjust_request[2],
				 int lane)
{
	int s = ((lane & 1) ?
		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
	uint8_t l = adjust_request[lane>>1];

	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}

static uint8_t
intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
				      int lane)
{
	int s = ((lane & 1) ?
		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
	uint8_t l = adjust_request[lane>>1];

	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
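
/*
 * Example decode: adjust_request[0] == 0x22 asks for voltage swing level 2
 * (800mV) at zero pre-emphasis on both lane 0 and lane 1, each lane's
 * swing and pre-emphasis requests taking two bits of the shared byte.
 */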

#if 0
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}

static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp->base.base.dev;

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}

static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

static uint32_t
intel_dp_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_600:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_800:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
		      int lane)
{
	int s = (lane & 1) * 4;
	uint8_t l = link_status[lane>>1];

	return (l >> s) & 0xf;
}
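
/*
 * E.g. link_status[0] == 0x77 reports CR done, channel EQ done and symbol
 * lock (bits 0-2 of each nibble) for both lane 0 and lane 1, enough to
 * satisfy CHANNEL_EQ_BITS below for those lanes.
 */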

/* Check whether clock recovery is done on all channels */
static bool
intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
{
	int lane;
	uint8_t lane_status;

	for (lane = 0; lane < lane_count; lane++) {
		lane_status = intel_get_lane_status(link_status, lane);
		if ((lane_status & DP_LANE_CR_DONE) == 0)
			return false;
	}
	return true;
}

/* Check to see if channel eq is done on all channels */
#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
			 DP_LANE_CHANNEL_EQ_DONE|\
			 DP_LANE_SYMBOL_LOCKED)
static bool
intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t lane_align;
	uint8_t lane_status;
	int lane;

	lane_align = intel_dp_link_status(link_status,
					  DP_LANE_ALIGN_STATUS_UPDATED);
	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
		return false;
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		lane_status = intel_get_lane_status(link_status, lane);
		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
			return false;
	}
	return true;
}

static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	ret = intel_dp_aux_native_write(intel_dp,
					DP_TRAINING_LANE0_SET,
					intel_dp->train_set,
					intel_dp->lane_count);
	if (ret != intel_dp->lane_count)
		return false;

	return true;
}

/* Enable corresponding port and start training pattern 1 */
static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	u32 reg;
	uint32_t DP = intel_dp->DP;

	/*
	 * On CPT we have to enable the port in training pattern 1, which
	 * will happen below in intel_dp_set_link_train.  Otherwise, enable
	 * the port and wait for it to become active.
	 */
	if (!HAS_PCH_CPT(dev)) {
		I915_WRITE(intel_dp->output_reg, intel_dp->DP);
		POSTING_READ(intel_dp->output_reg);
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
	else
		DP &= ~DP_LINK_TRAIN_MASK;
	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];
		uint32_t signal_levels;

		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
		else
			reg = DP | DP_LINK_TRAIN_PAT_1;

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, reg,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		udelay(100);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}

static void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool channel_eq = false;
	int tries, cr_tries;
	u32 reg;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint32_t signal_levels;
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
		else
			reg = DP | DP_LINK_TRAIN_PAT_2;

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, reg,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		udelay(400);
		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (intel_channel_eq_ok(intel_dp, link_status)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
		reg = DP | DP_LINK_TRAIN_OFF_CPT;
	else
		reg = DP | DP_LINK_TRAIN_OFF;

	I915_WRITE(intel_dp->output_reg, reg);
	POSTING_READ(intel_dp->output_reg);
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}

static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
		return;

	DRM_DEBUG_KMS("\n");

	if (is_edp(intel_dp)) {
		DP &= ~DP_PLL_ENABLE;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
		udelay(100);
	}

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	msleep(17);

	if (is_edp(intel_dp)) {
		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
			DP |= DP_LINK_TRAIN_OFF_CPT;
		else
			DP |= DP_LINK_TRAIN_OFF;
	}

	if (!HAS_PCH_CPT(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (crtc == NULL) {
			/* We can arrive here never having been attached
			 * to a CRTC, for instance, due to inheriting
			 * random state from the BIOS.
			 *
			 * If the pipe is not running, play safe and
			 * wait for the clocks to stabilise before
			 * continuing.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
1963
1964static bool
1965intel_dp_get_dpcd(struct intel_dp *intel_dp)
1966{
1967 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
1968 sizeof(intel_dp->dpcd)) &&
1969 (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
1970 return true;
1971 }
1972
1973 return false;
1974}
1975
1976static void
1977intel_dp_probe_oui(struct intel_dp *intel_dp)
1978{
1979 u8 buf[3];
1980
1981 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
1982 return;
1983
1984 ironlake_edp_panel_vdd_on(intel_dp);
1985
1986 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
1987 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
1988 buf[0], buf[1], buf[2]);
1989
1990 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
1991 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
1992 buf[0], buf[1], buf[2]);
1993
1994 ironlake_edp_panel_vdd_off(intel_dp, false);
1995}
1996
1997static bool
1998intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
1999{
2000 int ret;
2001
2002 ret = intel_dp_aux_native_read_retry(intel_dp,
2003 DP_DEVICE_SERVICE_IRQ_VECTOR,
2004 sink_irq_vector, 1);
2005 if (!ret)
2006 return false;
2007
2008 return true;
2009}
2010
2011static void
2012intel_dp_handle_test_request(struct intel_dp *intel_dp)
2013{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec
 * 5.1.2:
 *	1. Read DPCD
 *	2. Configure link according to Receiver Capabilities
 *	3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *	4. Check link status on receipt of hot-plug interrupt
 */

static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
		return;

	if (!intel_dp->base.base.crtc)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!intel_channel_eq_ok(intel_dp, link_status)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_dp->base.base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}

static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	if (intel_dp_get_dpcd(intel_dp))
		return connector_status_connected;
	return connector_status_disconnected;
}

static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(intel_dp->base.base.dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	return intel_dp_detect_dpcd(intel_dp);
}

static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp, bit;

	switch (intel_dp->output_reg) {
	case DP_B:
		bit = DPB_HOTPLUG_INT_STATUS;
		break;
	case DP_C:
		bit = DPC_HOTPLUG_INT_STATUS;
		break;
	case DP_D:
		bit = DPD_HOTPLUG_INT_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	temp = I915_READ(PORT_HOTPLUG_STAT);

	if ((temp & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	int size;

	if (is_edp(intel_dp)) {
		if (!intel_dp->edid)
			return NULL;

		size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
		edid = kmalloc(size, GFP_KERNEL);
		if (!edid)
			return NULL;

		memcpy(edid, intel_dp->edid, size);
		return edid;
	}

	edid = drm_get_edid(connector, adapter);
	return edid;
}

static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	if (is_edp(intel_dp)) {
		drm_mode_connector_update_edid_property(connector,
							intel_dp->edid);
		ret = drm_add_edid_modes(connector, intel_dp->edid);
		drm_edid_to_eld(connector,
				intel_dp->edid);
		connector->display_info.raw_edid = NULL;
		return intel_dp->edid_mode_count;
	}

	ret = intel_ddc_get_modes(connector, adapter);
	return ret;
}

/**
 * Detect a DP connection via hotplug status and DPCD probing.
 *
 * \return connector_status_connected if the DP port is connected.
 * \return connector_status_disconnected if the DP port is disconnected.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
		      intel_dp->dpcd[6], intel_dp->dpcd[7]);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			connector->display_info.raw_edid = NULL;
			kfree(edid);
		}
	}

	return connector_status_connected;
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret) {
		if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
			struct drm_display_mode *newmode;
			list_for_each_entry(newmode, &connector->probed_modes,
					    head) {
				if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
					intel_dp->panel_fixed_mode =
						drm_mode_duplicate(dev, newmode);
					break;
				}
			}
		}
		return ret;
	}

	/* if eDP has no EDID, try to use fixed panel mode from VBT */
	if (is_edp(intel_dp)) {
		/* initialize panel mode from VBT if available for eDP */
		if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
			intel_dp->panel_fixed_mode =
				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (intel_dp->panel_fixed_mode) {
				intel_dp->panel_fixed_mode->type |=
					DRM_MODE_TYPE_PREFERRED;
			}
		}
		if (intel_dp->panel_fixed_mode) {
			struct drm_display_mode *mode;
			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);

		connector->display_info.raw_edid = NULL;
		kfree(edid);
	}

	return has_audio;
}

static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!intel_dp->color_range)
			return 0;

		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
		goto done;
	}

	return -EINVAL;

done:
	if (intel_dp->base.base.crtc) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;
		drm_crtc_helper_set_mode(crtc, &crtc->mode,
					 crtc->x, crtc->y,
					 crtc->fb);
	}

	return 0;
}

static void
intel_dp_destroy(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	if (intel_dpd_is_edp(dev))
		intel_panel_destroy_backlight(dev);

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		kfree(intel_dp->edid);
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		ironlake_panel_vdd_off_sync(intel_dp);
	}
	kfree(intel_dp);
}

static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.dpms = intel_dp_dpms,
	.mode_fixup = intel_dp_mode_fixup,
	.prepare = intel_dp_prepare,
	.mode_set = intel_dp_mode_set,
	.commit = intel_dp_commit,
};

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};

static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	intel_dp_check_link_status(intel_dp);
}

/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
		struct intel_dp *intel_dp;

		if (encoder->crtc != crtc)
			continue;

		intel_dp = enc_to_intel_dp(encoder);
		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
			return intel_dp->output_reg;
	}

	return -1;
}

/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dpd_is_edp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct child_device_config *p_child;
	int i;

	if (!dev_priv->child_dev_num)
		return false;

	for (i = 0; i < dev_priv->child_dev_num; i++) {
		p_child = dev_priv->child_dev + i;

		if (p_child->dvo_port == PORT_IDPD &&
		    p_child->device_type == DEVICE_TYPE_eDP)
			return true;
	}
	return false;
}

static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
}

void
intel_dp_init(struct drm_device *dev, int output_reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_dp *intel_dp;
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;
	const char *name = NULL;
	int type;

	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
	if (!intel_dp)
		return;

	intel_dp->output_reg = output_reg;
	intel_dp->dpms_mode = -1;

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dp);
		return;
	}
	intel_encoder = &intel_dp->base;

	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	if (output_reg == DP_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	}

	connector = &intel_connector->base;
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;

	if (output_reg == DP_B || output_reg == PCH_DP_B)
		intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
	else if (output_reg == DP_C || output_reg == PCH_DP_C)
		intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
	else if (output_reg == DP_D || output_reg == PCH_DP_D)
		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);

	if (is_edp(intel_dp)) {
		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
		INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
				  ironlake_panel_vdd_work);
	}

	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	/* Set up the DDC bus. */
	switch (output_reg) {
	case DP_A:
		name = "DPDDC-A";
		break;
	case DP_B:
	case PCH_DP_B:
		dev_priv->hotplug_supported_mask |=
			HDMIB_HOTPLUG_INT_STATUS;
		name = "DPDDC-B";
		break;
	case DP_C:
	case PCH_DP_C:
		dev_priv->hotplug_supported_mask |=
			HDMIC_HOTPLUG_INT_STATUS;
		name = "DPDDC-C";
		break;
	case DP_D:
	case PCH_DP_D:
		dev_priv->hotplug_supported_mask |=
			HDMID_HOTPLUG_INT_STATUS;
		name = "DPDDC-D";
		break;
	}

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	/* Cache some DPCD data in the eDP case */
	if (is_edp(intel_dp)) {
		bool ret;
		struct edp_power_seq cur, vbt;
		u32 pp_on, pp_off, pp_div;
		struct edid *edid;

		pp_on = I915_READ(PCH_PP_ON_DELAYS);
		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
		pp_div = I915_READ(PCH_PP_DIVISOR);

		if (!pp_on || !pp_off || !pp_div) {
			DRM_INFO("bad panel power sequencing delays, disabling panel\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}

		/* Pull timing values out of registers */
		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
			PANEL_POWER_UP_DELAY_SHIFT;

		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
			PANEL_LIGHT_ON_DELAY_SHIFT;

		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
			PANEL_LIGHT_OFF_DELAY_SHIFT;

		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
			PANEL_POWER_DOWN_DELAY_SHIFT;

		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
			       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

		vbt = dev_priv->edp.pps;

		DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
			      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
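/* Illustrative note (added, not part of the original source): assuming the
 * PP_* delay fields read back in 100us units, get_delay() picks the larger
 * of the register and VBT values and rounds up to whole milliseconds, e.g.
 * with cur.t1_t3 == 210 (21.0ms) and vbt.t1_t3 == 200:
 *
 *	get_delay(t1_t3) == (max(210, 200) + 9) / 10 == 21	(ms)
 */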

		intel_dp->panel_power_up_delay = get_delay(t1_t3);
		intel_dp->backlight_on_delay = get_delay(t8);
		intel_dp->backlight_off_delay = get_delay(t9);
		intel_dp->panel_power_down_delay = get_delay(t10);
		intel_dp->panel_power_cycle_delay = get_delay(t11_t12);

		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
			      intel_dp->panel_power_cycle_delay);

		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}

		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			intel_dp->edid_mode_count =
				drm_add_edid_modes(connector, edid);
			drm_edid_to_eld(connector, edid);
			intel_dp->edid = edid;
		}
		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	intel_encoder->hot_plug = intel_dp_hot_plug;

	if (is_edp(intel_dp)) {
		dev_priv->int_edp_connector = connector;
		intel_panel_setup_backlight(dev);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows more link rates.
 * Below we only provide the fixed rates; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
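
/* Worked example (added, illustrative): for the 162000 entry above, the
 * fixed-point m2 value decodes back into the annotated components:
 *
 *	m2_int      = 0x819999a >> 22              == 32
 *	m2_fraction = 0x819999a & ((1 << 22) - 1)  == 1677722
 *
 * i.e. m2 ~= 32 + 1677722/4194304 ~= 32.4 in the
 * (m2_int << 22) | m2_fraction format described above.
 */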

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
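
/* Aside (added, illustrative): the default rates match the DPCD link
 * bandwidth codes, which encode the rate in units of 0.27 GHz:
 *
 *	DP_LINK_BW_1_62 (0x06) * 27000 == 162000
 *	DP_LINK_BW_2_7  (0x0a) * 27000 == 270000
 *	DP_LINK_BW_5_4  (0x14) * 27000 == 540000
 */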

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
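
/* Example (added, illustrative): with lane_count == 2,
 * (1 << 2) - 1 == 0b0011, so the function returns ~0b0011 & 0xf == 0b1100,
 * i.e. lanes 2 and 3 are reported unused when only two lanes are enabled.
 */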

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u8 source_max, sink_max;

	source_max = intel_dig_port->max_lanes;
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *	270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
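
/* Worked example (added, illustrative), continuing the units comment above:
 * 1680x1050R has a 119000 kHz dot clock, so at 18bpp
 *
 *	intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200
 *
 * while a 2-lane 270000 (2.7GHz) link provides
 *
 *	intel_dp_max_data_rate(270000, 2) == 270000 * 2 * 8 / 10 == 432000
 *
 * decakilobits of bandwidth, so that mode fits comfortably.
 */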

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
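
/* Example (added, illustrative): packing is big-endian within the 32-bit
 * AUX data register, so intel_dp_pack_aux((uint8_t[]){0x12, 0x34}, 2)
 * returns 0x12340000, and intel_dp_unpack_aux(0x12340000, dst, 2) recovers
 * {0x12, 0x34}.
 */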

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
   This function is only applicable when the panel PM state is not tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and we want it to run
	 * at 2MHz. So take the hrawclk value, divide by 2, and use that.
	 */
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else {
		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organizes the retries at the drm level. There, EBUSY should
		 * also take care of the 1ms wait before retrying.
		 * That aux retry re-org is still needed, and once it is
		 * merged we can remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
		else
			WARN_ON(msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
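
/* Worked example (added, illustrative): for a native AUX read of 16 bytes
 * starting at DPCD address 0x000 (msg->request == DP_AUX_NATIVE_READ,
 * msg->address == 0x000, msg->size == 16), the header built at the top of
 * intel_dp_aux_transfer() is
 *
 *	txbuf[0] = (0x9 << 4) | 0x0 == 0x90	// request + address[19:16]
 *	txbuf[1] = 0x00				// address[15:8]
 *	txbuf[2] = 0x00				// address[7:0]
 *	txbuf[3] = 16 - 1 == 0x0f		// request length - 1
 *
 * with rxsize == 17: one reply-command byte followed by the data.
 */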
1031
1032static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1033 enum port port)
1034{
1035 switch (port) {
1036 case PORT_B:
1037 case PORT_C:
1038 case PORT_D:
1039 return DP_AUX_CH_CTL(port);
1040 default:
1041 MISSING_CASE(port);
1042 return DP_AUX_CH_CTL(PORT_B);
1043 }
1044}
1045
1046static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1047 enum port port, int index)
1048{
1049 switch (port) {
1050 case PORT_B:
1051 case PORT_C:
1052 case PORT_D:
1053 return DP_AUX_CH_DATA(port, index);
1054 default:
1055 MISSING_CASE(port);
1056 return DP_AUX_CH_DATA(PORT_B, index);
1057 }
1058}
1059
1060static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1061 enum port port)
1062{
1063 switch (port) {
1064 case PORT_A:
1065 return DP_AUX_CH_CTL(port);
1066 case PORT_B:
1067 case PORT_C:
1068 case PORT_D:
1069 return PCH_DP_AUX_CH_CTL(port);
1070 default:
1071 MISSING_CASE(port);
1072 return DP_AUX_CH_CTL(PORT_A);
1073 }
1074}
1075
1076static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1077 enum port port, int index)
1078{
1079 switch (port) {
1080 case PORT_A:
1081 return DP_AUX_CH_DATA(port, index);
1082 case PORT_B:
1083 case PORT_C:
1084 case PORT_D:
1085 return PCH_DP_AUX_CH_DATA(port, index);
1086 default:
1087 MISSING_CASE(port);
1088 return DP_AUX_CH_DATA(PORT_A, index);
1089 }
1090}
1091
1092/*
1093 * On SKL we don't have Aux for port E so we rely
1094 * on VBT to set a proper alternate aux channel.
1095 */
1096static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1097{
1098 const struct ddi_vbt_port_info *info =
1099 &dev_priv->vbt.ddi_port_info[PORT_E];
1100
1101 switch (info->alternate_aux_channel) {
1102 case DP_AUX_A:
1103 return PORT_A;
1104 case DP_AUX_B:
1105 return PORT_B;
1106 case DP_AUX_C:
1107 return PORT_C;
1108 case DP_AUX_D:
1109 return PORT_D;
1110 default:
1111 MISSING_CASE(info->alternate_aux_channel);
1112 return PORT_A;
1113 }
1114}
1115
1116static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1117 enum port port)
1118{
1119 if (port == PORT_E)
1120 port = skl_porte_aux_port(dev_priv);
1121
1122 switch (port) {
1123 case PORT_A:
1124 case PORT_B:
1125 case PORT_C:
1126 case PORT_D:
1127 return DP_AUX_CH_CTL(port);
1128 default:
1129 MISSING_CASE(port);
1130 return DP_AUX_CH_CTL(PORT_A);
1131 }
1132}
1133
1134static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1135 enum port port, int index)
1136{
1137 if (port == PORT_E)
1138 port = skl_porte_aux_port(dev_priv);
1139
1140 switch (port) {
1141 case PORT_A:
1142 case PORT_B:
1143 case PORT_C:
1144 case PORT_D:
1145 return DP_AUX_CH_DATA(port, index);
1146 default:
1147 MISSING_CASE(port);
1148 return DP_AUX_CH_DATA(PORT_A, index);
1149 }
1150}
1151
1152static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1153 enum port port)
1154{
1155 if (INTEL_INFO(dev_priv)->gen >= 9)
1156 return skl_aux_ctl_reg(dev_priv, port);
1157 else if (HAS_PCH_SPLIT(dev_priv))
1158 return ilk_aux_ctl_reg(dev_priv, port);
1159 else
1160 return g4x_aux_ctl_reg(dev_priv, port);
1161}
1162
1163static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1164 enum port port, int index)
1165{
1166 if (INTEL_INFO(dev_priv)->gen >= 9)
1167 return skl_aux_data_reg(dev_priv, port, index);
1168 else if (HAS_PCH_SPLIT(dev_priv))
1169 return ilk_aux_data_reg(dev_priv, port, index);
1170 else
1171 return g4x_aux_data_reg(dev_priv, port, index);
1172}
1173
1174static void intel_aux_reg_init(struct intel_dp *intel_dp)
1175{
1176 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1177 enum port port = dp_to_dig_port(intel_dp)->port;
1178 int i;
1179
1180 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1181 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1182 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1183}
1184
1185static void
1186intel_dp_aux_fini(struct intel_dp *intel_dp)
1187{
1188 drm_dp_aux_unregister(&intel_dp->aux);
1189 kfree(intel_dp->aux.name);
1190}
1191
1192static int
1193intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1194{
1195 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1196 enum port port = intel_dig_port->port;
1197 int ret;
1198
1199 intel_aux_reg_init(intel_dp);
1200
1201 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1202 if (!intel_dp->aux.name)
1203 return -ENOMEM;
1204
1205 intel_dp->aux.dev = connector->base.kdev;
1206 intel_dp->aux.transfer = intel_dp_aux_transfer;
1207
1208 DRM_DEBUG_KMS("registering %s bus for %s\n",
1209 intel_dp->aux.name,
1210 connector->base.kdev->kobj.name);
1211
1212 ret = drm_dp_aux_register(&intel_dp->aux);
1213 if (ret < 0) {
1214 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1215 intel_dp->aux.name, ret);
1216 kfree(intel_dp->aux.name);
1217 return ret;
1218 }
1219
1220 return 0;
1221}
1222
1223static void
1224intel_dp_connector_unregister(struct intel_connector *intel_connector)
1225{
1226 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1227
1228 intel_dp_aux_fini(intel_dp);
1229 intel_connector_unregister(intel_connector);
1230}
1231
1232static void
1233skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1234{
1235 u32 ctrl1;
1236
1237 memset(&pipe_config->dpll_hw_state, 0,
1238 sizeof(pipe_config->dpll_hw_state));
1239
1240 pipe_config->ddi_pll_sel = SKL_DPLL0;
1241 pipe_config->dpll_hw_state.cfgcr1 = 0;
1242 pipe_config->dpll_hw_state.cfgcr2 = 0;
1243
1244 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1245 switch (pipe_config->port_clock / 2) {
1246 case 81000:
1247 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1248 SKL_DPLL0);
1249 break;
1250 case 135000:
1251 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1252 SKL_DPLL0);
1253 break;
1254 case 270000:
1255 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1256 SKL_DPLL0);
1257 break;
1258 case 162000:
1259 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1260 SKL_DPLL0);
1261 break;
1262	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
1263	 * results in a CDCLK change. Need to handle the change of CDCLK by
1264	 * disabling pipes and re-enabling them. */
1265 case 108000:
1266 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1267 SKL_DPLL0);
1268 break;
1269 case 216000:
1270 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1271 SKL_DPLL0);
1272 break;
1273
1274 }
1275 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1276}
1277
1278void
1279hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1280{
1281 memset(&pipe_config->dpll_hw_state, 0,
1282 sizeof(pipe_config->dpll_hw_state));
1283
1284 switch (pipe_config->port_clock / 2) {
1285 case 81000:
1286 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1287 break;
1288 case 135000:
1289 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1290 break;
1291 case 270000:
1292 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1293 break;
1294 }
1295}
1296
1297static int
1298intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1299{
1300 if (intel_dp->num_sink_rates) {
1301 *sink_rates = intel_dp->sink_rates;
1302 return intel_dp->num_sink_rates;
1303 }
1304
1305 *sink_rates = default_rates;
1306
1307 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1308}
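/*
 * The fallback above exploits the legacy DP_MAX_LINK_RATE encoding:
 * DP_LINK_BW_1_62 (0x06) >> 3 == 0, DP_LINK_BW_2_7 (0x0a) >> 3 == 1 and
 * DP_LINK_BW_5_4 (0x14) >> 3 == 2, so the return value is the number of
 * leading entries of default_rates[] the sink can use.
 */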
1309
1310bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1311{
1312 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1313 struct drm_device *dev = dig_port->base.base.dev;
1314
1315 /* WaDisableHBR2:skl */
1316 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1317 return false;
1318
1319 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1320 (INTEL_INFO(dev)->gen >= 9))
1321 return true;
1322 else
1323 return false;
1324}
1325
1326static int
1327intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1328{
1329 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1330 struct drm_device *dev = dig_port->base.base.dev;
1331 int size;
1332
1333 if (IS_BROXTON(dev)) {
1334 *source_rates = bxt_rates;
1335 size = ARRAY_SIZE(bxt_rates);
1336 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1337 *source_rates = skl_rates;
1338 size = ARRAY_SIZE(skl_rates);
1339 } else {
1340 *source_rates = default_rates;
1341 size = ARRAY_SIZE(default_rates);
1342 }
1343
1344	/* This depends on the fact that 5.4 is the last value in the array */
1345 if (!intel_dp_source_supports_hbr2(intel_dp))
1346 size--;
1347
1348 return size;
1349}
1350
1351static void
1352intel_dp_set_clock(struct intel_encoder *encoder,
1353 struct intel_crtc_state *pipe_config)
1354{
1355 struct drm_device *dev = encoder->base.dev;
1356 const struct dp_link_dpll *divisor = NULL;
1357 int i, count = 0;
1358
1359 if (IS_G4X(dev)) {
1360 divisor = gen4_dpll;
1361 count = ARRAY_SIZE(gen4_dpll);
1362 } else if (HAS_PCH_SPLIT(dev)) {
1363 divisor = pch_dpll;
1364 count = ARRAY_SIZE(pch_dpll);
1365 } else if (IS_CHERRYVIEW(dev)) {
1366 divisor = chv_dpll;
1367 count = ARRAY_SIZE(chv_dpll);
1368 } else if (IS_VALLEYVIEW(dev)) {
1369 divisor = vlv_dpll;
1370 count = ARRAY_SIZE(vlv_dpll);
1371 }
1372
1373 if (divisor && count) {
1374 for (i = 0; i < count; i++) {
1375 if (pipe_config->port_clock == divisor[i].clock) {
1376 pipe_config->dpll = divisor[i].dpll;
1377 pipe_config->clock_set = true;
1378 break;
1379 }
1380 }
1381 }
1382}
1383
1384static int intersect_rates(const int *source_rates, int source_len,
1385 const int *sink_rates, int sink_len,
1386 int *common_rates)
1387{
1388 int i = 0, j = 0, k = 0;
1389
1390 while (i < source_len && j < sink_len) {
1391 if (source_rates[i] == sink_rates[j]) {
1392 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1393 return k;
1394 common_rates[k] = source_rates[i];
1395 ++k;
1396 ++i;
1397 ++j;
1398 } else if (source_rates[i] < sink_rates[j]) {
1399 ++i;
1400 } else {
1401 ++j;
1402 }
1403 }
1404 return k;
1405}
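/*
 * Worked example: intersecting source rates { 162000, 270000, 540000 }
 * with sink rates { 162000, 270000 } walks both pre-sorted, ascending
 * arrays in lockstep and returns k == 2 with common_rates holding
 * { 162000, 270000 }.
 */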
1406
1407static int intel_dp_common_rates(struct intel_dp *intel_dp,
1408 int *common_rates)
1409{
1410 const int *source_rates, *sink_rates;
1411 int source_len, sink_len;
1412
1413 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1414 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1415
1416 return intersect_rates(source_rates, source_len,
1417 sink_rates, sink_len,
1418 common_rates);
1419}
1420
1421static void snprintf_int_array(char *str, size_t len,
1422 const int *array, int nelem)
1423{
1424 int i;
1425
1426 str[0] = '\0';
1427
1428 for (i = 0; i < nelem; i++) {
1429 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1430 if (r >= len)
1431 return;
1432 str += r;
1433 len -= r;
1434 }
1435}
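/*
 * For example, an array of { 162000, 270000 } is rendered as
 * "162000, 270000"; once the buffer would overflow, the output is
 * silently truncated instead.
 */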
1436
1437static void intel_dp_print_rates(struct intel_dp *intel_dp)
1438{
1439 const int *source_rates, *sink_rates;
1440 int source_len, sink_len, common_len;
1441 int common_rates[DP_MAX_SUPPORTED_RATES];
1442 char str[128]; /* FIXME: too big for stack? */
1443
1444 if ((drm_debug & DRM_UT_KMS) == 0)
1445 return;
1446
1447 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1448 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1449 DRM_DEBUG_KMS("source rates: %s\n", str);
1450
1451 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1452 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1453 DRM_DEBUG_KMS("sink rates: %s\n", str);
1454
1455 common_len = intel_dp_common_rates(intel_dp, common_rates);
1456 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1457 DRM_DEBUG_KMS("common rates: %s\n", str);
1458}
1459
1460static int rate_to_index(int find, const int *rates)
1461{
1462 int i = 0;
1463
1464 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1465 if (find == rates[i])
1466 break;
1467
1468 return i;
1469}
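/*
 * Note that a rate that isn't found yields DP_MAX_SUPPORTED_RATES.
 * intel_dp_max_link_rate() below leans on this with find == 0: its
 * rates[] array is zero-initialized, so the index of the first 0 is the
 * number of valid entries and rates[index - 1] is the highest common
 * rate.
 */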
1470
1471int
1472intel_dp_max_link_rate(struct intel_dp *intel_dp)
1473{
1474 int rates[DP_MAX_SUPPORTED_RATES] = {};
1475 int len;
1476
1477 len = intel_dp_common_rates(intel_dp, rates);
1478 if (WARN_ON(len <= 0))
1479 return 162000;
1480
1481 return rates[rate_to_index(0, rates) - 1];
1482}
1483
1484int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1485{
1486 return rate_to_index(rate, intel_dp->sink_rates);
1487}
1488
1489void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1490 uint8_t *link_bw, uint8_t *rate_select)
1491{
1492 if (intel_dp->num_sink_rates) {
1493 *link_bw = 0;
1494 *rate_select =
1495 intel_dp_rate_select(intel_dp, port_clock);
1496 } else {
1497 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1498 *rate_select = 0;
1499 }
1500}
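/*
 * Example: a sink that advertises a sink_rates[] table (eDP 1.4 style)
 * gets *link_bw == 0 and the rate passed as an index via *rate_select,
 * while a classic sink gets the bandwidth code, e.g.
 * drm_dp_link_rate_to_bw_code(270000) == DP_LINK_BW_2_7 (0x0a).
 */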
1501
1502bool
1503intel_dp_compute_config(struct intel_encoder *encoder,
1504 struct intel_crtc_state *pipe_config)
1505{
1506 struct drm_device *dev = encoder->base.dev;
1507 struct drm_i915_private *dev_priv = dev->dev_private;
1508 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1510 enum port port = dp_to_dig_port(intel_dp)->port;
1511 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1512 struct intel_connector *intel_connector = intel_dp->attached_connector;
1513 int lane_count, clock;
1514 int min_lane_count = 1;
1515 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1516	/* Conveniently, the link BW constants become indices with a shift... */
1517 int min_clock = 0;
1518 int max_clock;
1519 int bpp, mode_rate;
1520 int link_avail, link_clock;
1521 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1522 int common_len;
1523 uint8_t link_bw, rate_select;
1524
1525 common_len = intel_dp_common_rates(intel_dp, common_rates);
1526
1527 /* No common link rates between source and sink */
1528 WARN_ON(common_len <= 0);
1529
1530 max_clock = common_len - 1;
1531
1532 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1533 pipe_config->has_pch_encoder = true;
1534
1535 pipe_config->has_dp_encoder = true;
1536 pipe_config->has_drrs = false;
1537 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1538
1539 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1540 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1541 adjusted_mode);
1542
1543 if (INTEL_INFO(dev)->gen >= 9) {
1544 int ret;
1545 ret = skl_update_scaler_crtc(pipe_config);
1546 if (ret)
1547				return false;
1548 }
1549
1550 if (HAS_GMCH_DISPLAY(dev))
1551 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1552 intel_connector->panel.fitting_mode);
1553 else
1554 intel_pch_panel_fitting(intel_crtc, pipe_config,
1555 intel_connector->panel.fitting_mode);
1556 }
1557
1558 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1559 return false;
1560
1561 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1562 "max bw %d pixel clock %iKHz\n",
1563 max_lane_count, common_rates[max_clock],
1564 adjusted_mode->crtc_clock);
1565
1566 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1567 * bpc in between. */
1568 bpp = pipe_config->pipe_bpp;
1569 if (is_edp(intel_dp)) {
1570
1571		/* Get bpp from VBT only for panels that don't have bpp in EDID */
1572 if (intel_connector->base.display_info.bpc == 0 &&
1573 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1574 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1575 dev_priv->vbt.edp_bpp);
1576 bpp = dev_priv->vbt.edp_bpp;
1577 }
1578
1579 /*
1580 * Use the maximum clock and number of lanes the eDP panel
1581		 * advertises being capable of. The panels are generally
1582 * designed to support only a single clock and lane
1583 * configuration, and typically these values correspond to the
1584 * native resolution of the panel.
1585 */
1586 min_lane_count = max_lane_count;
1587 min_clock = max_clock;
1588 }
1589
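	/*
	 * Search order: deepest bpp first, then the lowest link clock and
	 * the fewest lanes that can carry the resulting mode rate, so we
	 * end up with the cheapest link configuration that still supports
	 * the best available color depth.
	 */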
1590 for (; bpp >= 6*3; bpp -= 2*3) {
1591 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1592 bpp);
1593
1594 for (clock = min_clock; clock <= max_clock; clock++) {
1595 for (lane_count = min_lane_count;
1596 lane_count <= max_lane_count;
1597 lane_count <<= 1) {
1598
1599 link_clock = common_rates[clock];
1600 link_avail = intel_dp_max_data_rate(link_clock,
1601 lane_count);
1602
1603 if (mode_rate <= link_avail) {
1604 goto found;
1605 }
1606 }
1607 }
1608 }
1609
1610 return false;
1611
1612found:
1613 if (intel_dp->color_range_auto) {
1614 /*
1615 * See:
1616 * CEA-861-E - 5.1 Default Encoding Parameters
1617 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1618 */
1619 pipe_config->limited_color_range =
1620 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1621 } else {
1622 pipe_config->limited_color_range =
1623 intel_dp->limited_color_range;
1624 }
1625
1626 pipe_config->lane_count = lane_count;
1627
1628 pipe_config->pipe_bpp = bpp;
1629 pipe_config->port_clock = common_rates[clock];
1630
1631 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1632 &link_bw, &rate_select);
1633
1634 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1635 link_bw, rate_select, pipe_config->lane_count,
1636 pipe_config->port_clock, bpp);
1637 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1638 mode_rate, link_avail);
1639
1640 intel_link_compute_m_n(bpp, lane_count,
1641 adjusted_mode->crtc_clock,
1642 pipe_config->port_clock,
1643 &pipe_config->dp_m_n);
1644
1645 if (intel_connector->panel.downclock_mode != NULL &&
1646 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1647 pipe_config->has_drrs = true;
1648 intel_link_compute_m_n(bpp, lane_count,
1649 intel_connector->panel.downclock_mode->clock,
1650 pipe_config->port_clock,
1651 &pipe_config->dp_m2_n2);
1652 }
1653
1654 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1655 skl_edp_set_pll_config(pipe_config);
1656 else if (IS_BROXTON(dev))
1657 /* handled in ddi */;
1658 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1659 hsw_dp_set_ddi_pll_sel(pipe_config);
1660 else
1661 intel_dp_set_clock(encoder, pipe_config);
1662
1663 return true;
1664}
1665
1666void intel_dp_set_link_params(struct intel_dp *intel_dp,
1667 const struct intel_crtc_state *pipe_config)
1668{
1669 intel_dp->link_rate = pipe_config->port_clock;
1670 intel_dp->lane_count = pipe_config->lane_count;
1671}
1672
1673static void intel_dp_prepare(struct intel_encoder *encoder)
1674{
1675 struct drm_device *dev = encoder->base.dev;
1676 struct drm_i915_private *dev_priv = dev->dev_private;
1677 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1678 enum port port = dp_to_dig_port(intel_dp)->port;
1679 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1680 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1681
1682 intel_dp_set_link_params(intel_dp, crtc->config);
1683
1684 /*
1685 * There are four kinds of DP registers:
1686 *
1687 * IBX PCH
1688 * SNB CPU
1689 * IVB CPU
1690 * CPT PCH
1691 *
1692 * IBX PCH and CPU are the same for almost everything,
1693 * except that the CPU DP PLL is configured in this
1694 * register
1695 *
1696 * CPT PCH is quite different, having many bits moved
1697 * to the TRANS_DP_CTL register instead. That
1698 * configuration happens (oddly) in ironlake_pch_enable
1699 */
1700
1701 /* Preserve the BIOS-computed detected bit. This is
1702 * supposed to be read-only.
1703 */
1704 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1705
1706 /* Handle DP bits in common between all three register formats */
1707 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1708 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1709
1710 /* Split out the IBX/CPU vs CPT settings */
1711
1712 if (IS_GEN7(dev) && port == PORT_A) {
1713 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1714 intel_dp->DP |= DP_SYNC_HS_HIGH;
1715 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1716 intel_dp->DP |= DP_SYNC_VS_HIGH;
1717 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1718
1719 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1720 intel_dp->DP |= DP_ENHANCED_FRAMING;
1721
1722 intel_dp->DP |= crtc->pipe << 29;
1723 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1724 u32 trans_dp;
1725
1726 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1727
1728 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1729 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1730 trans_dp |= TRANS_DP_ENH_FRAMING;
1731 else
1732 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1733 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1734 } else {
1735 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1736 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
1737 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1738
1739 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1740 intel_dp->DP |= DP_SYNC_HS_HIGH;
1741 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1742 intel_dp->DP |= DP_SYNC_VS_HIGH;
1743 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1744
1745 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1746 intel_dp->DP |= DP_ENHANCED_FRAMING;
1747
1748 if (IS_CHERRYVIEW(dev))
1749 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1750 else if (crtc->pipe == PIPE_B)
1751 intel_dp->DP |= DP_PIPEB_SELECT;
1752 }
1753}
1754
1755#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1756#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1757
1758#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1759#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1760
1761#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1762#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
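/*
 * Each MASK/VALUE pair above is consumed by wait_panel_status() below:
 * PP_STATUS is polled until (status & MASK) == VALUE, i.e. until the
 * panel power sequencer settles in the on, off or cycle-idle state.
 */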
1763
1764static void wait_panel_status(struct intel_dp *intel_dp,
1765 u32 mask,
1766 u32 value)
1767{
1768 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1769 struct drm_i915_private *dev_priv = dev->dev_private;
1770 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1771
1772 lockdep_assert_held(&dev_priv->pps_mutex);
1773
1774 pp_stat_reg = _pp_stat_reg(intel_dp);
1775 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1776
1777 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1778 mask, value,
1779 I915_READ(pp_stat_reg),
1780 I915_READ(pp_ctrl_reg));
1781
1782 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1783 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1784 I915_READ(pp_stat_reg),
1785 I915_READ(pp_ctrl_reg));
1786 }
1787
1788 DRM_DEBUG_KMS("Wait complete\n");
1789}
1790
1791static void wait_panel_on(struct intel_dp *intel_dp)
1792{
1793 DRM_DEBUG_KMS("Wait for panel power on\n");
1794 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1795}
1796
1797static void wait_panel_off(struct intel_dp *intel_dp)
1798{
1799 DRM_DEBUG_KMS("Wait for panel power off time\n");
1800 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1801}
1802
1803static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1804{
1805 ktime_t panel_power_on_time;
1806 s64 panel_power_off_duration;
1807
1808 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1809
1810	/* Take the difference between the current time and the panel power
1811	 * off time, and then make the panel wait out t11_t12 if needed. */
1812 panel_power_on_time = ktime_get_boottime();
1813 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1814
1815	/* When the VDD override bit is the last thing to be disabled, we
1816	 * have to do the remaining wait manually. */
1817 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1818 wait_remaining_ms_from_jiffies(jiffies,
1819 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
1820
1821 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1822}
1823
1824static void wait_backlight_on(struct intel_dp *intel_dp)
1825{
1826 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1827 intel_dp->backlight_on_delay);
1828}
1829
1830static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1831{
1832 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1833 intel_dp->backlight_off_delay);
1834}
1835
1836/* Read the current pp_control value, unlocking the register if it
1837 * is locked
1838 */
1839
1840static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1841{
1842 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1843 struct drm_i915_private *dev_priv = dev->dev_private;
1844 u32 control;
1845
1846 lockdep_assert_held(&dev_priv->pps_mutex);
1847
1848 control = I915_READ(_pp_ctrl_reg(intel_dp));
1849 if (!IS_BROXTON(dev)) {
1850 control &= ~PANEL_UNLOCK_MASK;
1851 control |= PANEL_UNLOCK_REGS;
1852 }
1853 return control;
1854}
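/*
 * Broxton is skipped above presumably because its PP_CONTROL lacks the
 * write-protect key field, so there is nothing to unlock.
 */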
1855
1856/*
1857 * Must be paired with edp_panel_vdd_off().
1858 * Must hold pps_mutex around the whole on/off sequence.
1859 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1860 */
1861static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1862{
1863 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1864 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1865 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1866 struct drm_i915_private *dev_priv = dev->dev_private;
1867 enum intel_display_power_domain power_domain;
1868 u32 pp;
1869 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1870 bool need_to_disable = !intel_dp->want_panel_vdd;
1871
1872 lockdep_assert_held(&dev_priv->pps_mutex);
1873
1874 if (!is_edp(intel_dp))
1875 return false;
1876
1877 cancel_delayed_work(&intel_dp->panel_vdd_work);
1878 intel_dp->want_panel_vdd = true;
1879
1880 if (edp_have_panel_vdd(intel_dp))
1881 return need_to_disable;
1882
1883 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1884 intel_display_power_get(dev_priv, power_domain);
1885
1886 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1887 port_name(intel_dig_port->port));
1888
1889 if (!edp_have_panel_power(intel_dp))
1890 wait_panel_power_cycle(intel_dp);
1891
1892 pp = ironlake_get_pp_control(intel_dp);
1893 pp |= EDP_FORCE_VDD;
1894
1895 pp_stat_reg = _pp_stat_reg(intel_dp);
1896 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1897
1898 I915_WRITE(pp_ctrl_reg, pp);
1899 POSTING_READ(pp_ctrl_reg);
1900 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1901 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1902 /*
1903 * If the panel wasn't on, delay before accessing aux channel
1904 */
1905 if (!edp_have_panel_power(intel_dp)) {
1906 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1907 port_name(intel_dig_port->port));
1908 msleep(intel_dp->panel_power_up_delay);
1909 }
1910
1911 return need_to_disable;
1912}
1913
1914/*
1915 * Must be paired with intel_edp_panel_vdd_off() or
1916 * intel_edp_panel_off().
1917 * Nested calls to these functions are not allowed since
1918 * we drop the lock. Caller must use some higher level
1919 * locking to prevent nested calls from other threads.
1920 */
1921void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1922{
1923 bool vdd;
1924
1925 if (!is_edp(intel_dp))
1926 return;
1927
1928 pps_lock(intel_dp);
1929 vdd = edp_panel_vdd_on(intel_dp);
1930 pps_unlock(intel_dp);
1931
1932 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1933 port_name(dp_to_dig_port(intel_dp)->port));
1934}
1935
1936static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1937{
1938 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1939 struct drm_i915_private *dev_priv = dev->dev_private;
1940 struct intel_digital_port *intel_dig_port =
1941 dp_to_dig_port(intel_dp);
1942 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1943 enum intel_display_power_domain power_domain;
1944 u32 pp;
1945 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1946
1947 lockdep_assert_held(&dev_priv->pps_mutex);
1948
1949 WARN_ON(intel_dp->want_panel_vdd);
1950
1951 if (!edp_have_panel_vdd(intel_dp))
1952 return;
1953
1954 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1955 port_name(intel_dig_port->port));
1956
1957 pp = ironlake_get_pp_control(intel_dp);
1958 pp &= ~EDP_FORCE_VDD;
1959
1960 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1961 pp_stat_reg = _pp_stat_reg(intel_dp);
1962
1963 I915_WRITE(pp_ctrl_reg, pp);
1964 POSTING_READ(pp_ctrl_reg);
1965
1966 /* Make sure sequencer is idle before allowing subsequent activity */
1967 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1968 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1969
1970 if ((pp & POWER_TARGET_ON) == 0)
1971 intel_dp->panel_power_off_time = ktime_get_boottime();
1972
1973 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1974 intel_display_power_put(dev_priv, power_domain);
1975}
1976
1977static void edp_panel_vdd_work(struct work_struct *__work)
1978{
1979 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1980 struct intel_dp, panel_vdd_work);
1981
1982 pps_lock(intel_dp);
1983 if (!intel_dp->want_panel_vdd)
1984 edp_panel_vdd_off_sync(intel_dp);
1985 pps_unlock(intel_dp);
1986}
1987
1988static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1989{
1990 unsigned long delay;
1991
1992 /*
1993 * Queue the timer to fire a long time from now (relative to the power
1994 * down delay) to keep the panel power up across a sequence of
1995 * operations.
1996 */
1997 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1998 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1999}
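/*
 * E.g. with a (typical) panel_power_cycle_delay of 500ms, the VDD
 * override is kept alive for about 2.5 seconds of idle time before
 * being dropped.
 */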
2000
2001/*
2002 * Must be paired with edp_panel_vdd_on().
2003 * Must hold pps_mutex around the whole on/off sequence.
2004 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2005 */
2006static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2007{
2008 struct drm_i915_private *dev_priv =
2009 intel_dp_to_dev(intel_dp)->dev_private;
2010
2011 lockdep_assert_held(&dev_priv->pps_mutex);
2012
2013 if (!is_edp(intel_dp))
2014 return;
2015
2016 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2017 port_name(dp_to_dig_port(intel_dp)->port));
2018
2019 intel_dp->want_panel_vdd = false;
2020
2021 if (sync)
2022 edp_panel_vdd_off_sync(intel_dp);
2023 else
2024 edp_panel_vdd_schedule_off(intel_dp);
2025}
2026
2027static void edp_panel_on(struct intel_dp *intel_dp)
2028{
2029 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2030 struct drm_i915_private *dev_priv = dev->dev_private;
2031 u32 pp;
2032 i915_reg_t pp_ctrl_reg;
2033
2034 lockdep_assert_held(&dev_priv->pps_mutex);
2035
2036 if (!is_edp(intel_dp))
2037 return;
2038
2039 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2040 port_name(dp_to_dig_port(intel_dp)->port));
2041
2042 if (WARN(edp_have_panel_power(intel_dp),
2043 "eDP port %c panel power already on\n",
2044 port_name(dp_to_dig_port(intel_dp)->port)))
2045 return;
2046
2047 wait_panel_power_cycle(intel_dp);
2048
2049 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2050 pp = ironlake_get_pp_control(intel_dp);
2051 if (IS_GEN5(dev)) {
2052 /* ILK workaround: disable reset around power sequence */
2053 pp &= ~PANEL_POWER_RESET;
2054 I915_WRITE(pp_ctrl_reg, pp);
2055 POSTING_READ(pp_ctrl_reg);
2056 }
2057
2058 pp |= POWER_TARGET_ON;
2059 if (!IS_GEN5(dev))
2060 pp |= PANEL_POWER_RESET;
2061
2062 I915_WRITE(pp_ctrl_reg, pp);
2063 POSTING_READ(pp_ctrl_reg);
2064
2065 wait_panel_on(intel_dp);
2066 intel_dp->last_power_on = jiffies;
2067
2068 if (IS_GEN5(dev)) {
2069 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2070 I915_WRITE(pp_ctrl_reg, pp);
2071 POSTING_READ(pp_ctrl_reg);
2072 }
2073}
2074
2075void intel_edp_panel_on(struct intel_dp *intel_dp)
2076{
2077 if (!is_edp(intel_dp))
2078 return;
2079
2080 pps_lock(intel_dp);
2081 edp_panel_on(intel_dp);
2082 pps_unlock(intel_dp);
2083}
2084
2085
2086static void edp_panel_off(struct intel_dp *intel_dp)
2087{
2088 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2089 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2090 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2091 struct drm_i915_private *dev_priv = dev->dev_private;
2092 enum intel_display_power_domain power_domain;
2093 u32 pp;
2094 i915_reg_t pp_ctrl_reg;
2095
2096 lockdep_assert_held(&dev_priv->pps_mutex);
2097
2098 if (!is_edp(intel_dp))
2099 return;
2100
2101 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2102 port_name(dp_to_dig_port(intel_dp)->port));
2103
2104 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2105 port_name(dp_to_dig_port(intel_dp)->port));
2106
2107 pp = ironlake_get_pp_control(intel_dp);
2108 /* We need to switch off panel power _and_ force vdd, for otherwise some
2109 * panels get very unhappy and cease to work. */
2110 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2111 EDP_BLC_ENABLE);
2112
2113 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2114
2115 intel_dp->want_panel_vdd = false;
2116
2117 I915_WRITE(pp_ctrl_reg, pp);
2118 POSTING_READ(pp_ctrl_reg);
2119
2120 intel_dp->panel_power_off_time = ktime_get_boottime();
2121 wait_panel_off(intel_dp);
2122
2123 /* We got a reference when we enabled the VDD. */
2124 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2125 intel_display_power_put(dev_priv, power_domain);
2126}
2127
2128void intel_edp_panel_off(struct intel_dp *intel_dp)
2129{
2130 if (!is_edp(intel_dp))
2131 return;
2132
2133 pps_lock(intel_dp);
2134 edp_panel_off(intel_dp);
2135 pps_unlock(intel_dp);
2136}
2137
2138/* Enable backlight in the panel power control. */
2139static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2140{
2141 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2142 struct drm_device *dev = intel_dig_port->base.base.dev;
2143 struct drm_i915_private *dev_priv = dev->dev_private;
2144 u32 pp;
2145 i915_reg_t pp_ctrl_reg;
2146
2147 /*
2148 * If we enable the backlight right away following a panel power
2149 * on, we may see slight flicker as the panel syncs with the eDP
2150 * link. So delay a bit to make sure the image is solid before
2151 * allowing it to appear.
2152 */
2153 wait_backlight_on(intel_dp);
2154
2155 pps_lock(intel_dp);
2156
2157 pp = ironlake_get_pp_control(intel_dp);
2158 pp |= EDP_BLC_ENABLE;
2159
2160 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2161
2162 I915_WRITE(pp_ctrl_reg, pp);
2163 POSTING_READ(pp_ctrl_reg);
2164
2165 pps_unlock(intel_dp);
2166}
2167
2168/* Enable backlight PWM and backlight PP control. */
2169void intel_edp_backlight_on(struct intel_dp *intel_dp)
2170{
2171 if (!is_edp(intel_dp))
2172 return;
2173
2174 DRM_DEBUG_KMS("\n");
2175
2176 intel_panel_enable_backlight(intel_dp->attached_connector);
2177 _intel_edp_backlight_on(intel_dp);
2178}
2179
2180/* Disable backlight in the panel power control. */
2181static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2182{
2183 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2184 struct drm_i915_private *dev_priv = dev->dev_private;
2185 u32 pp;
2186 i915_reg_t pp_ctrl_reg;
2187
2188 if (!is_edp(intel_dp))
2189 return;
2190
2191 pps_lock(intel_dp);
2192
2193 pp = ironlake_get_pp_control(intel_dp);
2194 pp &= ~EDP_BLC_ENABLE;
2195
2196 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2197
2198 I915_WRITE(pp_ctrl_reg, pp);
2199 POSTING_READ(pp_ctrl_reg);
2200
2201 pps_unlock(intel_dp);
2202
2203 intel_dp->last_backlight_off = jiffies;
2204 edp_wait_backlight_off(intel_dp);
2205}
2206
2207/* Disable backlight PP control and backlight PWM. */
2208void intel_edp_backlight_off(struct intel_dp *intel_dp)
2209{
2210 if (!is_edp(intel_dp))
2211 return;
2212
2213 DRM_DEBUG_KMS("\n");
2214
2215 _intel_edp_backlight_off(intel_dp);
2216 intel_panel_disable_backlight(intel_dp->attached_connector);
2217}
2218
2219/*
2220 * Hook for controlling the panel power control backlight through the bl_power
2221 * sysfs attribute. Take care to handle multiple calls.
2222 */
2223static void intel_edp_backlight_power(struct intel_connector *connector,
2224 bool enable)
2225{
2226 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2227 bool is_enabled;
2228
2229 pps_lock(intel_dp);
2230 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2231 pps_unlock(intel_dp);
2232
2233 if (is_enabled == enable)
2234 return;
2235
2236 DRM_DEBUG_KMS("panel power control backlight %s\n",
2237 enable ? "enable" : "disable");
2238
2239 if (enable)
2240 _intel_edp_backlight_on(intel_dp);
2241 else
2242 _intel_edp_backlight_off(intel_dp);
2243}
2244
2245static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2246{
2247 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2248 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2249 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2250
2251 I915_STATE_WARN(cur_state != state,
2252 "DP port %c state assertion failure (expected %s, current %s)\n",
2253 port_name(dig_port->port),
2254 onoff(state), onoff(cur_state));
2255}
2256#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2257
2258static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2259{
2260 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2261
2262 I915_STATE_WARN(cur_state != state,
2263 "eDP PLL state assertion failure (expected %s, current %s)\n",
2264 onoff(state), onoff(cur_state));
2265}
2266#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2267#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2268
2269static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2270{
2271 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2272 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2273 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2274
2275 assert_pipe_disabled(dev_priv, crtc->pipe);
2276 assert_dp_port_disabled(intel_dp);
2277 assert_edp_pll_disabled(dev_priv);
2278
2279 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2280 crtc->config->port_clock);
2281
2282 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2283
2284 if (crtc->config->port_clock == 162000)
2285 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2286 else
2287 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2288
2289 I915_WRITE(DP_A, intel_dp->DP);
2290 POSTING_READ(DP_A);
2291 udelay(500);
2292
2293 intel_dp->DP |= DP_PLL_ENABLE;
2294
2295 I915_WRITE(DP_A, intel_dp->DP);
2296 POSTING_READ(DP_A);
2297 udelay(200);
2298}
2299
2300static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2301{
2302 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2303 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2304 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2305
2306 assert_pipe_disabled(dev_priv, crtc->pipe);
2307 assert_dp_port_disabled(intel_dp);
2308 assert_edp_pll_enabled(dev_priv);
2309
2310 DRM_DEBUG_KMS("disabling eDP PLL\n");
2311
2312 intel_dp->DP &= ~DP_PLL_ENABLE;
2313
2314 I915_WRITE(DP_A, intel_dp->DP);
2315 POSTING_READ(DP_A);
2316 udelay(200);
2317}
2318
2319/* If the sink supports it, try to set the power state appropriately */
2320void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2321{
2322 int ret, i;
2323
2324 /* Should have a valid DPCD by this point */
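	/* DPCD 1.0 sinks don't implement DP_SET_POWER, hence the gate below. */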
2325 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2326 return;
2327
2328 if (mode != DRM_MODE_DPMS_ON) {
2329 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2330 DP_SET_POWER_D3);
2331 } else {
2332 /*
2333		 * When turning on, we need to retry a few times, sleeping 1 ms
2334		 * between attempts, to give the sink time to wake up.
2335 */
2336 for (i = 0; i < 3; i++) {
2337 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2338 DP_SET_POWER_D0);
2339 if (ret == 1)
2340 break;
2341 msleep(1);
2342 }
2343 }
2344
2345 if (ret != 1)
2346 DRM_DEBUG_KMS("failed to %s sink power state\n",
2347 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2348}
2349
2350static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2351 enum pipe *pipe)
2352{
2353 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2354 enum port port = dp_to_dig_port(intel_dp)->port;
2355 struct drm_device *dev = encoder->base.dev;
2356 struct drm_i915_private *dev_priv = dev->dev_private;
2357 enum intel_display_power_domain power_domain;
2358 u32 tmp;
2359 bool ret;
2360
2361 power_domain = intel_display_port_power_domain(encoder);
2362 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2363 return false;
2364
2365 ret = false;
2366
2367 tmp = I915_READ(intel_dp->output_reg);
2368
2369 if (!(tmp & DP_PORT_EN))
2370 goto out;
2371
2372 if (IS_GEN7(dev) && port == PORT_A) {
2373 *pipe = PORT_TO_PIPE_CPT(tmp);
2374 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2375 enum pipe p;
2376
2377 for_each_pipe(dev_priv, p) {
2378 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2379 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2380 *pipe = p;
2381 ret = true;
2382
2383 goto out;
2384 }
2385 }
2386
2387 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2388 i915_mmio_reg_offset(intel_dp->output_reg));
2389 } else if (IS_CHERRYVIEW(dev)) {
2390 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2391 } else {
2392 *pipe = PORT_TO_PIPE(tmp);
2393 }
2394
2395 ret = true;
2396
2397out:
2398 intel_display_power_put(dev_priv, power_domain);
2399
2400 return ret;
2401}
2402
2403static void intel_dp_get_config(struct intel_encoder *encoder,
2404 struct intel_crtc_state *pipe_config)
2405{
2406 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2407 u32 tmp, flags = 0;
2408 struct drm_device *dev = encoder->base.dev;
2409 struct drm_i915_private *dev_priv = dev->dev_private;
2410 enum port port = dp_to_dig_port(intel_dp)->port;
2411 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2412 int dotclock;
2413
2414 tmp = I915_READ(intel_dp->output_reg);
2415
2416 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2417
2418 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2419 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2420
2421 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2422 flags |= DRM_MODE_FLAG_PHSYNC;
2423 else
2424 flags |= DRM_MODE_FLAG_NHSYNC;
2425
2426 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2427 flags |= DRM_MODE_FLAG_PVSYNC;
2428 else
2429 flags |= DRM_MODE_FLAG_NVSYNC;
2430 } else {
2431 if (tmp & DP_SYNC_HS_HIGH)
2432 flags |= DRM_MODE_FLAG_PHSYNC;
2433 else
2434 flags |= DRM_MODE_FLAG_NHSYNC;
2435
2436 if (tmp & DP_SYNC_VS_HIGH)
2437 flags |= DRM_MODE_FLAG_PVSYNC;
2438 else
2439 flags |= DRM_MODE_FLAG_NVSYNC;
2440 }
2441
2442 pipe_config->base.adjusted_mode.flags |= flags;
2443
2444 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2445 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
2446 pipe_config->limited_color_range = true;
2447
2448 pipe_config->has_dp_encoder = true;
2449
2450 pipe_config->lane_count =
2451 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2452
2453 intel_dp_get_m_n(crtc, pipe_config);
2454
2455 if (port == PORT_A) {
2456 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2457 pipe_config->port_clock = 162000;
2458 else
2459 pipe_config->port_clock = 270000;
2460 }
2461
2462 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2463 &pipe_config->dp_m_n);
2464
2465 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2466 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2467
2468 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2469
2470 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2471 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2472 /*
2473 * This is a big fat ugly hack.
2474 *
2475 * Some machines in UEFI boot mode provide us a VBT that has 18
2476 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2477 * unknown we fail to light up. Yet the same BIOS boots up with
2478 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2479 * max, not what it tells us to use.
2480 *
2481 * Note: This will still be broken if the eDP panel is not lit
2482 * up by the BIOS, and thus we can't get the mode at module
2483 * load.
2484 */
2485 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2486 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2487 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2488 }
2489}
2490
2491static void intel_disable_dp(struct intel_encoder *encoder)
2492{
2493 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2494 struct drm_device *dev = encoder->base.dev;
2495 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2496
2497 if (crtc->config->has_audio)
2498 intel_audio_codec_disable(encoder);
2499
2500 if (HAS_PSR(dev) && !HAS_DDI(dev))
2501 intel_psr_disable(intel_dp);
2502
2503 /* Make sure the panel is off before trying to change the mode. But also
2504 * ensure that we have vdd while we switch off the panel. */
2505 intel_edp_panel_vdd_on(intel_dp);
2506 intel_edp_backlight_off(intel_dp);
2507 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2508 intel_edp_panel_off(intel_dp);
2509
2510 /* disable the port before the pipe on g4x */
2511 if (INTEL_INFO(dev)->gen < 5)
2512 intel_dp_link_down(intel_dp);
2513}
2514
2515static void ilk_post_disable_dp(struct intel_encoder *encoder)
2516{
2517 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2518 enum port port = dp_to_dig_port(intel_dp)->port;
2519
2520 intel_dp_link_down(intel_dp);
2521
2522 /* Only ilk+ has port A */
2523 if (port == PORT_A)
2524 ironlake_edp_pll_off(intel_dp);
2525}
2526
2527static void vlv_post_disable_dp(struct intel_encoder *encoder)
2528{
2529 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2530
2531 intel_dp_link_down(intel_dp);
2532}
2533
2534static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2535 bool reset)
2536{
2537 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2538 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2539 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2540 enum pipe pipe = crtc->pipe;
2541 uint32_t val;
2542
2543 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2544 if (reset)
2545 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2546 else
2547 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2548 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2549
2550 if (crtc->config->lane_count > 2) {
2551 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2552 if (reset)
2553 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2554 else
2555 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2556 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2557 }
2558
2559 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2560 val |= CHV_PCS_REQ_SOFTRESET_EN;
2561 if (reset)
2562 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2563 else
2564 val |= DPIO_PCS_CLK_SOFT_RESET;
2565 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2566
2567 if (crtc->config->lane_count > 2) {
2568 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2569 val |= CHV_PCS_REQ_SOFTRESET_EN;
2570 if (reset)
2571 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2572 else
2573 val |= DPIO_PCS_CLK_SOFT_RESET;
2574 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2575 }
2576}
2577
2578static void chv_post_disable_dp(struct intel_encoder *encoder)
2579{
2580 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2581 struct drm_device *dev = encoder->base.dev;
2582 struct drm_i915_private *dev_priv = dev->dev_private;
2583
2584 intel_dp_link_down(intel_dp);
2585
2586 mutex_lock(&dev_priv->sb_lock);
2587
2588 /* Assert data lane reset */
2589 chv_data_lane_soft_reset(encoder, true);
2590
2591 mutex_unlock(&dev_priv->sb_lock);
2592}
2593
2594static void
2595_intel_dp_set_link_train(struct intel_dp *intel_dp,
2596 uint32_t *DP,
2597 uint8_t dp_train_pat)
2598{
2599 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2600 struct drm_device *dev = intel_dig_port->base.base.dev;
2601 struct drm_i915_private *dev_priv = dev->dev_private;
2602 enum port port = intel_dig_port->port;
2603
2604 if (HAS_DDI(dev)) {
2605 uint32_t temp = I915_READ(DP_TP_CTL(port));
2606
2607 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2608 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2609 else
2610 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2611
2612 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2613 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2614 case DP_TRAINING_PATTERN_DISABLE:
2615 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2616
2617 break;
2618 case DP_TRAINING_PATTERN_1:
2619 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2620 break;
2621 case DP_TRAINING_PATTERN_2:
2622 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2623 break;
2624 case DP_TRAINING_PATTERN_3:
2625 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2626 break;
2627 }
2628 I915_WRITE(DP_TP_CTL(port), temp);
2629
2630 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2631 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2632 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2633
2634 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2635 case DP_TRAINING_PATTERN_DISABLE:
2636 *DP |= DP_LINK_TRAIN_OFF_CPT;
2637 break;
2638 case DP_TRAINING_PATTERN_1:
2639 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2640 break;
2641 case DP_TRAINING_PATTERN_2:
2642 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2643 break;
2644 case DP_TRAINING_PATTERN_3:
2645 DRM_ERROR("DP training pattern 3 not supported\n");
2646 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2647 break;
2648 }
2649
2650 } else {
2651 if (IS_CHERRYVIEW(dev))
2652 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2653 else
2654 *DP &= ~DP_LINK_TRAIN_MASK;
2655
2656 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2657 case DP_TRAINING_PATTERN_DISABLE:
2658 *DP |= DP_LINK_TRAIN_OFF;
2659 break;
2660 case DP_TRAINING_PATTERN_1:
2661 *DP |= DP_LINK_TRAIN_PAT_1;
2662 break;
2663 case DP_TRAINING_PATTERN_2:
2664 *DP |= DP_LINK_TRAIN_PAT_2;
2665 break;
2666 case DP_TRAINING_PATTERN_3:
2667 if (IS_CHERRYVIEW(dev)) {
2668 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2669 } else {
2670 DRM_ERROR("DP training pattern 3 not supported\n");
2671 *DP |= DP_LINK_TRAIN_PAT_2;
2672 }
2673 break;
2674 }
2675 }
2676}
2677
2678static void intel_dp_enable_port(struct intel_dp *intel_dp)
2679{
2680 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2681 struct drm_i915_private *dev_priv = dev->dev_private;
2682 struct intel_crtc *crtc =
2683 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2684
2685 /* enable with pattern 1 (as per spec) */
2686 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2687 DP_TRAINING_PATTERN_1);
2688
2689 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2690 POSTING_READ(intel_dp->output_reg);
2691
2692 /*
2693 * Magic for VLV/CHV. We _must_ first set up the register
2694 * without actually enabling the port, and then do another
2695 * write to enable the port. Otherwise link training will
2696 * fail when the power sequencer is freshly used for this port.
2697 */
2698 intel_dp->DP |= DP_PORT_EN;
2699 if (crtc->config->has_audio)
2700 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2701
2702 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2703 POSTING_READ(intel_dp->output_reg);
2704}
2705
2706static void intel_enable_dp(struct intel_encoder *encoder)
2707{
2708 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2709 struct drm_device *dev = encoder->base.dev;
2710 struct drm_i915_private *dev_priv = dev->dev_private;
2711 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2712 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2713 enum port port = dp_to_dig_port(intel_dp)->port;
2714 enum pipe pipe = crtc->pipe;
2715
2716 if (WARN_ON(dp_reg & DP_PORT_EN))
2717 return;
2718
2719 pps_lock(intel_dp);
2720
2721 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
2722 vlv_init_panel_power_sequencer(intel_dp);
2723
2724 /*
2725 * We get an occasional spurious underrun between the port
2726 * enable and vdd enable, when enabling port A eDP.
2727 *
2728 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2729 */
2730 if (port == PORT_A)
2731 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2732
2733 intel_dp_enable_port(intel_dp);
2734
2735 if (port == PORT_A && IS_GEN5(dev_priv)) {
2736 /*
2737 * Underrun reporting for the other pipe was disabled in
2738 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2739 * enabled, so it's now safe to re-enable underrun reporting.
2740 */
2741 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2742 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2743 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2744 }
2745
2746 edp_panel_vdd_on(intel_dp);
2747 edp_panel_on(intel_dp);
2748 edp_panel_vdd_off(intel_dp, true);
2749
2750 if (port == PORT_A)
2751 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2752
2753 pps_unlock(intel_dp);
2754
2755 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
2756 unsigned int lane_mask = 0x0;
2757
2758 if (IS_CHERRYVIEW(dev))
2759 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2760
2761 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2762 lane_mask);
2763 }
2764
2765 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2766 intel_dp_start_link_train(intel_dp);
2767 intel_dp_stop_link_train(intel_dp);
2768
2769 if (crtc->config->has_audio) {
2770 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2771 pipe_name(pipe));
2772 intel_audio_codec_enable(encoder);
2773 }
2774}
2775
2776static void g4x_enable_dp(struct intel_encoder *encoder)
2777{
2778 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2779
2780 intel_enable_dp(encoder);
2781 intel_edp_backlight_on(intel_dp);
2782}
2783
2784static void vlv_enable_dp(struct intel_encoder *encoder)
2785{
2786 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2787
2788 intel_edp_backlight_on(intel_dp);
2789 intel_psr_enable(intel_dp);
2790}
2791
2792static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2793{
2794 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2795 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2796 enum port port = dp_to_dig_port(intel_dp)->port;
2797 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2798
2799 intel_dp_prepare(encoder);
2800
2801 if (port == PORT_A && IS_GEN5(dev_priv)) {
2802 /*
2803 * We get FIFO underruns on the other pipe when
2804 * enabling the CPU eDP PLL, and when enabling CPU
2805 * eDP port. We could potentially avoid the PLL
2806 * underrun with a vblank wait just prior to enabling
2807 * the PLL, but that doesn't appear to help the port
2808 * enable case. Just sweep it all under the rug.
2809 */
2810 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2811 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2812 }
2813
2814 /* Only ilk+ has port A */
2815 if (port == PORT_A)
2816 ironlake_edp_pll_on(intel_dp);
2817}
2818
2819static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2820{
2821 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2822 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2823 enum pipe pipe = intel_dp->pps_pipe;
2824 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2825
2826 edp_panel_vdd_off_sync(intel_dp);
2827
2828 /*
2829	 * VLV seems to get confused when multiple power sequencers
2830	 * have the same port selected (even if only one has power/vdd
2831	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2832	 * CHV, on the other hand, doesn't seem to mind having the same
2833	 * port selected in multiple power sequencers, but let's always
2834	 * clear the port select when logically disconnecting a power
2835	 * sequencer from a port.
2836 */
2837 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2838 pipe_name(pipe), port_name(intel_dig_port->port));
2839 I915_WRITE(pp_on_reg, 0);
2840 POSTING_READ(pp_on_reg);
2841
2842 intel_dp->pps_pipe = INVALID_PIPE;
2843}
2844
2845static void vlv_steal_power_sequencer(struct drm_device *dev,
2846 enum pipe pipe)
2847{
2848 struct drm_i915_private *dev_priv = dev->dev_private;
2849 struct intel_encoder *encoder;
2850
2851 lockdep_assert_held(&dev_priv->pps_mutex);
2852
2853 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2854 return;
2855
2856 for_each_intel_encoder(dev, encoder) {
2857 struct intel_dp *intel_dp;
2858 enum port port;
2859
2860 if (encoder->type != INTEL_OUTPUT_EDP)
2861 continue;
2862
2863 intel_dp = enc_to_intel_dp(&encoder->base);
2864 port = dp_to_dig_port(intel_dp)->port;
2865
2866 if (intel_dp->pps_pipe != pipe)
2867 continue;
2868
2869 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2870 pipe_name(pipe), port_name(port));
2871
2872 WARN(encoder->base.crtc,
2873 "stealing pipe %c power sequencer from active eDP port %c\n",
2874 pipe_name(pipe), port_name(port));
2875
2876 /* make sure vdd is off before we steal it */
2877 vlv_detach_power_sequencer(intel_dp);
2878 }
2879}
2880
2881static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2882{
2883 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2884 struct intel_encoder *encoder = &intel_dig_port->base;
2885 struct drm_device *dev = encoder->base.dev;
2886 struct drm_i915_private *dev_priv = dev->dev_private;
2887 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2888
2889 lockdep_assert_held(&dev_priv->pps_mutex);
2890
2891 if (!is_edp(intel_dp))
2892 return;
2893
2894 if (intel_dp->pps_pipe == crtc->pipe)
2895 return;
2896
2897 /*
2898 * If another power sequencer was being used on this
2899 * port previously make sure to turn off vdd there while
2900 * we still have control of it.
2901 */
2902 if (intel_dp->pps_pipe != INVALID_PIPE)
2903 vlv_detach_power_sequencer(intel_dp);
2904
2905 /*
2906	 * We may be stealing the power sequencer
2907	 * from another port.
2908 */
2909 vlv_steal_power_sequencer(dev, crtc->pipe);
2910
2911 /* now it's all ours */
2912 intel_dp->pps_pipe = crtc->pipe;
2913
2914 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2915 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2916
2917 /* init power sequencer on this pipe and port */
2918 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2919 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2920}
2921
2922static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2923{
2924 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2925 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2926 struct drm_device *dev = encoder->base.dev;
2927 struct drm_i915_private *dev_priv = dev->dev_private;
2928 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2929 enum dpio_channel port = vlv_dport_to_channel(dport);
2930 int pipe = intel_crtc->pipe;
2931 u32 val;
2932
2933 mutex_lock(&dev_priv->sb_lock);
2934
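	/* Enable clock channels for this port */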
2935 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2936 val = 0;
2937 if (pipe)
2938 val |= (1<<21);
2939 else
2940 val &= ~(1<<21);
2941 val |= 0x001000c4;
2942 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
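	/* Program lane clock */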
2943 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2944 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2945
2946 mutex_unlock(&dev_priv->sb_lock);
2947
2948 intel_enable_dp(encoder);
2949}
2950
2951static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2952{
2953 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2954 struct drm_device *dev = encoder->base.dev;
2955 struct drm_i915_private *dev_priv = dev->dev_private;
2956 struct intel_crtc *intel_crtc =
2957 to_intel_crtc(encoder->base.crtc);
2958 enum dpio_channel port = vlv_dport_to_channel(dport);
2959 int pipe = intel_crtc->pipe;
2960
2961 intel_dp_prepare(encoder);
2962
2963 /* Program Tx lane resets to default */
2964 mutex_lock(&dev_priv->sb_lock);
2965 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2966 DPIO_PCS_TX_LANE2_RESET |
2967 DPIO_PCS_TX_LANE1_RESET);
2968 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2969 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2970 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2971 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2972 DPIO_PCS_CLK_SOFT_RESET);
2973
2974 /* Fix up inter-pair skew failure */
2975 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2976 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2977 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2978 mutex_unlock(&dev_priv->sb_lock);
2979}
2980
2981static void chv_pre_enable_dp(struct intel_encoder *encoder)
2982{
2983 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2984 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2985 struct drm_device *dev = encoder->base.dev;
2986 struct drm_i915_private *dev_priv = dev->dev_private;
2987 struct intel_crtc *intel_crtc =
2988 to_intel_crtc(encoder->base.crtc);
2989 enum dpio_channel ch = vlv_dport_to_channel(dport);
2990 int pipe = intel_crtc->pipe;
2991 int data, i, stagger;
2992 u32 val;
2993
2994 mutex_lock(&dev_priv->sb_lock);
2995
2996 /* allow hardware to manage TX FIFO reset source */
2997 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2998 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2999 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3000
3001 if (intel_crtc->config->lane_count > 2) {
3002 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3003 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3004 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3005 }
3006
	/* Program Tx lane latency optimal setting */
3008 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3009 /* Set the upar bit */
3010 if (intel_crtc->config->lane_count == 1)
3011 data = 0x0;
3012 else
3013 data = (i == 1) ? 0x0 : 0x1;
3014 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3015 data << DPIO_UPAR_SHIFT);
3016 }
3017
3018 /* Data lane stagger programming */
3019 if (intel_crtc->config->port_clock > 270000)
3020 stagger = 0x18;
3021 else if (intel_crtc->config->port_clock > 135000)
3022 stagger = 0xd;
3023 else if (intel_crtc->config->port_clock > 67500)
3024 stagger = 0x7;
3025 else if (intel_crtc->config->port_clock > 33750)
3026 stagger = 0x4;
3027 else
3028 stagger = 0x2;
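	/*
	 * Each threshold above is half the previous one, so faster
	 * link clocks get a proportionally larger stagger value.
	 */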
3029
3030 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3031 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3032 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3033
3034 if (intel_crtc->config->lane_count > 2) {
3035 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3036 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3037 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3038 }
3039
3040 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3041 DPIO_LANESTAGGER_STRAP(stagger) |
3042 DPIO_LANESTAGGER_STRAP_OVRD |
3043 DPIO_TX1_STAGGER_MASK(0x1f) |
3044 DPIO_TX1_STAGGER_MULT(6) |
3045 DPIO_TX2_STAGGER_MULT(0));
3046
3047 if (intel_crtc->config->lane_count > 2) {
3048 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3049 DPIO_LANESTAGGER_STRAP(stagger) |
3050 DPIO_LANESTAGGER_STRAP_OVRD |
3051 DPIO_TX1_STAGGER_MASK(0x1f) |
3052 DPIO_TX1_STAGGER_MULT(7) |
3053 DPIO_TX2_STAGGER_MULT(5));
3054 }
3055
3056 /* Deassert data lane reset */
3057 chv_data_lane_soft_reset(encoder, false);
3058
3059 mutex_unlock(&dev_priv->sb_lock);
3060
3061 intel_enable_dp(encoder);
3062
3063 /* Second common lane will stay alive on its own now */
3064 if (dport->release_cl2_override) {
3065 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3066 dport->release_cl2_override = false;
3067 }
3068}
3069
3070static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3071{
3072 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3073 struct drm_device *dev = encoder->base.dev;
3074 struct drm_i915_private *dev_priv = dev->dev_private;
3075 struct intel_crtc *intel_crtc =
3076 to_intel_crtc(encoder->base.crtc);
3077 enum dpio_channel ch = vlv_dport_to_channel(dport);
3078 enum pipe pipe = intel_crtc->pipe;
3079 unsigned int lane_mask =
3080 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
3081 u32 val;
3082
3083 intel_dp_prepare(encoder);
3084
3085 /*
3086 * Must trick the second common lane into life.
3087 * Otherwise we can't even access the PLL.
3088 */
3089 if (ch == DPIO_CH0 && pipe == PIPE_B)
3090 dport->release_cl2_override =
3091 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3092
3093 chv_phy_powergate_lanes(encoder, true, lane_mask);
3094
3095 mutex_lock(&dev_priv->sb_lock);
3096
3097 /* Assert data lane reset */
3098 chv_data_lane_soft_reset(encoder, true);
3099
3100 /* program left/right clock distribution */
3101 if (pipe != PIPE_B) {
3102 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3103 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3104 if (ch == DPIO_CH0)
3105 val |= CHV_BUFLEFTENA1_FORCE;
3106 if (ch == DPIO_CH1)
3107 val |= CHV_BUFRIGHTENA1_FORCE;
3108 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3109 } else {
3110 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3111 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3112 if (ch == DPIO_CH0)
3113 val |= CHV_BUFLEFTENA2_FORCE;
3114 if (ch == DPIO_CH1)
3115 val |= CHV_BUFRIGHTENA2_FORCE;
3116 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3117 }
3118
3119 /* program clock channel usage */
3120 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3121 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3122 if (pipe != PIPE_B)
3123 val &= ~CHV_PCS_USEDCLKCHANNEL;
3124 else
3125 val |= CHV_PCS_USEDCLKCHANNEL;
3126 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3127
3128 if (intel_crtc->config->lane_count > 2) {
3129 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3130 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3131 if (pipe != PIPE_B)
3132 val &= ~CHV_PCS_USEDCLKCHANNEL;
3133 else
3134 val |= CHV_PCS_USEDCLKCHANNEL;
3135 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3136 }
3137
	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
3143 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3144 if (pipe != PIPE_B)
3145 val &= ~CHV_CMN_USEDCLKCHANNEL;
3146 else
3147 val |= CHV_CMN_USEDCLKCHANNEL;
3148 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3149
3150 mutex_unlock(&dev_priv->sb_lock);
3151}
3152
3153static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3154{
3155 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3156 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3157 u32 val;
3158
3159 mutex_lock(&dev_priv->sb_lock);
3160
3161 /* disable left/right clock distribution */
3162 if (pipe != PIPE_B) {
3163 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3164 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3165 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3166 } else {
3167 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3168 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3169 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3170 }
3171
3172 mutex_unlock(&dev_priv->sb_lock);
3173
	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_phy_powergate_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of the power down bits is
	 * after this.
	 */
3183 chv_phy_powergate_lanes(encoder, false, 0x0);
3184}
3185
3186/*
3187 * Native read with retry for link status and receiver capability reads for
3188 * cases where the sink may still be asleep.
3189 *
3190 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3191 * supposed to retry 3 times per the spec.
3192 */
3193static ssize_t
3194intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3195 void *buffer, size_t size)
3196{
3197 ssize_t ret;
3198 int i;
3199
	/*
	 * Sometimes we just get the same incorrect byte repeated
	 * over the entire buffer. Doing just one throw-away read
	 * initially seems to "solve" it.
	 */
3205 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3206
3207 for (i = 0; i < 3; i++) {
3208 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3209 if (ret == size)
3210 return ret;
3211 msleep(1);
3212 }
3213
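	/* all retries exhausted; return the last short read or error */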
3214 return ret;
3215}
3216
3217/*
3218 * Fetch AUX CH registers 0x202 - 0x207 which contain
3219 * link status information
3220 */
3221bool
3222intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3223{
3224 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3225 DP_LANE0_1_STATUS,
3226 link_status,
3227 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3228}
3229
3230/* These are source-specific values. */
3231uint8_t
3232intel_dp_voltage_max(struct intel_dp *intel_dp)
3233{
3234 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3235 struct drm_i915_private *dev_priv = dev->dev_private;
3236 enum port port = dp_to_dig_port(intel_dp)->port;
3237
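	/*
	 * Highest voltage swing the source can drive on this
	 * platform/port; link training clamps sink requests to this.
	 */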
3238 if (IS_BROXTON(dev))
3239 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3240 else if (INTEL_INFO(dev)->gen >= 9) {
3241 if (dev_priv->edp_low_vswing && port == PORT_A)
3242 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3243 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3244 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3245 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3246 else if (IS_GEN7(dev) && port == PORT_A)
3247 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3248 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3249 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3250 else
3251 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3252}
3253
3254uint8_t
3255intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3256{
3257 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3258 enum port port = dp_to_dig_port(intel_dp)->port;
3259
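	/*
	 * Max pre-emphasis falls as voltage swing rises: the tables
	 * below reflect what each TX can drive at a given swing level.
	 */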
3260 if (INTEL_INFO(dev)->gen >= 9) {
3261 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3263 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3265 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3267 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3270 default:
3271 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3272 }
3273 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3274 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3276 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3278 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3280 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3282 default:
3283 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3284 }
3285 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3286 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3288 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3290 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3292 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3294 default:
3295 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3296 }
3297 } else if (IS_GEN7(dev) && port == PORT_A) {
3298 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3300 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3303 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3304 default:
3305 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3306 }
3307 } else {
3308 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3310 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3312 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3314 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3316 default:
3317 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3318 }
3319 }
3320}
3321
3322static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3323{
3324 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3325 struct drm_i915_private *dev_priv = dev->dev_private;
3326 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3327 struct intel_crtc *intel_crtc =
3328 to_intel_crtc(dport->base.base.crtc);
3329 unsigned long demph_reg_value, preemph_reg_value,
3330 uniqtranscale_reg_value;
3331 uint8_t train_set = intel_dp->train_set[0];
3332 enum dpio_channel port = vlv_dport_to_channel(dport);
3333 int pipe = intel_crtc->pipe;
3334
3335 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3336 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3337 preemph_reg_value = 0x0004000;
3338 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3340 demph_reg_value = 0x2B405555;
3341 uniqtranscale_reg_value = 0x552AB83A;
3342 break;
3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3344 demph_reg_value = 0x2B404040;
3345 uniqtranscale_reg_value = 0x5548B83A;
3346 break;
3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3348 demph_reg_value = 0x2B245555;
3349 uniqtranscale_reg_value = 0x5560B83A;
3350 break;
3351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3352 demph_reg_value = 0x2B405555;
3353 uniqtranscale_reg_value = 0x5598DA3A;
3354 break;
3355 default:
3356 return 0;
3357 }
3358 break;
3359 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3360 preemph_reg_value = 0x0002000;
3361 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3363 demph_reg_value = 0x2B404040;
3364 uniqtranscale_reg_value = 0x5552B83A;
3365 break;
3366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3367 demph_reg_value = 0x2B404848;
3368 uniqtranscale_reg_value = 0x5580B83A;
3369 break;
3370 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3371 demph_reg_value = 0x2B404040;
3372 uniqtranscale_reg_value = 0x55ADDA3A;
3373 break;
3374 default:
3375 return 0;
3376 }
3377 break;
3378 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3379 preemph_reg_value = 0x0000000;
3380 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3381 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3382 demph_reg_value = 0x2B305555;
3383 uniqtranscale_reg_value = 0x5570B83A;
3384 break;
3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3386 demph_reg_value = 0x2B2B4040;
3387 uniqtranscale_reg_value = 0x55ADDA3A;
3388 break;
3389 default:
3390 return 0;
3391 }
3392 break;
3393 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3394 preemph_reg_value = 0x0006000;
3395 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3397 demph_reg_value = 0x1B405555;
3398 uniqtranscale_reg_value = 0x55ADDA3A;
3399 break;
3400 default:
3401 return 0;
3402 }
3403 break;
3404 default:
3405 return 0;
3406 }
3407
3408 mutex_lock(&dev_priv->sb_lock);
3409 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3410 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3411 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3412 uniqtranscale_reg_value);
3413 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3414 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3415 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3416 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3417 mutex_unlock(&dev_priv->sb_lock);
3418
3419 return 0;
3420}
3421
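/*
 * The unique transition scale is only needed for the maximum
 * (full swing, no pre-emphasis) drive setting; see the FIXME
 * about the 1200 mV case in chv_signal_levels().
 */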
3422static bool chv_need_uniq_trans_scale(uint8_t train_set)
3423{
3424 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3425 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3426}
3427
3428static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3429{
3430 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3431 struct drm_i915_private *dev_priv = dev->dev_private;
3432 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3433 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3434 u32 deemph_reg_value, margin_reg_value, val;
3435 uint8_t train_set = intel_dp->train_set[0];
3436 enum dpio_channel ch = vlv_dport_to_channel(dport);
3437 enum pipe pipe = intel_crtc->pipe;
3438 int i;
3439
3440 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3441 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3442 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3444 deemph_reg_value = 128;
3445 margin_reg_value = 52;
3446 break;
3447 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3448 deemph_reg_value = 128;
3449 margin_reg_value = 77;
3450 break;
3451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3452 deemph_reg_value = 128;
3453 margin_reg_value = 102;
3454 break;
3455 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3456 deemph_reg_value = 128;
3457 margin_reg_value = 154;
3458 /* FIXME extra to set for 1200 */
3459 break;
3460 default:
3461 return 0;
3462 }
3463 break;
3464 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3465 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3467 deemph_reg_value = 85;
3468 margin_reg_value = 78;
3469 break;
3470 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3471 deemph_reg_value = 85;
3472 margin_reg_value = 116;
3473 break;
3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3475 deemph_reg_value = 85;
3476 margin_reg_value = 154;
3477 break;
3478 default:
3479 return 0;
3480 }
3481 break;
3482 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3483 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3485 deemph_reg_value = 64;
3486 margin_reg_value = 104;
3487 break;
3488 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3489 deemph_reg_value = 64;
3490 margin_reg_value = 154;
3491 break;
3492 default:
3493 return 0;
3494 }
3495 break;
3496 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3497 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3499 deemph_reg_value = 43;
3500 margin_reg_value = 154;
3501 break;
3502 default:
3503 return 0;
3504 }
3505 break;
3506 default:
3507 return 0;
3508 }
3509
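	/*
	 * Sequence: clear the swing calc trigger, program the per-lane
	 * deemph/margin values, then re-arm the calculation so the PHY
	 * picks up the new settings.
	 */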
3510 mutex_lock(&dev_priv->sb_lock);
3511
3512 /* Clear calc init */
3513 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3514 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3515 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3516 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3517 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3518
3519 if (intel_crtc->config->lane_count > 2) {
3520 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3521 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3522 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3523 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3524 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3525 }
3526
3527 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3528 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3529 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3530 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3531
3532 if (intel_crtc->config->lane_count > 2) {
3533 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3534 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3535 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3536 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3537 }
3538
3539 /* Program swing deemph */
3540 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3541 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3542 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3543 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3544 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3545 }
3546
3547 /* Program swing margin */
3548 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3549 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3550
3551 val &= ~DPIO_SWING_MARGIN000_MASK;
3552 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3553
3554 /*
3555 * Supposedly this value shouldn't matter when unique transition
3556 * scale is disabled, but in fact it does matter. Let's just
3557 * always program the same value and hope it's OK.
3558 */
3559 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3560 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3561
3562 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3563 }
3564
3565 /*
3566 * The document said it needs to set bit 27 for ch0 and bit 26
3567 * for ch1. Might be a typo in the doc.
3568 * For now, for this unique transition scale selection, set bit
3569 * 27 for ch0 and ch1.
3570 */
3571 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3572 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3573 if (chv_need_uniq_trans_scale(train_set))
3574 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3575 else
3576 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3577 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3578 }
3579
3580 /* Start swing calculation */
3581 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3582 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3583 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3584
3585 if (intel_crtc->config->lane_count > 2) {
3586 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3587 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3588 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3589 }
3590
3591 mutex_unlock(&dev_priv->sb_lock);
3592
3593 return 0;
3594}
3595
3596static uint32_t
3597gen4_signal_levels(uint8_t train_set)
3598{
3599 uint32_t signal_levels = 0;
3600
3601 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3602 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3603 default:
3604 signal_levels |= DP_VOLTAGE_0_4;
3605 break;
3606 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3607 signal_levels |= DP_VOLTAGE_0_6;
3608 break;
3609 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3610 signal_levels |= DP_VOLTAGE_0_8;
3611 break;
3612 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3613 signal_levels |= DP_VOLTAGE_1_2;
3614 break;
3615 }
3616 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3617 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3618 default:
3619 signal_levels |= DP_PRE_EMPHASIS_0;
3620 break;
3621 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3622 signal_levels |= DP_PRE_EMPHASIS_3_5;
3623 break;
3624 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3625 signal_levels |= DP_PRE_EMPHASIS_6;
3626 break;
3627 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3628 signal_levels |= DP_PRE_EMPHASIS_9_5;
3629 break;
3630 }
3631 return signal_levels;
3632}
3633
3634/* Gen6's DP voltage swing and pre-emphasis control */
3635static uint32_t
3636gen6_edp_signal_levels(uint8_t train_set)
3637{
3638 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3639 DP_TRAIN_PRE_EMPHASIS_MASK);
3640 switch (signal_levels) {
3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3642 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3643 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3645 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3646 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3648 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3649 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3651 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3652 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3654 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3655 default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
			      signal_levels);
3658 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3659 }
3660}
3661
3662/* Gen7's DP voltage swing and pre-emphasis control */
3663static uint32_t
3664gen7_edp_signal_levels(uint8_t train_set)
3665{
3666 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3667 DP_TRAIN_PRE_EMPHASIS_MASK);
3668 switch (signal_levels) {
3669 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3670 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3671 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3672 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3673 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3674 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3675
3676 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3677 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3678 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3679 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3680
3681 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3682 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3683 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3684 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3685
3686 default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
			      signal_levels);
3689 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3690 }
3691}
3692
3693void
3694intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3695{
3696 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3697 enum port port = intel_dig_port->port;
3698 struct drm_device *dev = intel_dig_port->base.base.dev;
3699 struct drm_i915_private *dev_priv = to_i915(dev);
3700 uint32_t signal_levels, mask = 0;
3701 uint8_t train_set = intel_dp->train_set[0];
3702
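	/* pick the platform-specific encoding of vswing/pre-emphasis */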
3703 if (HAS_DDI(dev)) {
3704 signal_levels = ddi_signal_levels(intel_dp);
3705
3706 if (IS_BROXTON(dev))
3707 signal_levels = 0;
3708 else
3709 mask = DDI_BUF_EMP_MASK;
3710 } else if (IS_CHERRYVIEW(dev)) {
3711 signal_levels = chv_signal_levels(intel_dp);
3712 } else if (IS_VALLEYVIEW(dev)) {
3713 signal_levels = vlv_signal_levels(intel_dp);
3714 } else if (IS_GEN7(dev) && port == PORT_A) {
3715 signal_levels = gen7_edp_signal_levels(train_set);
3716 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3717 } else if (IS_GEN6(dev) && port == PORT_A) {
3718 signal_levels = gen6_edp_signal_levels(train_set);
3719 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3720 } else {
3721 signal_levels = gen4_signal_levels(train_set);
3722 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3723 }
3724
3725 if (mask)
3726 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3727
3728 DRM_DEBUG_KMS("Using vswing level %d\n",
3729 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3730 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3731 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3732 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3733
3734 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3735
3736 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3737 POSTING_READ(intel_dp->output_reg);
3738}
3739
3740void
3741intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3742 uint8_t dp_train_pat)
3743{
3744 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3745 struct drm_i915_private *dev_priv =
3746 to_i915(intel_dig_port->base.base.dev);
3747
3748 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3749
3750 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3751 POSTING_READ(intel_dp->output_reg);
3752}
3753
3754void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3755{
3756 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3757 struct drm_device *dev = intel_dig_port->base.base.dev;
3758 struct drm_i915_private *dev_priv = dev->dev_private;
3759 enum port port = intel_dig_port->port;
3760 uint32_t val;
3761
3762 if (!HAS_DDI(dev))
3763 return;
3764
3765 val = I915_READ(DP_TP_CTL(port));
3766 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3767 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3768 I915_WRITE(DP_TP_CTL(port), val);
3769
	/*
	 * On PORT_A we can only have eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is a requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
3777 if (port == PORT_A)
3778 return;
3779
3780 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3781 1))
3782 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3783}
3784
3785static void
3786intel_dp_link_down(struct intel_dp *intel_dp)
3787{
3788 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3789 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3790 enum port port = intel_dig_port->port;
3791 struct drm_device *dev = intel_dig_port->base.base.dev;
3792 struct drm_i915_private *dev_priv = dev->dev_private;
3793 uint32_t DP = intel_dp->DP;
3794
3795 if (WARN_ON(HAS_DDI(dev)))
3796 return;
3797
3798 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3799 return;
3800
3801 DRM_DEBUG_KMS("\n");
3802
3803 if ((IS_GEN7(dev) && port == PORT_A) ||
3804 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3805 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3806 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3807 } else {
3808 if (IS_CHERRYVIEW(dev))
3809 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3810 else
3811 DP &= ~DP_LINK_TRAIN_MASK;
3812 DP |= DP_LINK_TRAIN_PAT_IDLE;
3813 }
3814 I915_WRITE(intel_dp->output_reg, DP);
3815 POSTING_READ(intel_dp->output_reg);
3816
3817 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3818 I915_WRITE(intel_dp->output_reg, DP);
3819 POSTING_READ(intel_dp->output_reg);
3820
3821 /*
3822 * HW workaround for IBX, we need to move the port
3823 * to transcoder A after disabling it to allow the
3824 * matching HDMI port to be enabled on transcoder A.
3825 */
3826 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3827 /*
3828 * We get CPU/PCH FIFO underruns on the other pipe when
3829 * doing the workaround. Sweep them under the rug.
3830 */
3831 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3832 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3833
3834 /* always enable with pattern 1 (as per spec) */
3835 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3836 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3837 I915_WRITE(intel_dp->output_reg, DP);
3838 POSTING_READ(intel_dp->output_reg);
3839
3840 DP &= ~DP_PORT_EN;
3841 I915_WRITE(intel_dp->output_reg, DP);
3842 POSTING_READ(intel_dp->output_reg);
3843
3844 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3845 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3846 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3847 }
3848
3849 msleep(intel_dp->panel_power_down_delay);
3850
3851 intel_dp->DP = DP;
3852}
3853
3854static bool
3855intel_dp_get_dpcd(struct intel_dp *intel_dp)
3856{
3857 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3858 struct drm_device *dev = dig_port->base.base.dev;
3859 struct drm_i915_private *dev_priv = dev->dev_private;
3860 uint8_t rev;
3861
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
3864 return false; /* aux transfer failed */
3865
3866 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3867
3868 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3869 return false; /* DPCD not present */
3870
3871 /* Check if the panel supports PSR */
3872 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3873 if (is_edp(intel_dp)) {
3874 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3875 intel_dp->psr_dpcd,
3876 sizeof(intel_dp->psr_dpcd));
3877 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3878 dev_priv->psr.sink_support = true;
3879 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3880 }
3881
3882 if (INTEL_INFO(dev)->gen >= 9 &&
3883 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3884 uint8_t frame_sync_cap;
3885
3886 dev_priv->psr.sink_support = true;
3887 intel_dp_dpcd_read_wake(&intel_dp->aux,
3888 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3889 &frame_sync_cap, 1);
3890 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3891 /* PSR2 needs frame sync as well */
3892 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink\n",
				      dev_priv->psr.psr2_support ? "supported" : "not supported");
3895 }
3896 }
3897
3898 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3899 yesno(intel_dp_source_supports_hbr2(intel_dp)),
3900 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3901
3902 /* Intermediate frequency support */
3903 if (is_edp(intel_dp) &&
3904 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3905 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDP 1.4 or higher */
3907 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3908 int i;
3909
3910 intel_dp_dpcd_read_wake(&intel_dp->aux,
3911 DP_SUPPORTED_LINK_RATES,
3912 sink_rates,
3913 sizeof(sink_rates));
3914
3915 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3916 int val = le16_to_cpu(sink_rates[i]);
3917
3918 if (val == 0)
3919 break;
3920
			/*
			 * The DPCD value is the link rate in 200 kHz units;
			 * * 200 gives the bit rate in kHz and / 10 converts
			 * it to the link clock (10 bits per symbol), e.g.
			 * HBR2: 27000 * 200 / 10 = 540000.
			 */
3922 intel_dp->sink_rates[i] = (val * 200) / 10;
3923 }
3924 intel_dp->num_sink_rates = i;
3925 }
3926
3927 intel_dp_print_rates(intel_dp);
3928
3929 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3930 DP_DWN_STRM_PORT_PRESENT))
3931 return true; /* native DP sink */
3932
3933 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3934 return true; /* no per-port downstream info */
3935
3936 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3937 intel_dp->downstream_ports,
3938 DP_MAX_DOWNSTREAM_PORTS) < 0)
3939 return false; /* downstream port status fetch failed */
3940
3941 return true;
3942}
3943
3944static void
3945intel_dp_probe_oui(struct intel_dp *intel_dp)
3946{
3947 u8 buf[3];
3948
3949 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3950 return;
3951
3952 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3953 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3954 buf[0], buf[1], buf[2]);
3955
3956 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3957 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3958 buf[0], buf[1], buf[2]);
3959}
3960
3961static bool
3962intel_dp_probe_mst(struct intel_dp *intel_dp)
3963{
3964 u8 buf[1];
3965
3966 if (!intel_dp->can_mst)
3967 return false;
3968
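	/* MST was added in DP 1.2; older sinks can't be MST capable */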
3969 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3970 return false;
3971
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
3973 if (buf[0] & DP_MST_CAP) {
3974 DRM_DEBUG_KMS("Sink is MST capable\n");
3975 intel_dp->is_mst = true;
3976 } else {
3977 DRM_DEBUG_KMS("Sink is not MST capable\n");
3978 intel_dp->is_mst = false;
3979 }
3980 }
3981
3982 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3983 return intel_dp->is_mst;
3984}
3985
3986static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3987{
3988 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3989 struct drm_device *dev = dig_port->base.base.dev;
3990 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3991 u8 buf;
3992 int ret = 0;
3993 int count = 0;
3994 int attempts = 10;
3995
3996 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3997 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3998 ret = -EIO;
3999 goto out;
4000 }
4001
4002 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4003 buf & ~DP_TEST_SINK_START) < 0) {
4004 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4005 ret = -EIO;
4006 goto out;
4007 }
4008
4009 do {
4010 intel_wait_for_vblank(dev, intel_crtc->pipe);
4011
4012 if (drm_dp_dpcd_readb(&intel_dp->aux,
4013 DP_TEST_SINK_MISC, &buf) < 0) {
4014 ret = -EIO;
4015 goto out;
4016 }
4017 count = buf & DP_TEST_COUNT_MASK;
4018 } while (--attempts && count);
4019
4020 if (attempts == 0) {
4021 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
4022 ret = -ETIMEDOUT;
4023 }
4024
4025 out:
4026 hsw_enable_ips(intel_crtc);
4027 return ret;
4028}
4029
4030static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4031{
4032 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4033 struct drm_device *dev = dig_port->base.base.dev;
4034 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4035 u8 buf;
4036 int ret;
4037
4038 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4039 return -EIO;
4040
4041 if (!(buf & DP_TEST_CRC_SUPPORTED))
4042 return -ENOTTY;
4043
4044 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4045 return -EIO;
4046
4047 if (buf & DP_TEST_SINK_START) {
4048 ret = intel_dp_sink_crc_stop(intel_dp);
4049 if (ret)
4050 return ret;
4051 }
4052
4053 hsw_disable_ips(intel_crtc);
4054
4055 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4056 buf | DP_TEST_SINK_START) < 0) {
4057 hsw_enable_ips(intel_crtc);
4058 return -EIO;
4059 }
4060
4061 intel_wait_for_vblank(dev, intel_crtc->pipe);
4062 return 0;
4063}
4064
4065int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4066{
4067 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4068 struct drm_device *dev = dig_port->base.base.dev;
4069 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4070 u8 buf;
4071 int count, ret;
4072 int attempts = 6;
4073
4074 ret = intel_dp_sink_crc_start(intel_dp);
4075 if (ret)
4076 return ret;
4077
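	/* the sink computes TEST_CRC per frame, so poll across vblanks */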
4078 do {
4079 intel_wait_for_vblank(dev, intel_crtc->pipe);
4080
4081 if (drm_dp_dpcd_readb(&intel_dp->aux,
4082 DP_TEST_SINK_MISC, &buf) < 0) {
4083 ret = -EIO;
4084 goto stop;
4085 }
4086 count = buf & DP_TEST_COUNT_MASK;
4087
4088 } while (--attempts && count == 0);
4089
4090 if (attempts == 0) {
4091 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4092 ret = -ETIMEDOUT;
4093 goto stop;
4094 }
4095
4096 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4097 ret = -EIO;
4098 goto stop;
4099 }
4100
4101stop:
4102 intel_dp_sink_crc_stop(intel_dp);
4103 return ret;
4104}
4105
4106static bool
4107intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4108{
4109 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4110 DP_DEVICE_SERVICE_IRQ_VECTOR,
4111 sink_irq_vector, 1) == 1;
4112}
4113
4114static bool
4115intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4116{
4117 int ret;
4118
4119 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4120 DP_SINK_COUNT_ESI,
4121 sink_irq_vector, 14);
	return ret == 14;
4126}
4127
4128static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4129{
4130 uint8_t test_result = DP_TEST_ACK;
4131 return test_result;
4132}
4133
4134static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4135{
4136 uint8_t test_result = DP_TEST_NAK;
4137 return test_result;
4138}
4139
4140static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4141{
4142 uint8_t test_result = DP_TEST_NAK;
4143 struct intel_connector *intel_connector = intel_dp->attached_connector;
4144 struct drm_connector *connector = &intel_connector->base;
4145
4146 if (intel_connector->detect_edid == NULL ||
4147 connector->edid_corrupt ||
4148 intel_dp->aux.i2c_defer_count > 6) {
4149 /* Check EDID read for NACKs, DEFERs and corruption
4150 * (DP CTS 1.2 Core r1.1)
4151 * 4.2.2.4 : Failed EDID read, I2C_NAK
4152 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4153 * 4.2.2.6 : EDID corruption detected
4154 * Use failsafe mode for all cases
4155 */
4156 if (intel_dp->aux.i2c_nack_count > 0 ||
4157 intel_dp->aux.i2c_defer_count > 0)
4158 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4159 intel_dp->aux.i2c_nack_count,
4160 intel_dp->aux.i2c_defer_count);
4161 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4162 } else {
4163 struct edid *block = intel_connector->detect_edid;
4164
4165 /* We have to write the checksum
4166 * of the last block read
4167 */
4168 block += intel_connector->detect_edid->extensions;
4169
4170 if (!drm_dp_dpcd_write(&intel_dp->aux,
4171 DP_TEST_EDID_CHECKSUM,
4172 &block->checksum,
4173 1))
4174 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4175
4176 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4177 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4178 }
4179
4180 /* Set test active flag here so userspace doesn't interrupt things */
4181 intel_dp->compliance_test_active = 1;
4182
4183 return test_result;
4184}
4185
4186static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4187{
4188 uint8_t test_result = DP_TEST_NAK;
4189 return test_result;
4190}
4191
4192static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4193{
4194 uint8_t response = DP_TEST_NAK;
4195 uint8_t rxdata = 0;
4196 int status = 0;
4197
4198 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4199 if (status <= 0) {
4200 DRM_DEBUG_KMS("Could not read test request from sink\n");
4201 goto update_status;
4202 }
4203
4204 switch (rxdata) {
4205 case DP_TEST_LINK_TRAINING:
4206 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4207 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4208 response = intel_dp_autotest_link_training(intel_dp);
4209 break;
4210 case DP_TEST_LINK_VIDEO_PATTERN:
4211 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4212 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4213 response = intel_dp_autotest_video_pattern(intel_dp);
4214 break;
4215 case DP_TEST_LINK_EDID_READ:
4216 DRM_DEBUG_KMS("EDID test requested\n");
4217 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4218 response = intel_dp_autotest_edid(intel_dp);
4219 break;
4220 case DP_TEST_LINK_PHY_TEST_PATTERN:
4221 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4222 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4223 response = intel_dp_autotest_phy_pattern(intel_dp);
4224 break;
4225 default:
4226 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4227 break;
4228 }
4229
4230update_status:
4231 status = drm_dp_dpcd_write(&intel_dp->aux,
4232 DP_TEST_RESPONSE,
4233 &response, 1);
4234 if (status <= 0)
4235 DRM_DEBUG_KMS("Could not write test response to sink\n");
4236}
4237
4238static int
4239intel_dp_check_mst_status(struct intel_dp *intel_dp)
4240{
4241 bool bret;
4242
4243 if (intel_dp->is_mst) {
4244 u8 esi[16] = { 0 };
4245 int ret = 0;
4246 int retry;
4247 bool handled;
4248 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4249go_again:
	if (bret) {
		/* check link status - esi[10] maps to DPCD 0x200c */
4253 if (intel_dp->active_mst_links &&
4254 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4255 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4256 intel_dp_start_link_train(intel_dp);
4257 intel_dp_stop_link_train(intel_dp);
4258 }
4259
4260 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4261 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4262
4263 if (handled) {
4264 for (retry = 0; retry < 3; retry++) {
4265 int wret;
4266 wret = drm_dp_dpcd_write(&intel_dp->aux,
4267 DP_SINK_COUNT_ESI+1,
4268 &esi[1], 3);
4269 if (wret == 3) {
4270 break;
4271 }
4272 }
4273
4274 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
			if (bret) {
4276 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4277 goto go_again;
4278 }
4279 } else
4280 ret = 0;
4281
4282 return ret;
4283 } else {
4284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4285 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4286 intel_dp->is_mst = false;
4287 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4288 /* send a hotplug event */
4289 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4290 }
4291 }
4292 return -EINVAL;
4293}
4294
4295/*
4296 * According to DP spec
4297 * 5.1.2:
4298 * 1. Read DPCD
4299 * 2. Configure link according to Receiver Capabilities
4300 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4301 * 4. Check link status on receipt of hot-plug interrupt
4302 */
4303static void
4304intel_dp_check_link_status(struct intel_dp *intel_dp)
4305{
4306 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4307 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4308 u8 sink_irq_vector;
4309 u8 link_status[DP_LINK_STATUS_SIZE];
4310
4311 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4312
	/*
	 * Clear the compliance test variables so that fresh values
	 * can be captured for the next automated test request.
	 */
4317 intel_dp->compliance_test_active = 0;
4318 intel_dp->compliance_test_type = 0;
4319 intel_dp->compliance_test_data = 0;
4320
4321 if (!intel_encoder->base.crtc)
4322 return;
4323
4324 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4325 return;
4326
	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status))
		return;

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp))
		return;
4336
4337 /* Try to read the source of the interrupt */
4338 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4339 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4340 /* Clear interrupt source */
4341 drm_dp_dpcd_writeb(&intel_dp->aux,
4342 DP_DEVICE_SERVICE_IRQ_VECTOR,
4343 sink_irq_vector);
4344
4345 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4346 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4347 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4348 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4349 }
4350
	/* if link training is requested we should always perform it */
4352 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4353 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4354 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4355 intel_encoder->base.name);
4356 intel_dp_start_link_train(intel_dp);
4357 intel_dp_stop_link_train(intel_dp);
4358 }
4359}
4360
4361/* XXX this is probably wrong for multiple downstream ports */
4362static enum drm_connector_status
4363intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4364{
4365 uint8_t *dpcd = intel_dp->dpcd;
4366 uint8_t type;
4367
4368 if (!intel_dp_get_dpcd(intel_dp))
4369 return connector_status_disconnected;
4370
4371 /* if there's no downstream port, we're done */
4372 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4373 return connector_status_connected;
4374
4375 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4376 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4377 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4378 uint8_t reg;
4379
4380 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
4382 return connector_status_unknown;
4383
4384 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4385 : connector_status_disconnected;
4386 }
4387
4388 /* If no HPD, poke DDC gently */
4389 if (drm_probe_ddc(&intel_dp->aux.ddc))
4390 return connector_status_connected;
4391
4392 /* Well we tried, say unknown for unreliable port types */
4393 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4394 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4395 if (type == DP_DS_PORT_TYPE_VGA ||
4396 type == DP_DS_PORT_TYPE_NON_EDID)
4397 return connector_status_unknown;
4398 } else {
4399 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4400 DP_DWN_STRM_PORT_TYPE_MASK;
4401 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4402 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4403 return connector_status_unknown;
4404 }
4405
4406 /* Anything else is out of spec, warn and ignore */
4407 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4408 return connector_status_disconnected;
4409}
4410
4411static enum drm_connector_status
4412edp_detect(struct intel_dp *intel_dp)
4413{
4414 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4415 enum drm_connector_status status;
4416
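	/*
	 * eDP panels can't be hot-unplugged, so treat "unknown" from
	 * the lid/VBT based check as connected.
	 */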
4417 status = intel_panel_detect(dev);
4418 if (status == connector_status_unknown)
4419 status = connector_status_connected;
4420
4421 return status;
4422}
4423
4424static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4425 struct intel_digital_port *port)
4426{
4427 u32 bit;
4428
4429 switch (port->port) {
4430 case PORT_A:
4431 return true;
4432 case PORT_B:
4433 bit = SDE_PORTB_HOTPLUG;
4434 break;
4435 case PORT_C:
4436 bit = SDE_PORTC_HOTPLUG;
4437 break;
4438 case PORT_D:
4439 bit = SDE_PORTD_HOTPLUG;
4440 break;
4441 default:
4442 MISSING_CASE(port->port);
4443 return false;
4444 }
4445
4446 return I915_READ(SDEISR) & bit;
4447}
4448
4449static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4450 struct intel_digital_port *port)
4451{
4452 u32 bit;
4453
4454 switch (port->port) {
4455 case PORT_A:
4456 return true;
4457 case PORT_B:
4458 bit = SDE_PORTB_HOTPLUG_CPT;
4459 break;
4460 case PORT_C:
4461 bit = SDE_PORTC_HOTPLUG_CPT;
4462 break;
4463 case PORT_D:
4464 bit = SDE_PORTD_HOTPLUG_CPT;
4465 break;
4466 case PORT_E:
4467 bit = SDE_PORTE_HOTPLUG_SPT;
4468 break;
4469 default:
4470 MISSING_CASE(port->port);
4471 return false;
4472 }
4473
4474 return I915_READ(SDEISR) & bit;
4475}
4476
4477static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4478 struct intel_digital_port *port)
4479{
4480 u32 bit;
4481
4482 switch (port->port) {
4483 case PORT_B:
4484 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4485 break;
4486 case PORT_C:
4487 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4488 break;
4489 case PORT_D:
4490 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4491 break;
4492 default:
4493 MISSING_CASE(port->port);
4494 return false;
4495 }
4496
4497 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4498}
4499
4500static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4501 struct intel_digital_port *port)
4502{
4503 u32 bit;
4504
4505 switch (port->port) {
4506 case PORT_B:
4507 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4508 break;
4509 case PORT_C:
4510 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4511 break;
4512 case PORT_D:
4513 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4514 break;
4515 default:
4516 MISSING_CASE(port->port);
4517 return false;
4518 }
4519
4520 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4521}
4522
4523static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4524 struct intel_digital_port *intel_dig_port)
4525{
4526 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4527 enum port port;
4528 u32 bit;
4529
4530 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4531 switch (port) {
4532 case PORT_A:
4533 bit = BXT_DE_PORT_HP_DDIA;
4534 break;
4535 case PORT_B:
4536 bit = BXT_DE_PORT_HP_DDIB;
4537 break;
4538 case PORT_C:
4539 bit = BXT_DE_PORT_HP_DDIC;
4540 break;
4541 default:
4542 MISSING_CASE(port);
4543 return false;
4544 }
4545
4546 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4547}
4548
4549/*
4550 * intel_digital_port_connected - is the specified port connected?
4551 * @dev_priv: i915 private structure
4552 * @port: the port to test
4553 *
4554 * Return %true if @port is connected, %false otherwise.
4555 */
4556bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4557 struct intel_digital_port *port)
4558{
4559 if (HAS_PCH_IBX(dev_priv))
4560 return ibx_digital_port_connected(dev_priv, port);
4561 else if (HAS_PCH_SPLIT(dev_priv))
4562 return cpt_digital_port_connected(dev_priv, port);
4563 else if (IS_BROXTON(dev_priv))
4564 return bxt_digital_port_connected(dev_priv, port);
4565 else if (IS_GM45(dev_priv))
4566 return gm45_digital_port_connected(dev_priv, port);
4567 else
4568 return g4x_digital_port_connected(dev_priv, port);
4569}
4570
4571static struct edid *
4572intel_dp_get_edid(struct intel_dp *intel_dp)
4573{
4574 struct intel_connector *intel_connector = intel_dp->attached_connector;
4575
4576 /* use cached edid if we have one */
4577 if (intel_connector->edid) {
4578 /* invalid edid */
4579 if (IS_ERR(intel_connector->edid))
4580 return NULL;
4581
4582 return drm_edid_duplicate(intel_connector->edid);
	}

	return drm_get_edid(&intel_connector->base, &intel_dp->aux.ddc);
4586}
4587
4588static void
4589intel_dp_set_edid(struct intel_dp *intel_dp)
4590{
4591 struct intel_connector *intel_connector = intel_dp->attached_connector;
4592 struct edid *edid;
4593
4594 edid = intel_dp_get_edid(intel_dp);
4595 intel_connector->detect_edid = edid;
4596
4597 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4598 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4599 else
4600 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4601}
4602
4603static void
4604intel_dp_unset_edid(struct intel_dp *intel_dp)
4605{
4606 struct intel_connector *intel_connector = intel_dp->attached_connector;
4607
4608 kfree(intel_connector->detect_edid);
4609 intel_connector->detect_edid = NULL;
4610
4611 intel_dp->has_audio = false;
4612}
4613
4614static enum drm_connector_status
4615intel_dp_detect(struct drm_connector *connector, bool force)
4616{
4617 struct intel_dp *intel_dp = intel_attached_dp(connector);
4618 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4619 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4620 struct drm_device *dev = connector->dev;
4621 enum drm_connector_status status;
4622 enum intel_display_power_domain power_domain;
4623 bool ret;
4624 u8 sink_irq_vector;
4625
4626 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4627 connector->base.id, connector->name);
4628 intel_dp_unset_edid(intel_dp);
4629
4630 if (intel_dp->is_mst) {
4631 /* MST devices are disconnected from a monitor POV */
4632 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4633 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4634 return connector_status_disconnected;
4635 }
4636
4637 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4638 intel_display_power_get(to_i915(dev), power_domain);
4639
4640 /* Can't disconnect eDP, but you can close the lid... */
4641 if (is_edp(intel_dp))
4642 status = edp_detect(intel_dp);
4643 else if (intel_digital_port_connected(to_i915(dev),
4644 dp_to_dig_port(intel_dp)))
4645 status = intel_dp_detect_dpcd(intel_dp);
4646 else
4647 status = connector_status_disconnected;
4648
4649 if (status != connector_status_connected) {
4650 intel_dp->compliance_test_active = 0;
4651 intel_dp->compliance_test_type = 0;
4652 intel_dp->compliance_test_data = 0;
4653
4654 goto out;
4655 }
4656
4657 intel_dp_probe_oui(intel_dp);
4658
4659 ret = intel_dp_probe_mst(intel_dp);
4660 if (ret) {
		/*
		 * If we are in MST mode then this connector won't
		 * appear connected or have anything with EDID on it.
		 */
4663 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4664 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4665 status = connector_status_disconnected;
4666 goto out;
4667 }
4668
	/*
	 * Clear the NACK and defer counts so the exact values seen
	 * while reading the EDID can be reported; compliance tests
	 * 4.2.2.4 and 4.2.2.5 require them.
	 */
4674 intel_dp->aux.i2c_nack_count = 0;
4675 intel_dp->aux.i2c_defer_count = 0;
4676
4677 intel_dp_set_edid(intel_dp);
4678
4679 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4680 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4681 status = connector_status_connected;
4682
4683 /* Try to read the source of the interrupt */
4684 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4685 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4686 /* Clear interrupt source */
4687 drm_dp_dpcd_writeb(&intel_dp->aux,
4688 DP_DEVICE_SERVICE_IRQ_VECTOR,
4689 sink_irq_vector);
4690
4691 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4692 intel_dp_handle_test_request(intel_dp);
4693 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4694 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4695 }
4696
4697out:
4698 intel_display_power_put(to_i915(dev), power_domain);
4699 return status;
4700}

static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_attached_dp(connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}

static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	bool has_audio = false;
	struct edid *edid;

	edid = to_intel_connector(connector)->detect_edid;
	if (edid)
		has_audio = drm_detect_monitor_audio(edid);

	return has_audio;
}

static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("scaling mode NONE is not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}

static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}

void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}

void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}

static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}

void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};

enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp))
			goto mst_fail;

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* If we were in MST mode and the device is no longer there, get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
			      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
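
/*
 * Note (illustrative, based on how this file wires things up): the handler
 * above is installed as intel_dig_port->hpd_pulse in intel_dp_init() below,
 * so it is what the i915 hotplug IRQ path ends up calling, with long_hpd
 * distinguishing cable (re)plug events from short sink-IRQ pulses.
 */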

/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	static const short port_mapping[] = {
		[PORT_B] = DVO_PORT_DPB,
		[PORT_C] = DVO_PORT_DPC,
		[PORT_D] = DVO_PORT_DPD,
		[PORT_E] = DVO_PORT_DPE,
	};

	/*
	 * eDP is not supported on g4x, so bail out early just
	 * for a bit of extra safety in case the VBT is bonkers.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		return false;

	if (port == PORT_A)
		return true;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
}
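
/*
 * Illustrative example (not from the original source): with a VBT child
 * device whose dvo_port is DVO_PORT_DPC and whose device_type carries the
 * eDP pattern under DEVICE_TYPE_eDP_BITS, intel_dp_is_edp(dev, PORT_C)
 * returns true, and intel_dp_init_connector() below then registers the
 * port as DRM_MODE_CONNECTOR_eDP instead of DRM_MODE_CONNECTOR_DisplayPort.
 */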

void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}

static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * The correct register set for Broxton needs to be
		 * identified using the VBT; hardcode set 0 for now.
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
			       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from the eDP 1.3 spec. Note that we use the clunky
	 * units of our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it by 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;
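
	/*
	 * Worked example (illustrative): the spec caps T11+T12 at 510 ms;
	 * adding the 100 ms hw zero-base and multiplying by 10 keeps the
	 * value in the same 100 us units as the other fields, so
	 * (510 + 100) * 10 == 6100, i.e. 610 ms expressed in 100 us units.
	 */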

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}

static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * The correct register set for Broxton needs to be
		 * identified using the VBT; hardcode set 0 for now.
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply matching the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
			   << BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			   << PANEL_POWER_CYCLE_DELAY_SHIFT);
	}
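
	/*
	 * Worked example (illustrative numbers, assuming div is the raw
	 * clock in MHz, e.g. 125): the reference divider field becomes
	 * (100 * 125) / 2 - 1 = 6249, and a t11_t12 of 6100 (100 us units)
	 * is stored as DIV_ROUND_UP(6100, 1000) = 7 power cycle units.
	 */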

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}

/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when the refresh rate (RR) has to be changed
 * from one frequency to another. Switches can be between the high and low
 * RR supported by the panel, or to any other RR based on media playback
 * (in this case, the RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be a positive non-zero value.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only seamless DRRS supported.\n");
		return;
	}

	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR, ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not active\n");
		return;
	}

	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refresh rate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP refresh rate set to : %dHz\n", refresh_rate);
}
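
/*
 * Usage sketch (illustrative, not part of the driver): as the kernel-doc
 * above requires, callers hold dev_priv->drrs.mutex around the switch:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv->dev,
 *				intel_dp->attached_connector->panel.
 *				downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 *
 * This is the pattern intel_edp_drrs_downclock_work() below follows.
 */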

/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					intel_dp->attached_connector->panel.
					fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate, hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					intel_dp->attached_connector->panel.
					downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes
 * starts, hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
			       unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					dev_priv->drrs.dp->attached_connector->panel.
					fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc is completed, so DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted, if no
 * other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					dev_priv->drrs.dp->attached_connector->panel.
					fixed_mode->vrefresh);

	/*
	 * flush also means no more activity, hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing the refresh rate (RR) by doing a full
 * modeset (may appear as a blink on screen) and is used in dock/undock
 * scenarios. Seamless DRRS involves changing the RR without any visual
 * effect to the user and can be used during normal system usage. This is
 * done by programming certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to a low RR based on usage scenarios.
 *
 * eDP DRRS:
 * The implementation is based on the frontbuffer tracking implementation.
 * When there is a disturbance on the screen triggered by user activity or a
 * periodic system activity, DRRS is disabled (RR is changed to high RR).
 * When there is no movement on screen, after a timeout of 1 second, a switch
 * to low RR is made.
 * For integration with frontbuffer tracking code,
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
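
/*
 * Call-flow sketch (illustrative, matching the DOC section above): a
 * frontbuffer tracking site brackets rendering roughly as
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... CPU/GPU writes to the frontbuffer ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *
 * invalidate() upclocks to the high RR immediately, while flush() re-arms
 * the 1 second idleness work that downclocks again once all tracked
 * frontbuffer bits are quiescent.
 */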

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS state.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of the downclock mode (apart
 * from the VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS is only supported on Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_find_panel_downclock(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode not found, DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
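
/*
 * Note (illustrative cross-reference): the downclock mode returned here is
 * handed to intel_panel_init() by intel_edp_init_connector() below, which is
 * how the panel later advertises DRRS capability to the mode-setting code.
 */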

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for eDP. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init the power sequencer regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fall back to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type, ret;

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * with 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}

void
intel_dp_init(struct drm_device *dev,
	      i915_reg_t output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			     DRM_MODE_ENCODER_TMDS, NULL))
		goto err_encoder_init;

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
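
/*
 * Usage sketch (illustrative, with assumed register/port names): platform
 * setup code registers each DP port with a call such as
 *
 *	intel_dp_init(dev, DP_B, PORT_B);
 *
 * after which connector registration, hotplug wiring and, for eDP, panel
 * setup are all handled by intel_dp_init_connector() above.
 */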

void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}

void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0)
				intel_dp_check_mst_status(&intel_dig_port->dp);
		}
	}
}