v3.1
  1/*
  2 * Copyright 2007-8 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the "Software"),
  7 * to deal in the Software without restriction, including without limitation
  8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9 * and/or sell copies of the Software, and to permit persons to whom the
 10 * Software is furnished to do so, subject to the following conditions:
 11 *
 12 * The above copyright notice and this permission notice shall be included in
 13 * all copies or substantial portions of the Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 21 * OTHER DEALINGS IN THE SOFTWARE.
 22 *
 23 * Authors: Dave Airlie
 24 *          Alex Deucher
 25 */
 26#include "drmP.h"
 27#include "radeon_drm.h"
 28#include "radeon.h"
 29
 30#include "atom.h"
 31#include "atom-bits.h"
 32#include "drm_dp_helper.h"
 33
 34/* move these to drm_dp_helper.c/h */
 35#define DP_LINK_CONFIGURATION_SIZE 9
 36#define DP_LINK_STATUS_SIZE	   6
 37#define DP_DPCD_SIZE	           8
 38
 39static char *voltage_names[] = {
 40        "0.4V", "0.6V", "0.8V", "1.2V"
 41};
 42static char *pre_emph_names[] = {
 43        "0dB", "3.5dB", "6dB", "9.5dB"
 44};
 45
 46/***** radeon AUX functions *****/
 47union aux_channel_transaction {
 48	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
 49	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
 50};
 51
 52static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
 53				 u8 *send, int send_bytes,
 54				 u8 *recv, int recv_size,
 55				 u8 delay, u8 *ack)
 56{
 57	struct drm_device *dev = chan->dev;
 58	struct radeon_device *rdev = dev->dev_private;
 59	union aux_channel_transaction args;
 60	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
 61	unsigned char *base;
 62	int recv_bytes;
 63
 64	memset(&args, 0, sizeof(args));
 65
 66	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 67
 68	memcpy(base, send, send_bytes);
 69
 70	args.v1.lpAuxRequest = 0;
 71	args.v1.lpDataOut = 16;
 72	args.v1.ucDataOutLen = 0;
 73	args.v1.ucChannelID = chan->rec.i2c_id;
 74	args.v1.ucDelay = delay / 10;
 75	if (ASIC_IS_DCE4(rdev))
 76		args.v2.ucHPD_ID = chan->rec.hpd;
 77
 78	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 79
 80	*ack = args.v1.ucReplyStatus;
 81
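   	/* the atom table reports the transaction outcome in ucReplyStatus:
   	 * 1 = timeout, 2 = flags not zero, 3 = AUX error; any other value is
   	 * success, with ucDataOutLen giving the size of the reply payload.
   	 */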
 82	/* timeout */
 83	if (args.v1.ucReplyStatus == 1) {
 84		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
 85		return -ETIMEDOUT;
 86	}
 87
 88	/* flags not zero */
 89	if (args.v1.ucReplyStatus == 2) {
 90		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
 91		return -EBUSY;
 92	}
 93
 94	/* error */
 95	if (args.v1.ucReplyStatus == 3) {
 96		DRM_DEBUG_KMS("dp_aux_ch error\n");
 97		return -EIO;
 98	}
 99
100	recv_bytes = args.v1.ucDataOutLen;
101	if (recv_bytes > recv_size)
102		recv_bytes = recv_size;
103
104	if (recv && recv_size)
105		memcpy(recv, base + 16, recv_bytes);
106
107	return recv_bytes;
108}
109
110static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
111				      u16 address, u8 *send, u8 send_bytes, u8 delay)
112{
113	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
114	int ret;
115	u8 msg[20];
116	int msg_bytes = send_bytes + 4;
117	u8 ack;
118	unsigned retry;
119
120	if (send_bytes > 16)
121		return -1;
122
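   	/* native AUX request header handed to the atom table: bytes 0-1 carry
   	 * the DPCD address (low, high), byte 2 the request type in its upper
   	 * nibble, and byte 3 the total message length in its upper nibble with
   	 * (payload length - 1) in its lower nibble.
   	 */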
123	msg[0] = address;
124	msg[1] = address >> 8;
125	msg[2] = AUX_NATIVE_WRITE << 4;
126	msg[3] = (msg_bytes << 4) | (send_bytes - 1);
127	memcpy(&msg[4], send, send_bytes);
128
129	for (retry = 0; retry < 4; retry++) {
130		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
131					    msg, msg_bytes, NULL, 0, delay, &ack);
132		if (ret == -EBUSY)
133			continue;
134		else if (ret < 0)
135			return ret;
136		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
137			return send_bytes;
138		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
139			udelay(400);
140		else
141			return -EIO;
142	}
143
144	return -EIO;
145}
146
147static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
148				     u16 address, u8 *recv, int recv_bytes, u8 delay)
149{
150	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
151	u8 msg[4];
152	int msg_bytes = 4;
153	u8 ack;
154	int ret;
155	unsigned retry;
156
157	msg[0] = address;
158	msg[1] = address >> 8;
159	msg[2] = AUX_NATIVE_READ << 4;
160	msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
161
162	for (retry = 0; retry < 4; retry++) {
163		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
164					    msg, msg_bytes, recv, recv_bytes, delay, &ack);
165		if (ret == -EBUSY)
166			continue;
167		else if (ret < 0)
168			return ret;
169		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
170			return ret;
171		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
172			udelay(400);
173		else if (ret == 0)
174			return -EPROTO;
175		else
176			return -EIO;
177	}
178
179	return -EIO;
180}
181
182static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
183				 u16 reg, u8 val)
184{
185	radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
186}
187
188static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
189			       u16 reg)
190{
191	u8 val = 0;
192
193	radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
194
195	return val;
196}
197
198int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
199			 u8 write_byte, u8 *read_byte)
200{
201	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
202	struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
203	u16 address = algo_data->address;
204	u8 msg[5];
205	u8 reply[2];
206	unsigned retry;
207	int msg_bytes;
208	int reply_bytes = 1;
209	int ret;
210	u8 ack;
211
212	/* Set up the command byte */
213	if (mode & MODE_I2C_READ)
214		msg[2] = AUX_I2C_READ << 4;
215	else
216		msg[2] = AUX_I2C_WRITE << 4;
217
218	if (!(mode & MODE_I2C_STOP))
219		msg[2] |= AUX_I2C_MOT << 4;
220
221	msg[0] = address;
222	msg[1] = address >> 8;
223
224	switch (mode) {
225	case MODE_I2C_WRITE:
226		msg_bytes = 5;
227		msg[3] = msg_bytes << 4;
228		msg[4] = write_byte;
229		break;
230	case MODE_I2C_READ:
231		msg_bytes = 4;
232		msg[3] = msg_bytes << 4;
233		break;
234	default:
235		msg_bytes = 4;
236		msg[3] = 3 << 4;
237		break;
238	}
239
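   	/* retry a few times: a DEFER reply means the sink is busy, so wait
   	 * roughly 400 us and resend the transaction.
   	 */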
240	for (retry = 0; retry < 4; retry++) {
241		ret = radeon_process_aux_ch(auxch,
242					    msg, msg_bytes, reply, reply_bytes, 0, &ack);
243		if (ret == -EBUSY)
244			continue;
245		else if (ret < 0) {
246			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
247			return ret;
248		}
249
250		switch (ack & AUX_NATIVE_REPLY_MASK) {
251		case AUX_NATIVE_REPLY_ACK:
252			/* I2C-over-AUX Reply field is only valid
253			 * when paired with AUX ACK.
254			 */
255			break;
256		case AUX_NATIVE_REPLY_NACK:
257			DRM_DEBUG_KMS("aux_ch native nack\n");
258			return -EREMOTEIO;
259		case AUX_NATIVE_REPLY_DEFER:
260			DRM_DEBUG_KMS("aux_ch native defer\n");
261			udelay(400);
262			continue;
263		default:
264			DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
265			return -EREMOTEIO;
266		}
267
268		switch (ack & AUX_I2C_REPLY_MASK) {
269		case AUX_I2C_REPLY_ACK:
270			if (mode == MODE_I2C_READ)
271				*read_byte = reply[0];
272			return ret;
273		case AUX_I2C_REPLY_NACK:
274			DRM_DEBUG_KMS("aux_i2c nack\n");
275			return -EREMOTEIO;
276		case AUX_I2C_REPLY_DEFER:
277			DRM_DEBUG_KMS("aux_i2c defer\n");
278			udelay(400);
279			break;
280		default:
281			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
282			return -EREMOTEIO;
283		}
284	}
285
286	DRM_ERROR("aux i2c too many retries, giving up\n");
287	return -EREMOTEIO;
288}
289
290/***** general DP utility functions *****/
291
292static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
293{
294	return link_status[r - DP_LANE0_1_STATUS];
295}
296
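   /* each DP_LANE0_1_STATUS byte packs the status of two lanes, four bits per
    * lane, so index by lane/2 and shift by (lane & 1) * 4.
    */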
297static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
298			     int lane)
299{
300	int i = DP_LANE0_1_STATUS + (lane >> 1);
301	int s = (lane & 1) * 4;
302	u8 l = dp_link_status(link_status, i);
303	return (l >> s) & 0xf;
304}
305
306static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
307				 int lane_count)
308{
309	int lane;
310	u8 lane_status;
311
312	for (lane = 0; lane < lane_count; lane++) {
313		lane_status = dp_get_lane_status(link_status, lane);
314		if ((lane_status & DP_LANE_CR_DONE) == 0)
315			return false;
316	}
317	return true;
318}
319
320static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
321			     int lane_count)
322{
323	u8 lane_align;
324	u8 lane_status;
325	int lane;
326
327	lane_align = dp_link_status(link_status,
328				    DP_LANE_ALIGN_STATUS_UPDATED);
329	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
330		return false;
331	for (lane = 0; lane < lane_count; lane++) {
332		lane_status = dp_get_lane_status(link_status, lane);
333		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
334			return false;
335	}
336	return true;
337}
338
339static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
340					int lane)
341
342{
343	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
344	int s = ((lane & 1) ?
345		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
346		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
347	u8 l = dp_link_status(link_status, i);
348
349	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
350}
351
352static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
353					     int lane)
354{
355	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
356	int s = ((lane & 1) ?
357		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
358		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
359	u8 l = dp_link_status(link_status, i);
360
361	return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
362}
363
364#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_1200
365#define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPHASIS_9_5
366
367static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
368				int lane_count,
369				u8 train_set[4])
370{
371	u8 v = 0;
372	u8 p = 0;
373	int lane;
374
375	for (lane = 0; lane < lane_count; lane++) {
376		u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
377		u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
378
379		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
380			  lane,
381			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
382			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
383
384		if (this_v > v)
385			v = this_v;
386		if (this_p > p)
387			p = this_p;
388	}
389
390	if (v >= DP_VOLTAGE_MAX)
391		v |= DP_TRAIN_MAX_SWING_REACHED;
392
393	if (p >= DP_PRE_EMPHASIS_MAX)
394		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
395
396	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
397		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
398		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
399
400	for (lane = 0; lane < 4; lane++)
401		train_set[lane] = v | p;
402}
403
404/* convert bits per color to bits per pixel */
405/* get bpc from the EDID */
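   /* a bpc of 0 means the display did not report a usable depth; fall back to
    * 8 bpc (24 bpp).
    */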
406static int convert_bpc_to_bpp(int bpc)
407{
408	if (bpc == 0)
409		return 24;
410	else
411		return bpc * 3;
412}
413
414/* get the max pix clock supported by the link rate and lane num */
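   /* link_rate is the per-lane symbol clock in kHz and each 8b/10b symbol
    * carries 8 data bits, e.g. 270000 kHz * 4 lanes * 8 / 24 bpp = 360000 kHz.
    */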
415static int dp_get_max_dp_pix_clock(int link_rate,
416				   int lane_num,
417				   int bpp)
418{
419	return (link_rate * lane_num * 8) / bpp;
420}
421
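   /* DPCD link-bw codes give the lane rate in 0.27 Gbps units (0x06 = 1.62,
    * 0x0a = 2.7, 0x14 = 5.4 Gbps); internally the rate is tracked as the
    * symbol clock in kHz.
    */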
422static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
423{
424	switch (dpcd[DP_MAX_LINK_RATE]) {
425	case DP_LINK_BW_1_62:
426	default:
427		return 162000;
428	case DP_LINK_BW_2_7:
429		return 270000;
430	case DP_LINK_BW_5_4:
431		return 540000;
432	}
433}
434
435static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
436{
437	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
438}
439
440static u8 dp_get_dp_link_rate_coded(int link_rate)
441{
442	switch (link_rate) {
443	case 162000:
444	default:
445		return DP_LINK_BW_1_62;
446	case 270000:
447		return DP_LINK_BW_2_7;
448	case 540000:
449		return DP_LINK_BW_5_4;
450	}
451}
452
453/***** radeon specific DP functions *****/
454
455/* First get the minimum lane count needed for the pixel clock at the low
456 * link rate (prefer the low rate), then check the maximum lane count the
457 * DP panel supports and fall back to that if it is smaller.
458 */
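   /* e.g. a 154 MHz pixel clock at 24 bpp fits on two 2.7 GHz lanes:
    * 270000 * 2 * 8 / 24 = 180000 kHz >= 154000 kHz.
    */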
459static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
460					u8 dpcd[DP_DPCD_SIZE],
461					int pix_clock)
462{
463	int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
464	int max_link_rate = dp_get_max_link_rate(dpcd);
465	int max_lane_num = dp_get_max_lane_number(dpcd);
466	int lane_num;
467	int max_dp_pix_clock;
468
469	for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
470		max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
471		if (pix_clock <= max_dp_pix_clock)
472			break;
473	}
474
475	return lane_num;
476}
477
478static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
479				       u8 dpcd[DP_DPCD_SIZE],
480				       int pix_clock)
481{
482	int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
483	int lane_num, max_pix_clock;
484
485	if (radeon_connector_encoder_is_dp_bridge(connector))
486		return 270000;
487
488	lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
489	max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
490	if (pix_clock <= max_pix_clock)
491		return 162000;
492	max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
493	if (pix_clock <= max_pix_clock)
494		return 270000;
495	if (radeon_connector_is_dp12_capable(connector)) {
496		max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
497		if (pix_clock <= max_pix_clock)
498			return 540000;
499	}
500
501	return dp_get_max_link_rate(dpcd);
502}
503
504static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
505				    int action, int dp_clock,
506				    u8 ucconfig, u8 lane_num)
507{
508	DP_ENCODER_SERVICE_PARAMETERS args;
509	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
510
511	memset(&args, 0, sizeof(args));
512	args.ucLinkClock = dp_clock / 10;
513	args.ucConfig = ucconfig;
514	args.ucAction = action;
515	args.ucLaneNum = lane_num;
516	args.ucStatus = 0;
517
518	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
519	return args.ucStatus;
520}
521
522u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
523{
524	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
525	struct drm_device *dev = radeon_connector->base.dev;
526	struct radeon_device *rdev = dev->dev_private;
527
528	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
529					 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
530}
531
532bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
533{
534	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
535	u8 msg[25];
536	int ret, i;
537
538	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
539	if (ret > 0) {
540		memcpy(dig_connector->dpcd, msg, 8);
541		DRM_DEBUG_KMS("DPCD: ");
542		for (i = 0; i < 8; i++)
543			DRM_DEBUG_KMS("%02x ", msg[i]);
544		DRM_DEBUG_KMS("\n");
545		return true;
546	}
547	dig_connector->dpcd[0] = 0;
548	return false;
549}
550
551static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
552				     struct drm_connector *connector)
553{
554	struct drm_device *dev = encoder->dev;
555	struct radeon_device *rdev = dev->dev_private;
556	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
557
558	if (!ASIC_IS_DCE4(rdev))
559		return;
560
561	if (radeon_connector_encoder_is_dp_bridge(connector))
562		panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
563
564	atombios_dig_encoder_setup(encoder,
565				   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
566				   panel_mode);
567}
568
569void radeon_dp_set_link_config(struct drm_connector *connector,
570			       struct drm_display_mode *mode)
571{
572	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
573	struct radeon_connector_atom_dig *dig_connector;
574
575	if (!radeon_connector->con_priv)
576		return;
577	dig_connector = radeon_connector->con_priv;
578
579	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
580	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
581		dig_connector->dp_clock =
582			radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
583		dig_connector->dp_lane_count =
584			radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
585	}
586}
587
588int radeon_dp_mode_valid_helper(struct drm_connector *connector,
589				struct drm_display_mode *mode)
590{
591	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
592	struct radeon_connector_atom_dig *dig_connector;
593	int dp_clock;
594
595	if (!radeon_connector->con_priv)
596		return MODE_CLOCK_HIGH;
597	dig_connector = radeon_connector->con_priv;
598
599	dp_clock =
600		radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
601
602	if ((dp_clock == 540000) &&
603	    (!radeon_connector_is_dp12_capable(connector)))
604		return MODE_CLOCK_HIGH;
605
606	return MODE_OK;
607}
608
609static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
610				      u8 link_status[DP_LINK_STATUS_SIZE])
611{
612	int ret;
613	ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
614					link_status, DP_LINK_STATUS_SIZE, 100);
615	if (ret <= 0) {
616		DRM_ERROR("displayport link status failed\n");
617		return false;
618	}
619
620	DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
621		  link_status[0], link_status[1], link_status[2],
622		  link_status[3], link_status[4], link_status[5]);
623	return true;
624}
625
626bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
627{
628	u8 link_status[DP_LINK_STATUS_SIZE];
629	struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
630
631	if (!radeon_dp_get_link_status(radeon_connector, link_status))
632		return false;
633	if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
634		return false;
635	return true;
636}
637
638struct radeon_dp_link_train_info {
639	struct radeon_device *rdev;
640	struct drm_encoder *encoder;
641	struct drm_connector *connector;
642	struct radeon_connector *radeon_connector;
643	int enc_id;
644	int dp_clock;
645	int dp_lane_count;
646	int rd_interval;
647	bool tp3_supported;
648	u8 dpcd[8];
649	u8 train_set[4];
650	u8 link_status[DP_LINK_STATUS_SIZE];
651	u8 tries;
652	bool use_dpencoder;
653};
654
655static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
656{
657	/* set the initial vs/emph on the source */
658	atombios_dig_transmitter_setup(dp_info->encoder,
659				       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
660				       0, dp_info->train_set[0]); /* sets all lanes at once */
661
662	/* set the vs/emph on the sink */
663	radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
664				   dp_info->train_set, dp_info->dp_lane_count, 0);
665}
666
667static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
668{
669	int rtp = 0;
670
671	/* set training pattern on the source */
672	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
673		switch (tp) {
674		case DP_TRAINING_PATTERN_1:
675			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
676			break;
677		case DP_TRAINING_PATTERN_2:
678			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
679			break;
680		case DP_TRAINING_PATTERN_3:
681			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
682			break;
683		}
684		atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
685	} else {
686		switch (tp) {
687		case DP_TRAINING_PATTERN_1:
688			rtp = 0;
689			break;
690		case DP_TRAINING_PATTERN_2:
691			rtp = 1;
692			break;
693		}
694		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
695					  dp_info->dp_clock, dp_info->enc_id, rtp);
696	}
697
698	/* enable training pattern on the sink */
699	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
700}
701
702static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
703{
704	u8 tmp;
705
706	/* power up the sink */
707	if (dp_info->dpcd[0] >= 0x11)
708		radeon_write_dpcd_reg(dp_info->radeon_connector,
709				      DP_SET_POWER, DP_SET_POWER_D0);
710
711	/* possibly enable downspread on the sink */
712	if (dp_info->dpcd[3] & 0x1)
713		radeon_write_dpcd_reg(dp_info->radeon_connector,
714				      DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
715	else
716		radeon_write_dpcd_reg(dp_info->radeon_connector,
717				      DP_DOWNSPREAD_CTRL, 0);
718
719	radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
720
721	/* set the lane count on the sink */
722	tmp = dp_info->dp_lane_count;
723	if (dp_info->dpcd[0] >= 0x11)
724		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
725	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
726
727	/* set the link rate on the sink */
728	tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
729	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
730
731	/* start training on the source */
732	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
733		atombios_dig_encoder_setup(dp_info->encoder,
734					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
735	else
736		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
737					  dp_info->dp_clock, dp_info->enc_id, 0);
738
739	/* disable the training pattern on the sink */
740	radeon_write_dpcd_reg(dp_info->radeon_connector,
741			      DP_TRAINING_PATTERN_SET,
742			      DP_TRAINING_PATTERN_DISABLE);
743
744	return 0;
745}
746
747static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
748{
749	udelay(400);
750
751	/* disable the training pattern on the sink */
752	radeon_write_dpcd_reg(dp_info->radeon_connector,
753			      DP_TRAINING_PATTERN_SET,
754			      DP_TRAINING_PATTERN_DISABLE);
755
756	/* disable the training pattern on the source */
757	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
758		atombios_dig_encoder_setup(dp_info->encoder,
759					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
760	else
761		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
762					  dp_info->dp_clock, dp_info->enc_id, 0);
763
764	return 0;
765}
766
767static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
768{
769	bool clock_recovery;
770	u8 voltage;
771	int i;
772
773	radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
774	memset(dp_info->train_set, 0, 4);
775	radeon_dp_update_vs_emph(dp_info);
776
777	udelay(400);
778
779	/* clock recovery loop */
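   	/* per the DP spec, clock recovery fails once every lane has hit its
   	 * maximum voltage swing or the same swing level has been retried
   	 * five times.
   	 */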
780	clock_recovery = false;
781	dp_info->tries = 0;
782	voltage = 0xff;
783	while (1) {
784		if (dp_info->rd_interval == 0)
785			udelay(100);
786		else
787			mdelay(dp_info->rd_interval * 4);
788
789		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
790			break;
791
792		if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
793			clock_recovery = true;
794			break;
795		}
796
797		for (i = 0; i < dp_info->dp_lane_count; i++) {
798			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
799				break;
800		}
801		if (i == dp_info->dp_lane_count) {
802			DRM_ERROR("clock recovery reached max voltage\n");
803			break;
804		}
805
806		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
807			++dp_info->tries;
808			if (dp_info->tries == 5) {
809				DRM_ERROR("clock recovery tried 5 times\n");
810				break;
811			}
812		} else
813			dp_info->tries = 0;
814
815		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
816
817		/* Compute new train_set as requested by sink */
818		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
819
820		radeon_dp_update_vs_emph(dp_info);
821	}
822	if (!clock_recovery) {
823		DRM_ERROR("clock recovery failed\n");
824		return -1;
825	} else {
826		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
827			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
828			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
829			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
830		return 0;
831	}
832}
833
834static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
835{
836	bool channel_eq;
837
838	if (dp_info->tp3_supported)
839		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
840	else
841		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
842
843	/* channel equalization loop */
844	dp_info->tries = 0;
845	channel_eq = false;
846	while (1) {
847		if (dp_info->rd_interval == 0)
848			udelay(400);
849		else
850			mdelay(dp_info->rd_interval * 4);
851
852		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
853			break;
854
855		if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
856			channel_eq = true;
857			break;
858		}
859
860		/* Try 5 times */
861		if (dp_info->tries > 5) {
862			DRM_ERROR("channel eq failed: 5 tries\n");
863			break;
864		}
865
866		/* Compute new train_set as requested by sink */
867		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
868
869		radeon_dp_update_vs_emph(dp_info);
870		dp_info->tries++;
871	}
872
873	if (!channel_eq) {
874		DRM_ERROR("channel eq failed\n");
875		return -1;
876	} else {
877		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
878			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
879			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
880			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
881		return 0;
882	}
883}
884
885void radeon_dp_link_train(struct drm_encoder *encoder,
886			  struct drm_connector *connector)
887{
888	struct drm_device *dev = encoder->dev;
889	struct radeon_device *rdev = dev->dev_private;
890	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
891	struct radeon_encoder_atom_dig *dig;
892	struct radeon_connector *radeon_connector;
893	struct radeon_connector_atom_dig *dig_connector;
894	struct radeon_dp_link_train_info dp_info;
895	int index;
896	u8 tmp, frev, crev;
897
898	if (!radeon_encoder->enc_priv)
899		return;
900	dig = radeon_encoder->enc_priv;
901
902	radeon_connector = to_radeon_connector(connector);
903	if (!radeon_connector->con_priv)
904		return;
905	dig_connector = radeon_connector->con_priv;
906
907	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
908	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
909		return;
910
911	/* DPEncoderService newer than 1.1 can't properly program the
912	 * training pattern. When facing such a version, use the
913	 * DIGXEncoderControl (X == 1 | 2) table instead.
914	 */
915	dp_info.use_dpencoder = true;
916	index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
917	if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
918		if (crev > 1) {
919			dp_info.use_dpencoder = false;
920		}
921	}
922
923	dp_info.enc_id = 0;
924	if (dig->dig_encoder)
925		dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
926	else
927		dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
928	if (dig->linkb)
929		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
930	else
931		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
932
933	dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
934	tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
935	if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
936		dp_info.tp3_supported = true;
937	else
938		dp_info.tp3_supported = false;
939
940	memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
941	dp_info.rdev = rdev;
942	dp_info.encoder = encoder;
943	dp_info.connector = connector;
944	dp_info.radeon_connector = radeon_connector;
945	dp_info.dp_lane_count = dig_connector->dp_lane_count;
946	dp_info.dp_clock = dig_connector->dp_clock;
947
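   	/* standard DP training sequence: program the link and wake the sink,
   	 * run clock recovery with training pattern 1, then channel
   	 * equalization with pattern 2 (or 3), and finally clear the pattern.
   	 */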
948	if (radeon_dp_link_train_init(&dp_info))
949		goto done;
950	if (radeon_dp_link_train_cr(&dp_info))
951		goto done;
952	if (radeon_dp_link_train_ce(&dp_info))
953		goto done;
954done:
955	if (radeon_dp_link_train_finish(&dp_info))
956		return;
957}
v4.17
  1/*
  2 * Copyright 2007-8 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the "Software"),
  7 * to deal in the Software without restriction, including without limitation
  8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9 * and/or sell copies of the Software, and to permit persons to whom the
 10 * Software is furnished to do so, subject to the following conditions:
 11 *
 12 * The above copyright notice and this permission notice shall be included in
 13 * all copies or substantial portions of the Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 21 * OTHER DEALINGS IN THE SOFTWARE.
 22 *
 23 * Authors: Dave Airlie
 24 *          Alex Deucher
 25 *          Jerome Glisse
 26 */
 27#include <drm/drmP.h>
 28#include <drm/radeon_drm.h>
 29#include "radeon.h"
 30
 31#include "atom.h"
 32#include "atom-bits.h"
 33#include <drm/drm_dp_helper.h>
 34
 35/* move these to drm_dp_helper.c/h */
 36#define DP_LINK_CONFIGURATION_SIZE 9
 37#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
 38
 39static char *voltage_names[] = {
 40	"0.4V", "0.6V", "0.8V", "1.2V"
 41};
 42static char *pre_emph_names[] = {
 43	"0dB", "3.5dB", "6dB", "9.5dB"
 44};
 45
 46/***** radeon AUX functions *****/
 47
 48/* Atom needs data in little endian format so swap as appropriate when copying
 49 * data to or from atom. Note that atom operates on dw units.
 50 *
 51 * Use to_le=true when sending data to atom and provide at least
 52 * ALIGN(num_bytes,4) bytes in the dst buffer.
 53 *
 54 * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
 55 * bytes in the src buffer.
 56 */
 57void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
 58{
 59#ifdef __BIG_ENDIAN
 60	u32 src_tmp[5], dst_tmp[5];
 61	int i;
 62	u8 align_num_bytes = ALIGN(num_bytes, 4);
 63
 64	if (to_le) {
 65		memcpy(src_tmp, src, num_bytes);
 66		for (i = 0; i < align_num_bytes / 4; i++)
 67			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
 68		memcpy(dst, dst_tmp, align_num_bytes);
 69	} else {
 70		memcpy(src_tmp, src, align_num_bytes);
 71		for (i = 0; i < align_num_bytes / 4; i++)
 72			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
 73		memcpy(dst, dst_tmp, num_bytes);
 74	}
 75#else
 76	memcpy(dst, src, num_bytes);
 77#endif
 78}
 79
 80union aux_channel_transaction {
 81	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
 82	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
 83};
 84
 85static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
 86				 u8 *send, int send_bytes,
 87				 u8 *recv, int recv_size,
 88				 u8 delay, u8 *ack)
 89{
 90	struct drm_device *dev = chan->dev;
 91	struct radeon_device *rdev = dev->dev_private;
 92	union aux_channel_transaction args;
 93	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
 94	unsigned char *base;
 95	int recv_bytes;
 96	int r = 0;
 97
 98	memset(&args, 0, sizeof(args));
 99
100	mutex_lock(&chan->mutex);
101	mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
102
103	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
104
105	radeon_atom_copy_swap(base, send, send_bytes, true);
106
107	args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
108	args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
109	args.v1.ucDataOutLen = 0;
110	args.v1.ucChannelID = chan->rec.i2c_id;
111	args.v1.ucDelay = delay / 10;
112	if (ASIC_IS_DCE4(rdev))
113		args.v2.ucHPD_ID = chan->rec.hpd;
114
115	atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
116
117	*ack = args.v1.ucReplyStatus;
118
119	/* timeout */
120	if (args.v1.ucReplyStatus == 1) {
121		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
122		r = -ETIMEDOUT;
123		goto done;
124	}
125
126	/* flags not zero */
127	if (args.v1.ucReplyStatus == 2) {
128		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
129		r = -EIO;
130		goto done;
131	}
132
133	/* error */
134	if (args.v1.ucReplyStatus == 3) {
135		DRM_DEBUG_KMS("dp_aux_ch error\n");
136		r = -EIO;
137		goto done;
138	}
139
140	recv_bytes = args.v1.ucDataOutLen;
141	if (recv_bytes > recv_size)
142		recv_bytes = recv_size;
143
144	if (recv && recv_size)
145		radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
146
147	r = recv_bytes;
148done:
149	mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
150	mutex_unlock(&chan->mutex);
151
152	return r;
153}
154
155#define BARE_ADDRESS_SIZE 3
156#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
157
158static ssize_t
159radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
160{
161	struct radeon_i2c_chan *chan =
162		container_of(aux, struct radeon_i2c_chan, aux);
163	int ret;
164	u8 tx_buf[20];
165	size_t tx_size;
166	u8 ack, delay = 0;
167
168	if (WARN_ON(msg->size > 16))
169		return -E2BIG;
170
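   	/* AUX request header: the 20-bit address is split over bytes 0-2 (low,
   	 * mid, and the low nibble of byte 2), the request type sits in the
   	 * upper nibble of byte 2, and byte 3 starts as (payload length - 1);
   	 * its upper nibble is filled in per request type below.
   	 */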
171	tx_buf[0] = msg->address & 0xff;
172	tx_buf[1] = (msg->address >> 8) & 0xff;
173	tx_buf[2] = (msg->request << 4) |
174		((msg->address >> 16) & 0xf);
175	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
176
177	switch (msg->request & ~DP_AUX_I2C_MOT) {
178	case DP_AUX_NATIVE_WRITE:
179	case DP_AUX_I2C_WRITE:
180	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
181		/* The atom implementation only supports writes with a max payload of
182		 * 12 bytes since it uses 4 bits for the total count (header + payload)
183		 * in the parameter space.  The atom interface supports 16 byte
184		 * payloads for reads. The hw itself supports up to 16 bytes of payload.
185		 */
186		if (WARN_ON_ONCE(msg->size > 12))
187			return -E2BIG;
188		/* tx_size needs to be 4 even for bare address packets since the atom
189		 * table needs the info in tx_buf[3].
190		 */
191		tx_size = HEADER_SIZE + msg->size;
192		if (msg->size == 0)
193			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
194		else
195			tx_buf[3] |= tx_size << 4;
196		memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
197		ret = radeon_process_aux_ch(chan,
198					    tx_buf, tx_size, NULL, 0, delay, &ack);
199		if (ret >= 0)
200			/* Return payload size. */
201			ret = msg->size;
202		break;
203	case DP_AUX_NATIVE_READ:
204	case DP_AUX_I2C_READ:
205		/* tx_size needs to be 4 even for bare address packets since the atom
206		 * table needs the info in tx_buf[3].
207		 */
208		tx_size = HEADER_SIZE;
209		if (msg->size == 0)
210			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
211		else
212			tx_buf[3] |= tx_size << 4;
213		ret = radeon_process_aux_ch(chan,
214					    tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
215		break;
216	default:
217		ret = -EINVAL;
218		break;
219	}
220
221	if (ret >= 0)
222		msg->reply = ack >> 4;
223
224	return ret;
225}
226
227void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
228{
229	struct drm_device *dev = radeon_connector->base.dev;
230	struct radeon_device *rdev = dev->dev_private;
231	int ret;
232
233	radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
234	radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
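   	/* DCE5 and newer can run AUX transactions on the hardware AUX engine
   	 * (unless disabled via the radeon_auxch option); older parts always go
   	 * through the atom ProcessAuxChannelTransaction table.
   	 */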
235	if (ASIC_IS_DCE5(rdev)) {
236		if (radeon_auxch)
237			radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_native;
238		else
239			radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
240	} else {
241		radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
242	}
243
244	ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
245	if (!ret)
246		radeon_connector->ddc_bus->has_aux = true;
247
248	WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
249}
250
251/***** general DP utility functions *****/
252
253#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_LEVEL_3
254#define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPH_LEVEL_3
255
256static void dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE],
257				int lane_count,
258				u8 train_set[4])
259{
260	u8 v = 0;
261	u8 p = 0;
262	int lane;
263
264	for (lane = 0; lane < lane_count; lane++) {
265		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
266		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
267
268		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
269			  lane,
270			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
271			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
272
273		if (this_v > v)
274			v = this_v;
275		if (this_p > p)
276			p = this_p;
277	}
278
279	if (v >= DP_VOLTAGE_MAX)
280		v |= DP_TRAIN_MAX_SWING_REACHED;
281
282	if (p >= DP_PRE_EMPHASIS_MAX)
283		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
284
285	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
286		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
287		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
288
289	for (lane = 0; lane < 4; lane++)
290		train_set[lane] = v | p;
291}
292
293/* convert bits per color to bits per pixel */
294/* get bpc from the EDID */
295static int convert_bpc_to_bpp(int bpc)
296{
297	if (bpc == 0)
298		return 24;
299	else
300		return bpc * 3;
301}
302
303/***** radeon specific DP functions *****/
304
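   /* pick the lowest link rate and lane count that can carry the requested
    * pixel clock; NUTMEG DP bridges are fixed at 2.7 GHz, so for those only
    * the lane count is searched.
    */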
305static int radeon_dp_get_dp_link_config(struct drm_connector *connector,
306					const u8 dpcd[DP_DPCD_SIZE],
307					unsigned pix_clock,
308					unsigned *dp_lanes, unsigned *dp_rate)
309{
310	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
311	static const unsigned link_rates[3] = { 162000, 270000, 540000 };
312	unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
313	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
314	unsigned lane_num, i, max_pix_clock;
315
316	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
317	    ENCODER_OBJECT_ID_NUTMEG) {
318		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
319			max_pix_clock = (lane_num * 270000 * 8) / bpp;
320			if (max_pix_clock >= pix_clock) {
321				*dp_lanes = lane_num;
322				*dp_rate = 270000;
323				return 0;
324			}
325		}
326	} else {
327		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
328			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
329				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
330				if (max_pix_clock >= pix_clock) {
331					*dp_lanes = lane_num;
332					*dp_rate = link_rates[i];
333					return 0;
334				}
335			}
336		}
337	}
338
339	return -EINVAL;
340}
341
342static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
343				    int action, int dp_clock,
344				    u8 ucconfig, u8 lane_num)
345{
346	DP_ENCODER_SERVICE_PARAMETERS args;
347	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
348
349	memset(&args, 0, sizeof(args));
350	args.ucLinkClock = dp_clock / 10;
351	args.ucConfig = ucconfig;
352	args.ucAction = action;
353	args.ucLaneNum = lane_num;
354	args.ucStatus = 0;
355
356	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
357	return args.ucStatus;
358}
359
360u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
361{
362	struct drm_device *dev = radeon_connector->base.dev;
363	struct radeon_device *rdev = dev->dev_private;
364
365	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
366					 radeon_connector->ddc_bus->rec.i2c_id, 0);
367}
368
369static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
370{
371	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
372	u8 buf[3];
373
374	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
375		return;
376
377	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
378		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
379			      buf[0], buf[1], buf[2]);
380
381	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
382		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
383			      buf[0], buf[1], buf[2]);
384}
385
386bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
387{
388	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
389	u8 msg[DP_DPCD_SIZE];
390	int ret;
391
392	ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
393			       DP_DPCD_SIZE);
394	if (ret == DP_DPCD_SIZE) {
395		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
396
397		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
398			      dig_connector->dpcd);
399
400		radeon_dp_probe_oui(radeon_connector);
401
402		return true;
403	}
404
405	dig_connector->dpcd[0] = 0;
406	return false;
407}
408
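   /* bit 0 of DP_EDP_CONFIGURATION_CAP advertises the alternate scrambler
    * reset: sinks that set it are driven in internal DP2 panel mode, while
    * NUTMEG/TRAVIS bridges that do not set it still use internal DP1 mode.
    */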
409int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
410			     struct drm_connector *connector)
411{
412	struct drm_device *dev = encoder->dev;
413	struct radeon_device *rdev = dev->dev_private;
414	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
415	struct radeon_connector_atom_dig *dig_connector;
416	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
417	u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
418	u8 tmp;
419
420	if (!ASIC_IS_DCE4(rdev))
421		return panel_mode;
422
423	if (!radeon_connector->con_priv)
424		return panel_mode;
425
426	dig_connector = radeon_connector->con_priv;
427
428	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
429		/* DP bridge chips */
430		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
431				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
432			if (tmp & 1)
433				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
434			else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
435				 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
436				panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
437			else
438				panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
439		}
440	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
441		/* eDP */
442		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
443				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
444			if (tmp & 1)
445				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
446		}
447	}
448
449	return panel_mode;
450}
451
452void radeon_dp_set_link_config(struct drm_connector *connector,
453			       const struct drm_display_mode *mode)
454{
455	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
456	struct radeon_connector_atom_dig *dig_connector;
457	int ret;
458
459	if (!radeon_connector->con_priv)
460		return;
461	dig_connector = radeon_connector->con_priv;
462
463	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
464	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
465		ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
466						   mode->clock,
467						   &dig_connector->dp_lane_count,
468						   &dig_connector->dp_clock);
469		if (ret) {
470			dig_connector->dp_clock = 0;
471			dig_connector->dp_lane_count = 0;
472		}
473	}
474}
475
476int radeon_dp_mode_valid_helper(struct drm_connector *connector,
477				struct drm_display_mode *mode)
478{
479	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
480	struct radeon_connector_atom_dig *dig_connector;
481	unsigned dp_clock, dp_lanes;
482	int ret;
483
484	if ((mode->clock > 340000) &&
485	    (!radeon_connector_is_dp12_capable(connector)))
486		return MODE_CLOCK_HIGH;
487
488	if (!radeon_connector->con_priv)
489		return MODE_CLOCK_HIGH;
490	dig_connector = radeon_connector->con_priv;
491
492	ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
493					   mode->clock,
494					   &dp_lanes,
495					   &dp_clock);
496	if (ret)
497		return MODE_CLOCK_HIGH;
498
499	if ((dp_clock == 540000) &&
500	    (!radeon_connector_is_dp12_capable(connector)))
501		return MODE_CLOCK_HIGH;
502
503	return MODE_OK;
504}
505
506bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
507{
508	u8 link_status[DP_LINK_STATUS_SIZE];
509	struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
510
511	if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
512	    <= 0)
513		return false;
514	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
515		return false;
516	return true;
517}
518
519void radeon_dp_set_rx_power_state(struct drm_connector *connector,
520				  u8 power_state)
521{
522	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
523	struct radeon_connector_atom_dig *dig_connector;
524
525	if (!radeon_connector->con_priv)
526		return;
527
528	dig_connector = radeon_connector->con_priv;
529
530	/* power up/down the sink */
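   	/* DP_SET_POWER only exists from DPCD revision 1.1 on; the spec gives
   	 * the sink up to 1 ms to come back up, hence the short sleep.
   	 */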
531	if (dig_connector->dpcd[0] >= 0x11) {
532		drm_dp_dpcd_writeb(&radeon_connector->ddc_bus->aux,
533				   DP_SET_POWER, power_state);
534		usleep_range(1000, 2000);
535	}
536}
537
538
539struct radeon_dp_link_train_info {
540	struct radeon_device *rdev;
541	struct drm_encoder *encoder;
542	struct drm_connector *connector;
543	int enc_id;
544	int dp_clock;
545	int dp_lane_count;
546	bool tp3_supported;
547	u8 dpcd[DP_RECEIVER_CAP_SIZE];
548	u8 train_set[4];
549	u8 link_status[DP_LINK_STATUS_SIZE];
550	u8 tries;
551	bool use_dpencoder;
552	struct drm_dp_aux *aux;
553};
554
555static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
556{
557	/* set the initial vs/emph on the source */
558	atombios_dig_transmitter_setup(dp_info->encoder,
559				       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
560				       0, dp_info->train_set[0]); /* sets all lanes at once */
561
562	/* set the vs/emph on the sink */
563	drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
564			  dp_info->train_set, dp_info->dp_lane_count);
565}
566
567static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
568{
569	int rtp = 0;
570
571	/* set training pattern on the source */
572	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
573		switch (tp) {
574		case DP_TRAINING_PATTERN_1:
575			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
576			break;
577		case DP_TRAINING_PATTERN_2:
578			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
579			break;
580		case DP_TRAINING_PATTERN_3:
581			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
582			break;
583		}
584		atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
585	} else {
586		switch (tp) {
587		case DP_TRAINING_PATTERN_1:
588			rtp = 0;
589			break;
590		case DP_TRAINING_PATTERN_2:
591			rtp = 1;
592			break;
593		}
594		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
595					  dp_info->dp_clock, dp_info->enc_id, rtp);
596	}
597
598	/* enable training pattern on the sink */
599	drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
600}
601
602static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
603{
604	struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
605	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
606	u8 tmp;
607
608	/* power up the sink */
609	radeon_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);
610
611	/* possibly enable downspread on the sink */
612	if (dp_info->dpcd[3] & 0x1)
613		drm_dp_dpcd_writeb(dp_info->aux,
614				   DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
615	else
616		drm_dp_dpcd_writeb(dp_info->aux,
617				   DP_DOWNSPREAD_CTRL, 0);
618
619	if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
620		drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
621
622	/* set the lane count on the sink */
623	tmp = dp_info->dp_lane_count;
624	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
625		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
626	drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);
627
628	/* set the link rate on the sink */
629	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
630	drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);
631
632	/* start training on the source */
633	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
634		atombios_dig_encoder_setup(dp_info->encoder,
635					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
636	else
637		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
638					  dp_info->dp_clock, dp_info->enc_id, 0);
639
640	/* disable the training pattern on the sink */
641	drm_dp_dpcd_writeb(dp_info->aux,
642			   DP_TRAINING_PATTERN_SET,
643			   DP_TRAINING_PATTERN_DISABLE);
644
645	return 0;
646}
647
648static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
649{
650	udelay(400);
651
652	/* disable the training pattern on the sink */
653	drm_dp_dpcd_writeb(dp_info->aux,
654			   DP_TRAINING_PATTERN_SET,
655			   DP_TRAINING_PATTERN_DISABLE);
656
657	/* disable the training pattern on the source */
658	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
659		atombios_dig_encoder_setup(dp_info->encoder,
660					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
661	else
662		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
663					  dp_info->dp_clock, dp_info->enc_id, 0);
664
665	return 0;
666}
667
668static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
669{
670	bool clock_recovery;
671	u8 voltage;
672	int i;
673
674	radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
675	memset(dp_info->train_set, 0, 4);
676	radeon_dp_update_vs_emph(dp_info);
677
678	udelay(400);
679
680	/* clock recovery loop */
681	clock_recovery = false;
682	dp_info->tries = 0;
683	voltage = 0xff;
684	while (1) {
685		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
686
687		if (drm_dp_dpcd_read_link_status(dp_info->aux,
688						 dp_info->link_status) <= 0) {
689			DRM_ERROR("displayport link status failed\n");
690			break;
691		}
692
693		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
694			clock_recovery = true;
695			break;
696		}
697
698		for (i = 0; i < dp_info->dp_lane_count; i++) {
699			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
700				break;
701		}
702		if (i == dp_info->dp_lane_count) {
703			DRM_ERROR("clock recovery reached max voltage\n");
704			break;
705		}
706
707		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
708			++dp_info->tries;
709			if (dp_info->tries == 5) {
710				DRM_ERROR("clock recovery tried 5 times\n");
711				break;
712			}
713		} else
714			dp_info->tries = 0;
715
716		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
717
718		/* Compute new train_set as requested by sink */
719		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
720
721		radeon_dp_update_vs_emph(dp_info);
722	}
723	if (!clock_recovery) {
724		DRM_ERROR("clock recovery failed\n");
725		return -1;
726	} else {
727		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
728			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
729			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
730			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
731		return 0;
732	}
733}
734
735static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
736{
737	bool channel_eq;
738
739	if (dp_info->tp3_supported)
740		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
741	else
742		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
743
744	/* channel equalization loop */
745	dp_info->tries = 0;
746	channel_eq = false;
747	while (1) {
748		drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
749
750		if (drm_dp_dpcd_read_link_status(dp_info->aux,
751						 dp_info->link_status) <= 0) {
752			DRM_ERROR("displayport link status failed\n");
753			break;
754		}
755
756		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
757			channel_eq = true;
758			break;
759		}
760
761		/* Try 5 times */
762		if (dp_info->tries > 5) {
763			DRM_ERROR("channel eq failed: 5 tries\n");
764			break;
765		}
766
767		/* Compute new train_set as requested by sink */
768		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
769
770		radeon_dp_update_vs_emph(dp_info);
771		dp_info->tries++;
772	}
773
774	if (!channel_eq) {
775		DRM_ERROR("channel eq failed\n");
776		return -1;
777	} else {
778		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
779			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
780			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
781			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
782		return 0;
783	}
784}
785
786void radeon_dp_link_train(struct drm_encoder *encoder,
787			  struct drm_connector *connector)
788{
789	struct drm_device *dev = encoder->dev;
790	struct radeon_device *rdev = dev->dev_private;
791	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
792	struct radeon_encoder_atom_dig *dig;
793	struct radeon_connector *radeon_connector;
794	struct radeon_connector_atom_dig *dig_connector;
795	struct radeon_dp_link_train_info dp_info;
796	int index;
797	u8 tmp, frev, crev;
798
799	if (!radeon_encoder->enc_priv)
800		return;
801	dig = radeon_encoder->enc_priv;
802
803	radeon_connector = to_radeon_connector(connector);
804	if (!radeon_connector->con_priv)
805		return;
806	dig_connector = radeon_connector->con_priv;
807
808	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
809	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
810		return;
811
812	/* DPEncoderService newer than 1.1 can't properly program the
813	 * training pattern. When facing such a version, use the
814	 * DIGXEncoderControl (X == 1 | 2) table instead.
815	 */
816	dp_info.use_dpencoder = true;
817	index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
818	if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
819		if (crev > 1) {
820			dp_info.use_dpencoder = false;
821		}
822	}
823
824	dp_info.enc_id = 0;
825	if (dig->dig_encoder)
826		dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
827	else
828		dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
829	if (dig->linkb)
830		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
831	else
832		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
833
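   	/* training pattern 3 is required for HBR2; use it only when the sink
   	 * advertises TPS3 support and the ASIC (DCE5+) can generate it.
   	 */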
834	if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
835	    == 1) {
836		if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
837			dp_info.tp3_supported = true;
838		else
839			dp_info.tp3_supported = false;
840	} else {
841		dp_info.tp3_supported = false;
842	}
843
844	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
845	dp_info.rdev = rdev;
846	dp_info.encoder = encoder;
847	dp_info.connector = connector;
848	dp_info.dp_lane_count = dig_connector->dp_lane_count;
849	dp_info.dp_clock = dig_connector->dp_clock;
850	dp_info.aux = &radeon_connector->ddc_bus->aux;
851
852	if (radeon_dp_link_train_init(&dp_info))
853		goto done;
854	if (radeon_dp_link_train_cr(&dp_info))
855		goto done;
856	if (radeon_dp_link_train_ce(&dp_info))
857		goto done;
858done:
859	if (radeon_dp_link_train_finish(&dp_info))
860		return;
861}