// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <sound/hdmi-codec.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"
MODULE_FIRMWARE(CDN_DP_FIRMWARE);

struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
	  .data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

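/*
 * Enable the controller clocks and runtime PM, pulse the core/dptx/apb
 * resets, then report the core clock rate to the firmware interface and
 * reset the firmware clock domains. Everything is unwound in reverse order
 * on failure.
 */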
static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}

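/*
 * A Type-C port that also carries SuperSpeed USB (EXTCON_PROP_USB_SS set)
 * can only spare two of its four high-speed lanes for DisplayPort; a
 * DP-only connection gets all four. Returns 0 when nothing is attached.
 */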
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}

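/*
 * Poll DP_SINK_COUNT on the active port until a sink shows up, the extcon
 * cable state drops, or CDN_DPCD_TIMEOUT_MS expires.
 */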
static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret)
			drm_connector_update_edid_property(connector, edid);
	}
	mutex_unlock(&dp->lock);

	return ret;
}

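/*
 * Reject modes whose payload (pixel clock * bits per pixel) exceeds roughly
 * 80% of the link bandwidth, computed from the lesser of the source and sink
 * lane count and link rate.
 */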
static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};

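/*
 * The firmware image is a cdn_firmware_header followed by the iRAM and dRAM
 * sections. Validate the overall size, load both sections into the
 * controller, start the firmware microcontroller, and then configure the
 * event interface.
 */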
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}

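/*
 * Power on the port's Type-C PHY, route HPD to the DP controller by writing
 * DPTX_HPD_SEL into GRF_SOC_CON26, then program the host capabilities with
 * the lane count and plug orientation reported by extcon. On failure the PHY
 * is powered back off and the HPD select field is written back to
 * DPTX_HPD_DEL.
 */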
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->max_lanes = 0;
	dp->max_rate = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

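/*
 * Power up the controller, load the firmware, and bring up the first usable
 * port starting from the one that reported a connection: enable its PHY,
 * then read the sink capabilities. Move on to the next port if either step
 * fails.
 */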
static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that connected with downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}

static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->max_rate || !dp->max_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

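/*
 * Select which VOP ("LIT" or "BIG") feeds the DP controller via GRF_SOC_CON9,
 * power the link up, train it if the current link status is not good, and
 * finally switch the video stream from IDLE to VALID.
 */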
static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&dp->lock);
}

static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following 2 cases, we need to run the event_work to re-enable
	 * the DP:
	 * 1. If more than one port device is connected and one of them is
	 *    removed, the DP is disabled here; run the event_work to re-open
	 *    DP for the other port.
	 * 2. If re-training or re-config failed, the DP is disabled here;
	 *    run the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_mute_stream(struct device *dev, void *data,
				    bool enable, int direction)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.mute_stream = cdn_dp_audio_mute_stream,
	.get_eld = cdn_dp_audio_get_eld,
	.no_capture_mute = 1,
};

static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}

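/*
 * Fetch CDN_DP_FIRMWARE, retrying with exponential backoff for up to
 * CDN_FW_TIMEOUT_MS while the file is still missing (-ENOENT), e.g. when it
 * only becomes available once the rootfs is mounted. The caller must hold
 * dp->lock; the lock is dropped around the wait and re-acquired before
 * returning.
 */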
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

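/*
 * Deferred hotplug handler. Re-evaluates the extcon and sink state, enables
 * or retrains the link as needed, and sends a hotplug event to userspace
 * when the connector status changes.
 */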
static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;
	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->max_rate;
		unsigned int lanes = dp->max_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->max_rate || lanes != dp->max_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}

static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_simple_encoder_init(drm_dev, encoder,
				      DRM_MODE_ENCODER_TMDS);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

static int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

static __maybe_unused int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

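/*
 * Probe walks the extcon and PHY phandles for each possible Type-C port,
 * deferring if either provider is not ready yet and skipping ports that
 * lack one of them; at least one usable port is required.
 */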
static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		   .name = "cdn-dp",
		   .owner = THIS_MODULE,
		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
		   .pm = &cdn_dp_pm_ops,
	},
};