1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Cadence MHDP8546 DP bridge driver.
4 *
5 * Copyright (C) 2020 Cadence Design Systems, Inc.
6 *
7 * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
8 * Swapnil Jakhade <sjakhade@cadence.com>
9 * Yuti Amonkar <yamonkar@cadence.com>
10 * Tomi Valkeinen <tomi.valkeinen@ti.com>
11 * Jyri Sarha <jsarha@ti.com>
12 *
13 * TODO:
14 * - Implement optimized mailbox communication using mailbox interrupts
15 * - Add support for power management
16 * - Add support for features like audio, MST and fast link training
17 * - Implement request_fw_cancel to handle HW_STATE
18 * - Fix asynchronous loading of firmware implementation
19 * - Add DRM helper function for cdns_mhdp_lower_link_rate
20 */
21
22#include <linux/clk.h>
23#include <linux/delay.h>
24#include <linux/err.h>
25#include <linux/firmware.h>
26#include <linux/io.h>
27#include <linux/iopoll.h>
28#include <linux/irq.h>
29#include <linux/module.h>
30#include <linux/of.h>
31#include <linux/of_device.h>
32#include <linux/phy/phy.h>
33#include <linux/phy/phy-dp.h>
34#include <linux/platform_device.h>
35#include <linux/slab.h>
36#include <linux/wait.h>
37
38#include <drm/drm_atomic.h>
39#include <drm/drm_atomic_helper.h>
40#include <drm/drm_atomic_state_helper.h>
41#include <drm/drm_bridge.h>
42#include <drm/drm_connector.h>
43#include <drm/drm_crtc_helper.h>
44#include <drm/drm_dp_helper.h>
45#include <drm/drm_hdcp.h>
46#include <drm/drm_modeset_helper_vtables.h>
47#include <drm/drm_print.h>
48#include <drm/drm_probe_helper.h>
49
50#include <asm/unaligned.h>
51
52#include "cdns-mhdp8546-core.h"
53#include "cdns-mhdp8546-hdcp.h"
54#include "cdns-mhdp8546-j721e.h"
55
56static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
57{
58 int ret, empty;
59
60 WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
61
62 ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
63 empty, !empty, MAILBOX_RETRY_US,
64 MAILBOX_TIMEOUT_US);
65 if (ret < 0)
66 return ret;
67
68 return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
69}
70
71static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
72{
73 int ret, full;
74
75 WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
76
77 ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
78 full, !full, MAILBOX_RETRY_US,
79 MAILBOX_TIMEOUT_US);
80 if (ret < 0)
81 return ret;
82
83 writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
84
85 return 0;
86}
87
88static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
89 u8 module_id, u8 opcode,
90 u16 req_size)
91{
92 u32 mbox_size, i;
93 u8 header[4];
94 int ret;
95
96 /* read the header of the message */
97 for (i = 0; i < sizeof(header); i++) {
98 ret = cdns_mhdp_mailbox_read(mhdp);
99 if (ret < 0)
100 return ret;
101
102 header[i] = ret;
103 }
104
105 mbox_size = get_unaligned_be16(header + 2);
106
107 if (opcode != header[0] || module_id != header[1] ||
108 req_size != mbox_size) {
109 /*
110 * If the message in mailbox is not what we want, we need to
111 * clear the mailbox by reading its contents.
112 */
113 for (i = 0; i < mbox_size; i++)
114 if (cdns_mhdp_mailbox_read(mhdp) < 0)
115 break;
116
117 return -EINVAL;
118 }
119
120 return 0;
121}
122
123static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
124 u8 *buff, u16 buff_size)
125{
126 u32 i;
127 int ret;
128
129 for (i = 0; i < buff_size; i++) {
130 ret = cdns_mhdp_mailbox_read(mhdp);
131 if (ret < 0)
132 return ret;
133
134 buff[i] = ret;
135 }
136
137 return 0;
138}
139
140static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
141 u8 opcode, u16 size, u8 *message)
142{
143 u8 header[4];
144 int ret, i;
145
146 header[0] = opcode;
147 header[1] = module_id;
148 put_unaligned_be16(size, header + 2);
149
150 for (i = 0; i < sizeof(header); i++) {
151 ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
152 if (ret)
153 return ret;
154 }
155
156 for (i = 0; i < size; i++) {
157 ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
158 if (ret)
159 return ret;
160 }
161
162 return 0;
163}
164
165static
166int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
167{
168 u8 msg[4], resp[8];
169 int ret;
170
171 put_unaligned_be32(addr, msg);
172
173 mutex_lock(&mhdp->mbox_mutex);
174
175 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
176 GENERAL_REGISTER_READ,
177 sizeof(msg), msg);
178 if (ret)
179 goto out;
180
181 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
182 GENERAL_REGISTER_READ,
183 sizeof(resp));
184 if (ret)
185 goto out;
186
187 ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
188 if (ret)
189 goto out;
190
191 /* Returned address value should be the same as requested */
192 if (memcmp(msg, resp, sizeof(msg))) {
193 ret = -EINVAL;
194 goto out;
195 }
196
197 *value = get_unaligned_be32(resp + 4);
198
199out:
200 mutex_unlock(&mhdp->mbox_mutex);
201 if (ret) {
202 dev_err(mhdp->dev, "Failed to read register\n");
203 *value = 0;
204 }
205
206 return ret;
207}
208
209static
210int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
211{
212 u8 msg[6];
213 int ret;
214
215 put_unaligned_be16(addr, msg);
216 put_unaligned_be32(val, msg + 2);
217
218 mutex_lock(&mhdp->mbox_mutex);
219
220 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
221 DPTX_WRITE_REGISTER, sizeof(msg), msg);
222
223 mutex_unlock(&mhdp->mbox_mutex);
224
225 return ret;
226}
227
228static
229int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
230 u8 start_bit, u8 bits_no, u32 val)
231{
232 u8 field[8];
233 int ret;
234
235 put_unaligned_be16(addr, field);
236 field[2] = start_bit;
237 field[3] = bits_no;
238 put_unaligned_be32(val, field + 4);
239
240 mutex_lock(&mhdp->mbox_mutex);
241
242 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
243 DPTX_WRITE_FIELD, sizeof(field), field);
244
245 mutex_unlock(&mhdp->mbox_mutex);
246
247 return ret;
248}
249
250static
251int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
252 u32 addr, u8 *data, u16 len)
253{
254 u8 msg[5], reg[5];
255 int ret;
256
257 put_unaligned_be16(len, msg);
258 put_unaligned_be24(addr, msg + 2);
259
260 mutex_lock(&mhdp->mbox_mutex);
261
262 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
263 DPTX_READ_DPCD, sizeof(msg), msg);
264 if (ret)
265 goto out;
266
267 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
268 DPTX_READ_DPCD,
269 sizeof(reg) + len);
270 if (ret)
271 goto out;
272
273 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
274 if (ret)
275 goto out;
276
277 ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);
278
279out:
280 mutex_unlock(&mhdp->mbox_mutex);
281
282 return ret;
283}
284
285static
286int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
287{
288 u8 msg[6], reg[5];
289 int ret;
290
291 put_unaligned_be16(1, msg);
292 put_unaligned_be24(addr, msg + 2);
293 msg[5] = value;
294
295 mutex_lock(&mhdp->mbox_mutex);
296
297 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
298 DPTX_WRITE_DPCD, sizeof(msg), msg);
299 if (ret)
300 goto out;
301
302 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
303 DPTX_WRITE_DPCD, sizeof(reg));
304 if (ret)
305 goto out;
306
307 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
308 if (ret)
309 goto out;
310
311 if (addr != get_unaligned_be24(reg + 2))
312 ret = -EINVAL;
313
314out:
315 mutex_unlock(&mhdp->mbox_mutex);
316
317 if (ret)
318 dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
319 return ret;
320}
321
/*
 * cdns_mhdp_set_firmware_active() - switch firmware between active/standby
 * @mhdp: bridge device
 * @enable: true requests FW_ACTIVE, false requests FW_STANDBY
 *
 * Returns 0 on success or a negative error code on mailbox failure.
 */
static
int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
{
	u8 msg[5];
	int ret, i;

	/*
	 * Hand-built mailbox frame (cdns_mhdp_mailbox_send() is bypassed):
	 * opcode, module id, 16-bit big-endian payload size (0x0001) and
	 * the one-byte requested state.
	 */
	msg[0] = GENERAL_MAIN_CONTROL;
	msg[1] = MB_MODULE_ID_GENERAL;
	msg[2] = 0;
	msg[3] = 1;
	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;

	mutex_lock(&mhdp->mbox_mutex);

	/* Push the whole frame, header and payload, byte by byte. */
	for (i = 0; i < sizeof(msg); i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
		if (ret)
			goto out;
	}

	/* read the firmware state */
	ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
	if (ret)
		goto out;

	ret = 0;

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		dev_err(mhdp->dev, "set firmware active failed\n");
	return ret;
}
356
357static
358int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
359{
360 u8 status;
361 int ret;
362
363 mutex_lock(&mhdp->mbox_mutex);
364
365 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
366 DPTX_HPD_STATE, 0, NULL);
367 if (ret)
368 goto err_get_hpd;
369
370 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
371 DPTX_HPD_STATE,
372 sizeof(status));
373 if (ret)
374 goto err_get_hpd;
375
376 ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
377 if (ret)
378 goto err_get_hpd;
379
380 mutex_unlock(&mhdp->mbox_mutex);
381
382 dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
383 status ? "" : "un");
384
385 return status;
386
387err_get_hpd:
388 mutex_unlock(&mhdp->mbox_mutex);
389
390 return ret;
391}
392
393static
394int cdns_mhdp_get_edid_block(void *data, u8 *edid,
395 unsigned int block, size_t length)
396{
397 struct cdns_mhdp_device *mhdp = data;
398 u8 msg[2], reg[2], i;
399 int ret;
400
401 mutex_lock(&mhdp->mbox_mutex);
402
403 for (i = 0; i < 4; i++) {
404 msg[0] = block / 2;
405 msg[1] = block % 2;
406
407 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
408 DPTX_GET_EDID, sizeof(msg), msg);
409 if (ret)
410 continue;
411
412 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
413 DPTX_GET_EDID,
414 sizeof(reg) + length);
415 if (ret)
416 continue;
417
418 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
419 if (ret)
420 continue;
421
422 ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
423 if (ret)
424 continue;
425
426 if (reg[0] == length && reg[1] == block / 2)
427 break;
428 }
429
430 mutex_unlock(&mhdp->mbox_mutex);
431
432 if (ret)
433 dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
434 block, ret);
435
436 return ret;
437}
438
/*
 * cdns_mhdp_read_hpd_event() - read the pending HPD event bits from FW
 * @mhdp: bridge device
 *
 * Returns the DPTX_READ_EVENT_* bitmask on success, or a negative error
 * code on mailbox failure.
 */
static
int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
{
	u8 event = 0;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	/* Zero-length request; the event byte arrives in the response. */
	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_EVENT, 0, NULL);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_EVENT, sizeof(event));
	if (ret < 0)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		return ret;

	dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
		(event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
		(event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
		(event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
		(event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");

	return event;
}
472
473static
474int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
475 unsigned int udelay, const u8 *lanes_data,
476 u8 link_status[DP_LINK_STATUS_SIZE])
477{
478 u8 payload[7];
479 u8 hdr[5]; /* For DPCD read response header */
480 u32 addr;
481 int ret;
482
483 if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
484 dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
485 ret = -EINVAL;
486 goto out;
487 }
488
489 payload[0] = nlanes;
490 put_unaligned_be16(udelay, payload + 1);
491 memcpy(payload + 3, lanes_data, nlanes);
492
493 mutex_lock(&mhdp->mbox_mutex);
494
495 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
496 DPTX_ADJUST_LT,
497 sizeof(payload), payload);
498 if (ret)
499 goto out;
500
501 /* Yes, read the DPCD read command response */
502 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
503 DPTX_READ_DPCD,
504 sizeof(hdr) + DP_LINK_STATUS_SIZE);
505 if (ret)
506 goto out;
507
508 ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
509 if (ret)
510 goto out;
511
512 addr = get_unaligned_be24(hdr + 2);
513 if (addr != DP_LANE0_1_STATUS)
514 goto out;
515
516 ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
517 DP_LINK_STATUS_SIZE);
518
519out:
520 mutex_unlock(&mhdp->mbox_mutex);
521
522 if (ret)
523 dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
524
525 return ret;
526}
527
528/**
529 * cdns_mhdp_link_power_up() - power up a DisplayPort link
530 * @aux: DisplayPort AUX channel
531 * @link: pointer to a structure containing the link configuration
532 *
533 * Returns 0 on success or a negative error code on failure.
534 */
535static
536int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
537{
538 u8 value;
539 int err;
540
541 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
542 if (link->revision < 0x11)
543 return 0;
544
545 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
546 if (err < 0)
547 return err;
548
549 value &= ~DP_SET_POWER_MASK;
550 value |= DP_SET_POWER_D0;
551
552 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
553 if (err < 0)
554 return err;
555
556 /*
557 * According to the DP 1.1 specification, a "Sink Device must exit the
558 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
559 * Control Field" (register 0x600).
560 */
561 usleep_range(1000, 2000);
562
563 return 0;
564}
565
566/**
567 * cdns_mhdp_link_power_down() - power down a DisplayPort link
568 * @aux: DisplayPort AUX channel
569 * @link: pointer to a structure containing the link configuration
570 *
571 * Returns 0 on success or a negative error code on failure.
572 */
573static
574int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
575 struct cdns_mhdp_link *link)
576{
577 u8 value;
578 int err;
579
580 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
581 if (link->revision < 0x11)
582 return 0;
583
584 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
585 if (err < 0)
586 return err;
587
588 value &= ~DP_SET_POWER_MASK;
589 value |= DP_SET_POWER_D3;
590
591 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
592 if (err < 0)
593 return err;
594
595 return 0;
596}
597
598/**
599 * cdns_mhdp_link_configure() - configure a DisplayPort link
600 * @aux: DisplayPort AUX channel
601 * @link: pointer to a structure containing the link configuration
602 *
603 * Returns 0 on success or a negative error code on failure.
604 */
605static
606int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
607 struct cdns_mhdp_link *link)
608{
609 u8 values[2];
610 int err;
611
612 values[0] = drm_dp_link_rate_to_bw_code(link->rate);
613 values[1] = link->num_lanes;
614
615 if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
616 values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
617
618 err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
619 if (err < 0)
620 return err;
621
622 return 0;
623}
624
625static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
626{
627 return min(mhdp->host.link_rate, mhdp->sink.link_rate);
628}
629
630static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
631{
632 return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
633}
634
635static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
636{
637 return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
638}
639
640static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
641{
642 /* Check if SSC is supported by both sides */
643 return mhdp->host.ssc && mhdp->sink.ssc;
644}
645
646static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
647{
648 dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
649
650 if (mhdp->plugged)
651 return connector_status_connected;
652 else
653 return connector_status_disconnected;
654}
655
/*
 * cdns_mhdp_check_fw_version() - identify and log the running FW version
 * @mhdp: bridge device
 *
 * Combines the byte-wide version registers into fw_ver/lib_ver and derives
 * a major.minor.revision triple from them.
 *
 * Returns 0 on success, -ENODEV when the registers hold an old-style
 * version this driver does not recognize.
 */
static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
{
	u32 major_num, minor_num, revision;
	u32 fw_ver, lib_ver;

	/* Each register pair holds the high and low byte of a 16-bit value. */
	fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
	       | readl(mhdp->regs + CDNS_VER_L);

	lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
	        | readl(mhdp->regs + CDNS_LIB_L_ADDR);

	/*
	 * NOTE(review): the constants 33984 and 26098 look like repository
	 * revision numbers from the vendor's firmware history — confirm
	 * against the Cadence FW release notes.
	 */
	if (lib_ver < 33984) {
		/*
		 * Older FW versions with major number 1, used to store FW
		 * version information by storing repository revision number
		 * in registers. This is for identifying these FW versions.
		 */
		major_num = 1;
		minor_num = 2;
		if (fw_ver == 26098) {
			revision = 15;
		} else if (lib_ver == 0 && fw_ver == 0) {
			revision = 17;
		} else {
			dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
				fw_ver, lib_ver);
			return -ENODEV;
		}
	} else {
		/* To identify newer FW versions with major number 2 onwards. */
		major_num = fw_ver / 10000;
		minor_num = (fw_ver / 100) % 100;
		revision = (fw_ver % 10000) % 100;
	}

	dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
		revision);
	return 0;
}
695
/*
 * cdns_mhdp_fw_activate() - upload the firmware image and start the uCPU
 * @fw: firmware image to copy into the controller's IMEM
 * @mhdp: bridge device
 *
 * Copies the image while the uCPU is stalled, releases the stall, waits
 * for the keep-alive heartbeat, validates the FW version and switches the
 * firmware active. On success hw_state becomes MHDP_HW_READY and waiters
 * on fw_load_wq are woken.
 *
 * Returns 0 on success or a negative error code.
 */
static int cdns_mhdp_fw_activate(const struct firmware *fw,
				 struct cdns_mhdp_device *mhdp)
{
	unsigned int reg;
	int ret;

	/* Release uCPU reset and stall it. */
	writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);

	/* Copy the image while the uCPU is guaranteed not to execute it. */
	memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);

	/* Leave debug mode, release stall */
	writel(0, mhdp->regs + CDNS_APB_CTRL);

	/*
	 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
	 * Updated each sched "tick" (~2ms)
	 */
	ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
				 reg & CDNS_KEEP_ALIVE_MASK, 500,
				 CDNS_KEEP_ALIVE_TIMEOUT);
	if (ret) {
		dev_err(mhdp->dev,
			"device didn't give any life sign: reg %d\n", reg);
		return ret;
	}

	ret = cdns_mhdp_check_fw_version(mhdp);
	if (ret)
		return ret;

	/* Init events to 0 as it's not cleared by FW at boot but on read */
	readl(mhdp->regs + CDNS_SW_EVENT0);
	readl(mhdp->regs + CDNS_SW_EVENT1);
	readl(mhdp->regs + CDNS_SW_EVENT2);
	readl(mhdp->regs + CDNS_SW_EVENT3);

	/* Activate uCPU */
	ret = cdns_mhdp_set_firmware_active(mhdp, true);
	if (ret)
		return ret;

	spin_lock(&mhdp->start_lock);

	mhdp->hw_state = MHDP_HW_READY;

	/*
	 * Here we must keep the lock while enabling the interrupts
	 * since it would otherwise be possible that interrupt enable
	 * code is executed after the bridge is detached. The similar
	 * situation is not possible in attach()/detach() callbacks
	 * since the hw_state changes from MHDP_HW_READY to
	 * MHDP_HW_STOPPED happens only due to driver removal when
	 * bridge should already be detached.
	 */
	if (mhdp->bridge_attached)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);

	spin_unlock(&mhdp->start_lock);

	wake_up(&mhdp->fw_load_wq);
	dev_dbg(mhdp->dev, "DP FW activated\n");

	return 0;
}
762
/*
 * cdns_mhdp_fw_cb() - request_firmware_nowait() completion callback
 * @fw: loaded firmware image, or NULL when loading failed
 * @context: the struct cdns_mhdp_device that requested the firmware
 *
 * Activates the firmware and, when the bridge is already attached,
 * reports the current connector state to DRM.
 */
static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
{
	struct cdns_mhdp_device *mhdp = context;
	bool bridge_attached;
	int ret;

	dev_dbg(mhdp->dev, "firmware callback\n");

	if (!fw || !fw->data) {
		dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
		return;
	}

	ret = cdns_mhdp_fw_activate(fw, mhdp);

	/* The image has been copied into IMEM; the request can be dropped. */
	release_firmware(fw);

	if (ret)
		return;

	/*
	 * XXX how to make sure the bridge is still attached when
	 * calling drm_kms_helper_hotplug_event() after releasing
	 * the lock? We should not hold the spin lock when
	 * calling drm_kms_helper_hotplug_event() since it may
	 * cause a dead lock. FB-dev console calls detect from the
	 * same thread just down the call stack started here.
	 */
	spin_lock(&mhdp->start_lock);
	bridge_attached = mhdp->bridge_attached;
	spin_unlock(&mhdp->start_lock);
	if (bridge_attached) {
		/* Full hotplug event with a connector, plain HPD otherwise. */
		if (mhdp->connector.dev)
			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
		else
			drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
	}
}
801
802static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
803{
804 int ret;
805
806 ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
807 GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
808 if (ret) {
809 dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
810 FW_NAME, ret);
811 return ret;
812 }
813
814 return 0;
815}
816
817static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
818 struct drm_dp_aux_msg *msg)
819{
820 struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
821 int ret;
822
823 if (msg->request != DP_AUX_NATIVE_WRITE &&
824 msg->request != DP_AUX_NATIVE_READ)
825 return -EOPNOTSUPP;
826
827 if (msg->request == DP_AUX_NATIVE_WRITE) {
828 const u8 *buf = msg->buffer;
829 unsigned int i;
830
831 for (i = 0; i < msg->size; ++i) {
832 ret = cdns_mhdp_dpcd_write(mhdp,
833 msg->address + i, buf[i]);
834 if (!ret)
835 continue;
836
837 dev_err(mhdp->dev,
838 "Failed to write DPCD addr %u\n",
839 msg->address + i);
840
841 return ret;
842 }
843 } else {
844 ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
845 msg->buffer, msg->size);
846 if (ret) {
847 dev_err(mhdp->dev,
848 "Failed to read DPCD addr %u\n",
849 msg->address);
850
851 return ret;
852 }
853 }
854
855 return msg->size;
856}
857
/*
 * cdns_mhdp_link_training_init() - prepare the PHY and sink for training
 * @mhdp: bridge device
 *
 * Disables any active training pattern in the sink, programs the link
 * parameters (rate, lane count, enhanced framing), reconfigures the PHY,
 * then enables TPS1 with scrambling disabled in both PHY and sink.
 *
 * Returns 0 on success or the error from phy_configure().
 */
static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
{
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;

	/* Stop any training pattern the sink may still be expecting. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* Reset PHY configuration */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	/* Enhanced framing only when both host and sink support it. */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
			    mhdp->sink.enhanced & mhdp->host.enhanced);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
			    CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));

	/* Push rate/lane count into the sink's DPCD link registers. */
	cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
	/* /100 converts link.rate to Mbps (cf. cdns_mhdp_print_lt_status()). */
	phy_cfg.dp.link_rate = mhdp->link.rate / 100;
	phy_cfg.dp.lanes = mhdp->link.num_lanes;

	/* Training starts from zero voltage swing and pre-emphasis. */
	memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
	memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));

	phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
	phy_cfg.dp.set_lanes = true;
	phy_cfg.dp.set_rate = true;
	phy_cfg.dp.set_voltages = true;
	ret = phy_configure(mhdp->phy, &phy_cfg);
	if (ret) {
		dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
			__func__, ret);
		return ret;
	}

	/* Enable TPS1 with the scrambler bypassed in the PHY... */
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
			    CDNS_PHY_COMMON_CONFIG |
			    CDNS_PHY_TRAINING_EN |
			    CDNS_PHY_TRAINING_TYPE(1) |
			    CDNS_PHY_SCRAMBLER_BYPASS);

	/* ...and request the same pattern in the sink. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);

	return 0;
}
909
/*
 * cdns_mhdp_get_adjust_train() - derive next TX settings from link status
 * @mhdp: bridge device
 * @link_status: DPCD link/adjust status previously read from the sink
 * @lanes_data: output, per-lane DPCD TRAINING_LANEx_SET byte
 * @phy_cfg: output, per-lane voltage/pre-emphasis for phy_configure()
 *
 * Clamps the sink's requested voltage swing and pre-emphasis to the host
 * limits and to the combined level-3 ceiling, setting the MAX_SWING /
 * MAX_PRE_EMPHASIS "reached" flags where the limits are hit.
 */
static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
				       u8 link_status[DP_LINK_STATUS_SIZE],
				       u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
				       union phy_configure_opts *phy_cfg)
{
	u8 adjust, max_pre_emph, max_volt_swing;
	u8 set_volt, set_pre;
	unsigned int i;

	/* Host limits; pre-emphasis limit kept in DPCD field position. */
	max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
			   << DP_TRAIN_PRE_EMPHASIS_SHIFT;
	max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Check if Voltage swing and pre-emphasis are within limits */
		adjust = drm_dp_get_adjust_request_voltage(link_status, i);
		set_volt = min(adjust, max_volt_swing);

		adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		set_pre = min(adjust, max_pre_emph)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT;

		/*
		 * Voltage swing level and pre-emphasis level combination is
		 * not allowed: leaving pre-emphasis as-is, and adjusting
		 * voltage swing.
		 */
		if (set_volt + set_pre > 3)
			set_volt = 3 - set_pre;

		phy_cfg->dp.voltage[i] = set_volt;
		lanes_data[i] = set_volt;

		if (set_volt == max_volt_swing)
			lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;

		phy_cfg->dp.pre[i] = set_pre;
		lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);

		if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
			lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	}
}
953
954static
955void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
956 unsigned int lane, u8 volt)
957{
958 unsigned int s = ((lane & 1) ?
959 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
960 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
961 unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
962
963 link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
964 link_status[idx] |= volt << s;
965}
966
967static
968void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
969 unsigned int lane, u8 pre_emphasis)
970{
971 unsigned int s = ((lane & 1) ?
972 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
973 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
974 unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
975
976 link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
977 link_status[idx] |= pre_emphasis << s;
978}
979
980static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
981 u8 link_status[DP_LINK_STATUS_SIZE])
982{
983 u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
984 u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
985 unsigned int i;
986 u8 volt, pre;
987
988 for (i = 0; i < mhdp->link.num_lanes; i++) {
989 volt = drm_dp_get_adjust_request_voltage(link_status, i);
990 pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
991 if (volt + pre > 3)
992 cdns_mhdp_set_adjust_request_voltage(link_status, i,
993 3 - pre);
994 if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
995 cdns_mhdp_set_adjust_request_voltage(link_status, i,
996 max_volt);
997 if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
998 cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
999 i, max_pre);
1000 }
1001}
1002
/*
 * cdns_mhdp_print_lt_status() - emit a debug line with the LT settings
 * @prefix: message prefix identifying the training phase and outcome
 * @mhdp: bridge device
 * @phy_cfg: PHY options whose per-lane voltage/pre-emphasis get printed
 *
 * Builds "v/v/v/v"-style strings truncated to the active lane count.
 * NOTE(review): assumes at least one active lane; with num_lanes == 0 the
 * terminating stores below would index vs[-1]/pe[-1] — confirm callers.
 */
static void cdns_mhdp_print_lt_status(const char *prefix,
				      struct cdns_mhdp_device *mhdp,
				      union phy_configure_opts *phy_cfg)
{
	char vs[8] = "0/0/0/0";
	char pe[8] = "0/0/0/0";
	unsigned int i;

	/* Every lane's digit sits at an even offset of the template. */
	for (i = 0; i < mhdp->link.num_lanes; i++) {
		vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
		pe[i * 2] = '0' + phy_cfg->dp.pre[i];
	}

	/* Cut the template right after the last active lane's digit. */
	vs[i * 2 - 1] = '\0';
	pe[i * 2 - 1] = '\0';

	dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
		prefix,
		mhdp->link.num_lanes, mhdp->link.rate / 100,
		vs, pe);
}
1024
/*
 * cdns_mhdp_link_training_channel_eq() - run the channel-equalization phase
 * @mhdp: bridge device
 * @eq_tps: highest training pattern (index) supported by both ends
 * @training_interval: delay in us passed to cdns_mhdp_adjust_lt()
 *
 * Repeatedly applies the sink's requested voltage/pre-emphasis and checks
 * the EQ status, giving up after five failed adjustment rounds or as soon
 * as clock recovery is lost.
 *
 * Returns true when channel equalization succeeded.
 */
static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
					       u8 eq_tps,
					       unsigned int training_interval)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;
	bool r;

	dev_dbg(mhdp->dev, "Starting EQ phase\n");

	/* Enable link training TPS[eq_tps] in PHY */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
		CDNS_PHY_TRAINING_TYPE(eq_tps);
	/* Only TPS4 runs with the scrambler enabled. */
	if (eq_tps != 4)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	/* Request the matching pattern in the sink. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
			   CDNS_DP_TRAINING_PATTERN_4);

	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		/* Lanes/rate are already set; only update the voltages. */
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy, &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		/* Transmit the settings and read the status back. */
		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
				    training_interval, lanes_data, link_status);

		/* EQ is meaningless once clock recovery has been lost. */
		r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
		if (!r)
			goto err;

		if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
			cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		fail_counter_short++;

		cdns_mhdp_adjust_requested_eq(mhdp, link_status);
	} while (fail_counter_short < 5);

err:
	cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);

	return false;
}
1089
1090static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
1091 u8 link_status[DP_LINK_STATUS_SIZE],
1092 u8 *req_volt, u8 *req_pre)
1093{
1094 const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1095 const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1096 unsigned int i;
1097
1098 for (i = 0; i < mhdp->link.num_lanes; i++) {
1099 u8 val;
1100
1101 val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
1102 max_volt : req_volt[i];
1103 cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
1104
1105 val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
1106 max_pre : req_pre[i];
1107 cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
1108 }
1109}
1110
/*
 * cdns_mhdp_validate_cr() - evaluate one clock-recovery iteration
 * @mhdp: bridge device
 * @cr_done: set true when all active lanes report clock recovery done
 * @same_before_adjust: set true when any lane requests exactly the
 *	voltage/pre-emphasis that was just transmitted (no progress)
 * @max_swing_reached: set true when CR failed and a lane has reached the
 *	combined swing/pre-emphasis ceiling
 * @before_cr: lanes_data transmitted in this iteration
 * @after_cr: link status read back after the iteration
 * @req_volt: output, per-lane requested voltage clamped to the host max
 * @req_pre: output, per-lane requested pre-emphasis clamped to host max
 */
static
void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
			   bool *same_before_adjust, bool *max_swing_reached,
			   u8 before_cr[CDNS_DP_MAX_NUM_LANES],
			   u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
			   u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	bool same_pre, same_volt;
	unsigned int i;
	u8 adjust;

	*same_before_adjust = false;
	*max_swing_reached = false;
	*cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Clamp the sink's new requests to the host capabilities. */
		adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
		req_volt[i] = min(adjust, max_volt);

		adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
		      DP_TRAIN_PRE_EMPHASIS_SHIFT;
		req_pre[i] = min(adjust, max_pre);

		/* Did the sink ask for exactly what was just transmitted? */
		same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
			   req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
		same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
			    req_volt[i];
		if (same_pre && same_volt)
			*same_before_adjust = true;

		/* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
		if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
			*max_swing_reached = true;
			return;
		}
	}
}
1150
/*
 * cdns_mhdp_link_training_cr() - run the clock-recovery phase
 * @mhdp: bridge device
 *
 * Initializes training (TPS1), then loops: configure the PHY with the
 * currently requested settings, transmit them, read the status back and
 * decide whether CR is done, stuck (same request repeated) or at the
 * swing limit. Gives up after 5 consecutive no-progress iterations or
 * 10 failed iterations overall.
 *
 * Returns true when all active lanes reached CR_DONE.
 */
static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
	fail_counter_short = 0, fail_counter_cr_long = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool cr_done;
	union phy_configure_opts phy_cfg;
	int ret;

	dev_dbg(mhdp->dev, "Starting CR phase\n");

	ret = cdns_mhdp_link_training_init(mhdp);
	if (ret)
		goto err;

	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
		u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
		bool same_before_adjust, max_swing_reached;

		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		/* Lanes/rate were set in init; only update the voltages. */
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy, &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		/* 100 us is the CR-phase wait before the status readback. */
		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
				    lanes_data, link_status);

		cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
				      &max_swing_reached, lanes_data,
				      link_status,
				      requested_adjust_volt_swing,
				      requested_adjust_pre_emphasis);

		if (max_swing_reached) {
			dev_err(mhdp->dev, "CR: max swing reached\n");
			goto err;
		}

		if (cr_done) {
			cdns_mhdp_print_lt_status("CR phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		/* Not all CR_DONE bits set */
		fail_counter_cr_long++;

		if (same_before_adjust) {
			fail_counter_short++;
			continue;
		}

		fail_counter_short = 0;
		/*
		 * Voltage swing/pre-emphasis adjust requested
		 * during CR phase
		 */
		cdns_mhdp_adjust_requested_cr(mhdp, link_status,
					      requested_adjust_volt_swing,
					      requested_adjust_pre_emphasis);
	} while (fail_counter_short < 5 && fail_counter_cr_long < 10);

err:
	cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);

	return false;
}
1230
1231static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
1232{
1233 switch (drm_dp_link_rate_to_bw_code(link->rate)) {
1234 case DP_LINK_BW_2_7:
1235 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
1236 break;
1237 case DP_LINK_BW_5_4:
1238 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
1239 break;
1240 case DP_LINK_BW_8_1:
1241 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
1242 break;
1243 }
1244}
1245
/*
 * Perform full DP link training: clock recovery (CR) then channel
 * equalization (EQ), with automatic fallback.
 *
 * On a CR failure the link rate is lowered first, then the lane count
 * (with the rate restored to the negotiated maximum); on an EQ failure
 * the lane count is lowered first, then the rate (with lanes restored to
 * the maximum). Once EQ passes, the framer is programmed for the final
 * lane count and enabled, and the PHY config register is reset.
 *
 * @training_interval: delay between training iterations, in microseconds
 *                     (derived from DPCD TRAINING_AUX_RD_INTERVAL).
 *
 * Returns 0 on success, a negative errno if the framer config register
 * cannot be read back, or -EIO when training cannot converge even at the
 * minimum rate and lane count.
 */
static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
				   unsigned int training_interval)
{
	u32 reg32;
	const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
	int ret;

	while (1) {
		if (!cdns_mhdp_link_training_cr(mhdp)) {
			if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			    DP_LINK_BW_1_62) {
				dev_dbg(mhdp->dev,
					"Reducing link rate during CR phase\n");
				cdns_mhdp_lower_link_rate(&mhdp->link);

				continue;
			} else if (mhdp->link.num_lanes > 1) {
				dev_dbg(mhdp->dev,
					"Reducing lanes number during CR phase\n");
				mhdp->link.num_lanes >>= 1;
				mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);

				continue;
			}

			/* Already at 1 lane / lowest rate: give up */
			dev_err(mhdp->dev,
				"Link training failed during CR phase\n");
			goto err;
		}

		if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
						       training_interval))
			break;

		if (mhdp->link.num_lanes > 1) {
			dev_dbg(mhdp->dev,
				"Reducing lanes number during EQ phase\n");
			mhdp->link.num_lanes >>= 1;

			continue;
		} else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			   DP_LINK_BW_1_62) {
			dev_dbg(mhdp->dev,
				"Reducing link rate during EQ phase\n");
			cdns_mhdp_lower_link_rate(&mhdp->link);
			mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);

			continue;
		}

		dev_err(mhdp->dev, "Link training failed during EQ phase\n");
		goto err;
	}

	dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
		mhdp->link.num_lanes, mhdp->link.rate / 100);

	/* Leave training mode; re-enable sink scrambling unless bypassed */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   mhdp->host.scrambler ? 0 :
			   DP_LINK_SCRAMBLING_DISABLE);

	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return ret;
	}
	/* Program the negotiated lane count and enable the framer */
	reg32 &= ~GENMASK(1, 0);
	reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
	reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
	reg32 |= CDNS_DP_FRAMER_EN;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);

	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	return 0;
err:
	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	return -EIO;
}
1339
1340static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
1341 u32 interval)
1342{
1343 if (interval == 0)
1344 return 400;
1345 if (interval < 5)
1346 return 4000 << (interval - 1);
1347 dev_err(mhdp->dev,
1348 "wrong training interval returned by DPCD: %d\n", interval);
1349 return 0;
1350}
1351
/*
 * Populate mhdp->host with the source-side DP capabilities: lane count
 * and maximum link rate come from the PHY attributes (with defaults of
 * 4 lanes / HBR3 when the PHY does not report them), the rest are fixed
 * controller capabilities (TPS1-4, swing/pre-emphasis up to level 3,
 * scrambling and enhanced framing on, SSC and fast link training off).
 */
static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
{
	unsigned int link_rate;

	/* Get source capabilities based on PHY attributes */

	mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
	if (!mhdp->host.lanes_cnt)
		mhdp->host.lanes_cnt = 4;

	link_rate = mhdp->phy->attrs.max_link_rate;
	if (!link_rate)
		link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
	else
		/* PHY uses Mb/s, DRM uses tens of kb/s. */
		link_rate *= 100;

	mhdp->host.link_rate = link_rate;
	mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
	mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
	mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
				  CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
				  CDNS_SUPPORT_TPS(4);
	mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
	mhdp->host.fast_link = false;
	mhdp->host.enhanced = true;
	mhdp->host.scrambler = true;
	mhdp->host.ssc = false;
}
1381
/*
 * Populate mhdp->sink from the sink's DPCD receiver capabilities.
 * Assumes mhdp->link.rate/num_lanes/capabilities were already filled in
 * from the same DPCD read (done by cdns_mhdp_link_up()).
 */
static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
				     u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	mhdp->sink.link_rate = mhdp->link.rate;
	mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
	mhdp->sink.enhanced = !!(mhdp->link.capabilities &
				 DP_LINK_CAP_ENHANCED_FRAMING);

	/* Set SSC support */
	mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
			    DP_MAX_DOWNSPREAD_0_5);

	/* Set TPS support */
	mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
	if (drm_dp_tps3_supported(dpcd))
		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
	if (drm_dp_tps4_supported(dpcd))
		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);

	/* Set fast link support */
	mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
				  DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
}
1405
1406static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
1407{
1408 u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
1409 u32 resp, interval, interval_us;
1410 u8 ext_cap_chk = 0;
1411 unsigned int addr;
1412 int err;
1413
1414 WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1415
1416 drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
1417 &ext_cap_chk);
1418
1419 if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
1420 addr = DP_DP13_DPCD_REV;
1421 else
1422 addr = DP_DPCD_REV;
1423
1424 err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
1425 if (err < 0) {
1426 dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
1427 return err;
1428 }
1429
1430 mhdp->link.revision = dpcd[0];
1431 mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
1432 mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
1433
1434 if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
1435 mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
1436
1437 dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
1438 cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
1439
1440 cdns_mhdp_fill_sink_caps(mhdp, dpcd);
1441
1442 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1443 mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1444
1445 /* Disable framer for link training */
1446 err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
1447 if (err < 0) {
1448 dev_err(mhdp->dev,
1449 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1450 err);
1451 return err;
1452 }
1453
1454 resp &= ~CDNS_DP_FRAMER_EN;
1455 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
1456
1457 /* Spread AMP if required, enable 8b/10b coding */
1458 amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
1459 amp[1] = DP_SET_ANSI_8B10B;
1460 drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
1461
1462 if (mhdp->host.fast_link & mhdp->sink.fast_link) {
1463 dev_err(mhdp->dev, "fastlink not supported\n");
1464 return -EOPNOTSUPP;
1465 }
1466
1467 interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
1468 interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
1469 if (!interval_us ||
1470 cdns_mhdp_link_training(mhdp, interval_us)) {
1471 dev_err(mhdp->dev, "Link training failed. Exiting.\n");
1472 return -EIO;
1473 }
1474
1475 mhdp->link_up = true;
1476
1477 return 0;
1478}
1479
/*
 * Tear the DP link down: put the sink into low power (only while it is
 * still plugged, to avoid AUX writes to a disconnected sink) and mark
 * the link as down. Must be called with mhdp->link_mutex held.
 */
static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
{
	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));

	if (mhdp->plugged)
		cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);

	mhdp->link_up = false;
}
1489
1490static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
1491 struct drm_connector *connector)
1492{
1493 if (!mhdp->plugged)
1494 return NULL;
1495
1496 return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
1497}
1498
/*
 * drm_connector_helper_funcs.get_modes implementation: read the sink's
 * EDID, attach it to the connector and add its modes. Returns the number
 * of modes added, or 0 when the sink is unplugged or the EDID read fails.
 */
static int cdns_mhdp_get_modes(struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
	struct edid *edid;
	int num_modes;

	if (!mhdp->plugged)
		return 0;

	edid = cdns_mhdp_get_edid(mhdp, connector);
	if (!edid) {
		dev_err(mhdp->dev, "Failed to read EDID\n");
		return 0;
	}

	drm_connector_update_edid_property(connector, edid);
	num_modes = drm_add_edid_modes(connector, edid);
	kfree(edid);

	/*
	 * HACK: Warn about unsupported display formats until we deal
	 * with them correctly.
	 */
	if (connector->display_info.color_formats &&
	    !(connector->display_info.color_formats &
	      mhdp->display_fmt.color_format))
		dev_warn(mhdp->dev,
			 "%s: No supported color_format found (0x%08x)\n",
			 __func__, connector->display_info.color_formats);

	if (connector->display_info.bpc &&
	    connector->display_info.bpc < mhdp->display_fmt.bpc)
		dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
			 __func__, connector->display_info.bpc,
			 mhdp->display_fmt.bpc);

	return num_modes;
}
1537
1538static int cdns_mhdp_connector_detect(struct drm_connector *conn,
1539 struct drm_modeset_acquire_ctx *ctx,
1540 bool force)
1541{
1542 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1543
1544 return cdns_mhdp_detect(mhdp);
1545}
1546
1547static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
1548{
1549 u32 bpp;
1550
1551 if (fmt->y_only)
1552 return fmt->bpc;
1553
1554 switch (fmt->color_format) {
1555 case DRM_COLOR_FORMAT_RGB444:
1556 case DRM_COLOR_FORMAT_YCRCB444:
1557 bpp = fmt->bpc * 3;
1558 break;
1559 case DRM_COLOR_FORMAT_YCRCB422:
1560 bpp = fmt->bpc * 2;
1561 break;
1562 case DRM_COLOR_FORMAT_YCRCB420:
1563 bpp = fmt->bpc * 3 / 2;
1564 break;
1565 default:
1566 bpp = fmt->bpc * 3;
1567 WARN_ON(1);
1568 }
1569 return bpp;
1570}
1571
1572static
1573bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
1574 const struct drm_display_mode *mode,
1575 unsigned int lanes, unsigned int rate)
1576{
1577 u32 max_bw, req_bw, bpp;
1578
1579 /*
1580 * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
1581 * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
1582 * value thus equals the bandwidth in 10kb/s units, which matches the
1583 * units of the rate parameter.
1584 */
1585
1586 bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1587 req_bw = mode->clock * bpp / 8;
1588 max_bw = lanes * rate;
1589 if (req_bw > max_bw) {
1590 dev_dbg(mhdp->dev,
1591 "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
1592 mode->name, req_bw, max_bw);
1593
1594 return false;
1595 }
1596
1597 return true;
1598}
1599
1600static
1601enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
1602 struct drm_display_mode *mode)
1603{
1604 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1605
1606 mutex_lock(&mhdp->link_mutex);
1607
1608 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1609 mhdp->link.rate)) {
1610 mutex_unlock(&mhdp->link_mutex);
1611 return MODE_CLOCK_HIGH;
1612 }
1613
1614 mutex_unlock(&mhdp->link_mutex);
1615 return MODE_OK;
1616}
1617
/*
 * drm_connector_helper_funcs.atomic_check implementation for HDCP: force
 * a full modeset whenever the requested content-protection state or HDCP
 * content type changes in a way that requires re-enabling protection.
 * A no-op when the controller has no HDCP support.
 */
static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
					    struct drm_atomic_state *state)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
	struct drm_connector_state *old_state, *new_state;
	struct drm_crtc_state *crtc_state;
	u64 old_cp, new_cp;

	if (!mhdp->hdcp_supported)
		return 0;

	old_state = drm_atomic_get_old_connector_state(state, conn);
	new_state = drm_atomic_get_new_connector_state(state, conn);
	old_cp = old_state->content_protection;
	new_cp = new_state->content_protection;

	/* Content-type change requires renegotiation unless undesired */
	if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		goto mode_changed;
	}

	/* Losing the CRTC: drop ENABLED back to DESIRED for the next enable */
	if (!new_state->crtc) {
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return 0;
	}

	/* No change, or DESIRED->ENABLED progression: nothing to redo */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
		return 0;

mode_changed:
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	crtc_state->mode_changed = true;

	return 0;
}
1657
/* Connector helper ops: hot-plug detection, EDID modes, bandwidth-based
 * mode validation, and the HDCP atomic check above.
 */
static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
	.detect_ctx = cdns_mhdp_connector_detect,
	.get_modes = cdns_mhdp_get_modes,
	.mode_valid = cdns_mhdp_mode_valid,
	.atomic_check = cdns_mhdp_connector_atomic_check,
};
1664
/* Connector core ops: stock atomic helpers, used when the driver creates
 * its own connector (no DRM_BRIDGE_ATTACH_NO_CONNECTOR).
 */
static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.reset = drm_atomic_helper_connector_reset,
	.destroy = drm_connector_cleanup,
};
1672
/*
 * Create and register the DisplayPort connector for this bridge, attach
 * it to the parent encoder, constrain the bus format, and (when HDCP is
 * supported) expose the content-protection property.
 *
 * Returns 0 on success or a negative errno.
 */
static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
{
	u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
	struct drm_connector *conn = &mhdp->connector;
	struct drm_bridge *bridge = &mhdp->bridge;
	int ret;

	if (!bridge->encoder) {
		dev_err(mhdp->dev, "Parent encoder object not found");
		return -ENODEV;
	}

	/* HPD interrupts drive detection; no polling needed */
	conn->polled = DRM_CONNECTOR_POLL_HPD;

	ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
		return ret;
	}

	drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);

	ret = drm_display_info_set_bus_formats(&conn->display_info,
					       &bus_format, 1);
	if (ret)
		return ret;

	ret = drm_connector_attach_encoder(conn, bridge->encoder);
	if (ret) {
		dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
		return ret;
	}

	if (mhdp->hdcp_supported)
		ret = drm_connector_attach_content_protection_property(conn, true);

	return ret;
}
1712
/*
 * drm_bridge_funcs.attach implementation: register the AUX channel,
 * create the connector unless the caller handles it, mark the bridge as
 * attached under start_lock, and unmask the SW event interrupt once the
 * firmware has reported MHDP_HW_READY.
 *
 * Returns 0 on success or a negative errno (AUX is unregistered on
 * connector-init failure).
 */
static int cdns_mhdp_attach(struct drm_bridge *bridge,
			    enum drm_bridge_attach_flags flags)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	bool hw_ready;
	int ret;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mhdp->aux.drm_dev = bridge->dev;
	ret = drm_dp_aux_register(&mhdp->aux);
	if (ret < 0)
		return ret;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		ret = cdns_mhdp_connector_init(mhdp);
		if (ret)
			goto aux_unregister;
	}

	/* Sample hw_state and set bridge_attached atomically w.r.t. probe */
	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = true;
	hw_ready = mhdp->hw_state == MHDP_HW_READY;

	spin_unlock(&mhdp->start_lock);

	/* Enable SW event interrupts */
	if (hw_ready)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);

	return 0;
aux_unregister:
	drm_dp_aux_unregister(&mhdp->aux);
	return ret;
}
1750
/*
 * Program the DP framer registers for @mode on the device's current
 * stream: pixel representation (format/bpc), sync polarities, porches,
 * MSA (Main Stream Attributes), MISC0/MISC1, active video geometry and
 * interlace flags, then enable the framer with video output.
 *
 * Register write order follows the controller programming sequence;
 * do not reorder the writes.
 */
static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
				      const struct drm_display_mode *mode)
{
	unsigned int dp_framer_sp = 0, msa_horizontal_1,
		msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
		misc0 = 0, misc1 = 0, pxl_repr,
		front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
		dp_vertical_1;
	u8 stream_id = mhdp->stream_id;
	u32 bpp, bpc, pxlfmt, framer;
	int ret;

	pxlfmt = mhdp->display_fmt.color_format;
	bpc = mhdp->display_fmt.bpc;

	/*
	 * If YCBCR supported and stream not SD, use ITU709
	 * Need to handle ITU version with YCBCR420 when supported
	 */
	if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 ||
	     pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && mode->crtc_vdisplay >= 720)
		misc0 = DP_YCBCR_COEFFICIENTS_ITU709;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	/* Select framer pixel representation and the matching MISC0 bits */
	switch (pxlfmt) {
	case DRM_COLOR_FORMAT_RGB444:
		pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_RGB;
		break;
	case DRM_COLOR_FORMAT_YCRCB444:
		pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCRCB422:
		pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCRCB420:
		/* Y420 is signalled via VSC SDP (misc1) further below */
		pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
		break;
	default:
		pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
	}

	/* Encode the bit depth into both MISC0 and the framer register */
	switch (bpc) {
	case 6:
		misc0 |= DP_TEST_BIT_DEPTH_6;
		pxl_repr |= CDNS_DP_FRAMER_6_BPC;
		break;
	case 8:
		misc0 |= DP_TEST_BIT_DEPTH_8;
		pxl_repr |= CDNS_DP_FRAMER_8_BPC;
		break;
	case 10:
		misc0 |= DP_TEST_BIT_DEPTH_10;
		pxl_repr |= CDNS_DP_FRAMER_10_BPC;
		break;
	case 12:
		misc0 |= DP_TEST_BIT_DEPTH_12;
		pxl_repr |= CDNS_DP_FRAMER_12_BPC;
		break;
	case 16:
		misc0 |= DP_TEST_BIT_DEPTH_16;
		pxl_repr |= CDNS_DP_FRAMER_16_BPC;
		break;
	}

	bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;

	cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
			    bnd_hsync2vsync);

	/* Sync polarities for the hsync2vsync block */
	hsync2vsync_pol_ctrl = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
			    hsync2vsync_pol_ctrl);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);

	/* Sync polarities and interlace flag for the framer itself */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);

	front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
	back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
			    CDNS_DP_FRONT_PORCH(front_porch) |
			    CDNS_DP_BACK_PORCH(back_porch));

	/* Bytes of active video per line */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
			    mode->crtc_hdisplay * bpp / 8);

	/* MSA horizontal: total and hsync start (measured from end of line) */
	msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
			    CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
			    CDNS_DP_MSAH0_HSYNC_START(msa_h0));

	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
	msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
			   CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
			    msa_horizontal_1);

	/* MSA vertical: total and vsync start (measured from end of frame) */
	msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
			    CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
			    CDNS_DP_MSAV0_VSYNC_START(msa_v0));

	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
	msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
			 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
			    msa_vertical_1);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		misc1 = DP_TEST_INTERLACED;
	if (mhdp->display_fmt.y_only)
		misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
	/* Use VSC SDP for Y420 */
	if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420)
		misc1 = CDNS_DP_TEST_VSC_SDP;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
			    misc0 | (misc1 << 8));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
			    CDNS_DP_H_HSYNC_WIDTH(hsync) |
			    CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
			    CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
			    CDNS_DP_V0_VSTART(msa_v0));

	dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);

	cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
				(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
				CDNS_DP_VB_ID_INTERLACED : 0);

	/* Finally enable the framer with video output */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return;
	}
	framer |= CDNS_DP_FRAMER_EN;
	framer &= ~CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
}
1921
/*
 * Enable single-stream transport for @mode: compute the transfer-unit
 * valid-symbols (VS) value from the required vs. available link
 * bandwidth, derive the line thresholds, program the framer TU and
 * stream-config registers, and finish by configuring the video framer.
 */
static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
				 const struct drm_display_mode *mode)
{
	u32 rate, vs, required_bandwidth, available_bandwidth;
	s32 line_thresh1, line_thresh2, line_thresh = 0;
	int pxlclock = mode->crtc_clock;
	u32 tu_size = 64;
	u32 bpp;

	/* Get rate in MSymbols per second per lane */
	rate = mhdp->link.rate / 1000;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	/* Fraction of each 64-symbol TU carrying pixel data */
	required_bandwidth = pxlclock * bpp / 8;
	available_bandwidth = mhdp->link.num_lanes * rate;

	vs = tu_size * required_bandwidth / available_bandwidth;
	vs /= 1000;

	/* A completely full TU is expressed as tu_size - 1 */
	if (vs == tu_size)
		vs = tu_size - 1;

	/* Line threshold heuristic (fixed-point with 5 fractional bits) */
	line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
	line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
	line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
	line_thresh = (line_thresh >> 5) + 2;

	mhdp->stream_id = 0;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
			    CDNS_DP_FRAMER_TU_VS(vs) |
			    CDNS_DP_FRAMER_TU_SIZE(tu_size) |
			    CDNS_DP_FRAMER_TU_CNT_RST_EN);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
			    line_thresh & GENMASK(5, 0));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
			    CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
						   0 : tu_size - vs));

	cdns_mhdp_configure_video(mhdp, mode);
}
1966
/*
 * drm_bridge_funcs.atomic_enable implementation: bring the link up if
 * needed, enable the VIF clock for stream 0, kick off HDCP when the
 * connector requests it, validate the mode against the trained link
 * bandwidth, and program/enable the SST video path.
 *
 * Runs under link_mutex; the lock is dropped around HDCP enable because
 * that path takes it internally. On error the modeset retry worker is
 * scheduled (only when ret < 0 - the WARN_ON bail-outs leave ret at the
 * last successful call's value and do not trigger a retry).
 */
static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
				    struct drm_bridge_state *bridge_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	struct drm_atomic_state *state = bridge_state->base.state;
	struct cdns_mhdp_bridge_state *mhdp_state;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_bridge_state *new_state;
	const struct drm_display_mode *mode;
	u32 resp;
	int ret;

	dev_dbg(mhdp->dev, "bridge enable\n");

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->plugged && !mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	/* Platform-specific enable hook (e.g. J721E wrapper) */
	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
		mhdp->info->ops->enable(mhdp);

	/* Enable VIF clock for stream 0 */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	if (ret < 0) {
		dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
		goto out;
	}

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);

	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	if (WARN_ON(!connector))
		goto out;

	conn_state = drm_atomic_get_new_connector_state(state, connector);
	if (WARN_ON(!conn_state))
		goto out;

	if (mhdp->hdcp_supported &&
	    mhdp->hw_state == MHDP_HW_READY &&
	    conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		/* HDCP enable takes link_mutex itself; drop it temporarily */
		mutex_unlock(&mhdp->link_mutex);
		cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
		mutex_lock(&mhdp->link_mutex);
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
	if (WARN_ON(!crtc_state))
		goto out;

	mode = &crtc_state->adjusted_mode;

	new_state = drm_atomic_get_new_bridge_state(state, bridge);
	if (WARN_ON(!new_state))
		goto out;

	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		ret = -EINVAL;
		goto out;
	}

	cdns_mhdp_sst_enable(mhdp, mode);

	mhdp_state = to_cdns_mhdp_bridge_state(new_state);

	/* Keep a copy of the active mode for HDCP/retry handling */
	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
	drm_mode_set_name(mhdp_state->current_mode);

	dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);

	mhdp->bridge_enabled = true;

out:
	mutex_unlock(&mhdp->link_mutex);
	if (ret < 0)
		schedule_work(&mhdp->modeset_retry_work);
}
2054
/*
 * drm_bridge_funcs.atomic_disable implementation: stop HDCP, disable the
 * framer video output, power the link down, gate the stream-0 VIF clock
 * and run the platform-specific disable hook. Mirrors atomic_enable in
 * reverse order, under link_mutex.
 */
static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
				     struct drm_bridge_state *bridge_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	u32 resp;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->hdcp_supported)
		cdns_mhdp_hdcp_disable(mhdp);

	mhdp->bridge_enabled = false;
	cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
	resp &= ~CDNS_DP_FRAMER_EN;
	resp |= CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);

	cdns_mhdp_link_down(mhdp);

	/* Disable VIF clock for stream 0 */
	cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
		mhdp->info->ops->disable(mhdp);

	mutex_unlock(&mhdp->link_mutex);
}
2086
/*
 * drm_bridge_funcs.detach implementation: unregister the AUX channel,
 * clear the attached flag under start_lock, and mask all APB interrupts.
 */
static void cdns_mhdp_detach(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	dev_dbg(mhdp->dev, "%s\n", __func__);

	drm_dp_aux_unregister(&mhdp->aux);

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = false;

	spin_unlock(&mhdp->start_lock);

	/* Mask every interrupt source */
	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
}
2103
2104static struct drm_bridge_state *
2105cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
2106{
2107 struct cdns_mhdp_bridge_state *state;
2108
2109 state = kzalloc(sizeof(*state), GFP_KERNEL);
2110 if (!state)
2111 return NULL;
2112
2113 __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
2114
2115 return &state->base;
2116}
2117
2118static void
2119cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
2120 struct drm_bridge_state *state)
2121{
2122 struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2123
2124 cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
2125
2126 if (cdns_mhdp_state->current_mode) {
2127 drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
2128 cdns_mhdp_state->current_mode = NULL;
2129 }
2130
2131 kfree(cdns_mhdp_state);
2132}
2133
2134static struct drm_bridge_state *
2135cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
2136{
2137 struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2138
2139 cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
2140 if (!cdns_mhdp_state)
2141 return NULL;
2142
2143 __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
2144
2145 return &cdns_mhdp_state->base;
2146}
2147
2148static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
2149 struct drm_bridge_state *bridge_state,
2150 struct drm_crtc_state *crtc_state,
2151 struct drm_connector_state *conn_state)
2152{
2153 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2154 const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
2155
2156 mutex_lock(&mhdp->link_mutex);
2157
2158 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2159 mhdp->link.rate)) {
2160 dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2161 __func__, mode->name, mhdp->link.num_lanes,
2162 mhdp->link.rate / 100);
2163 mutex_unlock(&mhdp->link_mutex);
2164 return -EINVAL;
2165 }
2166
2167 mutex_unlock(&mhdp->link_mutex);
2168 return 0;
2169}
2170
2171static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
2172{
2173 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2174
2175 return cdns_mhdp_detect(mhdp);
2176}
2177
/* drm_bridge_funcs.get_edid implementation; thin wrapper over cdns_mhdp_get_edid(). */
static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
					      struct drm_connector *connector)
{
	return cdns_mhdp_get_edid(bridge_to_mhdp(bridge), connector);
}
2185
/*
 * drm_bridge_funcs.hpd_enable implementation: unmask the SW event
 * interrupt (used for HPD notification) once the bridge is attached.
 */
static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	/* Enable SW event interrupts */
	if (mhdp->bridge_attached)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);
}
2195
/*
 * drm_bridge_funcs.hpd_disable implementation: mask the SW event
 * interrupt so HPD changes no longer raise interrupts.
 */
static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
}
2202
/* Bridge ops exported to the DRM core (atomic enable/disable/check,
 * attach/detach, bridge-state lifecycle, detection, EDID and HPD control).
 */
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
	.atomic_enable = cdns_mhdp_atomic_enable,
	.atomic_disable = cdns_mhdp_atomic_disable,
	.atomic_check = cdns_mhdp_atomic_check,
	.attach = cdns_mhdp_attach,
	.detach = cdns_mhdp_detach,
	.atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
	.atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
	.atomic_reset = cdns_mhdp_bridge_atomic_reset,
	.detect = cdns_mhdp_bridge_detect,
	.get_edid = cdns_mhdp_bridge_get_edid,
	.hpd_enable = cdns_mhdp_bridge_hpd_enable,
	.hpd_disable = cdns_mhdp_bridge_hpd_disable,
};
2217
/*
 * Read the firmware's HPD event and current HPD level.
 *
 * @hpd_pulse: set to true when the event bits include an HPD pulse
 *             (IRQ-style notification rather than a plug/unplug).
 *
 * Returns the current HPD line state (true = sink connected); returns
 * false when either mailbox query fails, which also leaves *hpd_pulse
 * false.
 */
static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
{
	int hpd_event, hpd_status;

	*hpd_pulse = false;

	hpd_event = cdns_mhdp_read_hpd_event(mhdp);

	/* Getting event bits failed, bail out */
	if (hpd_event < 0) {
		dev_warn(mhdp->dev, "%s: read event failed: %d\n",
			 __func__, hpd_event);
		return false;
	}

	hpd_status = cdns_mhdp_get_hpd_status(mhdp);
	if (hpd_status < 0) {
		dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
			 __func__, hpd_status);
		return false;
	}

	if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
		*hpd_pulse = true;

	return !!hpd_status;
}
2245
/*
 * React to an HPD change/pulse: re-read the plug state, retrain the link
 * if necessary and, when the stream was enabled, re-start it on the mode
 * saved in the bridge state.
 *
 * Returns 0 on success or a negative error code; a negative return makes
 * the caller (cdns_mhdp_hpd_work()) schedule the modeset-retry work.
 */
static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
{
	struct cdns_mhdp_bridge_state *cdns_bridge_state;
	struct drm_display_mode *current_mode;
	bool old_plugged = mhdp->plugged;
	struct drm_bridge_state *state;
	u8 status[DP_LINK_STATUS_SIZE];
	bool hpd_pulse;
	int ret = 0;

	mutex_lock(&mhdp->link_mutex);

	mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);

	if (!mhdp->plugged) {
		/* Unplugged: drop the link and reset to host capabilities. */
		cdns_mhdp_link_down(mhdp);
		mhdp->link.rate = mhdp->host.link_rate;
		mhdp->link.num_lanes = mhdp->host.lanes_cnt;
		goto out;
	}

	/*
	 * If we get a HPD pulse event and we were and still are connected,
	 * check the link status. If link status is ok, there's nothing to do
	 * as we don't handle DP interrupts. If link status is bad, continue
	 * with full link setup.
	 */
	if (hpd_pulse && old_plugged == mhdp->plugged) {
		ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);

		/*
		 * If everything looks fine, just return, as we don't handle
		 * DP IRQs.
		 */
		if (ret > 0 &&
		    drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
		    drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
			goto out;

		/* If link is bad, mark link as down so that we do a new LT */
		mhdp->link_up = false;
	}

	if (!mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	if (mhdp->bridge_enabled) {
		/*
		 * The stream was up before the event: re-validate bandwidth
		 * against the (possibly retrained, reduced) link and restore
		 * the stream on the saved mode.
		 */
		state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
		if (!state) {
			ret = -EINVAL;
			goto out;
		}

		cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
		if (!cdns_bridge_state) {
			ret = -EINVAL;
			goto out;
		}

		current_mode = cdns_bridge_state->current_mode;
		if (!current_mode) {
			ret = -EINVAL;
			goto out;
		}

		if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
					    mhdp->link.rate)) {
			ret = -EINVAL;
			goto out;
		}

		dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
			current_mode->name);

		cdns_mhdp_sst_enable(mhdp, current_mode);
	}
out:
	mutex_unlock(&mhdp->link_mutex);
	return ret;
}
2329
2330static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
2331{
2332 struct cdns_mhdp_device *mhdp;
2333 struct drm_connector *conn;
2334
2335 mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
2336
2337 conn = &mhdp->connector;
2338
2339 /* Grab the locks before changing connector property */
2340 mutex_lock(&conn->dev->mode_config.mutex);
2341
2342 /*
2343 * Set connector link status to BAD and send a Uevent to notify
2344 * userspace to do a modeset.
2345 */
2346 drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
2347 mutex_unlock(&conn->dev->mode_config.mutex);
2348
2349 /* Send Hotplug uevent so userspace can reprobe */
2350 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2351}
2352
2353static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
2354{
2355 struct cdns_mhdp_device *mhdp = data;
2356 u32 apb_stat, sw_ev0;
2357 bool bridge_attached;
2358
2359 apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
2360 if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
2361 return IRQ_NONE;
2362
2363 sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
2364
2365 /*
2366 * Calling drm_kms_helper_hotplug_event() when not attached
2367 * to drm device causes an oops because the drm_bridge->dev
2368 * is NULL. See cdns_mhdp_fw_cb() comments for details about the
2369 * problems related drm_kms_helper_hotplug_event() call.
2370 */
2371 spin_lock(&mhdp->start_lock);
2372 bridge_attached = mhdp->bridge_attached;
2373 spin_unlock(&mhdp->start_lock);
2374
2375 if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
2376 schedule_work(&mhdp->hpd_work);
2377 }
2378
2379 if (sw_ev0 & ~CDNS_DPTX_HPD) {
2380 mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
2381 wake_up(&mhdp->sw_events_wq);
2382 }
2383
2384 return IRQ_HANDLED;
2385}
2386
2387u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
2388{
2389 u32 ret;
2390
2391 ret = wait_event_timeout(mhdp->sw_events_wq,
2392 mhdp->sw_events & event,
2393 msecs_to_jiffies(500));
2394 if (!ret) {
2395 dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
2396 goto sw_event_out;
2397 }
2398
2399 ret = mhdp->sw_events;
2400 mhdp->sw_events &= ~event;
2401
2402sw_event_out:
2403 return ret;
2404}
2405
2406static void cdns_mhdp_hpd_work(struct work_struct *work)
2407{
2408 struct cdns_mhdp_device *mhdp = container_of(work,
2409 struct cdns_mhdp_device,
2410 hpd_work);
2411 int ret;
2412
2413 ret = cdns_mhdp_update_link_status(mhdp);
2414 if (mhdp->connector.dev) {
2415 if (ret < 0)
2416 schedule_work(&mhdp->modeset_retry_work);
2417 else
2418 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2419 } else {
2420 drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
2421 }
2422}
2423
2424static int cdns_mhdp_probe(struct platform_device *pdev)
2425{
2426 struct device *dev = &pdev->dev;
2427 struct cdns_mhdp_device *mhdp;
2428 unsigned long rate;
2429 struct clk *clk;
2430 int ret;
2431 int irq;
2432
2433 mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
2434 if (!mhdp)
2435 return -ENOMEM;
2436
2437 clk = devm_clk_get(dev, NULL);
2438 if (IS_ERR(clk)) {
2439 dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
2440 return PTR_ERR(clk);
2441 }
2442
2443 mhdp->clk = clk;
2444 mhdp->dev = dev;
2445 mutex_init(&mhdp->mbox_mutex);
2446 mutex_init(&mhdp->link_mutex);
2447 spin_lock_init(&mhdp->start_lock);
2448
2449 drm_dp_aux_init(&mhdp->aux);
2450 mhdp->aux.dev = dev;
2451 mhdp->aux.transfer = cdns_mhdp_transfer;
2452
2453 mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
2454 if (IS_ERR(mhdp->regs)) {
2455 dev_err(dev, "Failed to get memory resource\n");
2456 return PTR_ERR(mhdp->regs);
2457 }
2458
2459 mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
2460 if (IS_ERR(mhdp->sapb_regs)) {
2461 mhdp->hdcp_supported = false;
2462 dev_warn(dev,
2463 "Failed to get SAPB memory resource, HDCP not supported\n");
2464 } else {
2465 mhdp->hdcp_supported = true;
2466 }
2467
2468 mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
2469 if (IS_ERR(mhdp->phy)) {
2470 dev_err(dev, "no PHY configured\n");
2471 return PTR_ERR(mhdp->phy);
2472 }
2473
2474 platform_set_drvdata(pdev, mhdp);
2475
2476 mhdp->info = of_device_get_match_data(dev);
2477
2478 clk_prepare_enable(clk);
2479
2480 pm_runtime_enable(dev);
2481 ret = pm_runtime_resume_and_get(dev);
2482 if (ret < 0) {
2483 dev_err(dev, "pm_runtime_resume_and_get failed\n");
2484 pm_runtime_disable(dev);
2485 goto clk_disable;
2486 }
2487
2488 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
2489 ret = mhdp->info->ops->init(mhdp);
2490 if (ret != 0) {
2491 dev_err(dev, "MHDP platform initialization failed: %d\n",
2492 ret);
2493 goto runtime_put;
2494 }
2495 }
2496
2497 rate = clk_get_rate(clk);
2498 writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
2499 writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
2500
2501 dev_dbg(dev, "func clk rate %lu Hz\n", rate);
2502
2503 writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2504
2505 irq = platform_get_irq(pdev, 0);
2506 ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
2507 cdns_mhdp_irq_handler, IRQF_ONESHOT,
2508 "mhdp8546", mhdp);
2509 if (ret) {
2510 dev_err(dev, "cannot install IRQ %d\n", irq);
2511 ret = -EIO;
2512 goto plat_fini;
2513 }
2514
2515 cdns_mhdp_fill_host_caps(mhdp);
2516
2517 /* Initialize link rate and num of lanes to host values */
2518 mhdp->link.rate = mhdp->host.link_rate;
2519 mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2520
2521 /* The only currently supported format */
2522 mhdp->display_fmt.y_only = false;
2523 mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
2524 mhdp->display_fmt.bpc = 8;
2525
2526 mhdp->bridge.of_node = pdev->dev.of_node;
2527 mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
2528 mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
2529 DRM_BRIDGE_OP_HPD;
2530 mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
2531 if (mhdp->info)
2532 mhdp->bridge.timings = mhdp->info->timings;
2533
2534 ret = phy_init(mhdp->phy);
2535 if (ret) {
2536 dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
2537 goto plat_fini;
2538 }
2539
2540 /* Initialize the work for modeset in case of link train failure */
2541 INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
2542 INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);
2543
2544 init_waitqueue_head(&mhdp->fw_load_wq);
2545 init_waitqueue_head(&mhdp->sw_events_wq);
2546
2547 ret = cdns_mhdp_load_firmware(mhdp);
2548 if (ret)
2549 goto phy_exit;
2550
2551 if (mhdp->hdcp_supported)
2552 cdns_mhdp_hdcp_init(mhdp);
2553
2554 drm_bridge_add(&mhdp->bridge);
2555
2556 return 0;
2557
2558phy_exit:
2559 phy_exit(mhdp->phy);
2560plat_fini:
2561 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2562 mhdp->info->ops->exit(mhdp);
2563runtime_put:
2564 pm_runtime_put_sync(dev);
2565 pm_runtime_disable(dev);
2566clk_disable:
2567 clk_disable_unprepare(mhdp->clk);
2568
2569 return ret;
2570}
2571
2572static int cdns_mhdp_remove(struct platform_device *pdev)
2573{
2574 struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
2575 unsigned long timeout = msecs_to_jiffies(100);
2576 bool stop_fw = false;
2577 int ret;
2578
2579 drm_bridge_remove(&mhdp->bridge);
2580
2581 ret = wait_event_timeout(mhdp->fw_load_wq,
2582 mhdp->hw_state == MHDP_HW_READY,
2583 timeout);
2584 if (ret == 0)
2585 dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
2586 __func__);
2587 else
2588 stop_fw = true;
2589
2590 spin_lock(&mhdp->start_lock);
2591 mhdp->hw_state = MHDP_HW_STOPPED;
2592 spin_unlock(&mhdp->start_lock);
2593
2594 if (stop_fw)
2595 ret = cdns_mhdp_set_firmware_active(mhdp, false);
2596
2597 phy_exit(mhdp->phy);
2598
2599 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2600 mhdp->info->ops->exit(mhdp);
2601
2602 pm_runtime_put_sync(&pdev->dev);
2603 pm_runtime_disable(&pdev->dev);
2604
2605 cancel_work_sync(&mhdp->modeset_retry_work);
2606 flush_scheduled_work();
2607
2608 clk_disable_unprepare(mhdp->clk);
2609
2610 return ret;
2611}
2612
/*
 * OF match table. The TI J721e entry carries platform-specific bridge
 * timings and wrapper ops (see cdns-mhdp8546-j721e.h).
 */
static const struct of_device_id mhdp_ids[] = {
	{ .compatible = "cdns,mhdp8546", },
#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
	{ .compatible = "ti,j721e-mhdp8546",
	  .data = &(const struct cdns_mhdp_platform_info) {
		  .timings = &mhdp_ti_j721e_bridge_timings,
		  .ops = &mhdp_ti_j721e_ops,
	  },
	},
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mhdp_ids);
2626
/* Platform driver glue; registration handled by module_platform_driver(). */
static struct platform_driver mhdp_driver = {
	.driver = {
		.name = "cdns-mhdp8546",
		.of_match_table = of_match_ptr(mhdp_ids),
	},
	.probe = cdns_mhdp_probe,
	.remove = cdns_mhdp_remove,
};
module_platform_driver(mhdp_driver);
2636
/* Module metadata; FW_NAME declares the firmware blob loaded at probe time. */
MODULE_FIRMWARE(FW_NAME);

MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-mhdp8546");