/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>

#include "dsi_phy.h"

#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
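
/*
 * Worked example: S_DIV_ROUND_UP() rounds the magnitude up for either
 * sign, so S_DIV_ROUND_UP(7, 2) == 4 and S_DIV_ROUND_UP(-7, 2) == -4,
 * where the kernel's plain DIV_ROUND_UP(-7, 2) would yield -3.
 */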

static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
				s32 min_result, bool even)
{
	s32 v;

	v = (tmax - tmin) * percent;
	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 0x1))
		return max_t(s32, min_result, v - 1);
	else
		return max_t(s32, min_result, v);
}
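
/*
 * Worked example: linear_inter(100, 60, 10, 0, true) picks the point 10%
 * of the way into [60, 100]: v = (100 - 60) * 10 = 400, then
 * S_DIV_ROUND_UP(400, 100) + 60 = 64. The result is already even, so 64
 * is returned; with even == true an odd value would be decremented first,
 * and min_result acts as a floor either way.
 */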

static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
					s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* reset */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/* adjust */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}
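
/*
 * Note on the "adjust" step: clk_zero is padded so that
 * hs_rqst + clk_prepare + clk_zero lands on a multiple of 8, which
 * presumably keeps the combined interval aligned to the byte clock
 * (8 UI); this reading is inferred from the arithmetic, not from
 * documentation.
 */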

int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
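
	/*
	 * With coeff = 1000, ui and lpx above are the bit-clock and
	 * escape-clock periods in picoseconds: e.g. a 1 GHz bit clock gives
	 * ui = mult_frac(1000000, 1000, 1000000) = 1000, i.e. 1.000 ns.
	 */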

	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* Calculate clk_zero after clk_prepare and hs_rqst */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	tmin = DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);

	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	if (tmin > tmax) {
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
		timing->clk_pre = temp >> 1;
	} else {
		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
	}
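
	/*
	 * clk_pre is a 6-bit field, so when the required minimum exceeds
	 * tmax = 63 the value is interpolated over a doubled range and
	 * stored halved; the newer revision of this file below makes the
	 * halving explicit via clk_pre_inc_by_2.
	 */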

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
		timing->clk_pre, timing->clk_post, timing->clk_zero,
		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
		timing->hs_rqst);

	return 0;
}

void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
				u32 bit_mask)
{
	int phy_id = phy->id;
	u32 val;

	if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
		return;

	val = dsi_phy_read(phy->base + reg);

	if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
		dsi_phy_write(phy->base + reg, val | bit_mask);
	else
		dsi_phy_write(phy->base + reg, val & (~bit_mask));
}
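
/*
 * Reading of the table above: src_pll_truthtable[phy_id][pll_id] encodes
 * whether, for this PHY, selecting the given PLL as clock source means
 * setting (true) or clearing (false) bit_mask in the register.
 */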

static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int i, ret;

	for (i = 0; i < num; i++)
		s[i].supply = regs[i].name;

	ret = devm_regulator_bulk_get(dev, num, s);
	if (ret < 0) {
		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
			__func__, ret);
		return ret;
	}

	return 0;
}

static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	int num = phy->cfg->reg_cfg.num;
	int i;

	DBG("");
	for (i = num - 1; i >= 0; i--)
		if (regs[i].disable_load >= 0)
			regulator_set_load(s[i].consumer, regs[i].disable_load);

	regulator_bulk_disable(num, s);
}

static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
						regs[i].enable_load);
			if (ret < 0) {
				dev_err(dev,
					"regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		dev_err(dev, "regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);
	return ret;
}

static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
	struct device *dev = &phy->pdev->dev;
	int ret;

	pm_runtime_get_sync(dev);

	ret = clk_prepare_enable(phy->ahb_clk);
	if (ret) {
		dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
		pm_runtime_put_sync(dev);
	}

	return ret;
}

static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
	clk_disable_unprepare(phy->ahb_clk);
	pm_runtime_put_sync(&phy->pdev->dev);
}

static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
	{ .compatible = "qcom,dsi-phy-28nm-hpm",
	  .data = &dsi_phy_28nm_hpm_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-lp",
	  .data = &dsi_phy_28nm_lp_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
	{ .compatible = "qcom,dsi-phy-20nm",
	  .data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
	{ .compatible = "qcom,dsi-phy-28nm-8960",
	  .data = &dsi_phy_28nm_8960_cfgs },
#endif
	{}
};

/*
 * Currently, we only support one SoC for each PHY type. When we have multiple
 * SoCs for the same PHY, we can try to make the index searching a bit more
 * clever.
 */
static int dsi_phy_get_id(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	const struct msm_dsi_phy_cfg *cfg = phy->cfg;
	struct resource *res;
	int i;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
	if (!res)
		return -EINVAL;

	for (i = 0; i < cfg->num_dsi_phy; i++) {
		if (cfg->io_start[i] == res->start)
			return i;
	}

	return -EINVAL;
}

static int dsi_phy_driver_probe(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	int ret;

	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	match = of_match_node(dsi_phy_dt_match, dev->of_node);
	if (!match)
		return -ENODEV;

	phy->cfg = match->data;
	phy->pdev = pdev;

	phy->id = dsi_phy_get_id(phy);
	if (phy->id < 0) {
		ret = phy->id;
		dev_err(dev, "%s: couldn't identify PHY index, %d\n",
			__func__, ret);
		goto fail;
	}

	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
				"qcom,dsi-phy-regulator-ldo-mode");

	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
	if (IS_ERR(phy->base)) {
		dev_err(dev, "%s: failed to map phy base\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
				"DSI_PHY_REG");
	if (IS_ERR(phy->reg_base)) {
		dev_err(dev, "%s: failed to map phy regulator base\n",
			__func__);
		ret = -ENOMEM;
		goto fail;
	}

	ret = dsi_phy_regulator_init(phy);
	if (ret) {
		dev_err(dev, "%s: failed to init regulator\n", __func__);
		goto fail;
	}

	phy->ahb_clk = devm_clk_get(dev, "iface_clk");
	if (IS_ERR(phy->ahb_clk)) {
		dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
		ret = PTR_ERR(phy->ahb_clk);
		goto fail;
	}

	/*
	 * PLL init will call into clk_register which requires
	 * register access, so we need to enable power and ahb clock.
	 */
	ret = dsi_phy_enable_resource(phy);
	if (ret)
		goto fail;

	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
	if (!phy->pll)
		dev_info(dev,
			"%s: pll init failed, need separate pll clk driver\n",
			__func__);

	dsi_phy_disable_resource(phy);

	platform_set_drvdata(pdev, phy);

	return 0;

fail:
	return ret;
}

static int dsi_phy_driver_remove(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);

	if (phy && phy->pll) {
		msm_dsi_pll_destroy(phy->pll);
		phy->pll = NULL;
	}

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver dsi_phy_platform_driver = {
	.probe = dsi_phy_driver_probe,
	.remove = dsi_phy_driver_remove,
	.driver = {
		.name = "msm_dsi_phy",
		.of_match_table = dsi_phy_dt_match,
	},
};

void __init msm_dsi_phy_driver_register(void)
{
	platform_driver_register(&dsi_phy_platform_driver);
}

void __exit msm_dsi_phy_driver_unregister(void)
{
	platform_driver_unregister(&dsi_phy_platform_driver);
}

int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	struct device *dev;
	int ret;

	if (!phy || !phy->cfg->ops.enable)
		return -EINVAL;

	dev = &phy->pdev->dev;

	ret = dsi_phy_regulator_enable(phy);
	if (ret) {
		dev_err(dev, "%s: regulator enable failed, %d\n",
			__func__, ret);
		return ret;
	}

	ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
	if (ret) {
		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
		dsi_phy_regulator_disable(phy);
		return ret;
	}

	return 0;
}

void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
	if (!phy || !phy->cfg->ops.disable)
		return;

	phy->cfg->ops.disable(phy);

	dsi_phy_regulator_disable(phy);
}

void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
	u32 *clk_pre, u32 *clk_post)
{
	if (!phy)
		return;

	if (clk_pre)
		*clk_pre = phy->timing.clk_pre;
	if (clk_post)
		*clk_post = phy->timing.clk_post;
}

struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
{
	if (!phy)
		return NULL;

	return phy->pll;
}

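/*
 * Below is a later revision of this file, after the driver was reworked
 * to register the PHY's PLL clocks with the common clock framework and
 * to support additional PHY generations (14nm/10nm/7nm).
 */
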
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

#include "dsi_phy.h"

#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))

static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
				s32 min_result, bool even)
{
	s32 v;

	v = (tmax - tmin) * percent;
	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 0x1))
		return max_t(s32, min_result, v - 1);
	else
		return max_t(s32, min_result, v);
}

static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
					s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* reset */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/* adjust */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}

int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
			     struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* Calculate clk_zero after clk_prepare and hs_rqst */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	tmin = DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
						       false);
	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	if (tmin > tmax) {
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = true;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = false;
	}

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst);

	return 0;
}

int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8;
	s32 tmax, tmin;
	s32 pcnt0 = 50;
	s32 pcnt1 = 50;
	s32 pcnt2 = 10;
	s32 pcnt3 = 30;
	s32 pcnt4 = 10;
	s32 pcnt5 = 2;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 hb_en, hb_en_ckln, pd_ckln, pd;
	s32 val, val_ckln;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	timing->hs_halfbyte_en = 0;
	hb_en = 0;
	timing->hs_halfbyte_en_ckln = 0;
	hb_en_ckln = 0;
	timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
	pd_ckln = timing->hs_prep_dly_ckln;
	timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
	pd = timing->hs_prep_dly;

	val = (hb_en << 2) + (pd << 1);
	val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;
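
	/*
	 * Unlike msm_dsi_dphy_timing_calc() above, the v2 fields are
	 * expressed in units of 8 UI (one byte-clock cycle), hence the
	 * ui_x8 divisor used throughout below; e.g. a 1 GHz bit clock
	 * gives ui = 1000 and ui_x8 = 8000 (8.000 ns).
	 */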

	temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff - val_ckln * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);

	temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);

	temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);

	temp = 60 * coeff + 52 * ui - 43 * ui;
	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	timing->shared_timings.clk_post =
		linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
	temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
			(((timing->hs_rqst_ckln << 3) + 8) * ui);
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	if (tmin > tmax) {
		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = 1;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = 0;
	}
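
	/*
	 * When clk_pre_inc_by_2 is set, the stored clk_pre is half the
	 * required count, and the DSI host is expected to program its
	 * matching "increment by 2" mode so the effective timing is
	 * preserved.
	 */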

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
	    timing->hs_prep_dly_ckln);

	return 0;
}

int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8;
	s32 tmax, tmin;
	s32 pcnt0 = 50;
	s32 pcnt1 = 50;
	s32 pcnt2 = 10;
	s32 pcnt3 = 30;
	s32 pcnt4 = 10;
	s32 pcnt5 = 2;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 hb_en, hb_en_ckln;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	timing->hs_halfbyte_en = 0;
	hb_en = 0;
	timing->hs_halfbyte_en_ckln = 0;
	hb_en_ckln = 0;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;

	temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);

	temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);

	temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp / ui_x8) - 1;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);

	temp = 60 * coeff + 52 * ui - 43 * ui;
	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	timing->shared_timings.clk_post =
		linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 8 * ui + (timing->clk_prepare << 3) * ui;
	temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
			(((timing->hs_rqst_ckln << 3) + 8) * ui);
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	if (tmin > tmax) {
		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = 1;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = 0;
	}

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
	    timing->hs_prep_dly_ckln);

	return 0;
}

int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8;
	s32 tmax, tmin;
	s32 pcnt_clk_prep = 50;
	s32 pcnt_clk_zero = 2;
	s32 pcnt_clk_trail = 30;
	s32 pcnt_hs_prep = 50;
	s32 pcnt_hs_zero = 10;
	s32 pcnt_hs_trail = 30;
	s32 pcnt_hs_exit = 10;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 hb_en;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	hb_en = 0;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;

	/*
	 * TODO: verify these calculations against the latest downstream
	 * driver. Everything except clk_post/clk_pre reuses the v3
	 * calculations, since the downstream driver has the same
	 * calculations for v3 and v4.
	 */

	temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);

	temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);

	temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp / ui_x8) - 1;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);

	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);

	/*
	 * recommended min
	 * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
	 */
	temp = 60 * coeff + 52 * ui + (timing->hs_trail + 1) * ui_x8;
	tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
	tmax = 255;
	timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);

	/*
	 * recommended min
	 * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
	 * val2 = (16 * bit_clk_ns)
	 * final = roundup(val1/val2, 0) - 1
	 */
	temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
	tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
	tmax = 255;
	timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
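
	/*
	 * The open-coded expression above is effectively linear_inter()
	 * at 1.25%: (tmax - tmin) * 125 / 10000 == (tmax - tmin) * 1.25%.
	 * The s32 percent parameter cannot carry a fractional percentage,
	 * hence the inline arithmetic.
	 */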

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);

	return 0;
}

static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int i, ret;

	for (i = 0; i < num; i++)
		s[i].supply = regs[i].name;

	ret = devm_regulator_bulk_get(dev, num, s);
	if (ret < 0) {
		if (ret != -EPROBE_DEFER) {
			DRM_DEV_ERROR(dev,
				      "%s: failed to init regulator, ret=%d\n",
				      __func__, ret);
		}

		return ret;
	}

	return 0;
}

static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	int num = phy->cfg->reg_cfg.num;
	int i;

	DBG("");
	for (i = num - 1; i >= 0; i--)
		if (regs[i].disable_load >= 0)
			regulator_set_load(s[i].consumer, regs[i].disable_load);

	regulator_bulk_disable(num, s);
}

static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
						 regs[i].enable_load);
			if (ret < 0) {
				DRM_DEV_ERROR(dev,
					      "regulator %d set op mode failed, %d\n",
					      i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);
	return ret;
}

static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
	struct device *dev = &phy->pdev->dev;
	int ret;

	pm_runtime_get_sync(dev);

	ret = clk_prepare_enable(phy->ahb_clk);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
		pm_runtime_put_sync(dev);
	}

	return ret;
}

static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
	clk_disable_unprepare(phy->ahb_clk);
	pm_runtime_put_autosuspend(&phy->pdev->dev);
}

static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
	{ .compatible = "qcom,dsi-phy-28nm-hpm",
	  .data = &dsi_phy_28nm_hpm_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
	  .data = &dsi_phy_28nm_hpm_famb_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-lp",
	  .data = &dsi_phy_28nm_lp_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
	{ .compatible = "qcom,dsi-phy-20nm",
	  .data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
	{ .compatible = "qcom,dsi-phy-28nm-8960",
	  .data = &dsi_phy_28nm_8960_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
	{ .compatible = "qcom,dsi-phy-14nm",
	  .data = &dsi_phy_14nm_cfgs },
	{ .compatible = "qcom,dsi-phy-14nm-660",
	  .data = &dsi_phy_14nm_660_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
	{ .compatible = "qcom,dsi-phy-10nm",
	  .data = &dsi_phy_10nm_cfgs },
	{ .compatible = "qcom,dsi-phy-10nm-8998",
	  .data = &dsi_phy_10nm_8998_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
	{ .compatible = "qcom,dsi-phy-7nm",
	  .data = &dsi_phy_7nm_cfgs },
	{ .compatible = "qcom,dsi-phy-7nm-8150",
	  .data = &dsi_phy_7nm_8150_cfgs },
#endif
	{}
};
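
/*
 * Illustrative only (assumed, not taken from this file): a board DT node
 * matching the 28nm HPM entry above would look roughly like
 *
 *	dsi-phy@fd922a00 {
 *		compatible = "qcom,dsi-phy-28nm-hpm";
 *		reg-names = "dsi_phy", "dsi_phy_regulator";
 *		...
 *	};
 *
 * where the "dsi_phy" reg name is what dsi_phy_get_id() and the
 * msm_ioremap_size() calls below look up.
 */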

/*
 * Currently, we only support one SoC for each PHY type. When we have multiple
 * SoCs for the same PHY, we can try to make the index searching a bit more
 * clever.
 */
static int dsi_phy_get_id(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	const struct msm_dsi_phy_cfg *cfg = phy->cfg;
	struct resource *res;
	int i;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
	if (!res)
		return -EINVAL;

	for (i = 0; i < cfg->num_dsi_phy; i++) {
		if (cfg->io_start[i] == res->start)
			return i;
	}

	return -EINVAL;
}

static int dsi_phy_driver_probe(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	int ret;

	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	match = of_match_node(dsi_phy_dt_match, dev->of_node);
	if (!match)
		return -ENODEV;

	phy->provided_clocks = devm_kzalloc(dev,
			struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
			GFP_KERNEL);
	if (!phy->provided_clocks)
		return -ENOMEM;

	phy->provided_clocks->num = NUM_PROVIDED_CLKS;

	phy->cfg = match->data;
	phy->pdev = pdev;

	phy->id = dsi_phy_get_id(phy);
	if (phy->id < 0) {
		ret = phy->id;
		DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
			      __func__, ret);
		goto fail;
	}

	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
				"qcom,dsi-phy-regulator-ldo-mode");

	phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY", &phy->base_size);
	if (IS_ERR(phy->base)) {
		DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", "DSI_PLL", &phy->pll_size);
	if (IS_ERR(phy->pll_base)) {
		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	if (phy->cfg->has_phy_lane) {
		phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", "DSI_PHY_LANE", &phy->lane_size);
		if (IS_ERR(phy->lane_base)) {
			DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
			ret = -ENOMEM;
			goto fail;
		}
	}

	if (phy->cfg->has_phy_regulator) {
		phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", "DSI_PHY_REG", &phy->reg_size);
		if (IS_ERR(phy->reg_base)) {
			DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
			ret = -ENOMEM;
			goto fail;
		}
	}

	ret = dsi_phy_regulator_init(phy);
	if (ret)
		goto fail;

	phy->ahb_clk = msm_clk_get(pdev, "iface");
	if (IS_ERR(phy->ahb_clk)) {
		DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
		ret = PTR_ERR(phy->ahb_clk);
		goto fail;
	}

	/*
	 * PLL init will call into clk_register which requires
	 * register access, so we need to enable power and ahb clock.
	 */
	ret = dsi_phy_enable_resource(phy);
	if (ret)
		goto fail;

	if (phy->cfg->ops.pll_init) {
		ret = phy->cfg->ops.pll_init(phy);
		if (ret) {
			DRM_DEV_INFO(dev,
				     "%s: pll init failed: %d, need separate pll clk driver\n",
				     __func__, ret);
			goto fail;
		}
	}

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					  phy->provided_clocks);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
		goto fail;
	}

	dsi_phy_disable_resource(phy);

	platform_set_drvdata(pdev, phy);

	return 0;

fail:
	return ret;
}

static struct platform_driver dsi_phy_platform_driver = {
	.probe = dsi_phy_driver_probe,
	.driver = {
		.name = "msm_dsi_phy",
		.of_match_table = dsi_phy_dt_match,
	},
};

void __init msm_dsi_phy_driver_register(void)
{
	platform_driver_register(&dsi_phy_platform_driver);
}

void __exit msm_dsi_phy_driver_unregister(void)
{
	platform_driver_unregister(&dsi_phy_platform_driver);
}

int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
		       struct msm_dsi_phy_clk_request *clk_req)
{
	struct device *dev;
	int ret;

	if (!phy || !phy->cfg->ops.enable)
		return -EINVAL;

	dev = &phy->pdev->dev;

	ret = dsi_phy_enable_resource(phy);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
			      __func__, ret);
		goto res_en_fail;
	}

	ret = dsi_phy_regulator_enable(phy);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
			      __func__, ret);
		goto reg_en_fail;
	}

	ret = phy->cfg->ops.enable(phy, clk_req);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
		goto phy_en_fail;
	}

	/*
	 * Resetting the DSI PHY silently changes its PLL registers to their
	 * reset values, which will confuse the clock driver and result in a
	 * wrong output rate for the link clocks. Restore the PLL state if
	 * its PLL is being used as a clock source.
	 */
	if (phy->usecase != MSM_DSI_PHY_SLAVE) {
		ret = msm_dsi_phy_pll_restore_state(phy);
		if (ret) {
			DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n",
				      __func__, ret);
			goto pll_restor_fail;
		}
	}

	return 0;

pll_restor_fail:
	if (phy->cfg->ops.disable)
		phy->cfg->ops.disable(phy);
phy_en_fail:
	dsi_phy_regulator_disable(phy);
reg_en_fail:
	dsi_phy_disable_resource(phy);
res_en_fail:
	return ret;
}

void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
	if (!phy || !phy->cfg->ops.disable)
		return;

	phy->cfg->ops.disable(phy);

	dsi_phy_regulator_disable(phy);
	dsi_phy_disable_resource(phy);
}

void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
				    struct msm_dsi_phy_shared_timings *shared_timings)
{
	memcpy(shared_timings, &phy->timing.shared_timings,
	       sizeof(*shared_timings));
}

void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
			     enum msm_dsi_phy_usecase uc)
{
	if (phy)
		phy->usecase = uc;
}

int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
	struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
{
	if (byte_clk_provider)
		*byte_clk_provider = phy->provided_clocks->hws[DSI_BYTE_PLL_CLK]->clk;
	if (pixel_clk_provider)
		*pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;

	return 0;
}

void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
{
	if (phy->cfg->ops.save_pll_state) {
		phy->cfg->ops.save_pll_state(phy);
		phy->state_saved = true;
	}
}

int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
{
	int ret;

	if (phy->cfg->ops.restore_pll_state && phy->state_saved) {
		ret = phy->cfg->ops.restore_pll_state(phy);
		if (ret)
			return ret;

		phy->state_saved = false;
	}

	return 0;
}
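
/*
 * The save/restore pair brackets a PHY reset: the caller (the DSI manager
 * in this driver) is expected to save the PLL state before the PHY is
 * torn down, and msm_dsi_phy_enable() above restores it, unless this PHY
 * is the slave in a dual-DSI configuration.
 */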

void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy)
{
	msm_disp_snapshot_add_block(disp_state,
				    phy->base_size, phy->base,
				    "dsi%d_phy", phy->id);

	/* Do not try accessing PLL registers if it is switched off */
	if (phy->pll_on)
		msm_disp_snapshot_add_block(disp_state,
					    phy->pll_size, phy->pll_base,
					    "dsi%d_pll", phy->id);

	if (phy->lane_base)
		msm_disp_snapshot_add_block(disp_state,
					    phy->lane_size, phy->lane_base,
					    "dsi%d_lane", phy->id);

	if (phy->reg_base)
		msm_disp_snapshot_add_block(disp_state,
					    phy->reg_size, phy->reg_base,
					    "dsi%d_reg", phy->id);
}