// SPDX-License-Identifier: GPL-2.0+
/*
 * DaVinci MDIO Module driver
 *
 * Copyright (C) 2010 Texas Instruments.
 *
 * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
 *
 * Copyright (C) 2009 Texas Instruments.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/pinctrl/consumer.h>

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT 100 /* msecs */

#define PHY_REG_MASK 0x1f
#define PHY_ID_MASK 0x1f
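/* Both masks are 0x1f: clause-22 MDIO uses 5-bit PHY and register addresses. */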

#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */

struct davinci_mdio_of_param {
	int autosuspend_delay_ms;
};

struct davinci_mdio_regs {
	u32 version;
	u32 control;
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
#define CONTROL_MAX_DIV (0xffff)

	u32 alive;
	u32 link;
	u32 linkintraw;
	u32 linkintmasked;
	u32 __reserved_0[2];
	u32 userintraw;
	u32 userintmasked;
	u32 userintmaskset;
	u32 userintmaskclr;
	u32 __reserved_1[20];

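	/*
	 * Each user access channel packs one clause-22 MDIO transaction into
	 * its access word: GO/WRITE/ACK flags in bits 31..29, the register
	 * address in bits 25..21, the PHY address in bits 20..16 and the
	 * 16-bit data in bits 15..0 (see davinci_mdio_read/write below).
	 */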
	struct {
		u32 access;
#define USERACCESS_GO BIT(31)
#define USERACCESS_WRITE BIT(30)
#define USERACCESS_ACK BIT(29)
#define USERACCESS_READ (0)
#define USERACCESS_DATA (0xffff)

		u32 physel;
	} user[0];
};

static const struct mdio_platform_data default_pdata = {
	.bus_freq = DEF_OUT_FREQ,
};

struct davinci_mdio_data {
	struct mdio_platform_data pdata;
	struct davinci_mdio_regs __iomem *regs;
	struct clk *clk;
	struct device *dev;
	struct mii_bus *bus;
	bool active_in_suspend;
	unsigned long access_time; /* jiffies */
	/* Indicates that the driver shouldn't modify phy_mask when the
	 * MDIO bus is registered from DT.
	 */
	bool skip_scan;
	u32 clk_div;
};

static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
{
	u32 mdio_in, div, mdio_out_khz, access_time;

	mdio_in = clk_get_rate(data->clk);
	div = (mdio_in / data->pdata.bus_freq) - 1;
	if (div > CONTROL_MAX_DIV)
		div = CONTROL_MAX_DIV;

	data->clk_div = div;
	/*
	 * One mdio transaction consists of:
	 *	32 bits of preamble
	 *	32 bits of transferred data
	 *	24 bits of bus yield (not needed unless shared?)
	 */
	mdio_out_khz = mdio_in / (1000 * (div + 1));
	access_time = (88 * 1000) / mdio_out_khz;
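	/*
	 * Worked example with hypothetical numbers (not tied to any
	 * particular SoC): a 125 MHz functional clock and the default
	 * 2.2 MHz bus_freq give div = 125000000 / 2200000 - 1 = 55,
	 * mdio_out_khz = 125000000 / (1000 * 56) = 2232 and
	 * access_time = 88000 / 2232 = ~39 usecs per transaction.
	 */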

	/*
	 * In the worst case, we could be kicking off a user-access immediately
	 * after the mdio bus scan state-machine triggered its own read. If
	 * so, our request could get deferred by one access cycle. We
	 * defensively allow for 4 access cycles.
	 */
	data->access_time = usecs_to_jiffies(access_time * 4);
	if (!data->access_time)
		data->access_time = 1;
}

static void davinci_mdio_enable(struct davinci_mdio_data *data)
{
	/* set enable and clock divider */
	writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}

static int davinci_mdio_reset(struct mii_bus *bus)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 phy_mask, ver;
	int ret;

	ret = pm_runtime_get_sync(data->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(data->dev);
		return ret;
	}

	/* wait for scan logic to settle */
	msleep(PHY_MAX_ADDR * data->access_time);

	/* dump hardware version info */
	ver = readl(&data->regs->version);
	dev_info(data->dev,
		 "davinci mdio revision %d.%d, bus freq %ld\n",
		 (ver >> 8) & 0xff, ver & 0xff,
		 data->pdata.bus_freq);

	if (data->skip_scan)
		goto done;

	/* get phy mask from the alive register */
	phy_mask = readl(&data->regs->alive);
	if (phy_mask) {
		/* restrict mdio bus to live phys only */
		dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
		phy_mask = ~phy_mask;
	} else {
		/* desperately scan all phys */
		dev_warn(data->dev, "no live phy, scanning all\n");
		phy_mask = 0;
	}
	data->bus->phy_mask = phy_mask;

done:
	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);

	return 0;
}

/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
	struct davinci_mdio_regs __iomem *regs = data->regs;
	unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
	u32 reg;

	while (time_after(timeout, jiffies)) {
		reg = readl(&regs->user[0].access);
		if ((reg & USERACCESS_GO) == 0)
			return 0;

		reg = readl(&regs->control);
		if ((reg & CONTROL_IDLE) == 0) {
			usleep_range(100, 200);
			continue;
		}

		/*
		 * An emac soft_reset may have clobbered the mdio controller's
		 * state machine. We need to reset and retry the current
		 * operation
		 */
		dev_warn(data->dev, "resetting idled controller\n");
		davinci_mdio_enable(data);
		return -EAGAIN;
	}

	reg = readl(&regs->user[0].access);
	if ((reg & USERACCESS_GO) == 0)
		return 0;

	dev_err(data->dev, "timed out waiting for user access\n");
	return -ETIMEDOUT;
}

/* wait until hardware state machine is idle */
static inline int wait_for_idle(struct davinci_mdio_data *data)
{
	struct davinci_mdio_regs __iomem *regs = data->regs;
	u32 val;
	int ret;

	ret = readl_poll_timeout(&regs->control, val, val & CONTROL_IDLE,
				 0, MDIO_TIMEOUT * 1000);
	if (ret)
		dev_err(data->dev, "timed out waiting for idle\n");

	return ret;
}

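/*
 * The two accessors below are installed as the mii_bus read/write hooks.
 * phylib and other MDIO consumers reach them indirectly, for example
 * (illustrative call only, phy_addr is a placeholder):
 *
 *	int bmsr = mdiobus_read(bus, phy_addr, MII_BMSR);
 */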
static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 reg;
	int ret;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	ret = pm_runtime_get_sync(data->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(data->dev);
		return ret;
	}

	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));

	while (1) {
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		writel(reg, &data->regs->user[0].access);

		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		reg = readl(&data->regs->user[0].access);
		ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
		break;
	}

	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);
	return ret;
}

static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
			      int phy_reg, u16 phy_data)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 reg;
	int ret;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	ret = pm_runtime_get_sync(data->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(data->dev);
		return ret;
	}

	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (phy_data & USERACCESS_DATA));

	while (1) {
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		writel(reg, &data->regs->user[0].access);

		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		break;
	}

	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);

	return ret;
}

static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
				 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "bus_freq", &prop)) {
		dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
		return -EINVAL;
	}
	data->bus_freq = prop;

	return 0;
}

#if IS_ENABLED(CONFIG_OF)
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
	.autosuspend_delay_ms = 100,
};

static const struct of_device_id davinci_mdio_of_mtable[] = {
	{ .compatible = "ti,davinci_mdio", },
	{ .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif

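/*
 * Illustrative device tree usage. The compatible strings and the bus_freq
 * property come from this driver; the unit address, reg window, label and
 * PHY address are placeholders, not taken from any real board:
 *
 *	mdio@4a101000 {
 *		compatible = "ti,cpsw-mdio", "ti,davinci_mdio";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		reg = <0x4a101000 0x100>;
 *		bus_freq = <1000000>;
 *
 *		ethphy0: ethernet-phy@0 {
 *			reg = <0>;
 *		};
 *	};
 *
 * When PHY child nodes such as ethernet-phy@0 are present, probe sets
 * skip_scan and leaves phy_mask untouched; otherwise the reset hook narrows
 * the scan using the ALIVE register.
 */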
static int davinci_mdio_probe(struct platform_device *pdev)
{
	struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device *dev = &pdev->dev;
	struct davinci_mdio_data *data;
	struct resource *res;
	struct phy_device *phy;
	int ret, addr;
	int autosuspend_delay_ms = -1;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->bus = devm_mdiobus_alloc(dev);
	if (!data->bus) {
		dev_err(dev, "failed to alloc mii bus\n");
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
		const struct of_device_id *of_id;

		ret = davinci_mdio_probe_dt(&data->pdata, pdev);
		if (ret)
			return ret;
		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);

		of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
		if (of_id) {
			const struct davinci_mdio_of_param *of_mdio_data;

			of_mdio_data = of_id->data;
			if (of_mdio_data)
				autosuspend_delay_ms =
					of_mdio_data->autosuspend_delay_ms;
		}
	} else {
		data->pdata = pdata ? (*pdata) : default_pdata;
		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
			 pdev->name, pdev->id);
	}

	data->bus->name = dev_name(dev);
	data->bus->read = davinci_mdio_read;
	data->bus->write = davinci_mdio_write;
	data->bus->reset = davinci_mdio_reset;
	data->bus->parent = dev;
	data->bus->priv = data;

	data->clk = devm_clk_get(dev, "fck");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "failed to get device clock\n");
		return PTR_ERR(data->clk);
	}

	dev_set_drvdata(dev, data);
	data->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	data->regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!data->regs)
		return -ENOMEM;

	davinci_mdio_init_clk(data);

	pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
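	/*
	 * Note: when no match data supplies a delay, autosuspend_delay_ms
	 * stays at -1; a negative autosuspend delay makes the PM core keep
	 * the device from being runtime-autosuspended, while "ti,cpsw-mdio"
	 * platforms get the 100 ms delay set above.
	 */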

	/* Register the mii bus.
	 * Create PHYs from DT only when PHY child nodes are explicitly
	 * defined, to stay backward compatible with DTs that expect Davinci
	 * MDIO to scan the bus for PHYs itself.
	 */
	if (dev->of_node && of_get_child_count(dev->of_node))
		data->skip_scan = true;

	ret = of_mdiobus_register(data->bus, dev->of_node);
	if (ret)
		goto bail_out;

	/* scan and dump the bus */
	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		phy = mdiobus_get_phy(data->bus, addr);
		if (phy) {
			dev_info(dev, "phy[%d]: device %s, driver %s\n",
				 phy->mdio.addr, phydev_name(phy),
				 phy->drv ? phy->drv->name : "unknown");
		}
	}

	return 0;

bail_out:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int davinci_mdio_remove(struct platform_device *pdev)
{
	struct davinci_mdio_data *data = platform_get_drvdata(pdev);

	if (data->bus)
		mdiobus_unregister(data->bus);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM
static int davinci_mdio_runtime_suspend(struct device *dev)
{
	struct davinci_mdio_data *data = dev_get_drvdata(dev);
	u32 ctrl;

	/* shutdown the scan state machine */
	ctrl = readl(&data->regs->control);
	ctrl &= ~CONTROL_ENABLE;
	writel(ctrl, &data->regs->control);
	wait_for_idle(data);

	return 0;
}

static int davinci_mdio_runtime_resume(struct device *dev)
{
	struct davinci_mdio_data *data = dev_get_drvdata(dev);

	davinci_mdio_enable(data);
	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int davinci_mdio_suspend(struct device *dev)
{
	struct davinci_mdio_data *data = dev_get_drvdata(dev);
	int ret = 0;

	data->active_in_suspend = !pm_runtime_status_suspended(dev);
	if (data->active_in_suspend)
		ret = pm_runtime_force_suspend(dev);
	if (ret < 0)
		return ret;

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int davinci_mdio_resume(struct device *dev)
{
	struct davinci_mdio_data *data = dev_get_drvdata(dev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);

	if (data->active_in_suspend)
		pm_runtime_force_resume(dev);

	return 0;
}
#endif

static const struct dev_pm_ops davinci_mdio_pm_ops = {
	SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
			   davinci_mdio_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};

static struct platform_driver davinci_mdio_driver = {
	.driver = {
		.name = "davinci_mdio",
		.pm = &davinci_mdio_pm_ops,
		.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
	},
	.probe = davinci_mdio_probe,
	.remove = davinci_mdio_remove,
};

static int __init davinci_mdio_init(void)
{
	return platform_driver_register(&davinci_mdio_driver);
}
device_initcall(davinci_mdio_init);

static void __exit davinci_mdio_exit(void)
{
	platform_driver_unregister(&davinci_mdio_driver);
}
module_exit(davinci_mdio_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DaVinci MDIO driver");