/*
 * DaVinci MDIO Module driver
 *
 * Copyright (C) 2010 Texas Instruments.
 *
 * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
 *
 * Copyright (C) 2009 Texas Instruments.
 *
 * ---------------------------------------------------------------------------
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 * ---------------------------------------------------------------------------
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/pinctrl/consumer.h>

/*
 * This timeout definition is a worst-case, ultra-defensive measure against
 * unexpected controller lockups.  Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT            100 /* msecs */

#define PHY_REG_MASK            0x1f
#define PHY_ID_MASK             0x1f

#define DEF_OUT_FREQ            2200000 /* 2.2 MHz */

struct davinci_mdio_of_param {
        int autosuspend_delay_ms;
};

struct davinci_mdio_regs {
        u32     version;
        u32     control;
#define CONTROL_IDLE            BIT(31)
#define CONTROL_ENABLE          BIT(30)
#define CONTROL_MAX_DIV         (0xffff)

        u32     alive;
        u32     link;
        u32     linkintraw;
        u32     linkintmasked;
        u32     __reserved_0[2];
        u32     userintraw;
        u32     userintmasked;
        u32     userintmaskset;
        u32     userintmaskclr;
        u32     __reserved_1[20];

        struct {
                u32     access;
#define USERACCESS_GO           BIT(31)
#define USERACCESS_WRITE        BIT(30)
#define USERACCESS_ACK          BIT(29)
#define USERACCESS_READ         (0)
#define USERACCESS_DATA         (0xffff)

                u32     physel;
        } user[0];
};

static const struct mdio_platform_data default_pdata = {
        .bus_freq = DEF_OUT_FREQ,
};

struct davinci_mdio_data {
        struct mdio_platform_data pdata;
        struct davinci_mdio_regs __iomem *regs;
        struct clk      *clk;
        struct device   *dev;
        struct mii_bus  *bus;
        bool            active_in_suspend;
        unsigned long   access_time; /* jiffies */
        /* Indicates that the driver shouldn't modify phy_mask when the
         * MDIO bus is registered from DT.
         */
        bool            skip_scan;
        u32             clk_div;
};

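/*
 * Compute the MDIO clock divider from the functional clock rate and the
 * requested bus frequency, and derive the worst-case settle time (in
 * jiffies) for a single user access.  The divider itself is programmed
 * into the control register by davinci_mdio_enable().
 */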
static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
{
        u32 mdio_in, div, mdio_out_khz, access_time;

        mdio_in = clk_get_rate(data->clk);
        div = (mdio_in / data->pdata.bus_freq) - 1;
        if (div > CONTROL_MAX_DIV)
                div = CONTROL_MAX_DIV;

        data->clk_div = div;
        /*
         * One mdio transaction consists of:
         *      32 bits of preamble
         *      32 bits of transferred data
         *      24 bits of bus yield (not needed unless shared?)
         */
        mdio_out_khz = mdio_in / (1000 * (div + 1));
        access_time = (88 * 1000) / mdio_out_khz;

        /*
         * In the worst case, we could be kicking off a user-access immediately
         * after the mdio bus scan state-machine triggered its own read.  If
         * so, our request could get deferred by one access cycle.  We
         * defensively allow for 4 access cycles.
         */
        data->access_time = usecs_to_jiffies(access_time * 4);
        if (!data->access_time)
                data->access_time = 1;
}

static void davinci_mdio_enable(struct davinci_mdio_data *data)
{
        /* set enable and clock divider */
        __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}

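/*
 * mii_bus reset callback: give the hardware PHY scan time to settle, report
 * the controller revision and, unless the bus is populated from DT, restrict
 * the bus scan to the PHYs the hardware reports as alive.
 */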
static int davinci_mdio_reset(struct mii_bus *bus)
{
        struct davinci_mdio_data *data = bus->priv;
        u32 phy_mask, ver;
        int ret;

        ret = pm_runtime_get_sync(data->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(data->dev);
                return ret;
        }

        /* wait for scan logic to settle */
        msleep(PHY_MAX_ADDR * data->access_time);

        /* dump hardware version info */
        ver = __raw_readl(&data->regs->version);
        dev_info(data->dev,
                 "davinci mdio revision %d.%d, bus freq %ld\n",
                 (ver >> 8) & 0xff, ver & 0xff,
                 data->pdata.bus_freq);

        if (data->skip_scan)
                goto done;

        /* get phy mask from the alive register */
        phy_mask = __raw_readl(&data->regs->alive);
        if (phy_mask) {
                /* restrict mdio bus to live phys only */
                dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
                phy_mask = ~phy_mask;
        } else {
                /* desperately scan all phys */
                dev_warn(data->dev, "no live phy, scanning all\n");
                phy_mask = 0;
        }
        data->bus->phy_mask = phy_mask;

done:
        pm_runtime_mark_last_busy(data->dev);
        pm_runtime_put_autosuspend(data->dev);

        return 0;
}

/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
        struct davinci_mdio_regs __iomem *regs = data->regs;
        unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
        u32 reg;

        while (time_after(timeout, jiffies)) {
                reg = __raw_readl(&regs->user[0].access);
                if ((reg & USERACCESS_GO) == 0)
                        return 0;

                reg = __raw_readl(&regs->control);
                if ((reg & CONTROL_IDLE) == 0) {
                        usleep_range(100, 200);
                        continue;
                }

                /*
                 * An emac soft_reset may have clobbered the mdio controller's
                 * state machine.  We need to reset and retry the current
                 * operation.
                 */
                dev_warn(data->dev, "resetting idled controller\n");
                davinci_mdio_enable(data);
                return -EAGAIN;
        }

        reg = __raw_readl(&regs->user[0].access);
        if ((reg & USERACCESS_GO) == 0)
                return 0;

        dev_err(data->dev, "timed out waiting for user access\n");
        return -ETIMEDOUT;
}

/* wait until hardware state machine is idle */
static inline int wait_for_idle(struct davinci_mdio_data *data)
{
        struct davinci_mdio_regs __iomem *regs = data->regs;
        unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);

        while (time_after(timeout, jiffies)) {
                if (__raw_readl(&regs->control) & CONTROL_IDLE)
                        return 0;
        }
        dev_err(data->dev, "timed out waiting for idle\n");
        return -ETIMEDOUT;
}

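/*
 * mii_bus read callback: issue a read through user access register 0 and
 * return the 16-bit result on ACK, -EIO otherwise.  The transaction is
 * retried whenever the controller had to be re-enabled (-EAGAIN).
 */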
static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
        struct davinci_mdio_data *data = bus->priv;
        u32 reg;
        int ret;

        if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
                return -EINVAL;

        ret = pm_runtime_get_sync(data->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(data->dev);
                return ret;
        }

        reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
               (phy_id << 16));

        while (1) {
                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
                        continue;
                if (ret < 0)
                        break;

                __raw_writel(reg, &data->regs->user[0].access);

                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
                        continue;
                if (ret < 0)
                        break;

                reg = __raw_readl(&data->regs->user[0].access);
                ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
                break;
        }

        pm_runtime_mark_last_busy(data->dev);
        pm_runtime_put_autosuspend(data->dev);
        return ret;
}

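/*
 * mii_bus write callback: issue a write through user access register 0,
 * retrying whenever the controller had to be re-enabled (-EAGAIN).
 */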
static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
                              int phy_reg, u16 phy_data)
{
        struct davinci_mdio_data *data = bus->priv;
        u32 reg;
        int ret;

        if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
                return -EINVAL;

        ret = pm_runtime_get_sync(data->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(data->dev);
                return ret;
        }

        reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
               (phy_id << 16) | (phy_data & USERACCESS_DATA));

        while (1) {
                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
                        continue;
                if (ret < 0)
                        break;

                __raw_writel(reg, &data->regs->user[0].access);

                ret = wait_for_user_access(data);
                if (ret == -EAGAIN)
                        continue;
                break;
        }

        pm_runtime_mark_last_busy(data->dev);
        pm_runtime_put_autosuspend(data->dev);

        return ret;
}

#if IS_ENABLED(CONFIG_OF)
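/* Fill in platform data from the device tree node (currently only bus_freq). */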
static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
                                 struct platform_device *pdev)
{
        struct device_node *node = pdev->dev.of_node;
        u32 prop;

        if (!node)
                return -EINVAL;

        if (of_property_read_u32(node, "bus_freq", &prop)) {
                dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
                return -EINVAL;
        }
        data->bus_freq = prop;

        return 0;
}
#endif

#if IS_ENABLED(CONFIG_OF)
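/* "ti,cpsw-mdio" requests a 100 ms runtime PM autosuspend delay (see probe). */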
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
        .autosuspend_delay_ms = 100,
};

static const struct of_device_id davinci_mdio_of_mtable[] = {
        { .compatible = "ti,davinci_mdio", },
        { .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif

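/*
 * Probe: gather bus parameters from DT or platform data (falling back to the
 * 2.2 MHz default when no platform data is given), map the registers, set up
 * runtime PM and register the MDIO bus -- via of_mdiobus_register() when PHY
 * child nodes are present, plain mdiobus_register() otherwise.
 */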
static int davinci_mdio_probe(struct platform_device *pdev)
{
        struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct device *dev = &pdev->dev;
        struct davinci_mdio_data *data;
        struct resource *res;
        struct phy_device *phy;
        int ret, addr;
        int autosuspend_delay_ms = -1;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->bus = devm_mdiobus_alloc(dev);
        if (!data->bus) {
                dev_err(dev, "failed to alloc mii bus\n");
                return -ENOMEM;
        }

        if (dev->of_node) {
                const struct of_device_id *of_id;

                ret = davinci_mdio_probe_dt(&data->pdata, pdev);
                if (ret)
                        return ret;
                snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);

                of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
                if (of_id) {
                        const struct davinci_mdio_of_param *of_mdio_data;

                        of_mdio_data = of_id->data;
                        if (of_mdio_data)
                                autosuspend_delay_ms =
                                        of_mdio_data->autosuspend_delay_ms;
                }
        } else {
                data->pdata = pdata ? (*pdata) : default_pdata;
                snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
                         pdev->name, pdev->id);
        }

        data->bus->name = dev_name(dev);
        data->bus->read = davinci_mdio_read;
        data->bus->write = davinci_mdio_write;
        data->bus->reset = davinci_mdio_reset;
        data->bus->parent = dev;
        data->bus->priv = data;

        data->clk = devm_clk_get(dev, "fck");
        if (IS_ERR(data->clk)) {
                dev_err(dev, "failed to get device clock\n");
                return PTR_ERR(data->clk);
        }

        dev_set_drvdata(dev, data);
        data->dev = dev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->regs))
                return PTR_ERR(data->regs);

        davinci_mdio_init_clk(data);

        pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        /* Register the mii bus.
         * Create PHYs from DT only if PHY child nodes are explicitly defined,
         * to stay backward compatible with DTs that assume the Davinci MDIO
         * driver will always scan the bus for PHYs.
         */
        if (dev->of_node && of_get_child_count(dev->of_node)) {
                data->skip_scan = true;
                ret = of_mdiobus_register(data->bus, dev->of_node);
        } else {
                ret = mdiobus_register(data->bus);
        }
        if (ret)
                goto bail_out;

        /* scan and dump the bus */
        for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
                phy = mdiobus_get_phy(data->bus, addr);
                if (phy) {
                        dev_info(dev, "phy[%d]: device %s, driver %s\n",
                                 phy->mdio.addr, phydev_name(phy),
                                 phy->drv ? phy->drv->name : "unknown");
                }
        }

        return 0;

bail_out:
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return ret;
}

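/* Unregister the MDIO bus and tear down runtime PM. */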
static int davinci_mdio_remove(struct platform_device *pdev)
{
        struct davinci_mdio_data *data = platform_get_drvdata(pdev);

        if (data->bus)
                mdiobus_unregister(data->bus);

        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM
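/*
 * Runtime PM: suspend disables the MDIO state machine and waits for it to go
 * idle; resume re-enables it with the previously computed clock divider.
 */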
static int davinci_mdio_runtime_suspend(struct device *dev)
{
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
        u32 ctrl;

        /* shutdown the scan state machine */
        ctrl = __raw_readl(&data->regs->control);
        ctrl &= ~CONTROL_ENABLE;
        __raw_writel(ctrl, &data->regs->control);
        wait_for_idle(data);

        return 0;
}

static int davinci_mdio_runtime_resume(struct device *dev)
{
        struct davinci_mdio_data *data = dev_get_drvdata(dev);

        davinci_mdio_enable(data);
        return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
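/*
 * System sleep: force a runtime suspend/resume cycle around system sleep when
 * the device was runtime-active, and switch between the sleep and default
 * pinctrl states.
 */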
static int davinci_mdio_suspend(struct device *dev)
{
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
        int ret = 0;

        data->active_in_suspend = !pm_runtime_status_suspended(dev);
        if (data->active_in_suspend)
                ret = pm_runtime_force_suspend(dev);
        if (ret < 0)
                return ret;

        /* Select sleep pin state */
        pinctrl_pm_select_sleep_state(dev);

        return 0;
}

static int davinci_mdio_resume(struct device *dev)
{
        struct davinci_mdio_data *data = dev_get_drvdata(dev);

        /* Select default pin state */
        pinctrl_pm_select_default_state(dev);

        if (data->active_in_suspend)
                pm_runtime_force_resume(dev);

        return 0;
}
#endif

static const struct dev_pm_ops davinci_mdio_pm_ops = {
        SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
                           davinci_mdio_runtime_resume, NULL)
        SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};

static struct platform_driver davinci_mdio_driver = {
        .driver = {
                .name           = "davinci_mdio",
                .pm             = &davinci_mdio_pm_ops,
                .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
        },
        .probe = davinci_mdio_probe,
        .remove = davinci_mdio_remove,
};

static int __init davinci_mdio_init(void)
{
        return platform_driver_register(&davinci_mdio_driver);
}
device_initcall(davinci_mdio_init);

static void __exit davinci_mdio_exit(void)
{
        platform_driver_unregister(&davinci_mdio_driver);
}
module_exit(davinci_mdio_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DaVinci MDIO driver");