[Code-browser page chrome: "Linux Audio" site header, training-course banner, "Loading..." placeholder.]
The listing below is the TI DaVinci MDIO module driver (drivers/net/ethernet/ti/davinci_mdio.c) as of Linux v6.13.7.
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * DaVinci MDIO Module driver
  4 *
  5 * Copyright (C) 2010 Texas Instruments.
  6 *
  7 * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
  8 *
  9 * Copyright (C) 2009 Texas Instruments.
 10 *
 11 */
 12#include <linux/module.h>
 13#include <linux/kernel.h>
 14#include <linux/platform_device.h>
 15#include <linux/delay.h>
 16#include <linux/sched.h>
 17#include <linux/slab.h>
 18#include <linux/phy.h>
 19#include <linux/clk.h>
 20#include <linux/err.h>
 21#include <linux/io.h>
 22#include <linux/iopoll.h>
 23#include <linux/pm_runtime.h>
 24#include <linux/davinci_emac.h>
 25#include <linux/of.h>
 
 26#include <linux/of_mdio.h>
 27#include <linux/pinctrl/consumer.h>
 28#include <linux/mdio-bitbang.h>
 29#include <linux/sys_soc.h>
 30
 31/*
 32 * This timeout definition is a worst-case ultra defensive measure against
 33 * unexpected controller lock ups.  Ideally, we should never ever hit this
 34 * scenario in practice.
 35 */
 36#define MDIO_TIMEOUT		100 /* msecs */
 37
 38#define PHY_REG_MASK		0x1f
 39#define PHY_ID_MASK		0x1f
 40
 41#define DEF_OUT_FREQ		2200000		/* 2.2 MHz */
 42
/* Per-compatible configuration attached to the OF match table. */
struct davinci_mdio_of_param {
	int autosuspend_delay_ms;	/* runtime-PM autosuspend delay; -1 keeps the PM core default */
	bool manual_mode;		/* NOTE(review): no match-table entry in this file sets this — confirm external users */
};
 47
/*
 * Memory-mapped register layout of the DaVinci MDIO controller.  Field
 * order and the __reserved_* padding must match the hardware memory map
 * exactly; the #defines interleaved below name bits of nearby registers.
 */
struct davinci_mdio_regs {
	u32	version;
	u32	control;
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)
#define CONTROL_MAX_DIV		(0xffff)
#define CONTROL_CLKDIV		GENMASK(15, 0)

/* bits of the manualif register (manual / bit-bang mode) */
#define MDIO_MAN_MDCLK_O	BIT(2)
#define MDIO_MAN_OE		BIT(1)
#define MDIO_MAN_PIN		BIT(0)
/* bit of the poll register that switches the controller to manual mode */
#define MDIO_MANUALMODE		BIT(31)

/* bit index of the MDIO data pin within manualif (see davinci_get_mdio_data) */
#define MDIO_PIN               0


	u32	alive;		/* bitmask of PHY addresses that acked the scan */
	u32	link;
	u32	linkintraw;
	u32	linkintmasked;
	u32	__reserved_0[2];
	u32	userintraw;
	u32	userintmasked;
	u32	userintmaskset;
	u32	userintmaskclr;
	u32	manualif;
	u32	poll;
	u32	__reserved_1[18];

	/* per-channel user access registers; flexible array, hardware-sized */
	struct {
		u32	access;
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)

		u32	physel;
	}	user[];
};
 88
/* Fallback platform data when none is supplied: 2.2 MHz MDIO clock. */
static const struct mdio_platform_data default_pdata = {
	.bus_freq = DEF_OUT_FREQ,
};
 92
/* Per-instance driver state. */
struct davinci_mdio_data {
	struct mdio_platform_data pdata;	/* bus frequency configuration */
	struct mdiobb_ctrl bb_ctrl;		/* bit-bang control block (manual mode only) */
	struct davinci_mdio_regs __iomem *regs;	/* mapped controller registers */
	struct clk	*clk;			/* functional clock ("fck") */
	struct device	*dev;
	struct mii_bus	*bus;
	bool            active_in_suspend;	/* device was runtime-active at system suspend */
	unsigned long	access_time; /* jiffies */
	/* Indicates that driver shouldn't modify phy_mask in case
	 * if MDIO bus is registered from DT.
	 */
	bool		skip_scan;
	u32		clk_div;	/* precomputed MDIO clock divider */
	bool		manual_mode;	/* drive the bus by bit-banging instead of the state machine */
};
109
110static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
111{
112	u32 mdio_in, div, mdio_out_khz, access_time;
113
114	mdio_in = clk_get_rate(data->clk);
115	div = (mdio_in / data->pdata.bus_freq) - 1;
116	if (div > CONTROL_MAX_DIV)
117		div = CONTROL_MAX_DIV;
118
119	data->clk_div = div;
120	/*
121	 * One mdio transaction consists of:
122	 *	32 bits of preamble
123	 *	32 bits of transferred data
124	 *	24 bits of bus yield (not needed unless shared?)
125	 */
126	mdio_out_khz = mdio_in / (1000 * (div + 1));
127	access_time  = (88 * 1000) / mdio_out_khz;
128
129	/*
130	 * In the worst case, we could be kicking off a user-access immediately
131	 * after the mdio bus scan state-machine triggered its own read.  If
132	 * so, our request could get deferred by one access cycle.  We
133	 * defensively allow for 4 access cycles.
134	 */
135	data->access_time = usecs_to_jiffies(access_time * 4);
136	if (!data->access_time)
137		data->access_time = 1;
138}
139
140static void davinci_mdio_enable(struct davinci_mdio_data *data)
141{
142	/* set enable and clock divider */
143	writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
144}
145
146static void davinci_mdio_disable(struct davinci_mdio_data *data)
147{
148	u32 reg;
149
150	/* Disable MDIO state machine */
151	reg = readl(&data->regs->control);
152
153	reg &= ~CONTROL_CLKDIV;
154	reg |= data->clk_div;
155
156	reg &= ~CONTROL_ENABLE;
157	writel(reg, &data->regs->control);
158}
159
160static void davinci_mdio_enable_manual_mode(struct davinci_mdio_data *data)
161{
162	u32 reg;
163	/* set manual mode */
164	reg = readl(&data->regs->poll);
165	reg |= MDIO_MANUALMODE;
166	writel(reg, &data->regs->poll);
167}
168
169static void davinci_set_mdc(struct mdiobb_ctrl *ctrl, int level)
170{
171	struct davinci_mdio_data *data;
172	u32 reg;
173
174	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
175	reg = readl(&data->regs->manualif);
176
177	if (level)
178		reg |= MDIO_MAN_MDCLK_O;
179	else
180		reg &= ~MDIO_MAN_MDCLK_O;
181
182	writel(reg, &data->regs->manualif);
183}
184
185static void davinci_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
186{
187	struct davinci_mdio_data *data;
188	u32 reg;
189
190	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
191	reg = readl(&data->regs->manualif);
192
193	if (output)
194		reg |= MDIO_MAN_OE;
195	else
196		reg &= ~MDIO_MAN_OE;
197
198	writel(reg, &data->regs->manualif);
199}
200
201static void  davinci_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
202{
203	struct davinci_mdio_data *data;
204	u32 reg;
205
206	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
207	reg = readl(&data->regs->manualif);
208
209	if (value)
210		reg |= MDIO_MAN_PIN;
211	else
212		reg &= ~MDIO_MAN_PIN;
213
214	writel(reg, &data->regs->manualif);
215}
216
217static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl)
218{
219	struct davinci_mdio_data *data;
220	unsigned long reg;
221
222	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
223	reg = readl(&data->regs->manualif);
224	return test_bit(MDIO_PIN, &reg);
225}
226
227static int davinci_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
228{
229	int ret;
230
231	ret = pm_runtime_resume_and_get(bus->parent);
232	if (ret < 0)
233		return ret;
234
235	ret = mdiobb_read_c22(bus, phy, reg);
236
237	pm_runtime_mark_last_busy(bus->parent);
238	pm_runtime_put_autosuspend(bus->parent);
239
240	return ret;
241}
242
243static int davinci_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg,
244				    u16 val)
245{
246	int ret;
247
248	ret = pm_runtime_resume_and_get(bus->parent);
249	if (ret < 0)
250		return ret;
251
252	ret = mdiobb_write_c22(bus, phy, reg, val);
253
254	pm_runtime_mark_last_busy(bus->parent);
255	pm_runtime_put_autosuspend(bus->parent);
256
257	return ret;
258}
259
260static int davinci_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad,
261				   int reg)
262{
263	int ret;
264
265	ret = pm_runtime_resume_and_get(bus->parent);
266	if (ret < 0)
267		return ret;
268
269	ret = mdiobb_read_c45(bus, phy, devad, reg);
270
271	pm_runtime_mark_last_busy(bus->parent);
272	pm_runtime_put_autosuspend(bus->parent);
273
274	return ret;
275}
276
277static int davinci_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad,
278				    int reg, u16 val)
279{
280	int ret;
281
282	ret = pm_runtime_resume_and_get(bus->parent);
283	if (ret < 0)
284		return ret;
285
286	ret = mdiobb_write_c45(bus, phy, devad, reg, val);
287
288	pm_runtime_mark_last_busy(bus->parent);
289	pm_runtime_put_autosuspend(bus->parent);
290
291	return ret;
292}
293
/*
 * Common (re)initialization for both bus flavours: optionally switch to
 * manual mode, let the hardware scan settle, log the revision and —
 * unless the PHY list came from DT — derive phy_mask from the ALIVE
 * register.  Returns 0, or a negative errno from runtime PM.
 */
static int davinci_mdio_common_reset(struct davinci_mdio_data *data)
{
	u32 phy_mask, ver;
	int ret;

	ret = pm_runtime_resume_and_get(data->dev);
	if (ret < 0)
		return ret;

	if (data->manual_mode) {
		/* stop the state machine, then hand the pins to software */
		davinci_mdio_disable(data);
		davinci_mdio_enable_manual_mode(data);
	}

	/* wait for scan logic to settle */
	msleep(PHY_MAX_ADDR * data->access_time);

	/* dump hardware version info */
	ver = readl(&data->regs->version);
	dev_info(data->dev,
		 "davinci mdio revision %d.%d, bus freq %ld\n",
		 (ver >> 8) & 0xff, ver & 0xff,
		 data->pdata.bus_freq);

	/* DT listed the PHYs explicitly; don't second-guess it */
	if (data->skip_scan)
		goto done;

	/* get phy mask from the alive register */
	phy_mask = readl(&data->regs->alive);
	if (phy_mask) {
		/* restrict mdio bus to live phys only */
		dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
		phy_mask = ~phy_mask;
	} else {
		/* desperately scan all phys */
		dev_warn(data->dev, "no live phy, scanning all\n");
		phy_mask = 0;
	}
	data->bus->phy_mask = phy_mask;

done:
	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);

	return 0;
}
340
341static int davinci_mdio_reset(struct mii_bus *bus)
342{
343	struct davinci_mdio_data *data = bus->priv;
344
345	return davinci_mdio_common_reset(data);
346}
347
348static int davinci_mdiobb_reset(struct mii_bus *bus)
349{
350	struct mdiobb_ctrl *ctrl = bus->priv;
351	struct davinci_mdio_data *data;
352
353	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
354
355	return davinci_mdio_common_reset(data);
356}
357
/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
	struct davinci_mdio_regs __iomem *regs = data->regs;
	unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
	u32 reg;

	/* Poll until the GO bit clears (0), the controller went
	 * unexpectedly idle (-EAGAIN: caller must resubmit), or
	 * MDIO_TIMEOUT elapses (-ETIMEDOUT).
	 */
	while (time_after(timeout, jiffies)) {
		reg = readl(&regs->user[0].access);
		if ((reg & USERACCESS_GO) == 0)
			return 0;

		reg = readl(&regs->control);
		if ((reg & CONTROL_IDLE) == 0) {
			/* transaction still in flight; back off briefly */
			usleep_range(100, 200);
			continue;
		}

		/*
		 * An emac soft_reset may have clobbered the mdio controller's
		 * state machine.  We need to reset and retry the current
		 * operation
		 */
		dev_warn(data->dev, "resetting idled controller\n");
		davinci_mdio_enable(data);
		return -EAGAIN;
	}

	/* final check in case we raced with completion at the deadline */
	reg = readl(&regs->user[0].access);
	if ((reg & USERACCESS_GO) == 0)
		return 0;

	dev_err(data->dev, "timed out waiting for user access\n");
	return -ETIMEDOUT;
}
393
394/* wait until hardware state machine is idle */
395static inline int wait_for_idle(struct davinci_mdio_data *data)
396{
397	struct davinci_mdio_regs __iomem *regs = data->regs;
398	u32 val, ret;
399
400	ret = readl_poll_timeout(&regs->control, val, val & CONTROL_IDLE,
401				 0, MDIO_TIMEOUT * 1000);
402	if (ret)
403		dev_err(data->dev, "timed out waiting for idle\n");
404
405	return ret;
406}
407
/*
 * Clause-22 read through the hardware state machine.  Returns the
 * 16-bit register value when the PHY ACKed, -EIO on a missing ACK, or a
 * negative errno from runtime PM / the wait helper.
 */
static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 reg;
	int ret;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(data->dev);
	if (ret < 0)
		return ret;

	/* command word: GO + READ, register in bits 25:21, phy in 20:16 */
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));

	while (1) {
		/* -EAGAIN means the controller was re-enabled: resubmit */
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		writel(reg, &data->regs->user[0].access);

		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		/* completion: data valid only when the PHY acknowledged */
		reg = readl(&data->regs->user[0].access);
		ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
		break;
	}

	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);
	return ret;
}
448
/*
 * Clause-22 write through the hardware state machine.  Returns 0 on
 * success or a negative errno from runtime PM / the wait helper.
 */
static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
			      int phy_reg, u16 phy_data)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 reg;
	int ret;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(data->dev);
	if (ret < 0)
		return ret;

	/* command word: GO + WRITE, register 25:21, phy 20:16, data 15:0 */
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
		   (phy_id << 16) | (phy_data & USERACCESS_DATA));

	while (1) {
		/* -EAGAIN means the controller was re-enabled: resubmit */
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		writel(reg, &data->regs->user[0].access);

		/* wait for the write to complete; no ACK check needed */
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		break;
	}

	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);

	return ret;
}
486
487static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
488			 struct platform_device *pdev)
489{
490	struct device_node *node = pdev->dev.of_node;
491	u32 prop;
492
493	if (!node)
494		return -EINVAL;
495
496	if (of_property_read_u32(node, "bus_freq", &prop)) {
497		dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
498		return -EINVAL;
499	}
500	data->bus_freq = prop;
501
502	return 0;
503}
504
/* SoC-match data: whether this SoC's MDIO block must be bit-banged. */
struct k3_mdio_soc_data {
	bool manual_mode;
};

static const struct k3_mdio_soc_data am65_mdio_soc_data = {
	.manual_mode = true,
};

/* K3 families on which the driver drives the bus manually instead of
 * using the hardware state machine (presumably an erratum workaround —
 * confirm against TI errata documentation).
 */
static const struct soc_device_attribute k3_mdio_socinfo[] = {
	{ .family = "AM62X", .data = &am65_mdio_soc_data },
	{ .family = "AM64X", .data = &am65_mdio_soc_data },
	{ .family = "AM65X", .data = &am65_mdio_soc_data },
	{ .family = "J7200", .data = &am65_mdio_soc_data },
	{ .family = "J721E", .data = &am65_mdio_soc_data },
	{ .family = "J721S2", .data = &am65_mdio_soc_data },
	{ /* sentinel */ },
};
522
#if IS_ENABLED(CONFIG_OF)
/* "ti,cpsw-mdio" additionally gets a 100 ms runtime-PM autosuspend delay. */
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
	.autosuspend_delay_ms = 100,
};

static const struct of_device_id davinci_mdio_of_mtable[] = {
	{ .compatible = "ti,davinci_mdio", },
	{ .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif
535
/* Pin-level callbacks consumed by the generic mdio-bitbang library. */
static const struct mdiobb_ops davinci_mdiobb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = davinci_set_mdc,
	.set_mdio_dir = davinci_set_mdio_dir,
	.set_mdio_data = davinci_set_mdio_data,
	.get_mdio_data = davinci_get_mdio_data,
};
543
/*
 * Probe: pick the bus flavour (bit-bang vs. hardware state machine) from
 * SoC match data, parse configuration, map registers, set up runtime PM
 * with autosuspend, then register and scan the MII bus.
 */
static int davinci_mdio_probe(struct platform_device *pdev)
{
	struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device *dev = &pdev->dev;
	struct davinci_mdio_data *data;
	struct resource *res;
	struct phy_device *phy;
	int ret, addr;
	int autosuspend_delay_ms = -1;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->manual_mode = false;
	data->bb_ctrl.ops = &davinci_mdiobb_ops;

	/* some K3 SoCs must bit-bang the bus (see k3_mdio_socinfo) */
	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
		const struct soc_device_attribute *soc_match_data;

		soc_match_data = soc_device_match(k3_mdio_socinfo);
		if (soc_match_data && soc_match_data->data) {
			const struct k3_mdio_soc_data *socdata =
						soc_match_data->data;

			data->manual_mode = socdata->manual_mode;
		}
	}

	/* the bitbang bus is not devm-managed; it is freed in remove() */
	if (data->manual_mode)
		data->bus = alloc_mdio_bitbang(&data->bb_ctrl);
	else
		data->bus = devm_mdiobus_alloc(dev);

	if (!data->bus) {
		dev_err(dev, "failed to alloc mii bus\n");
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
		const struct davinci_mdio_of_param *of_mdio_data;

		ret = davinci_mdio_probe_dt(&data->pdata, pdev);
		if (ret)
			return ret;
		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);

		of_mdio_data = of_device_get_match_data(&pdev->dev);
		if (of_mdio_data) {
			autosuspend_delay_ms =
					of_mdio_data->autosuspend_delay_ms;
		}
	} else {
		/* non-DT boot: use supplied platform data or the default */
		data->pdata = pdata ? (*pdata) : default_pdata;
		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
			 pdev->name, pdev->id);
	}

	data->bus->name		= dev_name(dev);

	if (data->manual_mode) {
		data->bus->read		= davinci_mdiobb_read_c22;
		data->bus->write	= davinci_mdiobb_write_c22;
		data->bus->read_c45	= davinci_mdiobb_read_c45;
		data->bus->write_c45	= davinci_mdiobb_write_c45;
		data->bus->reset	= davinci_mdiobb_reset;

		dev_info(dev, "Configuring MDIO in manual mode\n");
	} else {
		/* in bitbang mode bus->priv belongs to the library instead */
		data->bus->read		= davinci_mdio_read;
		data->bus->write	= davinci_mdio_write;
		data->bus->reset	= davinci_mdio_reset;
		data->bus->priv		= data;
	}
	data->bus->parent	= dev;

	data->clk = devm_clk_get(dev, "fck");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "failed to get device clock\n");
		return PTR_ERR(data->clk);
	}

	dev_set_drvdata(dev, data);
	data->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	data->regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!data->regs)
		return -ENOMEM;

	davinci_mdio_init_clk(data);

	pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* register the mii bus
	 * Create PHYs from DT only in case if PHY child nodes are explicitly
	 * defined to support backward compatibility with DTs which assume that
	 * Davinci MDIO will always scan the bus for PHYs detection.
	 */
	if (dev->of_node && of_get_child_count(dev->of_node))
		data->skip_scan = true;

	ret = of_mdiobus_register(data->bus, dev->of_node);
	if (ret)
		goto bail_out;

	/* scan and dump the bus */
	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		phy = mdiobus_get_phy(data->bus, addr);
		if (phy) {
			dev_info(dev, "phy[%d]: device %s, driver %s\n",
				 phy->mdio.addr, phydev_name(phy),
				 phy->drv ? phy->drv->name : "unknown");
		}
	}

	return 0;

bail_out:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
671
672static void davinci_mdio_remove(struct platform_device *pdev)
673{
674	struct davinci_mdio_data *data = platform_get_drvdata(pdev);
675
676	if (data->bus) {
677		mdiobus_unregister(data->bus);
678
679		if (data->manual_mode)
680			free_mdio_bitbang(data->bus);
681	}
682
683	pm_runtime_dont_use_autosuspend(&pdev->dev);
684	pm_runtime_disable(&pdev->dev);
 
 
685}
686
687#ifdef CONFIG_PM
688static int davinci_mdio_runtime_suspend(struct device *dev)
689{
690	struct davinci_mdio_data *data = dev_get_drvdata(dev);
691	u32 ctrl;
692
693	/* shutdown the scan state machine */
694	ctrl = readl(&data->regs->control);
695	ctrl &= ~CONTROL_ENABLE;
696	writel(ctrl, &data->regs->control);
697
698	if (!data->manual_mode)
699		wait_for_idle(data);
700
701	return 0;
702}
703
704static int davinci_mdio_runtime_resume(struct device *dev)
705{
706	struct davinci_mdio_data *data = dev_get_drvdata(dev);
707
708	if (data->manual_mode) {
709		davinci_mdio_disable(data);
710		davinci_mdio_enable_manual_mode(data);
711	} else {
712		davinci_mdio_enable(data);
713	}
714	return 0;
715}
716#endif
717
718#ifdef CONFIG_PM_SLEEP
719static int davinci_mdio_suspend(struct device *dev)
720{
721	struct davinci_mdio_data *data = dev_get_drvdata(dev);
722	int ret = 0;
723
724	data->active_in_suspend = !pm_runtime_status_suspended(dev);
725	if (data->active_in_suspend)
726		ret = pm_runtime_force_suspend(dev);
727	if (ret < 0)
728		return ret;
729
730	/* Select sleep pin state */
731	pinctrl_pm_select_sleep_state(dev);
732
733	return 0;
734}
735
736static int davinci_mdio_resume(struct device *dev)
737{
738	struct davinci_mdio_data *data = dev_get_drvdata(dev);
739
740	/* Select default pin state */
741	pinctrl_pm_select_default_state(dev);
742
743	if (data->active_in_suspend)
744		pm_runtime_force_resume(dev);
745
746	return 0;
747}
748#endif
749
/* Runtime PM stops/starts the controller; system sleep hooks run in the
 * late suspend / early resume phase.
 */
static const struct dev_pm_ops davinci_mdio_pm_ops = {
	SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
			   davinci_mdio_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};
755
/* Platform driver glue; matches "davinci_mdio" or the OF table above. */
static struct platform_driver davinci_mdio_driver = {
	.driver = {
		.name	 = "davinci_mdio",
		.pm	 = &davinci_mdio_pm_ops,
		.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
	},
	.probe = davinci_mdio_probe,
	.remove = davinci_mdio_remove,
};
765
static int __init davinci_mdio_init(void)
{
	/* Registered via device_initcall() below rather than module_init(). */
	return platform_driver_register(&davinci_mdio_driver);
}
device_initcall(davinci_mdio_init);
771
static void __exit davinci_mdio_exit(void)
{
	/* Module unload: drop the platform driver registration. */
	platform_driver_unregister(&davinci_mdio_driver);
}
module_exit(davinci_mdio_exit);
777
778MODULE_LICENSE("GPL");
779MODULE_DESCRIPTION("DaVinci MDIO driver");
v5.9 — an older revision of the same file (davinci_mdio.c) follows below for comparison.
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * DaVinci MDIO Module driver
  4 *
  5 * Copyright (C) 2010 Texas Instruments.
  6 *
  7 * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
  8 *
  9 * Copyright (C) 2009 Texas Instruments.
 10 *
 11 */
 12#include <linux/module.h>
 13#include <linux/kernel.h>
 14#include <linux/platform_device.h>
 15#include <linux/delay.h>
 16#include <linux/sched.h>
 17#include <linux/slab.h>
 18#include <linux/phy.h>
 19#include <linux/clk.h>
 20#include <linux/err.h>
 21#include <linux/io.h>
 22#include <linux/iopoll.h>
 23#include <linux/pm_runtime.h>
 24#include <linux/davinci_emac.h>
 25#include <linux/of.h>
 26#include <linux/of_device.h>
 27#include <linux/of_mdio.h>
 28#include <linux/pinctrl/consumer.h>
 
 
 29
 30/*
 31 * This timeout definition is a worst-case ultra defensive measure against
 32 * unexpected controller lock ups.  Ideally, we should never ever hit this
 33 * scenario in practice.
 34 */
 35#define MDIO_TIMEOUT		100 /* msecs */
 36
 37#define PHY_REG_MASK		0x1f
 38#define PHY_ID_MASK		0x1f
 39
 40#define DEF_OUT_FREQ		2200000		/* 2.2 MHz */
 41
/* Per-compatible configuration attached to the OF match table. */
struct davinci_mdio_of_param {
	int autosuspend_delay_ms;	/* runtime-PM autosuspend delay; -1 keeps the PM core default */
};
 45
 46struct davinci_mdio_regs {
 47	u32	version;
 48	u32	control;
 49#define CONTROL_IDLE		BIT(31)
 50#define CONTROL_ENABLE		BIT(30)
 51#define CONTROL_MAX_DIV		(0xffff)
 
 
 
 
 
 
 
 
 
 52
 53	u32	alive;
 54	u32	link;
 55	u32	linkintraw;
 56	u32	linkintmasked;
 57	u32	__reserved_0[2];
 58	u32	userintraw;
 59	u32	userintmasked;
 60	u32	userintmaskset;
 61	u32	userintmaskclr;
 62	u32	__reserved_1[20];
 
 
 63
 64	struct {
 65		u32	access;
 66#define USERACCESS_GO		BIT(31)
 67#define USERACCESS_WRITE	BIT(30)
 68#define USERACCESS_ACK		BIT(29)
 69#define USERACCESS_READ		(0)
 70#define USERACCESS_DATA		(0xffff)
 71
 72		u32	physel;
 73	}	user[0];
 74};
 75
/* Fallback platform data when none is supplied: 2.2 MHz MDIO clock. */
static const struct mdio_platform_data default_pdata = {
	.bus_freq = DEF_OUT_FREQ,
};
 79
/* Per-instance driver state. */
struct davinci_mdio_data {
	struct mdio_platform_data pdata;	/* bus frequency configuration */
	struct davinci_mdio_regs __iomem *regs;	/* mapped controller registers */
	struct clk	*clk;			/* functional clock ("fck") */
	struct device	*dev;
	struct mii_bus	*bus;
	bool            active_in_suspend;	/* device was runtime-active at system suspend */
	unsigned long	access_time; /* jiffies */
	/* Indicates that driver shouldn't modify phy_mask in case
	 * if MDIO bus is registered from DT.
	 */
	bool		skip_scan;
	u32		clk_div;	/* precomputed MDIO clock divider */
};
 94
/*
 * Compute the MDIO clock divider and a conservative per-transaction
 * access time (in jiffies, never zero) from the functional clock rate
 * and the requested bus frequency.
 *
 * NOTE(review): if pdata.bus_freq is 0, or the functional clock rate is
 * so low that mdio_out_khz computes to 0, the divisions below fault —
 * confirm callers always supply sane values.
 */
static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
{
	u32 mdio_in, div, mdio_out_khz, access_time;

	mdio_in = clk_get_rate(data->clk);
	div = (mdio_in / data->pdata.bus_freq) - 1;
	if (div > CONTROL_MAX_DIV)
		div = CONTROL_MAX_DIV;

	data->clk_div = div;
	/*
	 * One mdio transaction consists of:
	 *	32 bits of preamble
	 *	32 bits of transferred data
	 *	24 bits of bus yield (not needed unless shared?)
	 */
	mdio_out_khz = mdio_in / (1000 * (div + 1));
	access_time  = (88 * 1000) / mdio_out_khz;

	/*
	 * In the worst case, we could be kicking off a user-access immediately
	 * after the mdio bus scan state-machine triggered its own read.  If
	 * so, our request could get deferred by one access cycle.  We
	 * defensively allow for 4 access cycles.
	 */
	data->access_time = usecs_to_jiffies(access_time * 4);
	if (!data->access_time)
		data->access_time = 1;
}
124
125static void davinci_mdio_enable(struct davinci_mdio_data *data)
126{
127	/* set enable and clock divider */
128	writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
129}
130
/*
 * MII bus reset hook: let the hardware scan settle, log the revision
 * and — unless the PHY list came from DT (skip_scan) — derive phy_mask
 * from the ALIVE register.  Returns 0, or a negative errno from
 * runtime PM.
 */
static int davinci_mdio_reset(struct mii_bus *bus)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 phy_mask, ver;
	int ret;

	ret = pm_runtime_get_sync(data->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(data->dev);
		return ret;
	}

	/* wait for scan logic to settle */
	msleep(PHY_MAX_ADDR * data->access_time);

	/* dump hardware version info */
	ver = readl(&data->regs->version);
	dev_info(data->dev,
		 "davinci mdio revision %d.%d, bus freq %ld\n",
		 (ver >> 8) & 0xff, ver & 0xff,
		 data->pdata.bus_freq);

	/* DT listed the PHYs explicitly; don't second-guess it */
	if (data->skip_scan)
		goto done;

	/* get phy mask from the alive register */
	phy_mask = readl(&data->regs->alive);
	if (phy_mask) {
		/* restrict mdio bus to live phys only */
		dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
		phy_mask = ~phy_mask;
	} else {
		/* desperately scan all phys */
		dev_warn(data->dev, "no live phy, scanning all\n");
		phy_mask = 0;
	}
	data->bus->phy_mask = phy_mask;

done:
	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);

	return 0;
}
175
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
	struct davinci_mdio_regs __iomem *regs = data->regs;
	unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
	u32 reg;

	/* Poll until the GO bit clears (0), the controller went
	 * unexpectedly idle (-EAGAIN: caller must resubmit), or
	 * MDIO_TIMEOUT elapses (-ETIMEDOUT).
	 */
	while (time_after(timeout, jiffies)) {
		reg = readl(&regs->user[0].access);
		if ((reg & USERACCESS_GO) == 0)
			return 0;

		reg = readl(&regs->control);
		if ((reg & CONTROL_IDLE) == 0) {
			/* transaction still in flight; back off briefly */
			usleep_range(100, 200);
			continue;
		}

		/*
		 * An emac soft_reset may have clobbered the mdio controller's
		 * state machine.  We need to reset and retry the current
		 * operation
		 */
		dev_warn(data->dev, "resetting idled controller\n");
		davinci_mdio_enable(data);
		return -EAGAIN;
	}

	/* final check in case we raced with completion at the deadline */
	reg = readl(&regs->user[0].access);
	if ((reg & USERACCESS_GO) == 0)
		return 0;

	dev_err(data->dev, "timed out waiting for user access\n");
	return -ETIMEDOUT;
}
211
212/* wait until hardware state machine is idle */
213static inline int wait_for_idle(struct davinci_mdio_data *data)
214{
215	struct davinci_mdio_regs __iomem *regs = data->regs;
216	u32 val, ret;
217
218	ret = readl_poll_timeout(&regs->control, val, val & CONTROL_IDLE,
219				 0, MDIO_TIMEOUT * 1000);
220	if (ret)
221		dev_err(data->dev, "timed out waiting for idle\n");
222
223	return ret;
224}
225
/*
 * Clause-22 read through the hardware state machine.  Returns the
 * 16-bit register value when the PHY ACKed, -EIO on a missing ACK, or a
 * negative errno from runtime PM / the wait helper.
 */
static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 reg;
	int ret;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	ret = pm_runtime_get_sync(data->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(data->dev);
		return ret;
	}

	/* command word: GO + READ, register in bits 25:21, phy in 20:16 */
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));

	while (1) {
		/* -EAGAIN means the controller was re-enabled: resubmit */
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		writel(reg, &data->regs->user[0].access);

		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		/* completion: data valid only when the PHY acknowledged */
		reg = readl(&data->regs->user[0].access);
		ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
		break;
	}

	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);
	return ret;
}
268
/*
 * Clause-22 write through the hardware state machine.  Returns 0 on
 * success or a negative errno from runtime PM / the wait helper.
 */
static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
			      int phy_reg, u16 phy_data)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 reg;
	int ret;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	ret = pm_runtime_get_sync(data->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(data->dev);
		return ret;
	}

	/* command word: GO + WRITE, register 25:21, phy 20:16, data 15:0 */
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
		   (phy_id << 16) | (phy_data & USERACCESS_DATA));

	while (1) {
		/* -EAGAIN means the controller was re-enabled: resubmit */
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		writel(reg, &data->regs->user[0].access);

		/* wait for the write to complete; no ACK check needed */
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		break;
	}

	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);

	return ret;
}
308
309static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
310			 struct platform_device *pdev)
311{
312	struct device_node *node = pdev->dev.of_node;
313	u32 prop;
314
315	if (!node)
316		return -EINVAL;
317
318	if (of_property_read_u32(node, "bus_freq", &prop)) {
319		dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
320		return -EINVAL;
321	}
322	data->bus_freq = prop;
323
324	return 0;
325}
326
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#if IS_ENABLED(CONFIG_OF)
/* "ti,cpsw-mdio" additionally gets a 100 ms runtime-PM autosuspend delay. */
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
	.autosuspend_delay_ms = 100,
};

static const struct of_device_id davinci_mdio_of_mtable[] = {
	{ .compatible = "ti,davinci_mdio", },
	{ .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif
339
 
 
 
 
 
 
 
 
340static int davinci_mdio_probe(struct platform_device *pdev)
341{
342	struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
343	struct device *dev = &pdev->dev;
344	struct davinci_mdio_data *data;
345	struct resource *res;
346	struct phy_device *phy;
347	int ret, addr;
348	int autosuspend_delay_ms = -1;
349
350	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
351	if (!data)
352		return -ENOMEM;
353
354	data->bus = devm_mdiobus_alloc(dev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
355	if (!data->bus) {
356		dev_err(dev, "failed to alloc mii bus\n");
357		return -ENOMEM;
358	}
359
360	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
361		const struct of_device_id	*of_id;
362
363		ret = davinci_mdio_probe_dt(&data->pdata, pdev);
364		if (ret)
365			return ret;
366		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
367
368		of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
369		if (of_id) {
370			const struct davinci_mdio_of_param *of_mdio_data;
371
372			of_mdio_data = of_id->data;
373			if (of_mdio_data)
374				autosuspend_delay_ms =
375					of_mdio_data->autosuspend_delay_ms;
376		}
377	} else {
378		data->pdata = pdata ? (*pdata) : default_pdata;
379		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
380			 pdev->name, pdev->id);
381	}
382
383	data->bus->name		= dev_name(dev);
384	data->bus->read		= davinci_mdio_read,
385	data->bus->write	= davinci_mdio_write,
386	data->bus->reset	= davinci_mdio_reset,
 
 
 
 
 
 
 
 
 
 
 
 
387	data->bus->parent	= dev;
388	data->bus->priv		= data;
389
390	data->clk = devm_clk_get(dev, "fck");
391	if (IS_ERR(data->clk)) {
392		dev_err(dev, "failed to get device clock\n");
393		return PTR_ERR(data->clk);
394	}
395
396	dev_set_drvdata(dev, data);
397	data->dev = dev;
398
399	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
400	if (!res)
401		return -EINVAL;
402	data->regs = devm_ioremap(dev, res->start, resource_size(res));
403	if (!data->regs)
404		return -ENOMEM;
405
406	davinci_mdio_init_clk(data);
407
408	pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
409	pm_runtime_use_autosuspend(&pdev->dev);
410	pm_runtime_enable(&pdev->dev);
411
412	/* register the mii bus
413	 * Create PHYs from DT only in case if PHY child nodes are explicitly
414	 * defined to support backward compatibility with DTs which assume that
415	 * Davinci MDIO will always scan the bus for PHYs detection.
416	 */
417	if (dev->of_node && of_get_child_count(dev->of_node))
418		data->skip_scan = true;
419
420	ret = of_mdiobus_register(data->bus, dev->of_node);
421	if (ret)
422		goto bail_out;
423
424	/* scan and dump the bus */
425	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
426		phy = mdiobus_get_phy(data->bus, addr);
427		if (phy) {
428			dev_info(dev, "phy[%d]: device %s, driver %s\n",
429				 phy->mdio.addr, phydev_name(phy),
430				 phy->drv ? phy->drv->name : "unknown");
431		}
432	}
433
434	return 0;
435
436bail_out:
437	pm_runtime_dont_use_autosuspend(&pdev->dev);
438	pm_runtime_disable(&pdev->dev);
439	return ret;
440}
441
442static int davinci_mdio_remove(struct platform_device *pdev)
443{
444	struct davinci_mdio_data *data = platform_get_drvdata(pdev);
445
446	if (data->bus)
447		mdiobus_unregister(data->bus);
448
 
 
 
 
449	pm_runtime_dont_use_autosuspend(&pdev->dev);
450	pm_runtime_disable(&pdev->dev);
451
452	return 0;
453}
454
455#ifdef CONFIG_PM
456static int davinci_mdio_runtime_suspend(struct device *dev)
457{
458	struct davinci_mdio_data *data = dev_get_drvdata(dev);
459	u32 ctrl;
460
461	/* shutdown the scan state machine */
462	ctrl = readl(&data->regs->control);
463	ctrl &= ~CONTROL_ENABLE;
464	writel(ctrl, &data->regs->control);
465	wait_for_idle(data);
 
 
466
467	return 0;
468}
469
static int davinci_mdio_runtime_resume(struct device *dev)
{
	struct davinci_mdio_data *mdio = dev_get_drvdata(dev);

	/* Re-enable the state machine with the saved clock divider. */
	davinci_mdio_enable(mdio);
	return 0;
}
477#endif
478
479#ifdef CONFIG_PM_SLEEP
480static int davinci_mdio_suspend(struct device *dev)
481{
482	struct davinci_mdio_data *data = dev_get_drvdata(dev);
483	int ret = 0;
484
485	data->active_in_suspend = !pm_runtime_status_suspended(dev);
486	if (data->active_in_suspend)
487		ret = pm_runtime_force_suspend(dev);
488	if (ret < 0)
489		return ret;
490
491	/* Select sleep pin state */
492	pinctrl_pm_select_sleep_state(dev);
493
494	return 0;
495}
496
497static int davinci_mdio_resume(struct device *dev)
498{
499	struct davinci_mdio_data *data = dev_get_drvdata(dev);
500
501	/* Select default pin state */
502	pinctrl_pm_select_default_state(dev);
503
504	if (data->active_in_suspend)
505		pm_runtime_force_resume(dev);
506
507	return 0;
508}
509#endif
510
/* Runtime PM stops/starts the controller; system sleep hooks run in the
 * late suspend / early resume phase.
 */
static const struct dev_pm_ops davinci_mdio_pm_ops = {
	SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
			   davinci_mdio_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};
516
/* Platform driver glue; matches "davinci_mdio" or the OF table above. */
static struct platform_driver davinci_mdio_driver = {
	.driver = {
		.name	 = "davinci_mdio",
		.pm	 = &davinci_mdio_pm_ops,
		.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
	},
	.probe = davinci_mdio_probe,
	.remove = davinci_mdio_remove,
};
526
static int __init davinci_mdio_init(void)
{
	/* Registered via device_initcall() below rather than module_init(). */
	return platform_driver_register(&davinci_mdio_driver);
}
device_initcall(davinci_mdio_init);
532
static void __exit davinci_mdio_exit(void)
{
	/* Module unload: drop the platform driver registration. */
	platform_driver_unregister(&davinci_mdio_driver);
}
module_exit(davinci_mdio_exit);
538
539MODULE_LICENSE("GPL");
540MODULE_DESCRIPTION("DaVinci MDIO driver");