// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dave DNET Ethernet Controller driver
 *
 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include "dnet.h"

#undef DEBUG

/* function for reading internal MAC register */
static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
{
	u16 data_read;

	/* issue a read */
	dnet_writel(bp, reg, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before reading the data */
	ndelay(500);

	/* read data read from the MAC register */
	data_read = dnet_readl(bp, MACREG_DATA);

	/* all done */
	return data_read;
}

/* function for writing internal MAC register */
static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
{
	/* load data to write */
	dnet_writel(bp, val, MACREG_DATA);

	/* issue a write */
	dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before exiting */
	ndelay(500);
}

static void __dnet_set_hwaddr(struct dnet *bp)
{
	u16 tmp;

	tmp = be16_to_cpup((const __be16 *)bp->dev->dev_addr);
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
	tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 2));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
	tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 4));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}

static void dnet_get_hwaddr(struct dnet *bp)
{
	u16 tmp;
	u8 addr[6];

	/*
	 * from MAC docs:
	 * "Note that the MAC address is stored in the registers in Hexadecimal
	 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
	 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
	 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
	 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
	 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
	 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
	 * Mac_addr[15:0]), and 0x80 (octet 5) to address 0x0E (Low byte of
	 * Mac_addr[15:0]).
	 */
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
	*((__be16 *)addr) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
	*((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
	*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);

	if (is_valid_ether_addr(addr))
		eth_hw_addr_set(bp->dev, addr);
}

static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct dnet *bp = bus->priv;
	u16 value;

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
			& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* prepare reg_value for a read */
	value = (mii_id << 8);
	value |= regnum;

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);

	/* wait for end of transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
			& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);

	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);

	return value;
}

static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct dnet *bp = bus->priv;
	u16 tmp;

	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
			& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* prepare for a write operation */
	tmp = (1 << 13);

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* only 16 bits on data */
	value &= 0xffff;

	/* prepare reg_value for a write */
	tmp |= (mii_id << 8);
	tmp |= regnum;

	/* write data to write first */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
			& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	return 0;
}

static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;

			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not "
				       "10/100/1000!\n", dev->name,
				       phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	if (phydev->link != bp->link) {
		if (phydev->link) {
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}

static int dnet_mii_probe(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	/* find the first phy */
	phydev = phy_find_first(bp->mii_bus);

	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* TODO : add pin_irq */

	/* attach the mac to the phy */
	if (bp->capabilities & DNET_HAS_RMII) {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_RMII);
	} else {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_MII);
	}

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	if (bp->capabilities & DNET_HAS_GIGABIT)
		phy_set_max_speed(phydev, SPEED_1000);
	else
		phy_set_max_speed(phydev, SPEED_100);

	phy_support_asym_pause(phydev);

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int dnet_mii_init(struct dnet *bp)
{
	int err;

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL)
		return -ENOMEM;

	bp->mii_bus->name = "dnet_mii_bus";
	bp->mii_bus->read = &dnet_mdio_read;
	bp->mii_bus->write = &dnet_mdio_write;

	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);

	bp->mii_bus->priv = bp;

	if (mdiobus_register(bp->mii_bus)) {
		err = -ENXIO;
		goto err_out;
	}

	if (dnet_mii_probe(bp->dev) != 0) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out:
	mdiobus_free(bp->mii_bus);
	return err;
}

/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
static int dnet_phy_marvell_fixup(struct phy_device *phydev)
{
	return phy_write(phydev, 0x18, 0x4148);
}

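/*
 * Accumulate the hardware counters into bp->hw_stats. The fields of
 * struct dnet_stats are expected to mirror the layout of the counter
 * registers, so the destination pointer and the register pointer can be
 * advanced in lockstep; the WARN_ON()s below sanity-check that the two
 * layouts still line up.
 */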
static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);
}

static int dnet_poll(struct napi_struct *napi, int budget)
{
	struct dnet *bp = container_of(napi, struct dnet, napi);
	struct net_device *dev = bp->dev;
	int npackets = 0;
	unsigned int pkt_len;
	struct sk_buff *skb;
	unsigned int *data_ptr;
	u32 int_enable;
	u32 cmd_word;
	int i;

	while (npackets < budget) {
		/*
		 * break out of while loop if there are no more
		 * packets waiting
		 */
		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
			break;

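		/*
		 * Each RX length FIFO entry carries the frame length in its
		 * low 16 bits; the upper bits are treated as error/status
		 * flags by the mask check below.
		 */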
		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
		pkt_len = cmd_word & 0xFFFF;

		if (cmd_word & 0xDF180000)
			printk(KERN_ERR "%s packet receive error %x\n",
			       __func__, cmd_word);

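		/*
		 * pkt_len + 5 leaves room for the 2-byte alignment reserve
		 * below plus up to 3 bytes of slack, since the copy loop
		 * reads whole 32-bit words from the data FIFO.
		 */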
		skb = netdev_alloc_skb(dev, pkt_len + 5);
		if (skb != NULL) {
			/* Align IP on 16 byte boundaries */
			skb_reserve(skb, 2);
			/*
			 * 'skb_put()' points to the start of sk_buff
			 * data area.
			 */
			data_ptr = skb_put(skb, pkt_len);
			for (i = 0; i < (pkt_len + 3) >> 2; i++)
				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			npackets++;
		} else
			printk(KERN_NOTICE
			       "%s: No memory to allocate a sk_buff of "
			       "size %u.\n", dev->name, pkt_len);
	}

	if (npackets < budget) {
		/* We processed all packets available. Tell NAPI it can
		 * stop polling then re-enable rx interrupts.
		 */
		napi_complete_done(napi, npackets);
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
		dnet_writel(bp, int_enable, INTR_ENB);
	}

	return npackets;
}

static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read and clear the DNET irq (clear on read) */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX fifo almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* we can only flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* we can only flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers
			 */
			/* Disable Rx interrupts and schedule NAPI poll */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}

#ifdef DEBUG
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
#define dnet_print_skb(skb)	do {} while (0)
#endif

static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{

	struct dnet *bp = netdev_priv(dev);
	unsigned int i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;
	u32 irq_enable;

	dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
		 skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	spin_lock_irqsave(&bp->lock, flags);

	dnet_readl(bp, TX_STATUS);

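	/*
	 * The TX data FIFO is written in whole 32-bit words starting from a
	 * word-aligned pointer, so round skb->data down to a word boundary
	 * and encode the byte offset of the frame within that first word in
	 * bits 17:16 of the TX command word, alongside the frame length.
	 */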
	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* check if there is enough room for the current frame */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform MAC that a packet's written and ready to be
		 * shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		netif_stop_queue(dev);
		dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	skb_tx_timestamp(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	/* free the buffer */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void dnet_reset_hw(struct dnet *bp)
{
	/* put ts_mac in IDLE state i.e. disable rx/tx */
	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);

	/*
	 * RX FIFO almost full threshold: only cmd FIFO almost full is
	 * implemented for RX side
	 */
	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
	/*
	 * TX FIFO almost empty threshold: only data FIFO almost empty
	 * is implemented for TX side
	 */
	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);

	/* flush rx/tx fifos */
	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
		    SYS_CTL);
	msleep(1);
	dnet_writel(bp, 0, SYS_CTL);
}

static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* Copy All Frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		/* No BroadCast */
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;

	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear irq before enabling them */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupt, recv packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
		    DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
		    DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
		    DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
		    DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}

static int dnet_open(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	napi_enable(&bp->napi);
	dnet_init_hw(bp);

	phy_start_aneg(dev->phydev);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}

static int dnet_close(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	dnet_reset_hw(bp);
	netif_carrier_off(dev);

	return 0;
}

static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}

static struct net_device_stats *dnet_get_stats(struct net_device *dev)
{

	struct dnet *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct dnet_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	dnet_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_len_chk_err +
			    hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    /* ignore IPG violation error
			    hwstat->rx_ipg_viol + */
			    hwstat->rx_crc_err +
			    hwstat->rx_pre_shrink +
			    hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
	nstat->tx_errors = hwstat->tx_bad_fcs;
	nstat->rx_length_errors = (hwstat->rx_len_chk_err +
				   hwstat->rx_lng_frm +
				   hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
	nstat->rx_crc_errors = hwstat->rx_crc_err;
	nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
	nstat->rx_packets = hwstat->rx_ok_pkt;
	nstat->tx_packets = (hwstat->tx_unicast +
			     hwstat->tx_multicast + hwstat->tx_brdcast);
	nstat->rx_bytes = hwstat->rx_byte;
	nstat->tx_bytes = hwstat->tx_byte;
	nstat->multicast = hwstat->rx_multicast;
	nstat->rx_missed_errors = hwstat->rx_pkt_ignr;

	dnet_print_pretty_hwstats(hwstat);

	return nstat;
}

static void dnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, "0", sizeof(info->bus_info));
}

static const struct ethtool_ops dnet_ethtool_ops = {
	.get_drvinfo		= dnet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops dnet_netdev_ops = {
	.ndo_open		= dnet_open,
	.ndo_stop		= dnet_close,
	.ndo_get_stats		= dnet_get_stats,
	.ndo_start_xmit		= dnet_start_xmit,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int dnet_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *dev;
	struct dnet *bp;
	struct phy_device *phydev;
	int err;
	unsigned int irq;

	irq = platform_get_irq(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	/* TODO: Actually, we have some interesting features... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->dev = dev;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&bp->lock);

	bp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(bp->regs)) {
		err = PTR_ERR(bp->regs);
		goto err_out_free_dev;
	}

	dev->irq = irq;
	err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			irq, err);
		goto err_out_free_dev;
	}

	dev->netdev_ops = &dnet_netdev_ops;
	netif_napi_add(dev, &bp->napi, dnet_poll);
	dev->ethtool_ops = &dnet_ethtool_ops;

	dev->base_addr = (unsigned long)bp->regs;

	bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;

	dnet_get_hwaddr(bp);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* choose a random ethernet address */
		eth_hw_addr_random(dev);
		__dnet_set_hwaddr(bp);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	/* register the PHY board fixup (for Marvell 88E1111) */
	err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
					 dnet_phy_marvell_fixup);
	/* we can live without it, so just issue a warning */
	if (err)
		dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");

	err = dnet_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
		 bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
	dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
		 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
	phydev = dev->phydev;
	phy_attached_info(phydev);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_free_dev:
	free_netdev(dev);
	return err;
}

static void dnet_remove(struct platform_device *pdev)
{

	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		free_netdev(dev);
	}
}

static struct platform_driver dnet_driver = {
	.probe		= dnet_probe,
	.remove_new	= dnet_remove,
	.driver		= {
		.name		= "dnet",
	},
};

module_platform_driver(dnet_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");