Loading...
Note: File does not exist in v6.9.4.
1/*
2 * Blackfin On-Chip MAC Driver
3 *
4 * Copyright 2004-2010 Analog Devices Inc.
5 *
6 * Enter bugs at http://blackfin.uclinux.org/
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#define DRV_VERSION "1.1"
12#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/delay.h>
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26#include <linux/ioport.h>
27#include <linux/crc32.h>
28#include <linux/device.h>
29#include <linux/spinlock.h>
30#include <linux/mii.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/skbuff.h>
35#include <linux/platform_device.h>
36
37#include <asm/dma.h>
38#include <linux/dma-mapping.h>
39
40#include <asm/div64.h>
41#include <asm/dpmc.h>
42#include <asm/blackfin.h>
43#include <asm/cacheflush.h>
44#include <asm/portmux.h>
45#include <mach/pll.h>
46
47#include "bfin_mac.h"
48
49MODULE_AUTHOR("Bryan Wu, Luke Yang");
50MODULE_LICENSE("GPL");
51MODULE_DESCRIPTION(DRV_DESC);
52MODULE_ALIAS("platform:bfin_mac");
53
#if defined(CONFIG_BFIN_MAC_USE_L1)
/* Descriptor storage lives in on-chip L1 data SRAM; the dma_handle
 * argument is accepted but ignored so callers stay config-agnostic.
 */
# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
#else
/* Descriptor storage comes from coherent DMA memory instead. */
# define bfin_mac_alloc(dma_handle, size, num) \
	dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
# define bfin_mac_free(dma_handle, ptr, num) \
	dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
#endif
63
64#define PKT_BUF_SZ 1580
65
66#define MAX_TIMEOUT_CNT 500
67
/* pointers to maintain transmit list */
static struct net_dma_desc_tx *tx_list_head;
static struct net_dma_desc_tx *tx_list_tail;
/* pointers to maintain the circular receive list */
static struct net_dma_desc_rx *rx_list_head;
static struct net_dma_desc_rx *rx_list_tail;
/* next RX descriptor the driver will service / next TX slot to fill */
static struct net_dma_desc_rx *current_rx_ptr;
static struct net_dma_desc_tx *current_tx_ptr;
/* base addresses of the descriptor arrays, kept for freeing */
static struct net_dma_desc_tx *tx_desc;
static struct net_dma_desc_rx *rx_desc;
77
78static void desc_list_free(void)
79{
80 struct net_dma_desc_rx *r;
81 struct net_dma_desc_tx *t;
82 int i;
83#if !defined(CONFIG_BFIN_MAC_USE_L1)
84 dma_addr_t dma_handle = 0;
85#endif
86
87 if (tx_desc) {
88 t = tx_list_head;
89 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
90 if (t) {
91 if (t->skb) {
92 dev_kfree_skb(t->skb);
93 t->skb = NULL;
94 }
95 t = t->next;
96 }
97 }
98 bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
99 }
100
101 if (rx_desc) {
102 r = rx_list_head;
103 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
104 if (r) {
105 if (r->skb) {
106 dev_kfree_skb(r->skb);
107 r->skb = NULL;
108 }
109 r = r->next;
110 }
111 }
112 bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
113 }
114}
115
116static int desc_list_init(struct net_device *dev)
117{
118 int i;
119 struct sk_buff *new_skb;
120#if !defined(CONFIG_BFIN_MAC_USE_L1)
121 /*
122 * This dma_handle is useless in Blackfin dma_alloc_coherent().
123 * The real dma handler is the return value of dma_alloc_coherent().
124 */
125 dma_addr_t dma_handle;
126#endif
127
128 tx_desc = bfin_mac_alloc(&dma_handle,
129 sizeof(struct net_dma_desc_tx),
130 CONFIG_BFIN_TX_DESC_NUM);
131 if (tx_desc == NULL)
132 goto init_error;
133
134 rx_desc = bfin_mac_alloc(&dma_handle,
135 sizeof(struct net_dma_desc_rx),
136 CONFIG_BFIN_RX_DESC_NUM);
137 if (rx_desc == NULL)
138 goto init_error;
139
140 /* init tx_list */
141 tx_list_head = tx_list_tail = tx_desc;
142
143 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
144 struct net_dma_desc_tx *t = tx_desc + i;
145 struct dma_descriptor *a = &(t->desc_a);
146 struct dma_descriptor *b = &(t->desc_b);
147
148 /*
149 * disable DMA
150 * read from memory WNR = 0
151 * wordsize is 32 bits
152 * 6 half words is desc size
153 * large desc flow
154 */
155 a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
156 a->start_addr = (unsigned long)t->packet;
157 a->x_count = 0;
158 a->next_dma_desc = b;
159
160 /*
161 * enabled DMA
162 * write to memory WNR = 1
163 * wordsize is 32 bits
164 * disable interrupt
165 * 6 half words is desc size
166 * large desc flow
167 */
168 b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
169 b->start_addr = (unsigned long)(&(t->status));
170 b->x_count = 0;
171
172 t->skb = NULL;
173 tx_list_tail->desc_b.next_dma_desc = a;
174 tx_list_tail->next = t;
175 tx_list_tail = t;
176 }
177 tx_list_tail->next = tx_list_head; /* tx_list is a circle */
178 tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
179 current_tx_ptr = tx_list_head;
180
181 /* init rx_list */
182 rx_list_head = rx_list_tail = rx_desc;
183
184 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
185 struct net_dma_desc_rx *r = rx_desc + i;
186 struct dma_descriptor *a = &(r->desc_a);
187 struct dma_descriptor *b = &(r->desc_b);
188
189 /* allocate a new skb for next time receive */
190 new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
191 if (!new_skb)
192 goto init_error;
193
194 skb_reserve(new_skb, NET_IP_ALIGN);
195 /* Invidate the data cache of skb->data range when it is write back
196 * cache. It will prevent overwritting the new data from DMA
197 */
198 blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
199 (unsigned long)new_skb->end);
200 r->skb = new_skb;
201
202 /*
203 * enabled DMA
204 * write to memory WNR = 1
205 * wordsize is 32 bits
206 * disable interrupt
207 * 6 half words is desc size
208 * large desc flow
209 */
210 a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
211 /* since RXDWA is enabled */
212 a->start_addr = (unsigned long)new_skb->data - 2;
213 a->x_count = 0;
214 a->next_dma_desc = b;
215
216 /*
217 * enabled DMA
218 * write to memory WNR = 1
219 * wordsize is 32 bits
220 * enable interrupt
221 * 6 half words is desc size
222 * large desc flow
223 */
224 b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
225 NDSIZE_6 | DMAFLOW_LARGE;
226 b->start_addr = (unsigned long)(&(r->status));
227 b->x_count = 0;
228
229 rx_list_tail->desc_b.next_dma_desc = a;
230 rx_list_tail->next = r;
231 rx_list_tail = r;
232 }
233 rx_list_tail->next = rx_list_head; /* rx_list is a circle */
234 rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
235 current_rx_ptr = rx_list_head;
236
237 return 0;
238
239init_error:
240 desc_list_free();
241 pr_err("kmalloc failed\n");
242 return -ENOMEM;
243}
244
245
246/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
247
248/*
249 * MII operations
250 */
251/* Wait until the previous MDC/MDIO transaction has completed */
252static int bfin_mdio_poll(void)
253{
254 int timeout_cnt = MAX_TIMEOUT_CNT;
255
256 /* poll the STABUSY bit */
257 while ((bfin_read_EMAC_STAADD()) & STABUSY) {
258 udelay(1);
259 if (timeout_cnt-- < 0) {
260 pr_err("wait MDC/MDIO transaction to complete timeout\n");
261 return -ETIMEDOUT;
262 }
263 }
264
265 return 0;
266}
267
268/* Read an off-chip register in a PHY through the MDC/MDIO port */
/* Read an off-chip register in a PHY through the MDC/MDIO port.
 * Returns the 16-bit register value (as int) or a negative errno if the
 * management interface stays busy too long.
 */
static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	int ret;

	/* wait until any in-flight transaction completes */
	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	/* read mode: STAOP not set, so this kicks off a register read */
	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
				SET_REGAD((u16) regnum) |
				STABUSY);

	/* wait for the read we just started to finish */
	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	return (int) bfin_read_EMAC_STADAT();
}
288
289/* Write an off-chip register in a PHY through the MDC/MDIO port */
/* Write an off-chip register in a PHY through the MDC/MDIO port.
 * Returns 0 on success or a negative errno on management-bus timeout.
 */
static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
			      u16 value)
{
	int ret;

	/* wait until any in-flight transaction completes */
	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	/* data register must be loaded before the transaction is started */
	bfin_write_EMAC_STADAT((u32) value);

	/* write mode: STAOP selects a register write */
	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
				SET_REGAD((u16) regnum) |
				STAOP |
				STABUSY);

	return bfin_mdio_poll();
}
309
/*
 * phylib adjust_link callback: mirror duplex/speed changes reported by
 * the PHY into the EMAC_OPMODE register and log link transitions.
 * lp->lock guards the cached old_link/old_speed/old_duplex state, which
 * is used to detect what actually changed.
 */
static void bfin_mac_adjust_link(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = lp->phydev;
	unsigned long flags;
	int new_state = 0;

	spin_lock_irqsave(&lp->lock, flags);
	if (phydev->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != lp->old_duplex) {
			u32 opmode = bfin_read_EMAC_OPMODE();
			new_state = 1;

			if (phydev->duplex)
				opmode |= FDMODE;
			else
				opmode &= ~(FDMODE);

			bfin_write_EMAC_OPMODE(opmode);
			lp->old_duplex = phydev->duplex;
		}

		if (phydev->speed != lp->old_speed) {
			/* the RMII_10 bit only exists/matters in RMII mode */
			if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
				u32 opmode = bfin_read_EMAC_OPMODE();
				switch (phydev->speed) {
				case 10:
					opmode |= RMII_10;
					break;
				case 100:
					opmode &= ~RMII_10;
					break;
				default:
					netdev_warn(dev,
						"Ack! Speed (%d) is not 10/100!\n",
						phydev->speed);
					break;
				}
				bfin_write_EMAC_OPMODE(opmode);
			}

			new_state = 1;
			lp->old_speed = phydev->speed;
		}

		if (!lp->old_link) {
			new_state = 1;
			lp->old_link = 1;
		}
	} else if (lp->old_link) {
		/* link just went down: reset cached state for next link-up */
		new_state = 1;
		lp->old_link = 0;
		lp->old_speed = 0;
		lp->old_duplex = -1;
	}

	if (new_state) {
		u32 opmode = bfin_read_EMAC_OPMODE();
		phy_print_status(phydev);
		pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
376
377/* MDC = 2.5 MHz */
378#define MDC_CLK 2500000
379
/*
 * Find the first PHY on the MDIO bus and attach it to the netdev.
 *
 * Also programs the MDC clock divider for a ~2.5 MHz management clock
 * and masks the PHY's advertised features down to what this MAC
 * supports.  Returns 0 on success or a negative errno (-ENODEV if no
 * PHY was found, -EINVAL for an unsupported interface mode).
 */
static int mii_probe(struct net_device *dev, int phy_mode)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	unsigned short sysctl;
	int i;
	u32 sclk, mdc_div;

	/* Enable PHY output early */
	if (!(bfin_read_VR_CTL() & CLKBUFOE))
		bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);

	/* MDC = SCLK / (2 * (mdc_div + 1)), aimed at MDC_CLK (2.5 MHz) */
	sclk = get_sclk();
	mdc_div = ((sclk / MDC_CLK) / 2) - 1;

	sysctl = bfin_read_EMAC_SYSCTL();
	sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
	bfin_write_EMAC_SYSCTL(sysctl);

	/* search for connected PHY device */
	for (i = 0; i < PHY_MAX_ADDR; ++i) {
		struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];

		if (!tmp_phydev)
			continue; /* no PHY here... */

		phydev = tmp_phydev;
		break; /* found it */
	}

	/* now we are supposed to have a proper phydev, to attach to... */
	if (!phydev) {
		netdev_err(dev, "no phy device found\n");
		return -ENODEV;
	}

	if (phy_mode != PHY_INTERFACE_MODE_RMII &&
		phy_mode != PHY_INTERFACE_MODE_MII) {
		netdev_err(dev, "invalid phy interface mode\n");
		return -EINVAL;
	}

	phydev = phy_connect(dev, dev_name(&phydev->dev),
			     &bfin_mac_adjust_link, phy_mode);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "could not attach PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_Pause | SUPPORTED_Asym_Pause
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;

	/* link state cache starts "down"; adjust_link fills it in */
	lp->old_link = 0;
	lp->old_speed = 0;
	lp->old_duplex = -1;
	lp->phydev = phydev;

	pr_info("attached PHY driver [%s] "
		"(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
		phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
		MDC_CLK, mdc_div, sclk/1000000);

	return 0;
}
454
455/*
456 * Ethtool support
457 */
458
459/*
460 * interrupt routine for magic packet wakeup
461 */
/* Interrupt handler for magic-packet wakeup: the IRQ itself is the
 * wake event, so there is nothing to do beyond acknowledging it.
 */
static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
466
467static int
468bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
469{
470 struct bfin_mac_local *lp = netdev_priv(dev);
471
472 if (lp->phydev)
473 return phy_ethtool_gset(lp->phydev, cmd);
474
475 return -EINVAL;
476}
477
478static int
479bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
480{
481 struct bfin_mac_local *lp = netdev_priv(dev);
482
483 if (!capable(CAP_NET_ADMIN))
484 return -EPERM;
485
486 if (lp->phydev)
487 return phy_ethtool_sset(lp->phydev, cmd);
488
489 return -EINVAL;
490}
491
/* ethtool get_drvinfo: report driver name/version; there is no separate
 * firmware, and the platform-device name doubles as the bus info.
 */
static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
					struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
500
/* ethtool get_wol: only magic-packet wake is supported; report the
 * currently configured options from the driver's cached state.
 */
static void bfin_mac_ethtool_getwol(struct net_device *dev,
				    struct ethtool_wolinfo *wolinfo)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	wolinfo->supported = WAKE_MAGIC;
	wolinfo->wolopts = lp->wol;
}
509
510static int bfin_mac_ethtool_setwol(struct net_device *dev,
511 struct ethtool_wolinfo *wolinfo)
512{
513 struct bfin_mac_local *lp = netdev_priv(dev);
514 int rc;
515
516 if (wolinfo->wolopts & (WAKE_MAGICSECURE |
517 WAKE_UCAST |
518 WAKE_MCAST |
519 WAKE_BCAST |
520 WAKE_ARP))
521 return -EOPNOTSUPP;
522
523 lp->wol = wolinfo->wolopts;
524
525 if (lp->wol && !lp->irq_wake_requested) {
526 /* register wake irq handler */
527 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
528 0, "EMAC_WAKE", dev);
529 if (rc)
530 return rc;
531 lp->irq_wake_requested = true;
532 }
533
534 if (!lp->wol && lp->irq_wake_requested) {
535 free_irq(IRQ_MAC_WAKEDET, dev);
536 lp->irq_wake_requested = false;
537 }
538
539 /* Make sure the PHY driver doesn't suspend */
540 device_init_wakeup(&dev->dev, lp->wol);
541
542 return 0;
543}
544
545#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
/* ethtool get_ts_info: advertise the hardware timestamping modes the
 * PTP_TSYNC engine supports (hardware TX/RX/raw stamps, PTP v1/v2 L4
 * and v2 L2 event filters) plus the registered PHC index.
 */
static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
	struct ethtool_ts_info *info)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = lp->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
566#endif
567
/* ethtool entry points, installed on the netdev at probe time */
static const struct ethtool_ops bfin_mac_ethtool_ops = {
	.get_settings = bfin_mac_ethtool_getsettings,
	.set_settings = bfin_mac_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
	.get_wol = bfin_mac_ethtool_getwol,
	.set_wol = bfin_mac_ethtool_setwol,
#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
	.get_ts_info = bfin_mac_ethtool_get_ts_info,
#endif
};
579
580/**************************************************************************/
/**************************************************************************/
/*
 * One-time EMAC and DMA register setup: PHY interrupt enable (only if
 * some PHY on the bus actually uses an interrupt line), RX DMA word
 * alignment, optional RX checksum offload, MMC counters, VLAN frame
 * length, and the fixed parts of the TX/RX DMA channel configuration.
 */
static void setup_system_regs(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	int i;
	unsigned short sysctl;

	/*
	 * Odd word alignment for Receive Frame DMA word
	 * Configure checksum support and rcve frame word alignment
	 */
	sysctl = bfin_read_EMAC_SYSCTL();
	/*
	 * check if interrupt is requested for any PHY,
	 * enable PHY interrupt only if needed
	 */
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		if (lp->mii_bus->irq[i] != PHY_POLL)
			break;
	if (i < PHY_MAX_ADDR)
		sysctl |= PHYIE;
	sysctl |= RXDWA;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	sysctl |= RXCKS;
#else
	sysctl &= ~RXCKS;
#endif
	bfin_write_EMAC_SYSCTL(sysctl);

	/* reset and enable roll-over on the MMC statistics counters */
	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);

	/* Set vlan regs to let 1522 bytes long packets pass through */
	bfin_write_EMAC_VLAN1(lp->vlan1_mask);
	bfin_write_EMAC_VLAN2(lp->vlan2_mask);

	/* Initialize the TX DMA channel registers */
	bfin_write_DMA2_X_COUNT(0);
	bfin_write_DMA2_X_MODIFY(4);
	bfin_write_DMA2_Y_COUNT(0);
	bfin_write_DMA2_Y_MODIFY(0);

	/* Initialize the RX DMA channel registers */
	bfin_write_DMA1_X_COUNT(0);
	bfin_write_DMA1_X_MODIFY(4);
	bfin_write_DMA1_Y_COUNT(0);
	bfin_write_DMA1_Y_MODIFY(0);
}
627
/*
 * Program the 6-byte station address into the EMAC address registers.
 * The pointer casts load the address as one 32-bit and one 16-bit
 * little-endian word; mac_addr is assumed 2-byte aligned (true for
 * dev->dev_addr) — NOTE(review): relies on little-endian layout, which
 * holds for Blackfin.
 */
static void setup_mac_addr(u8 *mac_addr)
{
	u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
	u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);

	/* this depends on a little-endian machine */
	bfin_write_EMAC_ADDRLO(addr_low);
	bfin_write_EMAC_ADDRHI(addr_hi);
}
637
638static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
639{
640 struct sockaddr *addr = p;
641 if (netif_running(dev))
642 return -EBUSY;
643 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
644 setup_mac_addr(dev->dev_addr);
645 return 0;
646}
647
648#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
649#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
650
651static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
652{
653 u32 ipn = 1000000000UL / input_clk;
654 u32 ppn = 1;
655 unsigned int shift = 0;
656
657 while (ppn <= ipn) {
658 ppn <<= 1;
659 shift++;
660 }
661 *shift_result = shift;
662 return 1000000000UL / ppn;
663}
664
/*
 * SIOCSHWTSTAMP handler: program the EMAC PTP_TSYNC filter registers
 * according to the requested hwtstamp_config, enable/disable the PTP
 * engine, and copy the (possibly coarsened) config back to user space.
 * Per-filter register values follow the BF518 HRM recommended settings.
 * Returns 0, -EFAULT, -EINVAL, or -ERANGE for unsupported modes.
 */
static int bfin_mac_hwtstamp_set(struct net_device *netdev,
				 struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u16 ptpctl;
	u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		 __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if ((config.tx_type != HWTSTAMP_TX_OFF) &&
	    (config.tx_type != HWTSTAMP_TX_ON))
		return -ERANGE;

	ptpctl = bfin_read_EMAC_PTP_CTL();

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/*
		 * Dont allow any timestamping
		 */
		ptpfv3 = 0xFFFFFFFF;
		bfin_write_EMAC_PTP_FV3(ptpfv3);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/*
		 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL)
		 * to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register.
		 */
		ptpfoff = 0x4A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * The default value (0xFFFC) allows the timestamping of both
		 * received Sync messages and Delay_Req messages.
		 */
		ptpfv3 = 0xFFFFFFFC;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		/* hardware can only do "all v1 L4 events", report that back */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* Clear all five comparison mask bits (bits[12:8]) in the
		 * EMAC_PTP_CTL register to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register, except set
		 * the PTPCOF field to 0x2A.
		 */
		ptpfoff = 0x2A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
		 * the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/*
		 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
		 * EFTM and PTPCM field comparison.
		 */
		ptpctl &= ~0x1100;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of all the fields of the EMAC_PTP_FOFF
		 * register, except set the PTPCOF field to 0x0E.
		 */
		ptpfoff = 0x0E24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
		 * corresponds to PTP messages on the MAC layer.
		 */
		ptpfv1 = 0x110488F7;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
		 * messages, set the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (config.tx_type == HWTSTAMP_TX_OFF &&
	    bfin_mac_hwtstamp_is_none(config.rx_filter)) {
		ptpctl &= ~PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		SSYNC();
	} else {
		ptpctl |= PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		/*
		 * clear any existing timestamp
		 */
		bfin_read_EMAC_PTP_RXSNAPLO();
		bfin_read_EMAC_PTP_RXSNAPHI();

		bfin_read_EMAC_PTP_TXSNAPLO();
		bfin_read_EMAC_PTP_TXSNAPHI();

		SSYNC();
	}

	lp->stamp_cfg = config;
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
821
822static int bfin_mac_hwtstamp_get(struct net_device *netdev,
823 struct ifreq *ifr)
824{
825 struct bfin_mac_local *lp = netdev_priv(netdev);
826
827 return copy_to_user(ifr->ifr_data, &lp->stamp_cfg,
828 sizeof(lp->stamp_cfg)) ?
829 -EFAULT : 0;
830}
831
/*
 * If the just-sent skb requested a hardware TX timestamp, busy-wait
 * for the PTP engine to latch it (TXTL in PTP_ISTAT), convert the
 * 64-bit snapshot to nanoseconds via lp->shift, and hand it back to
 * the stack with skb_tstamp_tx().
 */
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		int timeout_cnt = MAX_TIMEOUT_CNT;

		/* When doing time stamping, keep the connection to the socket
		 * a while longer
		 */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		/*
		 * The timestamping is done at the EMAC module's MII/RMII interface
		 * when the module sees the Start of Frame of an event message packet. This
		 * interface is the closest possible place to the physical Ethernet transmission
		 * medium, providing the best timing accuracy.
		 */
		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
			udelay(1);
		if (timeout_cnt == 0)
			netdev_err(netdev, "timestamp the TX packet failed\n");
		else {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 ns;
			u64 regval;

			/* 64-bit snapshot: LO then HI halves */
			regval = bfin_read_EMAC_PTP_TXSNAPLO();
			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			/* counter ticks are 2^shift ns wide (see hwtstamp_init) */
			ns = regval << lp->shift;
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}
868
/*
 * Attach the hardware RX timestamp to a received skb, if RX
 * timestamping is enabled and the PTP engine latched a stamp for an
 * event message (RXEL in PTP_ISTAT).  No-op otherwise.
 */
static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u32 valid;
	u64 regval, ns;
	struct skb_shared_hwtstamps *shhwtstamps;

	if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
		return;

	valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
	if (!valid)
		return;

	shhwtstamps = skb_hwtstamps(skb);

	/* 64-bit snapshot: LO then HI halves, scaled by 2^shift ns/tick */
	regval = bfin_read_EMAC_PTP_RXSNAPLO();
	regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
	ns = regval << lp->shift;
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
}
891
/*
 * Set up the PTP hardware clock: pick a power-of-two tick (lp->shift)
 * from the system clock, program the frequency-compensation ADDEND so
 * the counter advances at the chosen PHC rate, compute the maximum
 * frequency adjustment (lp->max_ppb), and default to timestamping off.
 */
static void bfin_mac_hwtstamp_init(struct net_device *netdev)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u64 addend, ppb;
	u32 input_clk, phc_clk;

	/* Initialize hardware timer */
	input_clk = get_sclk();
	phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
	/* addend = 2^32 * phc_clk / input_clk (fixed-point rate ratio) */
	addend = phc_clk * (1ULL << 32);
	do_div(addend, input_clk);
	bfin_write_EMAC_PTP_ADDEND((u32)addend);

	lp->addend = addend;
	/* headroom between the nominal rate and the counter's maximum */
	ppb = 1000000000ULL * input_clk;
	do_div(ppb, phc_clk);
	lp->max_ppb = ppb - 1000000000ULL - 1ULL;

	/* Initialize hwstamp config */
	lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
}
914
915static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
916{
917 u64 ns;
918 u32 lo, hi;
919
920 lo = bfin_read_EMAC_PTP_TIMELO();
921 hi = bfin_read_EMAC_PTP_TIMEHI();
922
923 ns = ((u64) hi) << 32;
924 ns |= lo;
925 ns <<= lp->shift;
926
927 return ns;
928}
929
930static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
931{
932 u32 hi, lo;
933
934 ns >>= lp->shift;
935 hi = ns >> 32;
936 lo = ns & 0xffffffff;
937
938 bfin_write_EMAC_PTP_TIMELO(lo);
939 bfin_write_EMAC_PTP_TIMEHI(hi);
940}
941
942/* PTP Hardware Clock operations */
943
944static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
945{
946 u64 adj;
947 u32 diff, addend;
948 int neg_adj = 0;
949 struct bfin_mac_local *lp =
950 container_of(ptp, struct bfin_mac_local, caps);
951
952 if (ppb < 0) {
953 neg_adj = 1;
954 ppb = -ppb;
955 }
956 addend = lp->addend;
957 adj = addend;
958 adj *= ppb;
959 diff = div_u64(adj, 1000000000ULL);
960
961 addend = neg_adj ? addend - diff : addend + diff;
962
963 bfin_write_EMAC_PTP_ADDEND(addend);
964
965 return 0;
966}
967
968static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
969{
970 s64 now;
971 unsigned long flags;
972 struct bfin_mac_local *lp =
973 container_of(ptp, struct bfin_mac_local, caps);
974
975 spin_lock_irqsave(&lp->phc_lock, flags);
976
977 now = bfin_ptp_time_read(lp);
978 now += delta;
979 bfin_ptp_time_write(lp, now);
980
981 spin_unlock_irqrestore(&lp->phc_lock, flags);
982
983 return 0;
984}
985
986static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
987{
988 u64 ns;
989 u32 remainder;
990 unsigned long flags;
991 struct bfin_mac_local *lp =
992 container_of(ptp, struct bfin_mac_local, caps);
993
994 spin_lock_irqsave(&lp->phc_lock, flags);
995
996 ns = bfin_ptp_time_read(lp);
997
998 spin_unlock_irqrestore(&lp->phc_lock, flags);
999
1000 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
1001 ts->tv_nsec = remainder;
1002 return 0;
1003}
1004
1005static int bfin_ptp_settime(struct ptp_clock_info *ptp,
1006 const struct timespec *ts)
1007{
1008 u64 ns;
1009 unsigned long flags;
1010 struct bfin_mac_local *lp =
1011 container_of(ptp, struct bfin_mac_local, caps);
1012
1013 ns = ts->tv_sec * 1000000000ULL;
1014 ns += ts->tv_nsec;
1015
1016 spin_lock_irqsave(&lp->phc_lock, flags);
1017
1018 bfin_ptp_time_write(lp, ns);
1019
1020 spin_unlock_irqrestore(&lp->phc_lock, flags);
1021
1022 return 0;
1023}
1024
/* PTP clock op: ancillary features (alarms, external timestamps,
 * periodic outputs) are not implemented on this hardware.
 */
static int bfin_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
1030
/* Template PTP clock capabilities; copied into lp->caps at phc_init,
 * which also fills in max_adj from the computed lp->max_ppb.
 */
static struct ptp_clock_info bfin_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "BF518 clock",
	.max_adj	= 0,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= bfin_ptp_adjfreq,
	.adjtime	= bfin_ptp_adjtime,
	.gettime	= bfin_ptp_gettime,
	.settime	= bfin_ptp_settime,
	.enable		= bfin_ptp_enable,
};
1046
/*
 * Register the PTP hardware clock with the PHC subsystem.
 * Returns 0 on success or the PTR_ERR from ptp_clock_register().
 */
static int bfin_phc_init(struct net_device *netdev, struct device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	/* per-device copy of the capability template, with real max_adj */
	lp->caps = bfin_ptp_caps;
	lp->caps.max_adj = lp->max_ppb;
	lp->clock = ptp_clock_register(&lp->caps, dev);
	if (IS_ERR(lp->clock))
		return PTR_ERR(lp->clock);

	lp->phc_index = ptp_clock_index(lp->clock);
	spin_lock_init(&lp->phc_lock);

	return 0;
}
1062
/* Unregister the PTP hardware clock registered by bfin_phc_init(). */
static void bfin_phc_release(struct bfin_mac_local *lp)
{
	ptp_clock_unregister(lp->clock);
}
1067
#else
/* CONFIG_BFIN_MAC_USE_HWSTAMP disabled: no-op stubs so callers need
 * no #ifdefs of their own.
 */
# define bfin_mac_hwtstamp_is_none(cfg) 0
# define bfin_mac_hwtstamp_init(dev)
# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
# define bfin_rx_hwtstamp(dev, skb)
# define bfin_tx_hwtstamp(dev, skb)
# define bfin_phc_init(netdev, dev) 0
# define bfin_phc_release(lp)
#endif
1078
/*
 * Reclaim finished TX descriptors starting at tx_list_head: disable
 * the descriptor's DMA, clear its status word, free the attached skb,
 * and advance head.  Being a do/while loop, the caller must guarantee
 * at least one completed descriptor (head's status_word != 0) before
 * calling; the loop stops at the first still-pending descriptor.
 */
static inline void _tx_reclaim_skb(void)
{
	do {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_consume_skb_any(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;

	} while (tx_list_head->status.status_word != 0);
}
1092
/*
 * Reclaim completed TX skbs.  Called from the xmit path and from the
 * periodic reclaim timer.  When the ring is about to wrap onto the
 * head (only one free slot left), poll briefly for a completion; if
 * none arrives while DMA is still running, stop the queue and let the
 * (shortened) timer retry.
 */
static void tx_reclaim_skb(struct bfin_mac_local *lp)
{
	int timeout_cnt = MAX_TIMEOUT_CNT;

	/* fast path: at least one descriptor already completed */
	if (tx_list_head->status.status_word != 0)
		_tx_reclaim_skb();

	if (current_tx_ptr->next == tx_list_head) {
		/* ring full: next enqueue would collide with the head */
		while (tx_list_head->status.status_word == 0) {
			/* slow down polling to avoid too many queue stop. */
			udelay(10);
			/* reclaim skb if DMA is not running. */
			if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
				break;
			if (timeout_cnt-- < 0)
				break;
		}

		if (timeout_cnt >= 0)
			_tx_reclaim_skb();
		else
			netif_stop_queue(lp->ndev);
	}

	if (current_tx_ptr->next != tx_list_head &&
		netif_queue_stopped(lp->ndev))
		netif_wake_queue(lp->ndev);

	if (tx_list_head != current_tx_ptr) {
		/* shorten the timer interval if tx queue is stopped */
		if (netif_queue_stopped(lp->ndev))
			lp->tx_reclaim_timer.expires =
				jiffies + (TX_RECLAIM_JIFFIES >> 4);
		else
			lp->tx_reclaim_timer.expires =
				jiffies + TX_RECLAIM_JIFFIES;

		mod_timer(&lp->tx_reclaim_timer,
			lp->tx_reclaim_timer.expires);
	}

	return;
}
1136
/* Timer callback: the timer's data field carries the bfin_mac_local
 * pointer cast to unsigned long (pre-timer_setup era API).
 */
static void tx_reclaim_skb_timeout(unsigned long lp)
{
	tx_reclaim_skb((struct bfin_mac_local *)lp);
}
1141
/*
 * ndo_start_xmit: queue one skb on the TX descriptor ring.
 *
 * The EMAC wants a 16-bit DMA_Length_Word immediately before the
 * frame: low 12 bits = payload length, bit 12 = PTP timestamp enable.
 * If skb->data is 2-byte aligned the word is written into the two
 * bytes in front of the payload (zero copy); otherwise the frame is
 * copied into the descriptor's own packet buffer after the length
 * word.  Either way the range is flushed from D-cache before the DMA
 * engine is (re)started.  Always returns NETDEV_TX_OK.
 */
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	u16 *data;
	u32 data_align = (unsigned long)(skb->data) & 0x3;

	current_tx_ptr->skb = skb;

	if (data_align == 0x2) {
		/* move skb->data to current_tx_ptr payload */
		data = (u16 *)(skb->data) - 1;
		*data = (u16)(skb->len);
		/*
		 * When transmitting an Ethernet packet, the PTP_TSYNC module requires
		 * a DMA_Length_Word field associated with the packet. The lower 12 bits
		 * of this field are the length of the packet payload in bytes and the higher
		 * 4 bits are the timestamping enable field.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			*data |= 0x1000;

		current_tx_ptr->desc_a.start_addr = (u32)data;
		/* this is important! */
		blackfin_dcache_flush_range((u32)data,
			(u32)((u8 *)data + skb->len + 4));
	} else {
		*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
		/* enable timestamping for the sent packet */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			*((u16 *)(current_tx_ptr->packet)) |= 0x1000;
		memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
			skb->len);
		current_tx_ptr->desc_a.start_addr =
			(u32)current_tx_ptr->packet;
		blackfin_dcache_flush_range(
			(u32)current_tx_ptr->packet,
			(u32)(current_tx_ptr->packet + skb->len + 2));
	}

	/* make sure the internal data buffers in the core are drained
	 * so that the DMA descriptors are completely written when the
	 * DMA engine goes to fetch them below
	 */
	SSYNC();

	/* always clear status buffer before start tx dma */
	current_tx_ptr->status.status_word = 0;

	/* enable this packet's dma */
	current_tx_ptr->desc_a.config |= DMAEN;

	/* tx dma is running, just return */
	if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
		goto out;

	/* tx dma is not running */
	bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
	/* dma enabled, read from memory, size is 6 */
	bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
	/* Turn on the EMAC tx */
	bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);

out:
	bfin_tx_hwtstamp(dev, skb);

	current_tx_ptr = current_tx_ptr->next;
	/* NOTE(review): stats are bumped at enqueue time, before the DMA
	 * actually completes — pre-existing behavior kept as-is
	 */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += (skb->len);

	tx_reclaim_skb(lp);

	return NETDEV_TX_OK;
}
1216
1217#define IP_HEADER_OFF 0
1218#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
1219 RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
1220
/*
 * Process one received frame at current_rx_ptr: hand the filled skb up
 * the stack and re-arm the descriptor with a freshly allocated skb.
 * On any error (status error bits, allocation failure) the frame is
 * dropped and the descriptor recycled with its existing buffer.
 */
static void bfin_mac_rx(struct net_device *dev)
{
	struct sk_buff *skb, *new_skb;
	unsigned short len;
	struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	unsigned int i;
	unsigned char fcs[ETH_FCS_LEN + 1];
#endif

	/* check if frame status word reports an error condition
	 * in which case we simply drop the packet
	 */
	if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
		netdev_notice(dev, "rx: receive error - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}

	/* allocate a new skb for next time receive */
	skb = current_rx_ptr->skb;

	new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
	if (!new_skb) {
		dev->stats.rx_dropped++;
		goto out;
	}
	/* reserve 2 bytes for RXDWA padding */
	skb_reserve(new_skb, NET_IP_ALIGN);
	/* Invalidate the data cache over the skb buffer when it is a
	 * write-back cache, so stale dirty lines cannot overwrite the
	 * new data arriving by DMA.
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	current_rx_ptr->skb = new_skb;
	/* -2 backs up over the reserved RXDWA padding bytes */
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
	/* Deduce Ethernet FCS length from Ethernet payload length */
	len -= ETH_FCS_LEN;
	skb_put(skb, len);

	skb->protocol = eth_type_trans(skb, dev);

	bfin_rx_hwtstamp(dev, skb);

#if defined(BFIN_MAC_CSUM_OFFLOAD)
	/* Checksum offloading only works for IPv4 packets with the standard IP header
	 * length of 20 bytes, because the blackfin MAC checksum calculation is
	 * based on that assumption. We must NOT use the calculated checksum if our
	 * IP version or header break that assumption.
	 */
	if (skb->data[IP_HEADER_OFF] == 0x45) {
		skb->csum = current_rx_ptr->status.ip_payload_csum;
		/*
		 * Deduce Ethernet FCS from hardware generated IP payload checksum.
		 * IP checksum is based on 16-bit one's complement algorithm.
		 * To deduce a value from checksum is equal to add its inversion.
		 * If the IP payload len is odd, the inversed FCS should also
		 * begin from odd address and leave first byte zero.
		 */
		if (skb->len % 2) {
			fcs[0] = 0;
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i + 1] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
		} else {
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
		}
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
#endif

	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
out:
	/* recycle the descriptor: clear status so the IRQ handler's
	 * "new packet" test works, then advance the ring pointer */
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;
}
1304
/* Interrupt routine to handle rx and error signal.
 *
 * Drains every completed descriptor in the RX ring (a non-zero status
 * word marks a received frame).  If the current descriptor looks empty
 * but we have not processed anything yet, peek at the next descriptor:
 * the software pointer may have fallen one slot behind the hardware, so
 * resync and process from there.  Only once no work remains do we ack
 * the DMA interrupt and return.
 */
static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	int number = 0;

get_one_packet:
	if (current_rx_ptr->status.status_word == 0) {
		/* no more new packet received */
		if (number == 0) {
			if (current_rx_ptr->next->status.status_word != 0) {
				current_rx_ptr = current_rx_ptr->next;
				goto real_rx;
			}
		}
		bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
					   DMA_DONE | DMA_ERR);
		return IRQ_HANDLED;
	}

real_rx:
	bfin_mac_rx(dev);
	number++;
	goto get_one_packet;
}
1330
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: service RX and reclaim TX with the MAC RX interrupt
 * masked, so netconsole and friends can make progress with normal
 * interrupt delivery disabled.
 */
static void bfin_mac_poll(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	disable_irq(IRQ_MAC_RX);
	bfin_mac_interrupt(IRQ_MAC_RX, dev);
	tx_reclaim_skb(lp);
	enable_irq(IRQ_MAC_RX);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
1342
1343static void bfin_mac_disable(void)
1344{
1345 unsigned int opmode;
1346
1347 opmode = bfin_read_EMAC_OPMODE();
1348 opmode &= (~RE);
1349 opmode &= (~TE);
1350 /* Turn off the EMAC */
1351 bfin_write_EMAC_OPMODE(opmode);
1352}
1353
1354/*
1355 * Enable Interrupts, Receive, and Transmit
1356 */
/*
 * Enable Interrupts, Receive, and Transmit.
 *
 * Arms the RX DMA channel at the head of the descriptor ring, waits for
 * the MII management interface to go idle, then programs EMAC_OPMODE.
 * Returns 0 on success or the bfin_mdio_poll() error (MII busy timeout).
 */
static int bfin_mac_enable(struct phy_device *phydev)
{
	int ret;
	u32 opmode;

	pr_debug("%s\n", __func__);

	/* Set RX DMA */
	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
	bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);

	/* Wait MII done */
	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	/* We enable only RX here */
	/* ASTP   : Enable Automatic Pad Stripping
	   PR     : Promiscuous Mode for test
	   PSF    : Receive frames with total length less than 64 bytes.
	   FDMODE : Full Duplex Mode
	   LB     : Internal Loopback for test
	   RE     : Receiver Enable */
	opmode = bfin_read_EMAC_OPMODE();
	if (opmode & FDMODE)
		opmode |= PSF;
	else
		/* half duplex: also disable receive-own and defer checking */
		opmode |= DRO | DC | PSF;
	opmode |= RE;

	if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
		opmode |= RMII; /* For Now only 100MBit are supported */
#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
		if (__SILICON_REVISION__ < 3) {
			/*
			 * This isn't publicly documented (fun times!), but in
			 * silicon <=0.2, the RX and TX pins are clocked together.
			 * So in order to recv, we must enable the transmit side
			 * as well. This will cause a spurious TX interrupt too,
			 * but we can easily consume that.
			 */
			opmode |= TE;
		}
#endif
	}

	/* Turn on the EMAC rx */
	bfin_write_EMAC_OPMODE(opmode);

	return 0;
}
1408
1409/* Our watchdog timed out. Called by the networking layer */
/* Our watchdog timed out. Called by the networking layer.
 *
 * Recovery: stop the MAC, cancel the reclaim timer, walk the TX ring
 * from head to the current slot dropping any in-flight skbs, then
 * re-enable the MAC and restart the queue.
 */
static void bfin_mac_timeout(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	pr_debug("%s: %s\n", dev->name, __func__);

	bfin_mac_disable();

	del_timer(&lp->tx_reclaim_timer);

	/* reset tx queue and free skb */
	while (tx_list_head != current_tx_ptr) {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_kfree_skb(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;
	}

	/* NOTE(review): lp->ndev appears to be the same device as 'dev',
	 * so the queue may be woken twice (here and below) - presumably
	 * harmless, but confirm. */
	if (netif_queue_stopped(lp->ndev))
		netif_wake_queue(lp->ndev);

	bfin_mac_enable(lp->phydev);

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
1440
1441static void bfin_mac_multicast_hash(struct net_device *dev)
1442{
1443 u32 emac_hashhi, emac_hashlo;
1444 struct netdev_hw_addr *ha;
1445 u32 crc;
1446
1447 emac_hashhi = emac_hashlo = 0;
1448
1449 netdev_for_each_mc_addr(ha, dev) {
1450 crc = ether_crc(ETH_ALEN, ha->addr);
1451 crc >>= 26;
1452
1453 if (crc & 0x20)
1454 emac_hashhi |= 1 << (crc & 0x1f);
1455 else
1456 emac_hashlo |= 1 << (crc & 0x1f);
1457 }
1458
1459 bfin_write_EMAC_HASHHI(emac_hashhi);
1460 bfin_write_EMAC_HASHLO(emac_hashlo);
1461}
1462
1463/*
1464 * This routine will, depending on the values passed to it,
1465 * either make it accept multicast packets, go into
1466 * promiscuous mode (for TCPDUMP and cousins) or accept
1467 * a select set of multicast packets
1468 */
static void bfin_mac_set_multicast_list(struct net_device *dev)
{
	u32 sysctl;

	if (dev->flags & IFF_PROMISC) {
		netdev_info(dev, "set promisc mode\n");
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= PR;
		bfin_write_EMAC_OPMODE(sysctl);
	} else if (dev->flags & IFF_ALLMULTI) {
		/* accept all multicast */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= PAM;
		bfin_write_EMAC_OPMODE(sysctl);
	} else if (!netdev_mc_empty(dev)) {
		/* set up multicast hash table */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= HM;
		bfin_write_EMAC_OPMODE(sysctl);
		bfin_mac_multicast_hash(dev);
	} else {
		/* clear promisc or multicast mode */
		/* NOTE(review): only RAF and PAM are cleared here, while the
		 * branches above set PR and HM - those bits do not appear to
		 * be cleared anywhere in this function; confirm against the
		 * EMAC_OPMODE bit definitions whether that is intended. */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl &= ~(RAF | PAM);
		bfin_write_EMAC_OPMODE(sysctl);
	}
}
1496
1497static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1498{
1499 struct bfin_mac_local *lp = netdev_priv(netdev);
1500
1501 if (!netif_running(netdev))
1502 return -EINVAL;
1503
1504 switch (cmd) {
1505 case SIOCSHWTSTAMP:
1506 return bfin_mac_hwtstamp_set(netdev, ifr);
1507 case SIOCGHWTSTAMP:
1508 return bfin_mac_hwtstamp_get(netdev, ifr);
1509 default:
1510 if (lp->phydev)
1511 return phy_mii_ioctl(lp->phydev, ifr, cmd);
1512 else
1513 return -EOPNOTSUPP;
1514 }
1515}
1516
1517/*
1518 * this puts the device in an inactive state
1519 */
/*
 * this puts the device in an inactive state: MAC fully disabled and
 * both DMA channels (RX on DMA1, TX on DMA2) stopped
 */
static void bfin_mac_shutdown(struct net_device *dev)
{
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(0x00000000);
	/* Turn off the EMAC RX DMA */
	bfin_write_DMA1_CONFIG(0x0000);
	bfin_write_DMA2_CONFIG(0x0000);
}
1528
1529/*
1530 * Open and Initialize the interface
1531 *
1532 * Set up everything, reset the card, etc..
1533 */
1534static int bfin_mac_open(struct net_device *dev)
1535{
1536 struct bfin_mac_local *lp = netdev_priv(dev);
1537 int ret;
1538 pr_debug("%s: %s\n", dev->name, __func__);
1539
1540 /*
1541 * Check that the address is valid. If its not, refuse
1542 * to bring the device up. The user must specify an
1543 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1544 */
1545 if (!is_valid_ether_addr(dev->dev_addr)) {
1546 netdev_warn(dev, "no valid ethernet hw addr\n");
1547 return -EINVAL;
1548 }
1549
1550 /* initial rx and tx list */
1551 ret = desc_list_init(dev);
1552 if (ret)
1553 return ret;
1554
1555 phy_start(lp->phydev);
1556 setup_system_regs(dev);
1557 setup_mac_addr(dev->dev_addr);
1558
1559 bfin_mac_disable();
1560 ret = bfin_mac_enable(lp->phydev);
1561 if (ret)
1562 return ret;
1563 pr_debug("hardware init finished\n");
1564
1565 netif_start_queue(dev);
1566 netif_carrier_on(dev);
1567
1568 return 0;
1569}
1570
1571/*
1572 * this makes the board clean up everything that it can
1573 * and not talk to the outside world. Caused by
1574 * an 'ifconfig ethX down'
1575 */
/*
 * this makes the board clean up everything that it can
 * and not talk to the outside world. Caused by
 * an 'ifconfig ethX down'.  Ordering matters: stop the queue and
 * carrier first, power down the PHY, silence the MAC/DMA, and only
 * then free the descriptor rings.
 */
static int bfin_mac_close(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	pr_debug("%s: %s\n", dev->name, __func__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	phy_stop(lp->phydev);
	/* put the PHY into power-down to save energy while the i/f is down */
	phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);

	/* clear everything */
	bfin_mac_shutdown(dev);

	/* free the rx/tx buffers */
	desc_list_free();

	return 0;
}
1595
/* net_device operations wired up for the Blackfin on-chip MAC */
static const struct net_device_ops bfin_mac_netdev_ops = {
	.ndo_open		= bfin_mac_open,
	.ndo_stop		= bfin_mac_close,
	.ndo_start_xmit		= bfin_mac_hard_start_xmit,
	.ndo_set_mac_address	= bfin_mac_set_mac_address,
	.ndo_tx_timeout		= bfin_mac_timeout,
	.ndo_set_rx_mode	= bfin_mac_set_multicast_list,
	.ndo_do_ioctl		= bfin_mac_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bfin_mac_poll,
#endif
};
1610
/*
 * Platform probe for the MAC device: allocates the netdev, detects the
 * controller, establishes a MAC address, binds to the separately-probed
 * bfin_mii_bus device, attaches the PHY, and registers the netdev plus
 * the PTP hardware clock.
 */
static int bfin_mac_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct bfin_mac_local *lp;
	struct platform_device *pd;
	struct bfin_mii_bus_platform_data *mii_bus_data;
	int rc;

	ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	lp = netdev_priv(ndev);
	lp->ndev = ndev;

	/* Grab the MAC address in the MAC */
	*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
	*(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());

	/* probe mac */
	/* todo: how to probe? which is revision_register */
	/* write/read-back a magic value to confirm the MAC block exists */
	bfin_write_EMAC_ADDRLO(0x12345678);
	if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
		dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}


	/*
	 * Is it valid? (Did bootloader initialize it?)
	 * Grab the MAC from the board somehow
	 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
	 */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		if (bfin_get_ether_addr(ndev->dev_addr) ||
		     !is_valid_ether_addr(ndev->dev_addr)) {
			/* Still not valid, get a random one */
			netdev_warn(ndev, "Setting Ethernet MAC to a random one\n");
			eth_hw_addr_random(ndev);
		}
	}

	setup_mac_addr(ndev->dev_addr);

	if (!dev_get_platdata(&pdev->dev)) {
		dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}
	/* platform data points at the companion bfin_mii_bus device */
	pd = dev_get_platdata(&pdev->dev);
	lp->mii_bus = platform_get_drvdata(pd);
	if (!lp->mii_bus) {
		dev_err(&pdev->dev, "Cannot get mii_bus!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}
	lp->mii_bus->priv = ndev;
	mii_bus_data = dev_get_platdata(&pd->dev);

	rc = mii_probe(ndev, mii_bus_data->phy_mode);
	if (rc) {
		dev_err(&pdev->dev, "MII Probe failed!\n");
		goto out_err_mii_probe;
	}

	lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
	lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;

	/* Fill in the fields of the device structure with ethernet values. */
	/* NOTE(review): alloc_etherdev() above presumably already ran
	 * ether_setup(); this second call looks redundant - confirm. */
	ether_setup(ndev);

	ndev->netdev_ops = &bfin_mac_netdev_ops;
	ndev->ethtool_ops = &bfin_mac_ethtool_ops;

	init_timer(&lp->tx_reclaim_timer);
	lp->tx_reclaim_timer.data = (unsigned long)lp;
	lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;

	spin_lock_init(&lp->lock);

	/* now, enable interrupts */
	/* register irq handler */
	rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
			0, "EMAC_RX", ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
		rc = -EBUSY;
		goto out_err_request_irq;
	}

	rc = register_netdev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device!\n");
		goto out_err_reg_ndev;
	}

	bfin_mac_hwtstamp_init(ndev);
	rc = bfin_phc_init(ndev, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register PHC device!\n");
		goto out_err_phc;
	}

	/* now, print out the card info, in a short format.. */
	netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);

	return 0;

out_err_phc:
out_err_reg_ndev:
	free_irq(IRQ_MAC_RX, ndev);
out_err_request_irq:
out_err_mii_probe:
	/* NOTE(review): the mii_bus was allocated and registered by
	 * bfin_mii_bus_probe(), not by this function - unregistering and
	 * freeing it here looks like it tears down the other driver's
	 * resource; confirm ownership before relying on this path. */
	mdiobus_unregister(lp->mii_bus);
	mdiobus_free(lp->mii_bus);
out_err_probe_mac:
	free_netdev(ndev);

	return rc;
}
1734
/*
 * Platform remove for the MAC device: releases the PTP clock, detaches
 * from the shared MII bus, and tears down the netdev and its IRQ.
 */
static int bfin_mac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(ndev);

	bfin_phc_release(lp);

	/* drop our back-pointer from the shared mii_bus (owned by the
	 * bfin_mii_bus driver, which outlives us) */
	lp->mii_bus->priv = NULL;

	unregister_netdev(ndev);

	free_irq(IRQ_MAC_RX, ndev);

	free_netdev(ndev);

	return 0;
}
1752
1753#ifdef CONFIG_PM
1754static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
1755{
1756 struct net_device *net_dev = platform_get_drvdata(pdev);
1757 struct bfin_mac_local *lp = netdev_priv(net_dev);
1758
1759 if (lp->wol) {
1760 bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
1761 bfin_write_EMAC_WKUP_CTL(MPKE);
1762 enable_irq_wake(IRQ_MAC_WAKEDET);
1763 } else {
1764 if (netif_running(net_dev))
1765 bfin_mac_close(net_dev);
1766 }
1767
1768 return 0;
1769}
1770
1771static int bfin_mac_resume(struct platform_device *pdev)
1772{
1773 struct net_device *net_dev = platform_get_drvdata(pdev);
1774 struct bfin_mac_local *lp = netdev_priv(net_dev);
1775
1776 if (lp->wol) {
1777 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
1778 bfin_write_EMAC_WKUP_CTL(0);
1779 disable_irq_wake(IRQ_MAC_WAKEDET);
1780 } else {
1781 if (netif_running(net_dev))
1782 bfin_mac_open(net_dev);
1783 }
1784
1785 return 0;
1786}
1787#else
1788#define bfin_mac_suspend NULL
1789#define bfin_mac_resume NULL
1790#endif /* CONFIG_PM */
1791
1792static int bfin_mii_bus_probe(struct platform_device *pdev)
1793{
1794 struct mii_bus *miibus;
1795 struct bfin_mii_bus_platform_data *mii_bus_pd;
1796 const unsigned short *pin_req;
1797 int rc, i;
1798
1799 mii_bus_pd = dev_get_platdata(&pdev->dev);
1800 if (!mii_bus_pd) {
1801 dev_err(&pdev->dev, "No peripherals in platform data!\n");
1802 return -EINVAL;
1803 }
1804
1805 /*
1806 * We are setting up a network card,
1807 * so set the GPIO pins to Ethernet mode
1808 */
1809 pin_req = mii_bus_pd->mac_peripherals;
1810 rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
1811 if (rc) {
1812 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1813 return rc;
1814 }
1815
1816 rc = -ENOMEM;
1817 miibus = mdiobus_alloc();
1818 if (miibus == NULL)
1819 goto out_err_alloc;
1820 miibus->read = bfin_mdiobus_read;
1821 miibus->write = bfin_mdiobus_write;
1822
1823 miibus->parent = &pdev->dev;
1824 miibus->name = "bfin_mii_bus";
1825 miibus->phy_mask = mii_bus_pd->phy_mask;
1826
1827 snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x",
1828 pdev->name, pdev->id);
1829 miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1830 if (!miibus->irq)
1831 goto out_err_irq_alloc;
1832
1833 for (i = rc; i < PHY_MAX_ADDR; ++i)
1834 miibus->irq[i] = PHY_POLL;
1835
1836 rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
1837 if (rc != mii_bus_pd->phydev_number)
1838 dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
1839 mii_bus_pd->phydev_number);
1840 for (i = 0; i < rc; ++i) {
1841 unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
1842 if (phyaddr < PHY_MAX_ADDR)
1843 miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
1844 else
1845 dev_err(&pdev->dev,
1846 "Invalid PHY address %i for phydev %i\n",
1847 phyaddr, i);
1848 }
1849
1850 rc = mdiobus_register(miibus);
1851 if (rc) {
1852 dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1853 goto out_err_mdiobus_register;
1854 }
1855
1856 platform_set_drvdata(pdev, miibus);
1857 return 0;
1858
1859out_err_mdiobus_register:
1860 kfree(miibus->irq);
1861out_err_irq_alloc:
1862 mdiobus_free(miibus);
1863out_err_alloc:
1864 peripheral_free_list(pin_req);
1865
1866 return rc;
1867}
1868
/*
 * Platform remove for the shared MII bus: reverses bfin_mii_bus_probe()
 * in the opposite order (unregister, free the irq table and bus, then
 * release the Ethernet pins).
 */
static int bfin_mii_bus_remove(struct platform_device *pdev)
{
	struct mii_bus *miibus = platform_get_drvdata(pdev);
	struct bfin_mii_bus_platform_data *mii_bus_pd =
		dev_get_platdata(&pdev->dev);

	mdiobus_unregister(miibus);
	kfree(miibus->irq);
	mdiobus_free(miibus);
	peripheral_free_list(mii_bus_pd->mac_peripherals);

	return 0;
}
1882
/* driver for the shared MDIO/MII bus device (bound before the MAC) */
static struct platform_driver bfin_mii_bus_driver = {
	.probe = bfin_mii_bus_probe,
	.remove = bfin_mii_bus_remove,
	.driver = {
		.name = "bfin_mii_bus",
		.owner	= THIS_MODULE,
	},
};
1891
/* driver for the MAC itself; uses legacy platform suspend/resume hooks */
static struct platform_driver bfin_mac_driver = {
	.probe = bfin_mac_probe,
	.remove = bfin_mac_remove,
	.resume = bfin_mac_resume,
	.suspend = bfin_mac_suspend,
	.driver = {
		.name = KBUILD_MODNAME,
		.owner	= THIS_MODULE,
	},
};
1902
1903static int __init bfin_mac_init(void)
1904{
1905 int ret;
1906 ret = platform_driver_register(&bfin_mii_bus_driver);
1907 if (!ret)
1908 return platform_driver_register(&bfin_mac_driver);
1909 return -ENODEV;
1910}
1911
1912module_init(bfin_mac_init);
1913
/* Module exit: unregister the drivers in reverse registration order */
static void __exit bfin_mac_cleanup(void)
{
	platform_driver_unregister(&bfin_mac_driver);
	platform_driver_unregister(&bfin_mii_bus_driver);
}
1919
1920module_exit(bfin_mac_cleanup);
1921