1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Network device driver for Cell Processor-Based Blade and Celleb platform
4 *
5 * (C) Copyright IBM Corp. 2005
6 * (C) Copyright 2006 TOSHIBA CORPORATION
7 *
8 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
9 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
10 */
11
12#include <linux/compiler.h>
13#include <linux/crc32.h>
14#include <linux/delay.h>
15#include <linux/etherdevice.h>
16#include <linux/ethtool.h>
17#include <linux/firmware.h>
18#include <linux/if_vlan.h>
19#include <linux/in.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/gfp.h>
23#include <linux/ioport.h>
24#include <linux/ip.h>
25#include <linux/kernel.h>
26#include <linux/mii.h>
27#include <linux/module.h>
28#include <linux/netdevice.h>
29#include <linux/device.h>
30#include <linux/pci.h>
31#include <linux/skbuff.h>
32#include <linux/tcp.h>
33#include <linux/types.h>
34#include <linux/vmalloc.h>
35#include <linux/wait.h>
36#include <linux/workqueue.h>
37#include <linux/bitops.h>
38#include <linux/of.h>
39#include <net/checksum.h>
40
41#include "spider_net.h"
42
43MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
44 "<Jens.Osterkamp@de.ibm.com>");
45MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
46MODULE_LICENSE("GPL");
47MODULE_VERSION(VERSION);
48MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
49
50static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
51static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
52
53module_param(rx_descriptors, int, 0444);
54module_param(tx_descriptors, int, 0444);
55
56MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
57 "in rx chains");
58MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
59 "in tx chain");
60
61char spider_net_driver_name[] = "spidernet";
62
63static const struct pci_device_id spider_net_pci_tbl[] = {
64 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
65 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
66 { 0, }
67};
68
69MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
70
71/**
72 * spider_net_read_reg - reads an SMMIO register of a card
73 * @card: device structure
74 * @reg: register to read from
75 *
76 * returns the content of the specified SMMIO register.
77 */
78static inline u32
79spider_net_read_reg(struct spider_net_card *card, u32 reg)
80{
81 /* We use the powerpc specific variants instead of readl_be() because
82 * we know spidernet is not a real PCI device and we can thus avoid the
83 * performance hit caused by the PCI workarounds.
84 */
85 return in_be32(card->regs + reg);
86}
87
88/**
89 * spider_net_write_reg - writes to an SMMIO register of a card
90 * @card: device structure
91 * @reg: register to write to
92 * @value: value to write into the specified SMMIO register
93 */
94static inline void
95spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
96{
97 /* We use the powerpc specific variants instead of writel_be() because
98 * we know spidernet is not a real PCI device and we can thus avoid the
99 * performance hit caused by the PCI workarounds.
100 */
101 out_be32(card->regs + reg, value);
102}
103
104/**
105 * spider_net_write_phy - write to phy register
106 * @netdev: adapter to be written to
107 * @mii_id: id of MII
108 * @reg: PHY register
109 * @val: value to be written to phy register
110 *
 111 * spider_net_write_phy writes to an arbitrary PHY
112 * register via the spider GPCWOPCMD register. We assume the queue does
113 * not run full (not more than 15 commands outstanding).
114 **/
115static void
116spider_net_write_phy(struct net_device *netdev, int mii_id,
117 int reg, int val)
118{
119 struct spider_net_card *card = netdev_priv(netdev);
120 u32 writevalue;
121
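	/* Build the GPCWOPCMD command word: the PHY id is placed at bit 21,
	 * the register number at bit 16 and the data in the low 16 bits.
	 */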
122 writevalue = ((u32)mii_id << 21) |
123 ((u32)reg << 16) | ((u32)val);
124
125 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
126}
127
128/**
129 * spider_net_read_phy - read from phy register
130 * @netdev: network device to be read from
131 * @mii_id: id of MII
132 * @reg: PHY register
133 *
134 * Returns value read from PHY register
135 *
 136 * spider_net_read_phy reads from an arbitrary PHY
137 * register via the spider GPCROPCMD register
138 **/
139static int
140spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
141{
142 struct spider_net_card *card = netdev_priv(netdev);
143 u32 readvalue;
144
145 readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
146 spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
147
148 /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
149 * interrupt, as we poll for the completion of the read operation
150 * in spider_net_read_phy. Should take about 50 us
151 */
152 do {
153 readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
154 } while (readvalue & SPIDER_NET_GPREXEC);
155
156 readvalue &= SPIDER_NET_GPRDAT_MASK;
157
158 return readvalue;
159}
160
161/**
162 * spider_net_setup_aneg - initial auto-negotiation setup
163 * @card: device structure
164 **/
165static void
166spider_net_setup_aneg(struct spider_net_card *card)
167{
168 struct mii_phy *phy = &card->phy;
169 u32 advertise = 0;
170 u16 bmsr, estat;
171
172 bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
173 estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
174
175 if (bmsr & BMSR_10HALF)
176 advertise |= ADVERTISED_10baseT_Half;
177 if (bmsr & BMSR_10FULL)
178 advertise |= ADVERTISED_10baseT_Full;
179 if (bmsr & BMSR_100HALF)
180 advertise |= ADVERTISED_100baseT_Half;
181 if (bmsr & BMSR_100FULL)
182 advertise |= ADVERTISED_100baseT_Full;
183
184 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
 185 advertise |= ADVERTISED_1000baseT_Full;
 186 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
 187 advertise |= ADVERTISED_1000baseT_Half;
188
189 sungem_phy_probe(phy, phy->mii_id);
190 phy->def->ops->setup_aneg(phy, advertise);
191
192}
193
194/**
195 * spider_net_rx_irq_off - switch off rx irq on this spider card
196 * @card: device structure
197 *
198 * switches off rx irq by masking them out in the GHIINTnMSK register
199 */
200static void
201spider_net_rx_irq_off(struct spider_net_card *card)
202{
203 u32 regvalue;
204
205 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
206 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
207}
208
209/**
210 * spider_net_rx_irq_on - switch on rx irq on this spider card
211 * @card: device structure
212 *
213 * switches on rx irq by enabling them in the GHIINTnMSK register
214 */
215static void
216spider_net_rx_irq_on(struct spider_net_card *card)
217{
218 u32 regvalue;
219
220 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
221 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
222}
223
224/**
225 * spider_net_set_promisc - sets the unicast address or the promiscuous mode
226 * @card: card structure
227 *
228 * spider_net_set_promisc sets the unicast destination address filter and
229 * thus either allows for non-promisc mode or promisc mode
230 */
231static void
232spider_net_set_promisc(struct spider_net_card *card)
233{
234 u32 macu, macl;
235 struct net_device *netdev = card->netdev;
236
237 if (netdev->flags & IFF_PROMISC) {
238 /* clear destination entry 0 */
239 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
240 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
241 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
242 SPIDER_NET_PROMISC_VALUE);
243 } else {
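		/* Unicast filter entry 0: the first register takes address
		 * bytes 0-1 together with SPIDER_NET_UA_DESCR_VALUE, the
		 * second register takes address bytes 2-5.
		 */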
244 macu = netdev->dev_addr[0];
245 macu <<= 8;
246 macu |= netdev->dev_addr[1];
247 memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
248
249 macu |= SPIDER_NET_UA_DESCR_VALUE;
250 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
251 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
252 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
253 SPIDER_NET_NONPROMISC_VALUE);
254 }
255}
256
257/**
258 * spider_net_get_descr_status -- returns the status of a descriptor
259 * @hwdescr: descriptor to look at
260 *
261 * returns the status as in the dmac_cmd_status field of the descriptor
262 */
263static inline int
264spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
265{
266 return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
267}
268
269/**
270 * spider_net_free_chain - free descriptor chain
271 * @card: card structure
272 * @chain: address of chain
273 *
274 */
275static void
276spider_net_free_chain(struct spider_net_card *card,
277 struct spider_net_descr_chain *chain)
278{
279 struct spider_net_descr *descr;
280
281 descr = chain->ring;
282 do {
283 descr->bus_addr = 0;
284 descr->hwdescr->next_descr_addr = 0;
285 descr = descr->next;
286 } while (descr != chain->ring);
287
288 dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
289 chain->hwring, chain->dma_addr);
290}
291
292/**
293 * spider_net_init_chain - alloc and link descriptor chain
294 * @card: card structure
295 * @chain: address of chain
296 *
297 * We manage a circular list that mirrors the hardware structure,
298 * except that the hardware uses bus addresses.
299 *
300 * Returns 0 on success, <0 on failure
301 */
302static int
303spider_net_init_chain(struct spider_net_card *card,
304 struct spider_net_descr_chain *chain)
305{
306 int i;
307 struct spider_net_descr *descr;
308 struct spider_net_hw_descr *hwdescr;
309 dma_addr_t buf;
310 size_t alloc_size;
311
312 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
313
314 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
315 &chain->dma_addr, GFP_KERNEL);
316 if (!chain->hwring)
317 return -ENOMEM;
318
319 /* Set up the hardware pointers in each descriptor */
320 descr = chain->ring;
321 hwdescr = chain->hwring;
322 buf = chain->dma_addr;
323 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
324 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
325 hwdescr->next_descr_addr = 0;
326
327 descr->hwdescr = hwdescr;
328 descr->bus_addr = buf;
329 descr->next = descr + 1;
330 descr->prev = descr - 1;
331
332 buf += sizeof(struct spider_net_hw_descr);
333 }
334 /* do actual circular list */
335 (descr-1)->next = chain->ring;
336 chain->ring->prev = descr-1;
337
338 spin_lock_init(&chain->lock);
339 chain->head = chain->ring;
340 chain->tail = chain->ring;
341 return 0;
342}
343
344/**
345 * spider_net_free_rx_chain_contents - frees descr contents in rx chain
346 * @card: card structure
347 *
 348 * Unmaps and frees the skb attached to each descriptor in the rx chain.
349 */
350static void
351spider_net_free_rx_chain_contents(struct spider_net_card *card)
352{
353 struct spider_net_descr *descr;
354
355 descr = card->rx_chain.head;
356 do {
357 if (descr->skb) {
358 dma_unmap_single(&card->pdev->dev,
359 descr->hwdescr->buf_addr,
360 SPIDER_NET_MAX_FRAME,
361 DMA_BIDIRECTIONAL);
362 dev_kfree_skb(descr->skb);
363 descr->skb = NULL;
364 }
365 descr = descr->next;
366 } while (descr != card->rx_chain.head);
367}
368
369/**
370 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
371 * @card: card structure
372 * @descr: descriptor to re-init
373 *
374 * Return 0 on success, <0 on failure.
375 *
376 * Allocates a new rx skb, iommu-maps it and attaches it to the
377 * descriptor. Mark the descriptor as activated, ready-to-use.
378 */
379static int
380spider_net_prepare_rx_descr(struct spider_net_card *card,
381 struct spider_net_descr *descr)
382{
383 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
384 dma_addr_t buf;
385 int offset;
386 int bufsize;
387
388 /* we need to round up the buffer size to a multiple of 128 */
389 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
390 (~(SPIDER_NET_RXBUF_ALIGN - 1));
391
392 /* and we need to have it 128 byte aligned, therefore we allocate a
393 * bit more
394 */
395 /* allocate an skb */
396 descr->skb = netdev_alloc_skb(card->netdev,
397 bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
398 if (!descr->skb) {
399 if (netif_msg_rx_err(card) && net_ratelimit())
400 dev_err(&card->netdev->dev,
401 "Not enough memory to allocate rx buffer\n");
402 card->spider_stats.alloc_rx_skb_error++;
403 return -ENOMEM;
404 }
405 hwdescr->buf_size = bufsize;
406 hwdescr->result_size = 0;
407 hwdescr->valid_size = 0;
408 hwdescr->data_status = 0;
409 hwdescr->data_error = 0;
410
411 offset = ((unsigned long)descr->skb->data) &
412 (SPIDER_NET_RXBUF_ALIGN - 1);
413 if (offset)
414 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
415 /* iommu-map the skb */
416 buf = dma_map_single(&card->pdev->dev, descr->skb->data,
417 SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE);
418 if (dma_mapping_error(&card->pdev->dev, buf)) {
419 dev_kfree_skb_any(descr->skb);
420 descr->skb = NULL;
421 if (netif_msg_rx_err(card) && net_ratelimit())
422 dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
423 card->spider_stats.rx_iommu_map_error++;
424 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
425 } else {
426 hwdescr->buf_addr = buf;
427 wmb();
428 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
429 SPIDER_NET_DMAC_NOINTR_COMPLETE;
430 }
431
432 return 0;
433}
434
435/**
436 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
437 * @card: card structure
438 *
439 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
440 * chip by writing to the appropriate register. DMA is enabled in
441 * spider_net_enable_rxdmac.
442 */
443static inline void
444spider_net_enable_rxchtails(struct spider_net_card *card)
445{
446 /* assume chain is aligned correctly */
447 spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
448 card->rx_chain.tail->bus_addr);
449}
450
451/**
452 * spider_net_enable_rxdmac - enables a receive DMA controller
453 * @card: card structure
454 *
455 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
456 * in the GDADMACCNTR register
457 */
458static inline void
459spider_net_enable_rxdmac(struct spider_net_card *card)
460{
461 wmb();
462 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
463 SPIDER_NET_DMA_RX_VALUE);
464}
465
466/**
467 * spider_net_disable_rxdmac - disables the receive DMA controller
468 * @card: card structure
469 *
470 * spider_net_disable_rxdmac terminates processing on the DMA controller
471 * by turing off the DMA controller, with the force-end flag set.
472 */
473static inline void
474spider_net_disable_rxdmac(struct spider_net_card *card)
475{
476 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
477 SPIDER_NET_DMA_RX_FEND_VALUE);
478}
479
480/**
481 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
482 * @card: card structure
483 *
484 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
485 */
486static void
487spider_net_refill_rx_chain(struct spider_net_card *card)
488{
489 struct spider_net_descr_chain *chain = &card->rx_chain;
490 unsigned long flags;
491
492 /* one context doing the refill (and a second context seeing that
493 * and omitting it) is ok. If called by NAPI, we'll be called again
494 * as spider_net_decode_one_descr is called several times. If some
495 * interrupt calls us, the NAPI is about to clean up anyway.
496 */
497 if (!spin_trylock_irqsave(&chain->lock, flags))
498 return;
499
500 while (spider_net_get_descr_status(chain->head->hwdescr) ==
501 SPIDER_NET_DESCR_NOT_IN_USE) {
502 if (spider_net_prepare_rx_descr(card, chain->head))
503 break;
504 chain->head = chain->head->next;
505 }
506
507 spin_unlock_irqrestore(&chain->lock, flags);
508}
509
510/**
511 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
512 * @card: card structure
513 *
514 * Returns 0 on success, <0 on failure.
515 */
516static int
517spider_net_alloc_rx_skbs(struct spider_net_card *card)
518{
519 struct spider_net_descr_chain *chain = &card->rx_chain;
520 struct spider_net_descr *start = chain->tail;
521 struct spider_net_descr *descr = start;
522
523 /* Link up the hardware chain pointers */
524 do {
525 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
526 descr = descr->next;
527 } while (descr != start);
528
 529 /* Put at least one buffer into the chain. If this fails,
530 * we've got a problem. If not, spider_net_refill_rx_chain
531 * will do the rest at the end of this function.
532 */
533 if (spider_net_prepare_rx_descr(card, chain->head))
534 goto error;
535 else
536 chain->head = chain->head->next;
537
 538 /* This will allocate the rest of the rx buffers;
 539 * if some allocations fail, they will be retried later on.
540 */
541 spider_net_refill_rx_chain(card);
542 spider_net_enable_rxdmac(card);
543 return 0;
544
545error:
546 spider_net_free_rx_chain_contents(card);
547 return -ENOMEM;
548}
549
550/**
551 * spider_net_get_multicast_hash - generates hash for multicast filter table
552 * @netdev: interface device structure
553 * @addr: multicast address
554 *
555 * returns the hash value.
556 *
557 * spider_net_get_multicast_hash calculates a hash value for a given multicast
 558 * address, which is used to set the multicast filter tables
559 */
560static u8
561spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
562{
563 u32 crc;
564 u8 hash;
565 char addr_for_crc[ETH_ALEN] = { 0, };
566 int i, bit;
567
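	/* Build a mirrored copy of the address: byte order reversed and
	 * the bits within each byte reversed. The CRC is computed over
	 * this mirrored address.
	 */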
568 for (i = 0; i < ETH_ALEN * 8; i++) {
569 bit = (addr[i / 8] >> (i % 8)) & 1;
570 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
571 }
572
573 crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
574
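	/* The 8-bit hash takes the top five CRC bits in bits 7:3 and the
	 * lowest three CRC bits in bits 2:0.
	 */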
575 hash = (crc >> 27);
576 hash <<= 3;
577 hash |= crc & 7;
578 hash &= 0xff;
579
580 return hash;
581}
582
583/**
584 * spider_net_set_multi - sets multicast addresses and promisc flags
585 * @netdev: interface device structure
586 *
587 * spider_net_set_multi configures multicast addresses as needed for the
588 * netdev interface. It also sets up multicast, allmulti and promisc
589 * flags appropriately
590 */
591static void
592spider_net_set_multi(struct net_device *netdev)
593{
594 struct netdev_hw_addr *ha;
595 u8 hash;
596 int i;
597 u32 reg;
598 struct spider_net_card *card = netdev_priv(netdev);
599 DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
600
601 spider_net_set_promisc(card);
602
603 if (netdev->flags & IFF_ALLMULTI) {
604 bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
605 goto write_hash;
606 }
607
608 bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
609
610 /* well, we know, what the broadcast hash value is: it's xfd
611 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
612 __set_bit(0xfd, bitmask);
613
614 netdev_for_each_mc_addr(ha, netdev) {
615 hash = spider_net_get_multicast_hash(netdev, ha->addr);
616 __set_bit(hash, bitmask);
617 }
618
619write_hash:
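	/* Each 32-bit GMRMHFILnR register packs four hash table entries,
	 * one per byte; a set entry is written as 0x08 in its byte.
	 */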
620 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
621 reg = 0;
622 if (test_bit(i * 4, bitmask))
623 reg += 0x08;
624 reg <<= 8;
625 if (test_bit(i * 4 + 1, bitmask))
626 reg += 0x08;
627 reg <<= 8;
628 if (test_bit(i * 4 + 2, bitmask))
629 reg += 0x08;
630 reg <<= 8;
631 if (test_bit(i * 4 + 3, bitmask))
632 reg += 0x08;
633
634 spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
635 }
636}
637
638/**
639 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
640 * @card: card structure
641 * @skb: packet to use
642 *
643 * returns 0 on success, <0 on failure.
644 *
 645 * Fills out the descriptor structure with the skb data and length,
 646 * and maps the buffer for DMA.
647 */
648static int
649spider_net_prepare_tx_descr(struct spider_net_card *card,
650 struct sk_buff *skb)
651{
652 struct spider_net_descr_chain *chain = &card->tx_chain;
653 struct spider_net_descr *descr;
654 struct spider_net_hw_descr *hwdescr;
655 dma_addr_t buf;
656 unsigned long flags;
657
658 buf = dma_map_single(&card->pdev->dev, skb->data, skb->len,
659 DMA_TO_DEVICE);
660 if (dma_mapping_error(&card->pdev->dev, buf)) {
661 if (netif_msg_tx_err(card) && net_ratelimit())
662 dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
663 "Dropping packet\n", skb->data, skb->len);
664 card->spider_stats.tx_iommu_map_error++;
665 return -ENOMEM;
666 }
667
668 spin_lock_irqsave(&chain->lock, flags);
669 descr = card->tx_chain.head;
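	/* Refuse the packet when the chain is about to become full, so
	 * that head never catches up with tail.
	 */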
670 if (descr->next == chain->tail->prev) {
671 spin_unlock_irqrestore(&chain->lock, flags);
672 dma_unmap_single(&card->pdev->dev, buf, skb->len,
673 DMA_TO_DEVICE);
674 return -ENOMEM;
675 }
676 hwdescr = descr->hwdescr;
677 chain->head = descr->next;
678
679 descr->skb = skb;
680 hwdescr->buf_addr = buf;
681 hwdescr->buf_size = skb->len;
682 hwdescr->next_descr_addr = 0;
683 hwdescr->data_status = 0;
684
685 hwdescr->dmac_cmd_status =
686 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
687 spin_unlock_irqrestore(&chain->lock, flags);
688
689 if (skb->ip_summed == CHECKSUM_PARTIAL)
690 switch (ip_hdr(skb)->protocol) {
691 case IPPROTO_TCP:
692 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
693 break;
694 case IPPROTO_UDP:
695 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
696 break;
697 }
698
699 /* Chain the bus address, so that the DMA engine finds this descr. */
700 wmb();
701 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
702
703 netif_trans_update(card->netdev); /* set netdev watchdog timer */
704 return 0;
705}
706
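/**
 * spider_net_set_low_watermark - set the TX low-watermark descriptor
 * @card: card structure
 *
 * Walks the not-yet-released part of the tx chain and, if it is long
 * enough, flags the descriptor about three quarters of the way in with
 * SPIDER_NET_DESCR_TXDESFLG, clearing the flag on the previous watermark
 * descriptor. Returns a rough count of the pending descriptors.
 */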
707static int
708spider_net_set_low_watermark(struct spider_net_card *card)
709{
710 struct spider_net_descr *descr = card->tx_chain.tail;
711 struct spider_net_hw_descr *hwdescr;
712 unsigned long flags;
713 int status;
714 int cnt=0;
715 int i;
716
717 /* Measure the length of the queue. Measurement does not
718 * need to be precise -- does not need a lock.
719 */
720 while (descr != card->tx_chain.head) {
721 status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
722 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
723 break;
724 descr = descr->next;
725 cnt++;
726 }
727
728 /* If TX queue is short, don't even bother with interrupts */
729 if (cnt < card->tx_chain.num_desc/4)
730 return cnt;
731
732 /* Set low-watermark 3/4th's of the way into the queue. */
733 descr = card->tx_chain.tail;
734 cnt = (cnt*3)/4;
735 for (i=0;i<cnt; i++)
736 descr = descr->next;
737
738 /* Set the new watermark, clear the old watermark */
739 spin_lock_irqsave(&card->tx_chain.lock, flags);
740 descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
741 if (card->low_watermark && card->low_watermark != descr) {
742 hwdescr = card->low_watermark->hwdescr;
743 hwdescr->dmac_cmd_status =
744 hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
745 }
746 card->low_watermark = descr;
747 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
748 return cnt;
749}
750
751/**
752 * spider_net_release_tx_chain - processes sent tx descriptors
753 * @card: adapter structure
754 * @brutal: if set, don't care about whether descriptor seems to be in use
755 *
756 * returns 0 if the tx ring is empty, otherwise 1.
757 *
758 * spider_net_release_tx_chain releases the tx descriptors that spider has
 759 * finished with (if non-brutal) or simply releases all tx descriptors (if brutal).
760 * If some other context is calling this function, we return 1 so that we're
761 * scheduled again (if we were scheduled) and will not lose initiative.
762 */
763static int
764spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
765{
766 struct net_device *dev = card->netdev;
767 struct spider_net_descr_chain *chain = &card->tx_chain;
768 struct spider_net_descr *descr;
769 struct spider_net_hw_descr *hwdescr;
770 struct sk_buff *skb;
771 u32 buf_addr;
772 unsigned long flags;
773 int status;
774
775 while (1) {
776 spin_lock_irqsave(&chain->lock, flags);
777 if (chain->tail == chain->head) {
778 spin_unlock_irqrestore(&chain->lock, flags);
779 return 0;
780 }
781 descr = chain->tail;
782 hwdescr = descr->hwdescr;
783
784 status = spider_net_get_descr_status(hwdescr);
785 switch (status) {
786 case SPIDER_NET_DESCR_COMPLETE:
787 dev->stats.tx_packets++;
788 dev->stats.tx_bytes += descr->skb->len;
789 break;
790
791 case SPIDER_NET_DESCR_CARDOWNED:
792 if (!brutal) {
793 spin_unlock_irqrestore(&chain->lock, flags);
794 return 1;
795 }
796
797 /* fallthrough, if we release the descriptors
798 * brutally (then we don't care about
799 * SPIDER_NET_DESCR_CARDOWNED)
800 */
801 fallthrough;
802
803 case SPIDER_NET_DESCR_RESPONSE_ERROR:
804 case SPIDER_NET_DESCR_PROTECTION_ERROR:
805 case SPIDER_NET_DESCR_FORCE_END:
806 if (netif_msg_tx_err(card))
807 dev_err(&card->netdev->dev, "forcing end of tx descriptor "
808 "with status x%02x\n", status);
809 dev->stats.tx_errors++;
810 break;
811
812 default:
813 dev->stats.tx_dropped++;
814 if (!brutal) {
815 spin_unlock_irqrestore(&chain->lock, flags);
816 return 1;
817 }
818 }
819
820 chain->tail = descr->next;
821 hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
822 skb = descr->skb;
823 descr->skb = NULL;
824 buf_addr = hwdescr->buf_addr;
825 spin_unlock_irqrestore(&chain->lock, flags);
826
827 /* unmap the skb */
828 if (skb) {
829 dma_unmap_single(&card->pdev->dev, buf_addr, skb->len,
830 DMA_TO_DEVICE);
831 dev_consume_skb_any(skb);
832 }
833 }
834 return 0;
835}
836
837/**
838 * spider_net_kick_tx_dma - enables TX DMA processing
839 * @card: card structure
840 *
841 * This routine will start the transmit DMA running if
 842 * it is not already running. This routine need only be
843 * called when queueing a new packet to an empty tx queue.
 844 * Writes the first card-owned descriptor (searching from the tail)
 845 * as the start address of the tx descriptor chain and enables the
 846 * transmission DMA engine.
847 */
848static inline void
849spider_net_kick_tx_dma(struct spider_net_card *card)
850{
851 struct spider_net_descr *descr;
852
853 if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
854 SPIDER_NET_TX_DMA_EN)
855 goto out;
856
857 descr = card->tx_chain.tail;
858 for (;;) {
859 if (spider_net_get_descr_status(descr->hwdescr) ==
860 SPIDER_NET_DESCR_CARDOWNED) {
861 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
862 descr->bus_addr);
863 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
864 SPIDER_NET_DMA_TX_VALUE);
865 break;
866 }
867 if (descr == card->tx_chain.head)
868 break;
869 descr = descr->next;
870 }
871
872out:
873 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
874}
875
876/**
877 * spider_net_xmit - transmits a frame over the device
878 * @skb: packet to send out
879 * @netdev: interface device structure
880 *
881 * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
882 */
883static netdev_tx_t
884spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
885{
886 int cnt;
887 struct spider_net_card *card = netdev_priv(netdev);
888
889 spider_net_release_tx_chain(card, 0);
890
891 if (spider_net_prepare_tx_descr(card, skb) != 0) {
892 netdev->stats.tx_dropped++;
893 netif_stop_queue(netdev);
894 return NETDEV_TX_BUSY;
895 }
896
897 cnt = spider_net_set_low_watermark(card);
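	/* If the queue is nearly empty, kick the TX DMA engine directly
	 * instead of relying on the watermark interrupt.
	 */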
898 if (cnt < 5)
899 spider_net_kick_tx_dma(card);
900 return NETDEV_TX_OK;
901}
902
903/**
904 * spider_net_cleanup_tx_ring - cleans up the TX ring
905 * @t: timer context used to obtain the pointer to net card data structure
906 *
907 * spider_net_cleanup_tx_ring is called by either the tx_timer
908 * or from the NAPI polling routine.
 909 * This routine releases resources associated with transmitted
910 * packets, including updating the queue tail pointer.
911 */
912static void
913spider_net_cleanup_tx_ring(struct timer_list *t)
914{
915 struct spider_net_card *card = from_timer(card, t, tx_timer);
916 if ((spider_net_release_tx_chain(card, 0) != 0) &&
917 (card->netdev->flags & IFF_UP)) {
918 spider_net_kick_tx_dma(card);
919 netif_wake_queue(card->netdev);
920 }
921}
922
923/**
924 * spider_net_do_ioctl - called for device ioctls
925 * @netdev: interface device structure
926 * @ifr: request parameter structure for ioctl
927 * @cmd: command code for ioctl
928 *
929 * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
 930 * -EOPNOTSUPP is returned if an unknown ioctl was requested.
931 */
932static int
933spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
934{
935 switch (cmd) {
936 default:
937 return -EOPNOTSUPP;
938 }
939}
940
941/**
942 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
943 * @descr: descriptor to process
944 * @card: card structure
945 *
946 * Fills out skb structure and passes the data to the stack.
947 * The descriptor state is not changed.
948 */
949static void
950spider_net_pass_skb_up(struct spider_net_descr *descr,
951 struct spider_net_card *card)
952{
953 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
954 struct sk_buff *skb = descr->skb;
955 struct net_device *netdev = card->netdev;
956 u32 data_status = hwdescr->data_status;
957 u32 data_error = hwdescr->data_error;
958
959 skb_put(skb, hwdescr->valid_size);
960
961 /* the card seems to add 2 bytes of junk in front
962 * of the ethernet frame
963 */
964#define SPIDER_MISALIGN 2
965 skb_pull(skb, SPIDER_MISALIGN);
966 skb->protocol = eth_type_trans(skb, netdev);
967
968 /* checksum offload */
969 skb_checksum_none_assert(skb);
970 if (netdev->features & NETIF_F_RXCSUM) {
971 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
972 SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
973 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
974 skb->ip_summed = CHECKSUM_UNNECESSARY;
975 }
976
977 if (data_status & SPIDER_NET_VLAN_PACKET) {
978 /* further enhancements: HW-accel VLAN */
979 }
980
981 /* update netdevice statistics */
982 netdev->stats.rx_packets++;
983 netdev->stats.rx_bytes += skb->len;
984
985 /* pass skb up to stack */
986 netif_receive_skb(skb);
987}
988
989static void show_rx_chain(struct spider_net_card *card)
990{
991 struct spider_net_descr_chain *chain = &card->rx_chain;
992 struct spider_net_descr *start= chain->tail;
993 struct spider_net_descr *descr= start;
994 struct spider_net_hw_descr *hwd = start->hwdescr;
995 struct device *dev = &card->netdev->dev;
996 u32 curr_desc, next_desc;
997 int status;
998
999 int tot = 0;
1000 int cnt = 0;
1001 int off = start - chain->ring;
1002 int cstat = hwd->dmac_cmd_status;
1003
1004 dev_info(dev, "Total number of descrs=%d\n",
1005 chain->num_desc);
1006 dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
1007 off, cstat);
1008
1009 curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
1010 next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
1011
1012 status = cstat;
1013 do
1014 {
1015 hwd = descr->hwdescr;
1016 off = descr - chain->ring;
1017 status = hwd->dmac_cmd_status;
1018
1019 if (descr == chain->head)
1020 dev_info(dev, "Chain head is at %d, head status=0x%x\n",
1021 off, status);
1022
1023 if (curr_desc == descr->bus_addr)
1024 dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
1025 off, status);
1026
1027 if (next_desc == descr->bus_addr)
1028 dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
1029 off, status);
1030
1031 if (hwd->next_descr_addr == 0)
1032 dev_info(dev, "chain is cut at %d\n", off);
1033
1034 if (cstat != status) {
1035 int from = (chain->num_desc + off - cnt) % chain->num_desc;
1036 int to = (chain->num_desc + off - 1) % chain->num_desc;
1037 dev_info(dev, "Have %d (from %d to %d) descrs "
1038 "with stat=0x%08x\n", cnt, from, to, cstat);
1039 cstat = status;
1040 cnt = 0;
1041 }
1042
1043 cnt ++;
1044 tot ++;
1045 descr = descr->next;
1046 } while (descr != start);
1047
1048 dev_info(dev, "Last %d descrs with stat=0x%08x "
1049 "for a total of %d descrs\n", cnt, cstat, tot);
1050
1051#ifdef DEBUG
1052 /* Now dump the whole ring */
1053 descr = start;
1054 do
1055 {
1056 struct spider_net_hw_descr *hwd = descr->hwdescr;
1057 status = spider_net_get_descr_status(hwd);
1058 cnt = descr - chain->ring;
1059 dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
1060 cnt, status, descr->skb);
1061 dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
1062 descr->bus_addr, hwd->buf_addr, hwd->buf_size);
1063 dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
1064 hwd->next_descr_addr, hwd->result_size,
1065 hwd->valid_size);
1066 dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
1067 hwd->dmac_cmd_status, hwd->data_status,
1068 hwd->data_error);
1069 dev_info(dev, "\n");
1070
1071 descr = descr->next;
1072 } while (descr != start);
1073#endif
1074
1075}
1076
1077/**
1078 * spider_net_resync_head_ptr - Advance head ptr past empty descrs
1079 * @card: card structure
1080 *
1081 * If the driver fails to keep up and empty the queue, then the
 1082 * hardware will run out of room to put incoming packets. This
 1083 * will cause the hardware to skip descrs that are full (instead
 1084 * of halting/retrying). Thus, once the driver runs, it will need
 1085 * to "catch up" to where the hardware chain pointer is.
1086 */
1087static void spider_net_resync_head_ptr(struct spider_net_card *card)
1088{
1089 unsigned long flags;
1090 struct spider_net_descr_chain *chain = &card->rx_chain;
1091 struct spider_net_descr *descr;
1092 int i, status;
1093
1094 /* Advance head pointer past any empty descrs */
1095 descr = chain->head;
1096 status = spider_net_get_descr_status(descr->hwdescr);
1097
1098 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
1099 return;
1100
1101 spin_lock_irqsave(&chain->lock, flags);
1102
1103 descr = chain->head;
1104 status = spider_net_get_descr_status(descr->hwdescr);
1105 for (i=0; i<chain->num_desc; i++) {
1106 if (status != SPIDER_NET_DESCR_CARDOWNED) break;
1107 descr = descr->next;
1108 status = spider_net_get_descr_status(descr->hwdescr);
1109 }
1110 chain->head = descr;
1111
1112 spin_unlock_irqrestore(&chain->lock, flags);
1113}
1114
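/*
 * spider_net_resync_tail_ptr - advance the RX tail past empty/reaped descrs
 *
 * Returns 1 if the tail pointer did not move or wrapped all the way
 * around the ring, 0 if it was advanced to a descriptor that still
 * needs processing.
 */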
1115static int spider_net_resync_tail_ptr(struct spider_net_card *card)
1116{
1117 struct spider_net_descr_chain *chain = &card->rx_chain;
1118 struct spider_net_descr *descr;
1119 int i, status;
1120
1121 /* Advance tail pointer past any empty and reaped descrs */
1122 descr = chain->tail;
1123 status = spider_net_get_descr_status(descr->hwdescr);
1124
1125 for (i=0; i<chain->num_desc; i++) {
1126 if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
1127 (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
1128 descr = descr->next;
1129 status = spider_net_get_descr_status(descr->hwdescr);
1130 }
1131 chain->tail = descr;
1132
1133 if ((i == chain->num_desc) || (i == 0))
1134 return 1;
1135 return 0;
1136}
1137
1138/**
1139 * spider_net_decode_one_descr - processes an RX descriptor
1140 * @card: card structure
1141 *
1142 * Returns 1 if a packet has been sent to the stack, otherwise 0.
1143 *
1144 * Processes an RX descriptor by iommu-unmapping the data buffer
1145 * and passing the packet up to the stack. This function is called
1146 * in softirq context, e.g. either bottom half from interrupt or
1147 * NAPI polling context.
1148 */
1149static int
1150spider_net_decode_one_descr(struct spider_net_card *card)
1151{
1152 struct net_device *dev = card->netdev;
1153 struct spider_net_descr_chain *chain = &card->rx_chain;
1154 struct spider_net_descr *descr = chain->tail;
1155 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
1156 u32 hw_buf_addr;
1157 int status;
1158
1159 status = spider_net_get_descr_status(hwdescr);
1160
1161 /* Nothing in the descriptor, or ring must be empty */
1162 if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
1163 (status == SPIDER_NET_DESCR_NOT_IN_USE))
1164 return 0;
1165
1166 /* descriptor definitively used -- move on tail */
1167 chain->tail = descr->next;
1168
1169 /* unmap descriptor */
1170 hw_buf_addr = hwdescr->buf_addr;
1171 hwdescr->buf_addr = 0xffffffff;
1172 dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME,
1173 DMA_FROM_DEVICE);
1174
1175 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1176 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1177 (status == SPIDER_NET_DESCR_FORCE_END) ) {
1178 if (netif_msg_rx_err(card))
1179 dev_err(&dev->dev,
1180 "dropping RX descriptor with state %d\n", status);
1181 dev->stats.rx_dropped++;
1182 goto bad_desc;
1183 }
1184
1185 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1186 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1187 if (netif_msg_rx_err(card))
1188 dev_err(&card->netdev->dev,
1189 "RX descriptor with unknown state %d\n", status);
1190 card->spider_stats.rx_desc_unk_state++;
1191 goto bad_desc;
1192 }
1193
1194 /* The cases we'll throw away the packet immediately */
1195 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1196 if (netif_msg_rx_err(card))
1197 dev_err(&card->netdev->dev,
1198 "error in received descriptor found, "
1199 "data_status=x%08x, data_error=x%08x\n",
1200 hwdescr->data_status, hwdescr->data_error);
1201 goto bad_desc;
1202 }
1203
1204 if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
1205 dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
1206 hwdescr->dmac_cmd_status);
1207 pr_err("buf_addr=x%08x\n", hw_buf_addr);
1208 pr_err("buf_size=x%08x\n", hwdescr->buf_size);
1209 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
1210 pr_err("result_size=x%08x\n", hwdescr->result_size);
1211 pr_err("valid_size=x%08x\n", hwdescr->valid_size);
1212 pr_err("data_status=x%08x\n", hwdescr->data_status);
1213 pr_err("data_error=x%08x\n", hwdescr->data_error);
1214 pr_err("which=%ld\n", descr - card->rx_chain.ring);
1215
1216 card->spider_stats.rx_desc_error++;
1217 goto bad_desc;
1218 }
1219
1220 /* Ok, we've got a packet in descr */
1221 spider_net_pass_skb_up(descr, card);
1222 descr->skb = NULL;
1223 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1224 return 1;
1225
1226bad_desc:
1227 if (netif_msg_rx_err(card))
1228 show_rx_chain(card);
1229 dev_kfree_skb_irq(descr->skb);
1230 descr->skb = NULL;
1231 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1232 return 0;
1233}
1234
1235/**
1236 * spider_net_poll - NAPI poll function called by the stack to return packets
1237 * @napi: napi device structure
1238 * @budget: number of packets we can pass to the stack at most
1239 *
 1240 * Returns the number of packets processed (at most @budget).
 1241 *
 1242 * spider_net_poll passes received packets from the rx descriptors to the
 1243 * stack (using netif_receive_skb) and cleans up the tx ring. If fewer
 1244 * than @budget packets were processed, the driver re-enables rx
 1245 * interrupts and completes NAPI polling.
1246 */
1247static int spider_net_poll(struct napi_struct *napi, int budget)
1248{
1249 struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
1250 int packets_done = 0;
1251
1252 while (packets_done < budget) {
1253 if (!spider_net_decode_one_descr(card))
1254 break;
1255
1256 packets_done++;
1257 }
1258
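	/* No packets were processed although RX interrupts were seen: the
	 * driver pointers have most likely drifted away from the hardware
	 * chain, so resynchronize them.
	 */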
1259 if ((packets_done == 0) && (card->num_rx_ints != 0)) {
1260 if (!spider_net_resync_tail_ptr(card))
1261 packets_done = budget;
1262 spider_net_resync_head_ptr(card);
1263 }
1264 card->num_rx_ints = 0;
1265
1266 spider_net_refill_rx_chain(card);
1267 spider_net_enable_rxdmac(card);
1268
1269 spider_net_cleanup_tx_ring(&card->tx_timer);
1270
1271 /* if all packets are in the stack, enable interrupts and return 0 */
1272 /* if not, return 1 */
1273 if (packets_done < budget) {
1274 napi_complete_done(napi, packets_done);
1275 spider_net_rx_irq_on(card);
1276 card->ignore_rx_ramfull = 0;
1277 }
1278
1279 return packets_done;
1280}
1281
1282/**
1283 * spider_net_set_mac - sets the MAC of an interface
1284 * @netdev: interface device structure
1285 * @p: pointer to new MAC address
1286 *
 1287 * Returns 0 on success, -EADDRNOTAVAIL if the given address is not a
 1288 * valid unicast address. The new address is programmed into the card.
1289 */
1290static int
1291spider_net_set_mac(struct net_device *netdev, void *p)
1292{
1293 struct spider_net_card *card = netdev_priv(netdev);
1294 u32 macl, macu, regvalue;
1295 struct sockaddr *addr = p;
1296
1297 if (!is_valid_ether_addr(addr->sa_data))
1298 return -EADDRNOTAVAIL;
1299
1300 eth_hw_addr_set(netdev, addr->sa_data);
1301
1302 /* switch off GMACTPE and GMACRPE */
1303 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1304 regvalue &= ~((1 << 5) | (1 << 6));
1305 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1306
1307 /* write mac */
1308 macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
1309 (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
1310 macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
1311 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
1312 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
1313
1314 /* switch GMACTPE and GMACRPE back on */
1315 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1316 regvalue |= ((1 << 5) | (1 << 6));
1317 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1318
1319 spider_net_set_promisc(card);
1320
1321 return 0;
1322}
1323
1324/**
1325 * spider_net_link_reset
1326 * @netdev: net device structure
1327 *
1328 * This is called when the PHY_LINK signal is asserted. For the blade this is
1329 * not connected so we should never get here.
1330 *
1331 */
1332static void
1333spider_net_link_reset(struct net_device *netdev)
1334{
1335
1336 struct spider_net_card *card = netdev_priv(netdev);
1337
1338 del_timer_sync(&card->aneg_timer);
1339
1340 /* clear interrupt, block further interrupts */
1341 spider_net_write_reg(card, SPIDER_NET_GMACST,
1342 spider_net_read_reg(card, SPIDER_NET_GMACST));
1343 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1344
1345 /* reset phy and setup aneg */
1346 card->aneg_count = 0;
1347 card->medium = BCM54XX_COPPER;
1348 spider_net_setup_aneg(card);
1349 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1350
1351}
1352
1353/**
1354 * spider_net_handle_error_irq - handles errors raised by an interrupt
1355 * @card: card structure
1356 * @status_reg: interrupt status register 0 (GHIINT0STS)
1357 * @error_reg1: interrupt status register 1 (GHIINT1STS)
1358 * @error_reg2: interrupt status register 2 (GHIINT2STS)
1359 *
1360 * spider_net_handle_error_irq treats or ignores all error conditions
1361 * found when an interrupt is presented
1362 */
1363static void
1364spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1365 u32 error_reg1, u32 error_reg2)
1366{
1367 u32 i;
1368 int show_error = 1;
1369
1370 /* check GHIINT0STS ************************************/
1371 if (status_reg)
1372 for (i = 0; i < 32; i++)
1373 if (status_reg & (1<<i))
1374 switch (i)
1375 {
1376 /* let error_reg1 and error_reg2 evaluation decide, what to do
1377 case SPIDER_NET_PHYINT:
1378 case SPIDER_NET_GMAC2INT:
1379 case SPIDER_NET_GMAC1INT:
1380 case SPIDER_NET_GFIFOINT:
1381 case SPIDER_NET_DMACINT:
1382 case SPIDER_NET_GSYSINT:
1383 break; */
1384
1385 case SPIDER_NET_GIPSINT:
1386 show_error = 0;
1387 break;
1388
1389 case SPIDER_NET_GPWOPCMPINT:
1390 /* PHY write operation completed */
1391 show_error = 0;
1392 break;
1393 case SPIDER_NET_GPROPCMPINT:
1394 /* PHY read operation completed */
1395 /* we don't use semaphores, as we poll for the completion
1396 * of the read operation in spider_net_read_phy. Should take
1397 * about 50 us
1398 */
1399 show_error = 0;
1400 break;
1401 case SPIDER_NET_GPWFFINT:
1402 /* PHY command queue full */
1403 if (netif_msg_intr(card))
1404 dev_err(&card->netdev->dev, "PHY write queue full\n");
1405 show_error = 0;
1406 break;
1407
1408 /* case SPIDER_NET_GRMDADRINT: not used. print a message */
1409 /* case SPIDER_NET_GRMARPINT: not used. print a message */
1410 /* case SPIDER_NET_GRMMPINT: not used. print a message */
1411
1412 case SPIDER_NET_GDTDEN0INT:
1413 /* someone has set TX_DMA_EN to 0 */
1414 show_error = 0;
1415 break;
1416
1417 case SPIDER_NET_GDDDEN0INT:
1418 case SPIDER_NET_GDCDEN0INT:
1419 case SPIDER_NET_GDBDEN0INT:
1420 case SPIDER_NET_GDADEN0INT:
1421 /* someone has set RX_DMA_EN to 0 */
1422 show_error = 0;
1423 break;
1424
1425 /* RX interrupts */
1426 case SPIDER_NET_GDDFDCINT:
1427 case SPIDER_NET_GDCFDCINT:
1428 case SPIDER_NET_GDBFDCINT:
1429 case SPIDER_NET_GDAFDCINT:
1430 /* case SPIDER_NET_GDNMINT: not used. print a message */
1431 /* case SPIDER_NET_GCNMINT: not used. print a message */
1432 /* case SPIDER_NET_GBNMINT: not used. print a message */
1433 /* case SPIDER_NET_GANMINT: not used. print a message */
1434 /* case SPIDER_NET_GRFNMINT: not used. print a message */
1435 show_error = 0;
1436 break;
1437
1438 /* TX interrupts */
1439 case SPIDER_NET_GDTFDCINT:
1440 show_error = 0;
1441 break;
1442 case SPIDER_NET_GTTEDINT:
1443 show_error = 0;
1444 break;
1445 case SPIDER_NET_GDTDCEINT:
1446 /* chain end. If a descriptor should be sent, kick off
1447 * tx dma
1448 if (card->tx_chain.tail != card->tx_chain.head)
1449 spider_net_kick_tx_dma(card);
1450 */
1451 show_error = 0;
1452 break;
1453
1454 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
1455 /* case SPIDER_NET_GFREECNTINT: not used. print a message */
1456 }
1457
1458 /* check GHIINT1STS ************************************/
1459 if (error_reg1)
1460 for (i = 0; i < 32; i++)
1461 if (error_reg1 & (1<<i))
1462 switch (i)
1463 {
1464 case SPIDER_NET_GTMFLLINT:
 1465 /* TX RAM full can happen in normal operation.
1466 * Logging is not needed.
1467 */
1468 show_error = 0;
1469 break;
1470 case SPIDER_NET_GRFDFLLINT:
1471 case SPIDER_NET_GRFCFLLINT:
1472 case SPIDER_NET_GRFBFLLINT:
1473 case SPIDER_NET_GRFAFLLINT:
1474 case SPIDER_NET_GRMFLLINT:
1475 /* Could happen when rx chain is full */
1476 if (card->ignore_rx_ramfull == 0) {
1477 card->ignore_rx_ramfull = 1;
1478 spider_net_resync_head_ptr(card);
1479 spider_net_refill_rx_chain(card);
1480 spider_net_enable_rxdmac(card);
1481 card->num_rx_ints ++;
1482 napi_schedule(&card->napi);
1483 }
1484 show_error = 0;
1485 break;
1486
1487 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
1488 case SPIDER_NET_GDTINVDINT:
1489 /* allrighty. tx from previous descr ok */
1490 show_error = 0;
1491 break;
1492
1493 /* chain end */
1494 case SPIDER_NET_GDDDCEINT:
1495 case SPIDER_NET_GDCDCEINT:
1496 case SPIDER_NET_GDBDCEINT:
1497 case SPIDER_NET_GDADCEINT:
1498 spider_net_resync_head_ptr(card);
1499 spider_net_refill_rx_chain(card);
1500 spider_net_enable_rxdmac(card);
1501 card->num_rx_ints ++;
1502 napi_schedule(&card->napi);
1503 show_error = 0;
1504 break;
1505
1506 /* invalid descriptor */
1507 case SPIDER_NET_GDDINVDINT:
1508 case SPIDER_NET_GDCINVDINT:
1509 case SPIDER_NET_GDBINVDINT:
1510 case SPIDER_NET_GDAINVDINT:
1511 /* Could happen when rx chain is full */
1512 spider_net_resync_head_ptr(card);
1513 spider_net_refill_rx_chain(card);
1514 spider_net_enable_rxdmac(card);
1515 card->num_rx_ints ++;
1516 napi_schedule(&card->napi);
1517 show_error = 0;
1518 break;
1519
1520 /* case SPIDER_NET_GDTRSERINT: problem, print a message */
1521 /* case SPIDER_NET_GDDRSERINT: problem, print a message */
1522 /* case SPIDER_NET_GDCRSERINT: problem, print a message */
1523 /* case SPIDER_NET_GDBRSERINT: problem, print a message */
1524 /* case SPIDER_NET_GDARSERINT: problem, print a message */
1525 /* case SPIDER_NET_GDSERINT: problem, print a message */
1526 /* case SPIDER_NET_GDTPTERINT: problem, print a message */
1527 /* case SPIDER_NET_GDDPTERINT: problem, print a message */
1528 /* case SPIDER_NET_GDCPTERINT: problem, print a message */
1529 /* case SPIDER_NET_GDBPTERINT: problem, print a message */
1530 /* case SPIDER_NET_GDAPTERINT: problem, print a message */
1531 default:
1532 show_error = 1;
1533 break;
1534 }
1535
1536 /* check GHIINT2STS ************************************/
1537 if (error_reg2)
1538 for (i = 0; i < 32; i++)
1539 if (error_reg2 & (1<<i))
1540 switch (i)
1541 {
1542 /* there is nothing we can (want to) do at this time. Log a
1543 * message, we can switch on and off the specific values later on
1544 case SPIDER_NET_GPROPERINT:
1545 case SPIDER_NET_GMCTCRSNGINT:
1546 case SPIDER_NET_GMCTLCOLINT:
1547 case SPIDER_NET_GMCTTMOTINT:
1548 case SPIDER_NET_GMCRCAERINT:
1549 case SPIDER_NET_GMCRCALERINT:
1550 case SPIDER_NET_GMCRALNERINT:
1551 case SPIDER_NET_GMCROVRINT:
1552 case SPIDER_NET_GMCRRNTINT:
1553 case SPIDER_NET_GMCRRXERINT:
1554 case SPIDER_NET_GTITCSERINT:
1555 case SPIDER_NET_GTIFMTERINT:
1556 case SPIDER_NET_GTIPKTRVKINT:
1557 case SPIDER_NET_GTISPINGINT:
1558 case SPIDER_NET_GTISADNGINT:
1559 case SPIDER_NET_GTISPDNGINT:
1560 case SPIDER_NET_GRIFMTERINT:
1561 case SPIDER_NET_GRIPKTRVKINT:
1562 case SPIDER_NET_GRISPINGINT:
1563 case SPIDER_NET_GRISADNGINT:
1564 case SPIDER_NET_GRISPDNGINT:
1565 break;
1566 */
1567 default:
1568 break;
1569 }
1570
1571 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
1572 dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
1573 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1574 status_reg, error_reg1, error_reg2);
1575
1576 /* clear interrupt sources */
1577 spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
1578 spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
1579}
1580
1581/**
1582 * spider_net_interrupt - interrupt handler for spider_net
1583 * @irq: interrupt number
1584 * @ptr: pointer to net_device
1585 *
1586 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
1587 * interrupt found raised by card.
1588 *
1589 * This is the interrupt handler, that turns off
1590 * interrupts for this device and makes the stack poll the driver
1591 */
1592static irqreturn_t
1593spider_net_interrupt(int irq, void *ptr)
1594{
1595 struct net_device *netdev = ptr;
1596 struct spider_net_card *card = netdev_priv(netdev);
1597 u32 status_reg, error_reg1, error_reg2;
1598
1599 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
1600 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1601 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1602
1603 if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
1604 !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
1605 !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
1606 return IRQ_NONE;
1607
1608 if (status_reg & SPIDER_NET_RXINT ) {
1609 spider_net_rx_irq_off(card);
1610 napi_schedule(&card->napi);
1611 card->num_rx_ints ++;
1612 }
1613 if (status_reg & SPIDER_NET_TXINT)
1614 napi_schedule(&card->napi);
1615
1616 if (status_reg & SPIDER_NET_LINKINT)
1617 spider_net_link_reset(netdev);
1618
1619 if (status_reg & SPIDER_NET_ERRINT )
1620 spider_net_handle_error_irq(card, status_reg,
1621 error_reg1, error_reg2);
1622
1623 /* clear interrupt sources */
1624 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
1625
1626 return IRQ_HANDLED;
1627}
1628
1629#ifdef CONFIG_NET_POLL_CONTROLLER
1630/**
1631 * spider_net_poll_controller - artificial interrupt for netconsole etc.
1632 * @netdev: interface device structure
1633 *
1634 * see Documentation/networking/netconsole.rst
1635 */
1636static void
1637spider_net_poll_controller(struct net_device *netdev)
1638{
1639 disable_irq(netdev->irq);
1640 spider_net_interrupt(netdev->irq, netdev);
1641 enable_irq(netdev->irq);
1642}
1643#endif /* CONFIG_NET_POLL_CONTROLLER */
1644
1645/**
1646 * spider_net_enable_interrupts - enable interrupts
1647 * @card: card structure
1648 *
 1649 * spider_net_enable_interrupts enables several interrupts
1650 */
1651static void
1652spider_net_enable_interrupts(struct spider_net_card *card)
1653{
1654 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
1655 SPIDER_NET_INT0_MASK_VALUE);
1656 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
1657 SPIDER_NET_INT1_MASK_VALUE);
1658 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1659 SPIDER_NET_INT2_MASK_VALUE);
1660}
1661
1662/**
1663 * spider_net_disable_interrupts - disable interrupts
1664 * @card: card structure
1665 *
1666 * spider_net_disable_interrupts disables all the interrupts
1667 */
1668static void
1669spider_net_disable_interrupts(struct spider_net_card *card)
1670{
1671 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
1672 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
1673 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
1674 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1675}
1676
1677/**
1678 * spider_net_init_card - initializes the card
1679 * @card: card structure
1680 *
1681 * spider_net_init_card initializes the card so that other registers can
1682 * be used
1683 */
1684static void
1685spider_net_init_card(struct spider_net_card *card)
1686{
1687 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1688 SPIDER_NET_CKRCTRL_STOP_VALUE);
1689
1690 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1691 SPIDER_NET_CKRCTRL_RUN_VALUE);
1692
1693 /* trigger ETOMOD signal */
1694 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1695 spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
1696
1697 spider_net_disable_interrupts(card);
1698}
1699
1700/**
1701 * spider_net_enable_card - enables the card by setting all kinds of regs
1702 * @card: card structure
1703 *
1704 * spider_net_enable_card sets a lot of SMMIO registers to enable the device
1705 */
1706static void
1707spider_net_enable_card(struct spider_net_card *card)
1708{
1709 int i;
1710 /* the following array consists of (register),(value) pairs
1711 * that are set in this function. A register of 0 ends the list
1712 */
1713 u32 regs[][2] = {
1714 { SPIDER_NET_GRESUMINTNUM, 0 },
1715 { SPIDER_NET_GREINTNUM, 0 },
1716
1717 /* set interrupt frame number registers */
1718 /* clear the single DMA engine registers first */
1719 { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1720 { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1721 { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1722 { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1723 /* then set, what we really need */
1724 { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
1725
1726 /* timer counter registers and stuff */
1727 { SPIDER_NET_GFREECNNUM, 0 },
1728 { SPIDER_NET_GONETIMENUM, 0 },
1729 { SPIDER_NET_GTOUTFRMNUM, 0 },
1730
1731 /* RX mode setting */
1732 { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
1733 /* TX mode setting */
1734 { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
1735 /* IPSEC mode setting */
1736 { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
1737
1738 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1739
1740 { SPIDER_NET_GMRWOLCTRL, 0 },
1741 { SPIDER_NET_GTESTMD, 0x10000000 },
1742 { SPIDER_NET_GTTQMSK, 0x00400040 },
1743
1744 { SPIDER_NET_GMACINTEN, 0 },
1745
1746 /* flow control stuff */
1747 { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
1748 { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
1749
1750 { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
1751 { 0, 0}
1752 };
1753
1754 i = 0;
1755 while (regs[i][0]) {
1756 spider_net_write_reg(card, regs[i][0], regs[i][1]);
1757 i++;
1758 }
1759
1760 /* clear unicast filter table entries 1 to 14 */
1761 for (i = 1; i <= 14; i++) {
1762 spider_net_write_reg(card,
1763 SPIDER_NET_GMRUAFILnR + i * 8,
1764 0x00080000);
1765 spider_net_write_reg(card,
1766 SPIDER_NET_GMRUAFILnR + i * 8 + 4,
1767 0x00000000);
1768 }
1769
1770 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
1771
1772 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
1773
1774 /* set chain tail address for RX chains and
1775 * enable DMA
1776 */
1777 spider_net_enable_rxchtails(card);
1778 spider_net_enable_rxdmac(card);
1779
1780 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1781
1782 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1783 SPIDER_NET_LENLMT_VALUE);
1784 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1785 SPIDER_NET_OPMODE_VALUE);
1786
1787 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1788 SPIDER_NET_GDTBSTA);
1789}
1790
1791/**
1792 * spider_net_download_firmware - loads firmware into the adapter
1793 * @card: card structure
1794 * @firmware_ptr: pointer to firmware data
1795 *
1796 * spider_net_download_firmware loads the firmware data into the
 1797 * adapter. It assumes the length etc. to be correct.
1798 */
1799static int
1800spider_net_download_firmware(struct spider_net_card *card,
1801 const void *firmware_ptr)
1802{
1803 int sequencer, i;
1804 const u32 *fw_ptr = firmware_ptr;
1805
1806 /* stop sequencers */
1807 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1808 SPIDER_NET_STOP_SEQ_VALUE);
1809
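	/* For every sequencer, reset its program address register and then
	 * stream that sequencer's program words into its data register.
	 */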
1810 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1811 sequencer++) {
1812 spider_net_write_reg(card,
1813 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1814 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1815 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1816 sequencer * 8, *fw_ptr);
1817 fw_ptr++;
1818 }
1819 }
1820
1821 if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1822 return -EIO;
1823
1824 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1825 SPIDER_NET_RUN_SEQ_VALUE);
1826
1827 return 0;
1828}
1829
1830/**
1831 * spider_net_init_firmware - reads in firmware parts
1832 * @card: card structure
1833 *
1834 * Returns 0 on success, <0 on failure
1835 *
1836 * spider_net_init_firmware requests the sequencer firmware, does some basic
1837 * checks, downloads the firmware to the card and releases the firmware
1838 * structure again before returning.
1839 *
1840 * Firmware format
1841 * ===============
1842 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
1843 * the program for each sequencer. Use the command
1844 * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
1845 * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
1846 * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
1847 *
1848 * to generate spider_fw.bin, if you have sequencer programs with something
1849 * like the following contents for each sequencer:
1850 * <ONE LINE COMMENT>
1851 * <FIRST 4-BYTES-WORD FOR SEQUENCER>
1852 * <SECOND 4-BYTES-WORD FOR SEQUENCER>
1853 * ...
1854 * <1024th 4-BYTES-WORD FOR SEQUENCER>
1855 */
1856static int
1857spider_net_init_firmware(struct spider_net_card *card)
1858{
1859 struct firmware *firmware = NULL;
1860 struct device_node *dn;
1861 const u8 *fw_prop = NULL;
1862 int err = -ENOENT;
1863 int fw_size;
1864
1865 if (request_firmware((const struct firmware **)&firmware,
1866 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1867		if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
1868			if (netif_msg_probe(card))
1869				dev_err(&card->netdev->dev,
1870					"Incorrect size of spidernet firmware in filesystem. Looking in host firmware...\n");
1871			release_firmware(firmware);
1872			goto try_host_fw;
1873		}
1874 err = spider_net_download_firmware(card, firmware->data);
1875
1876 release_firmware(firmware);
1877 if (err)
1878 goto try_host_fw;
1879
1880 goto done;
1881 }
1882
1883try_host_fw:
1884 dn = pci_device_to_OF_node(card->pdev);
1885 if (!dn)
1886 goto out_err;
1887
1888 fw_prop = of_get_property(dn, "firmware", &fw_size);
1889 if (!fw_prop)
1890 goto out_err;
1891
1892	if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
1893		if (netif_msg_probe(card))
1894			dev_err(&card->netdev->dev,
1895				"Incorrect size of spidernet firmware in host firmware\n");
1896		goto done;
1897	}
1898
1899 err = spider_net_download_firmware(card, fw_prop);
1900
1901done:
1902 return err;
1903out_err:
1904 if (netif_msg_probe(card))
1905 dev_err(&card->netdev->dev,
1906 "Couldn't find spidernet firmware in filesystem " \
1907 "or host firmware\n");
1908 return err;
1909}
1910
1911/**
1912 * spider_net_open - called upon ifconfig up
1913 * @netdev: interface device structure
1914 *
1915 * returns 0 on success, <0 on failure
1916 *
1917 * spider_net_open allocates all the descriptors and memory needed for
1918 * operation, sets up multicast list and enables interrupts
1919 */
1920int
1921spider_net_open(struct net_device *netdev)
1922{
1923 struct spider_net_card *card = netdev_priv(netdev);
1924 int result;
1925
1926 result = spider_net_init_firmware(card);
1927 if (result)
1928 goto init_firmware_failed;
1929
1930 /* start probing with copper */
1931 card->aneg_count = 0;
1932 card->medium = BCM54XX_COPPER;
1933 spider_net_setup_aneg(card);
1934 if (card->phy.def->phy_id)
1935 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1936
1937 result = spider_net_init_chain(card, &card->tx_chain);
1938 if (result)
1939 goto alloc_tx_failed;
1940 card->low_watermark = NULL;
1941
1942 result = spider_net_init_chain(card, &card->rx_chain);
1943 if (result)
1944 goto alloc_rx_failed;
1945
1946 /* Allocate rx skbs */
1947 result = spider_net_alloc_rx_skbs(card);
1948 if (result)
1949 goto alloc_skbs_failed;
1950
1951 spider_net_set_multi(netdev);
1952
1953 /* further enhancement: setup hw vlan, if needed */
1954
1955 result = -EBUSY;
1956 if (request_irq(netdev->irq, spider_net_interrupt,
1957 IRQF_SHARED, netdev->name, netdev))
1958 goto register_int_failed;
1959
1960 spider_net_enable_card(card);
1961
1962 netif_start_queue(netdev);
1963 netif_carrier_on(netdev);
1964 napi_enable(&card->napi);
1965
1966 spider_net_enable_interrupts(card);
1967
1968 return 0;
1969
1970register_int_failed:
1971 spider_net_free_rx_chain_contents(card);
1972alloc_skbs_failed:
1973 spider_net_free_chain(card, &card->rx_chain);
1974alloc_rx_failed:
1975 spider_net_free_chain(card, &card->tx_chain);
1976alloc_tx_failed:
1977 del_timer_sync(&card->aneg_timer);
1978init_firmware_failed:
1979 return result;
1980}
1981
1982/**
1983 * spider_net_link_phy - poll the PHY link state during autonegotiation
1984 * @t: timer context used to obtain the pointer to net card data structure
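 *
 * Timer callback that polls the PHY while autonegotiation is in progress.
 * If the link does not come up within SPIDER_NET_ANEG_TIMEOUT attempts, the
 * PHY is cycled through copper, fiber with autonegotiation and fiber without
 * autonegotiation, and the sequence then starts over. Once the link is up,
 * the MAC is programmed for the negotiated speed.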
1985 */
1986static void spider_net_link_phy(struct timer_list *t)
1987{
1988 struct spider_net_card *card = from_timer(card, t, aneg_timer);
1989 struct mii_phy *phy = &card->phy;
1990
1991	/* if the link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, set up the PHY again */
1992 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
1993
1994 pr_debug("%s: link is down trying to bring it up\n",
1995 card->netdev->name);
1996
1997 switch (card->medium) {
1998 case BCM54XX_COPPER:
1999 /* enable fiber with autonegotiation first */
2000 if (phy->def->ops->enable_fiber)
2001 phy->def->ops->enable_fiber(phy, 1);
2002 card->medium = BCM54XX_FIBER;
2003 break;
2004
2005 case BCM54XX_FIBER:
2006 /* fiber didn't come up, try to disable fiber autoneg */
2007 if (phy->def->ops->enable_fiber)
2008 phy->def->ops->enable_fiber(phy, 0);
2009 card->medium = BCM54XX_UNKNOWN;
2010 break;
2011
2012 case BCM54XX_UNKNOWN:
2013			/* copper, and fiber with and without autoneg, all failed;
2014			 * retry from the beginning
2015 */
2016 spider_net_setup_aneg(card);
2017 card->medium = BCM54XX_COPPER;
2018 break;
2019 }
2020
2021 card->aneg_count = 0;
2022 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
2023 return;
2024 }
2025
2026 /* link still not up, try again later */
2027 if (!(phy->def->ops->poll_link(phy))) {
2028 card->aneg_count++;
2029 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
2030 return;
2031 }
2032
2033 /* link came up, get abilities */
2034 phy->def->ops->read_link(phy);
2035
2036 spider_net_write_reg(card, SPIDER_NET_GMACST,
2037 spider_net_read_reg(card, SPIDER_NET_GMACST));
2038 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
2039
2040 if (phy->speed == 1000)
2041 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
2042 else
2043 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
2044
2045 card->aneg_count = 0;
2046
2047 pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
2048 card->netdev->name, phy->speed,
2049 phy->duplex == 1 ? "Full" : "Half",
2050 phy->autoneg == 1 ? "" : "no ");
2051}
2052
2053/**
2054 * spider_net_setup_phy - setup PHY
2055 * @card: card structure
2056 *
2057 * returns 0 on success, <0 on failure
2058 *
2059 * spider_net_setup_phy is used as part of spider_net_probe.
2060 **/
2061static int
2062spider_net_setup_phy(struct spider_net_card *card)
2063{
2064 struct mii_phy *phy = &card->phy;
2065
2066 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
2067 SPIDER_NET_DMASEL_VALUE);
2068 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
2069 SPIDER_NET_PHY_CTRL_VALUE);
2070
2071 phy->dev = card->netdev;
2072 phy->mdio_read = spider_net_read_phy;
2073 phy->mdio_write = spider_net_write_phy;
2074
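	/* Scan MII addresses 1..31 for a PHY that answers with a plausible
	 * BMSR value and let the sungem_phy library identify it.
	 */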
2075 for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
2076 unsigned short id;
2077 id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
2078 if (id != 0x0000 && id != 0xffff) {
2079 if (!sungem_phy_probe(phy, phy->mii_id)) {
2080 pr_info("Found %s.\n", phy->def->name);
2081 break;
2082 }
2083 }
2084 }
2085
2086 return 0;
2087}
2088
2089/**
2090 * spider_net_workaround_rxramfull - work around firmware bug
2091 * @card: card structure
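 *
 * Takes the card out of reset, clears the program memory of all
 * sequencers, sets the sequencer operation register and puts the card
 * back into reset, working around the RX-RAM-full condition the
 * function is named after.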
2092 *
2093 * no return value
2094 **/
2095static void
2096spider_net_workaround_rxramfull(struct spider_net_card *card)
2097{
2098 int i, sequencer = 0;
2099
2100 /* cancel reset */
2101 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2102 SPIDER_NET_CKRCTRL_RUN_VALUE);
2103
2104 /* empty sequencer data */
2105 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
2106 sequencer++) {
2107 spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
2108 sequencer * 8, 0x0);
2109 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
2110 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
2111 sequencer * 8, 0x0);
2112 }
2113 }
2114
2115 /* set sequencer operation */
2116 spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
2117
2118 /* reset */
2119 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2120 SPIDER_NET_CKRCTRL_STOP_VALUE);
2121}
2122
2123/**
2124 * spider_net_stop - called upon ifconfig down
2125 * @netdev: interface device structure
2126 *
2127 * always returns 0
2128 */
2129int
2130spider_net_stop(struct net_device *netdev)
2131{
2132 struct spider_net_card *card = netdev_priv(netdev);
2133
2134 napi_disable(&card->napi);
2135 netif_carrier_off(netdev);
2136 netif_stop_queue(netdev);
2137 del_timer_sync(&card->tx_timer);
2138 del_timer_sync(&card->aneg_timer);
2139
2140 spider_net_disable_interrupts(card);
2141
2142 free_irq(netdev->irq, netdev);
2143
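	/* turn off TX DMA, force end */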
2144 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
2145 SPIDER_NET_DMA_TX_FEND_VALUE);
2146
2147 /* turn off DMA, force end */
2148 spider_net_disable_rxdmac(card);
2149
2150 /* release chains */
2151 spider_net_release_tx_chain(card, 1);
2152 spider_net_free_rx_chain_contents(card);
2153
2154 spider_net_free_chain(card, &card->tx_chain);
2155 spider_net_free_chain(card, &card->rx_chain);
2156
2157 return 0;
2158}
2159
2160/**
2161 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
2162 * function (not to be called in interrupt context)
2163 * @work: work context used to obtain the pointer to net card data structure
2164 *
2165 * called as a work item when tx hangs; resets the interface (if it is up)
2166 */
2167static void
2168spider_net_tx_timeout_task(struct work_struct *work)
2169{
2170 struct spider_net_card *card =
2171 container_of(work, struct spider_net_card, tx_timeout_task);
2172 struct net_device *netdev = card->netdev;
2173
2174 if (!(netdev->flags & IFF_UP))
2175 goto out;
2176
2177 netif_device_detach(netdev);
2178 spider_net_stop(netdev);
2179
2180 spider_net_workaround_rxramfull(card);
2181 spider_net_init_card(card);
2182
2183 if (spider_net_setup_phy(card))
2184 goto out;
2185
2186 spider_net_open(netdev);
2187 spider_net_kick_tx_dma(card);
2188 netif_device_attach(netdev);
2189
2190out:
2191 atomic_dec(&card->tx_timeout_task_counter);
2192}
2193
2194/**
2195 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
2196 * @netdev: interface device structure
2197 * @txqueue: unused
2198 *
2199 * called if tx hangs. Schedules a task that resets the interface
2200 */
2201static void
2202spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2203{
2204 struct spider_net_card *card;
2205
2206 card = netdev_priv(netdev);
2207 atomic_inc(&card->tx_timeout_task_counter);
2208 if (netdev->flags & IFF_UP)
2209 schedule_work(&card->tx_timeout_task);
2210 else
2211 atomic_dec(&card->tx_timeout_task_counter);
2212 card->spider_stats.tx_timeouts++;
2213}
2214
2215static const struct net_device_ops spider_net_ops = {
2216 .ndo_open = spider_net_open,
2217 .ndo_stop = spider_net_stop,
2218 .ndo_start_xmit = spider_net_xmit,
2219 .ndo_set_rx_mode = spider_net_set_multi,
2220 .ndo_set_mac_address = spider_net_set_mac,
2221 .ndo_eth_ioctl = spider_net_do_ioctl,
2222 .ndo_tx_timeout = spider_net_tx_timeout,
2223 .ndo_validate_addr = eth_validate_addr,
2224 /* HW VLAN */
2225#ifdef CONFIG_NET_POLL_CONTROLLER
2226 /* poll controller */
2227 .ndo_poll_controller = spider_net_poll_controller,
2228#endif /* CONFIG_NET_POLL_CONTROLLER */
2229};
2230
2231/**
2232 * spider_net_setup_netdev_ops - initialization of net_device operations
2233 * @netdev: net_device structure
2234 *
2235 * fills out function pointers in the net_device structure
2236 */
2237static void
2238spider_net_setup_netdev_ops(struct net_device *netdev)
2239{
2240 netdev->netdev_ops = &spider_net_ops;
2241 netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
2242 /* ethtool ops */
2243 netdev->ethtool_ops = &spider_net_ethtool_ops;
2244}
2245
2246/**
2247 * spider_net_setup_netdev - initialization of net_device
2248 * @card: card structure
2249 *
2250 * Returns 0 on success or <0 on failure
2251 *
2252 * spider_net_setup_netdev initializes the net_device structure
2253 **/
2254static int
2255spider_net_setup_netdev(struct spider_net_card *card)
2256{
2257 int result;
2258 struct net_device *netdev = card->netdev;
2259 struct device_node *dn;
2260 struct sockaddr addr;
2261 const u8 *mac;
2262
2263 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2264
2265 pci_set_drvdata(card->pdev, netdev);
2266
2267 timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
2268 netdev->irq = card->pdev->irq;
2269
2270 card->aneg_count = 0;
2271 timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
2272
2273 netif_napi_add(netdev, &card->napi, spider_net_poll);
2274
2275 spider_net_setup_netdev_ops(netdev);
2276
2277 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2278 if (SPIDER_NET_RX_CSUM_DEFAULT)
2279 netdev->features |= NETIF_F_RXCSUM;
2280 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
2281 /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2282 * NETIF_F_HW_VLAN_CTAG_FILTER
2283 */
2284
2285 /* MTU range: 64 - 2294 */
2286 netdev->min_mtu = SPIDER_NET_MIN_MTU;
2287 netdev->max_mtu = SPIDER_NET_MAX_MTU;
2288
2289 netdev->irq = card->pdev->irq;
2290 card->num_rx_ints = 0;
2291 card->ignore_rx_ramfull = 0;
2292
2293 dn = pci_device_to_OF_node(card->pdev);
2294 if (!dn)
2295 return -EIO;
2296
2297 mac = of_get_property(dn, "local-mac-address", NULL);
2298 if (!mac)
2299 return -EIO;
2300 memcpy(addr.sa_data, mac, ETH_ALEN);
2301
2302 result = spider_net_set_mac(netdev, &addr);
2303 if ((result) && (netif_msg_probe(card)))
2304 dev_err(&card->netdev->dev,
2305 "Failed to set MAC address: %i\n", result);
2306
2307 result = register_netdev(netdev);
2308 if (result) {
2309 if (netif_msg_probe(card))
2310 dev_err(&card->netdev->dev,
2311 "Couldn't register net_device: %i\n", result);
2312 return result;
2313 }
2314
2315 if (netif_msg_probe(card))
2316 pr_info("Initialized device %s.\n", netdev->name);
2317
2318 return 0;
2319}
2320
2321/**
2322 * spider_net_alloc_card - allocates net_device and card structure
2323 *
2324 * returns the card structure or NULL in case of errors
2325 *
2326 * the card and net_device structures are linked to each other
2327 */
2328static struct spider_net_card *
2329spider_net_alloc_card(void)
2330{
2331 struct net_device *netdev;
2332 struct spider_net_card *card;
2333
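
	/* The rx and tx software descriptor rings share one flexible array
	 * (card->darray) that is allocated together with the net_device:
	 * rx descriptors come first, tx descriptors follow directly after.
	 */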
2334 netdev = alloc_etherdev(struct_size(card, darray,
2335 size_add(tx_descriptors, rx_descriptors)));
2336 if (!netdev)
2337 return NULL;
2338
2339 card = netdev_priv(netdev);
2340 card->netdev = netdev;
2341 card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2342 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
2343 init_waitqueue_head(&card->waitq);
2344 atomic_set(&card->tx_timeout_task_counter, 0);
2345
2346 card->rx_chain.num_desc = rx_descriptors;
2347 card->rx_chain.ring = card->darray;
2348 card->tx_chain.num_desc = tx_descriptors;
2349 card->tx_chain.ring = card->darray + rx_descriptors;
2350
2351 return card;
2352}
2353
2354/**
2355 * spider_net_undo_pci_setup - releases PCI resources
2356 * @card: card structure
2357 *
2358 * spider_net_undo_pci_setup releases the mapped regions
2359 */
2360static void
2361spider_net_undo_pci_setup(struct spider_net_card *card)
2362{
2363 iounmap(card->regs);
2364 pci_release_regions(card->pdev);
2365}
2366
2367/**
2368 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2369 * @pdev: PCI device
2370 *
2371 * Returns the card structure or NULL if any errors occur
2372 *
2373 * spider_net_setup_pci_dev initializes pdev and, together with the
2374 * functions called in spider_net_open, configures the device so that
2375 * data can be transferred over it.
2376 * The net_device structure is attached to the card structure if the
2377 * function returns without error.
2378 **/
2379static struct spider_net_card *
2380spider_net_setup_pci_dev(struct pci_dev *pdev)
2381{
2382 struct spider_net_card *card;
2383 unsigned long mmio_start, mmio_len;
2384
2385 if (pci_enable_device(pdev)) {
2386 dev_err(&pdev->dev, "Couldn't enable PCI device\n");
2387 return NULL;
2388 }
2389
2390 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2391 dev_err(&pdev->dev,
2392 "Couldn't find proper PCI device base address.\n");
2393 goto out_disable_dev;
2394 }
2395
2396 if (pci_request_regions(pdev, spider_net_driver_name)) {
2397 dev_err(&pdev->dev,
2398 "Couldn't obtain PCI resources, aborting.\n");
2399 goto out_disable_dev;
2400 }
2401
2402 pci_set_master(pdev);
2403
2404 card = spider_net_alloc_card();
2405 if (!card) {
2406 dev_err(&pdev->dev,
2407 "Couldn't allocate net_device structure, aborting.\n");
2408 goto out_release_regions;
2409 }
2410 card->pdev = pdev;
2411
2412 /* fetch base address and length of first resource */
2413 mmio_start = pci_resource_start(pdev, 0);
2414 mmio_len = pci_resource_len(pdev, 0);
2415
2416 card->netdev->mem_start = mmio_start;
2417 card->netdev->mem_end = mmio_start + mmio_len;
2418 card->regs = ioremap(mmio_start, mmio_len);
2419
2420 if (!card->regs) {
2421 dev_err(&pdev->dev,
2422			"Couldn't ioremap PCI MMIO region, aborting.\n");
2423 goto out_release_regions;
2424 }
2425
2426 return card;
2427
2428out_release_regions:
2429 pci_release_regions(pdev);
2430out_disable_dev:
2431 pci_disable_device(pdev);
2432 return NULL;
2433}
2434
2435/**
2436 * spider_net_probe - initialization of a device
2437 * @pdev: PCI device
2438 * @ent: entry in the device id list
2439 *
2440 * Returns 0 on success, <0 on failure
2441 *
2442 * spider_net_probe initializes pdev and registers a net_device
2443 * structure for it. After that, the device can be ifconfig'ed up
2444 **/
2445static int
2446spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2447{
2448 int err = -EIO;
2449 struct spider_net_card *card;
2450
2451 card = spider_net_setup_pci_dev(pdev);
2452 if (!card)
2453 goto out;
2454
2455 spider_net_workaround_rxramfull(card);
2456 spider_net_init_card(card);
2457
2458 err = spider_net_setup_phy(card);
2459 if (err)
2460 goto out_undo_pci;
2461
2462 err = spider_net_setup_netdev(card);
2463 if (err)
2464 goto out_undo_pci;
2465
2466 return 0;
2467
2468out_undo_pci:
2469 spider_net_undo_pci_setup(card);
2470 free_netdev(card->netdev);
2471out:
2472 return err;
2473}
2474
2475/**
2476 * spider_net_remove - removal of a device
2477 * @pdev: PCI device
2478 *
2481 * spider_net_remove is called to remove the device and unregisters the
2482 * net_device
2483 **/
2484static void
2485spider_net_remove(struct pci_dev *pdev)
2486{
2487 struct net_device *netdev;
2488 struct spider_net_card *card;
2489
2490 netdev = pci_get_drvdata(pdev);
2491 card = netdev_priv(netdev);
2492
2493 wait_event(card->waitq,
2494 atomic_read(&card->tx_timeout_task_counter) == 0);
2495
2496 unregister_netdev(netdev);
2497
2498 /* switch off card */
2499 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2500 SPIDER_NET_CKRCTRL_STOP_VALUE);
2501 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2502 SPIDER_NET_CKRCTRL_RUN_VALUE);
2503
2504 spider_net_undo_pci_setup(card);
2505 free_netdev(netdev);
2506}
2507
2508static struct pci_driver spider_net_driver = {
2509 .name = spider_net_driver_name,
2510 .id_table = spider_net_pci_tbl,
2511 .probe = spider_net_probe,
2512 .remove = spider_net_remove
2513};
2514
2515/**
2516 * spider_net_init - init function when the driver is loaded
2517 *
2518 * spider_net_init registers the device driver
2519 */
2520static int __init spider_net_init(void)
2521{
2522 printk(KERN_INFO "Spidernet version %s.\n", VERSION);
2523
2524 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2525 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2526 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2527 }
2528 if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
2529 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
2530 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2531 }
2532 if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
2533 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
2534 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2535 }
2536 if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
2537 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
2538 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2539 }
2540
2541 return pci_register_driver(&spider_net_driver);
2542}
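
/*
 * Example module load (hypothetical values; assuming the driver is built as
 * a module named spider_net):
 *
 *   modprobe spider_net rx_descriptors=512 tx_descriptors=64
 *
 * Out-of-range values are clamped to the *_DESCRIPTORS_MIN/_MAX bounds
 * (with a pr_info message) by spider_net_init() above.
 */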
2543
2544/**
2545 * spider_net_cleanup - exit function when driver is unloaded
2546 *
2547 * spider_net_cleanup unregisters the device driver
2548 */
2549static void __exit spider_net_cleanup(void)
2550{
2551 pci_unregister_driver(&spider_net_driver);
2552}
2553
2554module_init(spider_net_init);
2555module_exit(spider_net_cleanup);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Network device driver for Cell Processor-Based Blade and Celleb platform
4 *
5 * (C) Copyright IBM Corp. 2005
6 * (C) Copyright 2006 TOSHIBA CORPORATION
7 *
8 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
9 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
10 */
11
12#include <linux/compiler.h>
13#include <linux/crc32.h>
14#include <linux/delay.h>
15#include <linux/etherdevice.h>
16#include <linux/ethtool.h>
17#include <linux/firmware.h>
18#include <linux/if_vlan.h>
19#include <linux/in.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/gfp.h>
23#include <linux/ioport.h>
24#include <linux/ip.h>
25#include <linux/kernel.h>
26#include <linux/mii.h>
27#include <linux/module.h>
28#include <linux/netdevice.h>
29#include <linux/device.h>
30#include <linux/pci.h>
31#include <linux/skbuff.h>
32#include <linux/tcp.h>
33#include <linux/types.h>
34#include <linux/vmalloc.h>
35#include <linux/wait.h>
36#include <linux/workqueue.h>
37#include <linux/bitops.h>
38#include <net/checksum.h>
39
40#include "spider_net.h"
41
42MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
43 "<Jens.Osterkamp@de.ibm.com>");
44MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
45MODULE_LICENSE("GPL");
46MODULE_VERSION(VERSION);
47MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
48
49static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
50static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
51
52module_param(rx_descriptors, int, 0444);
53module_param(tx_descriptors, int, 0444);
54
55MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
56 "in rx chains");
57MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
58 "in tx chain");
59
60char spider_net_driver_name[] = "spidernet";
61
62static const struct pci_device_id spider_net_pci_tbl[] = {
63 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
64 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
65 { 0, }
66};
67
68MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
69
70/**
71 * spider_net_read_reg - reads an SMMIO register of a card
72 * @card: device structure
73 * @reg: register to read from
74 *
75 * returns the content of the specified SMMIO register.
76 */
77static inline u32
78spider_net_read_reg(struct spider_net_card *card, u32 reg)
79{
80 /* We use the powerpc specific variants instead of readl_be() because
81 * we know spidernet is not a real PCI device and we can thus avoid the
82 * performance hit caused by the PCI workarounds.
83 */
84 return in_be32(card->regs + reg);
85}
86
87/**
88 * spider_net_write_reg - writes to an SMMIO register of a card
89 * @card: device structure
90 * @reg: register to write to
91 * @value: value to write into the specified SMMIO register
92 */
93static inline void
94spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
95{
96 /* We use the powerpc specific variants instead of writel_be() because
97 * we know spidernet is not a real PCI device and we can thus avoid the
98 * performance hit caused by the PCI workarounds.
99 */
100 out_be32(card->regs + reg, value);
101}
102
103/**
104 * spider_net_write_phy - write to phy register
105 * @netdev: adapter to be written to
106 * @mii_id: id of MII
107 * @reg: PHY register
108 * @val: value to be written to phy register
109 *
110 * spider_net_write_phy_register writes to an arbitrary PHY
111 * register via the spider GPCWOPCMD register. We assume the queue does
112 * not run full (not more than 15 commands outstanding).
113 **/
114static void
115spider_net_write_phy(struct net_device *netdev, int mii_id,
116 int reg, int val)
117{
118 struct spider_net_card *card = netdev_priv(netdev);
119 u32 writevalue;
120
121 writevalue = ((u32)mii_id << 21) |
122 ((u32)reg << 16) | ((u32)val);
123
124 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
125}
126
127/**
128 * spider_net_read_phy - read from phy register
129 * @netdev: network device to be read from
130 * @mii_id: id of MII
131 * @reg: PHY register
132 *
133 * Returns value read from PHY register
134 *
135 * spider_net_write_phy reads from an arbitrary PHY
136 * register via the spider GPCROPCMD register
137 **/
138static int
139spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
140{
141 struct spider_net_card *card = netdev_priv(netdev);
142 u32 readvalue;
143
144 readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
145 spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
146
147 /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
148 * interrupt, as we poll for the completion of the read operation
149 * in spider_net_read_phy. Should take about 50 us */
150 do {
151 readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
152 } while (readvalue & SPIDER_NET_GPREXEC);
153
154 readvalue &= SPIDER_NET_GPRDAT_MASK;
155
156 return readvalue;
157}
158
159/**
160 * spider_net_setup_aneg - initial auto-negotiation setup
161 * @card: device structure
162 **/
163static void
164spider_net_setup_aneg(struct spider_net_card *card)
165{
166 struct mii_phy *phy = &card->phy;
167 u32 advertise = 0;
168 u16 bmsr, estat;
169
170 bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
171 estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
172
173 if (bmsr & BMSR_10HALF)
174 advertise |= ADVERTISED_10baseT_Half;
175 if (bmsr & BMSR_10FULL)
176 advertise |= ADVERTISED_10baseT_Full;
177 if (bmsr & BMSR_100HALF)
178 advertise |= ADVERTISED_100baseT_Half;
179 if (bmsr & BMSR_100FULL)
180 advertise |= ADVERTISED_100baseT_Full;
181
182 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
183 advertise |= SUPPORTED_1000baseT_Full;
184 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
185 advertise |= SUPPORTED_1000baseT_Half;
186
187 sungem_phy_probe(phy, phy->mii_id);
188 phy->def->ops->setup_aneg(phy, advertise);
189
190}
191
192/**
193 * spider_net_rx_irq_off - switch off rx irq on this spider card
194 * @card: device structure
195 *
196 * switches off rx irq by masking them out in the GHIINTnMSK register
197 */
198static void
199spider_net_rx_irq_off(struct spider_net_card *card)
200{
201 u32 regvalue;
202
203 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
204 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
205}
206
207/**
208 * spider_net_rx_irq_on - switch on rx irq on this spider card
209 * @card: device structure
210 *
211 * switches on rx irq by enabling them in the GHIINTnMSK register
212 */
213static void
214spider_net_rx_irq_on(struct spider_net_card *card)
215{
216 u32 regvalue;
217
218 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
219 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
220}
221
222/**
223 * spider_net_set_promisc - sets the unicast address or the promiscuous mode
224 * @card: card structure
225 *
226 * spider_net_set_promisc sets the unicast destination address filter and
227 * thus either allows for non-promisc mode or promisc mode
228 */
229static void
230spider_net_set_promisc(struct spider_net_card *card)
231{
232 u32 macu, macl;
233 struct net_device *netdev = card->netdev;
234
235 if (netdev->flags & IFF_PROMISC) {
236 /* clear destination entry 0 */
237 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
238 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
239 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
240 SPIDER_NET_PROMISC_VALUE);
241 } else {
242 macu = netdev->dev_addr[0];
243 macu <<= 8;
244 macu |= netdev->dev_addr[1];
245 memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
246
247 macu |= SPIDER_NET_UA_DESCR_VALUE;
248 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
249 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
250 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
251 SPIDER_NET_NONPROMISC_VALUE);
252 }
253}
254
255/**
256 * spider_net_get_descr_status -- returns the status of a descriptor
257 * @descr: descriptor to look at
258 *
259 * returns the status as in the dmac_cmd_status field of the descriptor
260 */
261static inline int
262spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
263{
264 return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
265}
266
267/**
268 * spider_net_free_chain - free descriptor chain
269 * @card: card structure
270 * @chain: address of chain
271 *
272 */
273static void
274spider_net_free_chain(struct spider_net_card *card,
275 struct spider_net_descr_chain *chain)
276{
277 struct spider_net_descr *descr;
278
279 descr = chain->ring;
280 do {
281 descr->bus_addr = 0;
282 descr->hwdescr->next_descr_addr = 0;
283 descr = descr->next;
284 } while (descr != chain->ring);
285
286 dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
287 chain->hwring, chain->dma_addr);
288}
289
290/**
291 * spider_net_init_chain - alloc and link descriptor chain
292 * @card: card structure
293 * @chain: address of chain
294 *
295 * We manage a circular list that mirrors the hardware structure,
296 * except that the hardware uses bus addresses.
297 *
298 * Returns 0 on success, <0 on failure
299 */
300static int
301spider_net_init_chain(struct spider_net_card *card,
302 struct spider_net_descr_chain *chain)
303{
304 int i;
305 struct spider_net_descr *descr;
306 struct spider_net_hw_descr *hwdescr;
307 dma_addr_t buf;
308 size_t alloc_size;
309
310 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
311
312 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
313 &chain->dma_addr, GFP_KERNEL);
314 if (!chain->hwring)
315 return -ENOMEM;
316
317 /* Set up the hardware pointers in each descriptor */
318 descr = chain->ring;
319 hwdescr = chain->hwring;
320 buf = chain->dma_addr;
321 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
322 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
323 hwdescr->next_descr_addr = 0;
324
325 descr->hwdescr = hwdescr;
326 descr->bus_addr = buf;
327 descr->next = descr + 1;
328 descr->prev = descr - 1;
329
330 buf += sizeof(struct spider_net_hw_descr);
331 }
332 /* do actual circular list */
333 (descr-1)->next = chain->ring;
334 chain->ring->prev = descr-1;
335
336 spin_lock_init(&chain->lock);
337 chain->head = chain->ring;
338 chain->tail = chain->ring;
339 return 0;
340}
341
342/**
343 * spider_net_free_rx_chain_contents - frees descr contents in rx chain
344 * @card: card structure
345 *
346 * returns 0 on success, <0 on failure
347 */
348static void
349spider_net_free_rx_chain_contents(struct spider_net_card *card)
350{
351 struct spider_net_descr *descr;
352
353 descr = card->rx_chain.head;
354 do {
355 if (descr->skb) {
356 pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
357 SPIDER_NET_MAX_FRAME,
358 PCI_DMA_BIDIRECTIONAL);
359 dev_kfree_skb(descr->skb);
360 descr->skb = NULL;
361 }
362 descr = descr->next;
363 } while (descr != card->rx_chain.head);
364}
365
366/**
367 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
368 * @card: card structure
369 * @descr: descriptor to re-init
370 *
371 * Return 0 on success, <0 on failure.
372 *
373 * Allocates a new rx skb, iommu-maps it and attaches it to the
374 * descriptor. Mark the descriptor as activated, ready-to-use.
375 */
376static int
377spider_net_prepare_rx_descr(struct spider_net_card *card,
378 struct spider_net_descr *descr)
379{
380 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
381 dma_addr_t buf;
382 int offset;
383 int bufsize;
384
385 /* we need to round up the buffer size to a multiple of 128 */
386 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
387 (~(SPIDER_NET_RXBUF_ALIGN - 1));
388
389 /* and we need to have it 128 byte aligned, therefore we allocate a
390 * bit more */
391 /* allocate an skb */
392 descr->skb = netdev_alloc_skb(card->netdev,
393 bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
394 if (!descr->skb) {
395 if (netif_msg_rx_err(card) && net_ratelimit())
396 dev_err(&card->netdev->dev,
397 "Not enough memory to allocate rx buffer\n");
398 card->spider_stats.alloc_rx_skb_error++;
399 return -ENOMEM;
400 }
401 hwdescr->buf_size = bufsize;
402 hwdescr->result_size = 0;
403 hwdescr->valid_size = 0;
404 hwdescr->data_status = 0;
405 hwdescr->data_error = 0;
406
407 offset = ((unsigned long)descr->skb->data) &
408 (SPIDER_NET_RXBUF_ALIGN - 1);
409 if (offset)
410 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
411 /* iommu-map the skb */
412 buf = pci_map_single(card->pdev, descr->skb->data,
413 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
414 if (pci_dma_mapping_error(card->pdev, buf)) {
415 dev_kfree_skb_any(descr->skb);
416 descr->skb = NULL;
417 if (netif_msg_rx_err(card) && net_ratelimit())
418 dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
419 card->spider_stats.rx_iommu_map_error++;
420 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
421 } else {
422 hwdescr->buf_addr = buf;
423 wmb();
424 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
425 SPIDER_NET_DMAC_NOINTR_COMPLETE;
426 }
427
428 return 0;
429}
430
431/**
432 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
433 * @card: card structure
434 *
435 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
436 * chip by writing to the appropriate register. DMA is enabled in
437 * spider_net_enable_rxdmac.
438 */
439static inline void
440spider_net_enable_rxchtails(struct spider_net_card *card)
441{
442 /* assume chain is aligned correctly */
443 spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
444 card->rx_chain.tail->bus_addr);
445}
446
447/**
448 * spider_net_enable_rxdmac - enables a receive DMA controller
449 * @card: card structure
450 *
451 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
452 * in the GDADMACCNTR register
453 */
454static inline void
455spider_net_enable_rxdmac(struct spider_net_card *card)
456{
457 wmb();
458 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
459 SPIDER_NET_DMA_RX_VALUE);
460}
461
462/**
463 * spider_net_disable_rxdmac - disables the receive DMA controller
464 * @card: card structure
465 *
466 * spider_net_disable_rxdmac terminates processing on the DMA controller
467 * by turing off the DMA controller, with the force-end flag set.
468 */
469static inline void
470spider_net_disable_rxdmac(struct spider_net_card *card)
471{
472 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
473 SPIDER_NET_DMA_RX_FEND_VALUE);
474}
475
476/**
477 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
478 * @card: card structure
479 *
480 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
481 */
482static void
483spider_net_refill_rx_chain(struct spider_net_card *card)
484{
485 struct spider_net_descr_chain *chain = &card->rx_chain;
486 unsigned long flags;
487
488 /* one context doing the refill (and a second context seeing that
489 * and omitting it) is ok. If called by NAPI, we'll be called again
490 * as spider_net_decode_one_descr is called several times. If some
491 * interrupt calls us, the NAPI is about to clean up anyway. */
492 if (!spin_trylock_irqsave(&chain->lock, flags))
493 return;
494
495 while (spider_net_get_descr_status(chain->head->hwdescr) ==
496 SPIDER_NET_DESCR_NOT_IN_USE) {
497 if (spider_net_prepare_rx_descr(card, chain->head))
498 break;
499 chain->head = chain->head->next;
500 }
501
502 spin_unlock_irqrestore(&chain->lock, flags);
503}
504
505/**
506 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
507 * @card: card structure
508 *
509 * Returns 0 on success, <0 on failure.
510 */
511static int
512spider_net_alloc_rx_skbs(struct spider_net_card *card)
513{
514 struct spider_net_descr_chain *chain = &card->rx_chain;
515 struct spider_net_descr *start = chain->tail;
516 struct spider_net_descr *descr = start;
517
518 /* Link up the hardware chain pointers */
519 do {
520 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
521 descr = descr->next;
522 } while (descr != start);
523
524 /* Put at least one buffer into the chain. if this fails,
525 * we've got a problem. If not, spider_net_refill_rx_chain
526 * will do the rest at the end of this function. */
527 if (spider_net_prepare_rx_descr(card, chain->head))
528 goto error;
529 else
530 chain->head = chain->head->next;
531
532 /* This will allocate the rest of the rx buffers;
533 * if not, it's business as usual later on. */
534 spider_net_refill_rx_chain(card);
535 spider_net_enable_rxdmac(card);
536 return 0;
537
538error:
539 spider_net_free_rx_chain_contents(card);
540 return -ENOMEM;
541}
542
543/**
544 * spider_net_get_multicast_hash - generates hash for multicast filter table
545 * @addr: multicast address
546 *
547 * returns the hash value.
548 *
549 * spider_net_get_multicast_hash calculates a hash value for a given multicast
550 * address, that is used to set the multicast filter tables
551 */
552static u8
553spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
554{
555 u32 crc;
556 u8 hash;
557 char addr_for_crc[ETH_ALEN] = { 0, };
558 int i, bit;
559
560 for (i = 0; i < ETH_ALEN * 8; i++) {
561 bit = (addr[i / 8] >> (i % 8)) & 1;
562 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
563 }
564
565 crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
566
567 hash = (crc >> 27);
568 hash <<= 3;
569 hash |= crc & 7;
570 hash &= 0xff;
571
572 return hash;
573}
574
575/**
576 * spider_net_set_multi - sets multicast addresses and promisc flags
577 * @netdev: interface device structure
578 *
579 * spider_net_set_multi configures multicast addresses as needed for the
580 * netdev interface. It also sets up multicast, allmulti and promisc
581 * flags appropriately
582 */
583static void
584spider_net_set_multi(struct net_device *netdev)
585{
586 struct netdev_hw_addr *ha;
587 u8 hash;
588 int i;
589 u32 reg;
590 struct spider_net_card *card = netdev_priv(netdev);
591 DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
592
593 spider_net_set_promisc(card);
594
595 if (netdev->flags & IFF_ALLMULTI) {
596 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
597 set_bit(i, bitmask);
598 }
599 goto write_hash;
600 }
601
602 /* well, we know, what the broadcast hash value is: it's xfd
603 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
604 set_bit(0xfd, bitmask);
605
606 netdev_for_each_mc_addr(ha, netdev) {
607 hash = spider_net_get_multicast_hash(netdev, ha->addr);
608 set_bit(hash, bitmask);
609 }
610
611write_hash:
612 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
613 reg = 0;
614 if (test_bit(i * 4, bitmask))
615 reg += 0x08;
616 reg <<= 8;
617 if (test_bit(i * 4 + 1, bitmask))
618 reg += 0x08;
619 reg <<= 8;
620 if (test_bit(i * 4 + 2, bitmask))
621 reg += 0x08;
622 reg <<= 8;
623 if (test_bit(i * 4 + 3, bitmask))
624 reg += 0x08;
625
626 spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
627 }
628}
629
630/**
631 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
632 * @card: card structure
633 * @skb: packet to use
634 *
635 * returns 0 on success, <0 on failure.
636 *
637 * fills out the descriptor structure with skb data and len. Copies data,
638 * if needed (32bit DMA!)
639 */
640static int
641spider_net_prepare_tx_descr(struct spider_net_card *card,
642 struct sk_buff *skb)
643{
644 struct spider_net_descr_chain *chain = &card->tx_chain;
645 struct spider_net_descr *descr;
646 struct spider_net_hw_descr *hwdescr;
647 dma_addr_t buf;
648 unsigned long flags;
649
650 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
651 if (pci_dma_mapping_error(card->pdev, buf)) {
652 if (netif_msg_tx_err(card) && net_ratelimit())
653 dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
654 "Dropping packet\n", skb->data, skb->len);
655 card->spider_stats.tx_iommu_map_error++;
656 return -ENOMEM;
657 }
658
659 spin_lock_irqsave(&chain->lock, flags);
660 descr = card->tx_chain.head;
661 if (descr->next == chain->tail->prev) {
662 spin_unlock_irqrestore(&chain->lock, flags);
663 pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
664 return -ENOMEM;
665 }
666 hwdescr = descr->hwdescr;
667 chain->head = descr->next;
668
669 descr->skb = skb;
670 hwdescr->buf_addr = buf;
671 hwdescr->buf_size = skb->len;
672 hwdescr->next_descr_addr = 0;
673 hwdescr->data_status = 0;
674
675 hwdescr->dmac_cmd_status =
676 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
677 spin_unlock_irqrestore(&chain->lock, flags);
678
679 if (skb->ip_summed == CHECKSUM_PARTIAL)
680 switch (ip_hdr(skb)->protocol) {
681 case IPPROTO_TCP:
682 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
683 break;
684 case IPPROTO_UDP:
685 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
686 break;
687 }
688
689 /* Chain the bus address, so that the DMA engine finds this descr. */
690 wmb();
691 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
692
693 netif_trans_update(card->netdev); /* set netdev watchdog timer */
694 return 0;
695}
696
697static int
698spider_net_set_low_watermark(struct spider_net_card *card)
699{
700 struct spider_net_descr *descr = card->tx_chain.tail;
701 struct spider_net_hw_descr *hwdescr;
702 unsigned long flags;
703 int status;
704 int cnt=0;
705 int i;
706
707 /* Measure the length of the queue. Measurement does not
708 * need to be precise -- does not need a lock. */
709 while (descr != card->tx_chain.head) {
710 status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
711 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
712 break;
713 descr = descr->next;
714 cnt++;
715 }
716
717 /* If TX queue is short, don't even bother with interrupts */
718 if (cnt < card->tx_chain.num_desc/4)
719 return cnt;
720
721 /* Set low-watermark 3/4th's of the way into the queue. */
722 descr = card->tx_chain.tail;
723 cnt = (cnt*3)/4;
724 for (i=0;i<cnt; i++)
725 descr = descr->next;
726
727 /* Set the new watermark, clear the old watermark */
728 spin_lock_irqsave(&card->tx_chain.lock, flags);
729 descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
730 if (card->low_watermark && card->low_watermark != descr) {
731 hwdescr = card->low_watermark->hwdescr;
732 hwdescr->dmac_cmd_status =
733 hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
734 }
735 card->low_watermark = descr;
736 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
737 return cnt;
738}
739
740/**
741 * spider_net_release_tx_chain - processes sent tx descriptors
742 * @card: adapter structure
743 * @brutal: if set, don't care about whether descriptor seems to be in use
744 *
745 * returns 0 if the tx ring is empty, otherwise 1.
746 *
747 * spider_net_release_tx_chain releases the tx descriptors that spider has
748 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
749 * If some other context is calling this function, we return 1 so that we're
750 * scheduled again (if we were scheduled) and will not lose initiative.
751 */
752static int
753spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
754{
755 struct net_device *dev = card->netdev;
756 struct spider_net_descr_chain *chain = &card->tx_chain;
757 struct spider_net_descr *descr;
758 struct spider_net_hw_descr *hwdescr;
759 struct sk_buff *skb;
760 u32 buf_addr;
761 unsigned long flags;
762 int status;
763
764 while (1) {
765 spin_lock_irqsave(&chain->lock, flags);
766 if (chain->tail == chain->head) {
767 spin_unlock_irqrestore(&chain->lock, flags);
768 return 0;
769 }
770 descr = chain->tail;
771 hwdescr = descr->hwdescr;
772
773 status = spider_net_get_descr_status(hwdescr);
774 switch (status) {
775 case SPIDER_NET_DESCR_COMPLETE:
776 dev->stats.tx_packets++;
777 dev->stats.tx_bytes += descr->skb->len;
778 break;
779
780 case SPIDER_NET_DESCR_CARDOWNED:
781 if (!brutal) {
782 spin_unlock_irqrestore(&chain->lock, flags);
783 return 1;
784 }
785
786 /* fallthrough, if we release the descriptors
787 * brutally (then we don't care about
788 * SPIDER_NET_DESCR_CARDOWNED) */
789 fallthrough;
790
791 case SPIDER_NET_DESCR_RESPONSE_ERROR:
792 case SPIDER_NET_DESCR_PROTECTION_ERROR:
793 case SPIDER_NET_DESCR_FORCE_END:
794 if (netif_msg_tx_err(card))
795 dev_err(&card->netdev->dev, "forcing end of tx descriptor "
796 "with status x%02x\n", status);
797 dev->stats.tx_errors++;
798 break;
799
800 default:
801 dev->stats.tx_dropped++;
802 if (!brutal) {
803 spin_unlock_irqrestore(&chain->lock, flags);
804 return 1;
805 }
806 }
807
808 chain->tail = descr->next;
809 hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
810 skb = descr->skb;
811 descr->skb = NULL;
812 buf_addr = hwdescr->buf_addr;
813 spin_unlock_irqrestore(&chain->lock, flags);
814
815 /* unmap the skb */
816 if (skb) {
817 pci_unmap_single(card->pdev, buf_addr, skb->len,
818 PCI_DMA_TODEVICE);
819 dev_consume_skb_any(skb);
820 }
821 }
822 return 0;
823}
824
825/**
826 * spider_net_kick_tx_dma - enables TX DMA processing
827 * @card: card structure
828 *
829 * This routine will start the transmit DMA running if
830 * it is not already running. This routine ned only be
831 * called when queueing a new packet to an empty tx queue.
832 * Writes the current tx chain head as start address
833 * of the tx descriptor chain and enables the transmission
834 * DMA engine.
835 */
836static inline void
837spider_net_kick_tx_dma(struct spider_net_card *card)
838{
839 struct spider_net_descr *descr;
840
841 if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
842 SPIDER_NET_TX_DMA_EN)
843 goto out;
844
845 descr = card->tx_chain.tail;
846 for (;;) {
847 if (spider_net_get_descr_status(descr->hwdescr) ==
848 SPIDER_NET_DESCR_CARDOWNED) {
849 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
850 descr->bus_addr);
851 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
852 SPIDER_NET_DMA_TX_VALUE);
853 break;
854 }
855 if (descr == card->tx_chain.head)
856 break;
857 descr = descr->next;
858 }
859
860out:
861 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
862}
863
864/**
865 * spider_net_xmit - transmits a frame over the device
866 * @skb: packet to send out
867 * @netdev: interface device structure
868 *
869 * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
870 */
871static netdev_tx_t
872spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
873{
874 int cnt;
875 struct spider_net_card *card = netdev_priv(netdev);
876
877 spider_net_release_tx_chain(card, 0);
878
879 if (spider_net_prepare_tx_descr(card, skb) != 0) {
880 netdev->stats.tx_dropped++;
881 netif_stop_queue(netdev);
882 return NETDEV_TX_BUSY;
883 }
884
885 cnt = spider_net_set_low_watermark(card);
886 if (cnt < 5)
887 spider_net_kick_tx_dma(card);
888 return NETDEV_TX_OK;
889}
890
891/**
892 * spider_net_cleanup_tx_ring - cleans up the TX ring
893 * @card: card structure
894 *
895 * spider_net_cleanup_tx_ring is called by either the tx_timer
896 * or from the NAPI polling routine.
897 * This routine releases resources associted with transmitted
898 * packets, including updating the queue tail pointer.
899 */
900static void
901spider_net_cleanup_tx_ring(struct timer_list *t)
902{
903 struct spider_net_card *card = from_timer(card, t, tx_timer);
904 if ((spider_net_release_tx_chain(card, 0) != 0) &&
905 (card->netdev->flags & IFF_UP)) {
906 spider_net_kick_tx_dma(card);
907 netif_wake_queue(card->netdev);
908 }
909}
910
911/**
912 * spider_net_do_ioctl - called for device ioctls
913 * @netdev: interface device structure
914 * @ifr: request parameter structure for ioctl
915 * @cmd: command code for ioctl
916 *
917 * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
918 * -EOPNOTSUPP is returned, if an unknown ioctl was requested
919 */
920static int
921spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
922{
923 switch (cmd) {
924 default:
925 return -EOPNOTSUPP;
926 }
927}
928
929/**
930 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
931 * @descr: descriptor to process
932 * @card: card structure
933 *
934 * Fills out skb structure and passes the data to the stack.
935 * The descriptor state is not changed.
936 */
937static void
938spider_net_pass_skb_up(struct spider_net_descr *descr,
939 struct spider_net_card *card)
940{
941 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
942 struct sk_buff *skb = descr->skb;
943 struct net_device *netdev = card->netdev;
944 u32 data_status = hwdescr->data_status;
945 u32 data_error = hwdescr->data_error;
946
947 skb_put(skb, hwdescr->valid_size);
948
949 /* the card seems to add 2 bytes of junk in front
950 * of the ethernet frame */
951#define SPIDER_MISALIGN 2
952 skb_pull(skb, SPIDER_MISALIGN);
953 skb->protocol = eth_type_trans(skb, netdev);
954
955 /* checksum offload */
956 skb_checksum_none_assert(skb);
957 if (netdev->features & NETIF_F_RXCSUM) {
958 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
959 SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
960 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
961 skb->ip_summed = CHECKSUM_UNNECESSARY;
962 }
963
964 if (data_status & SPIDER_NET_VLAN_PACKET) {
965 /* further enhancements: HW-accel VLAN */
966 }
967
968 /* update netdevice statistics */
969 netdev->stats.rx_packets++;
970 netdev->stats.rx_bytes += skb->len;
971
972 /* pass skb up to stack */
973 netif_receive_skb(skb);
974}
975
976static void show_rx_chain(struct spider_net_card *card)
977{
978 struct spider_net_descr_chain *chain = &card->rx_chain;
979 struct spider_net_descr *start= chain->tail;
980 struct spider_net_descr *descr= start;
981 struct spider_net_hw_descr *hwd = start->hwdescr;
982 struct device *dev = &card->netdev->dev;
983 u32 curr_desc, next_desc;
984 int status;
985
986 int tot = 0;
987 int cnt = 0;
988 int off = start - chain->ring;
989 int cstat = hwd->dmac_cmd_status;
990
991 dev_info(dev, "Total number of descrs=%d\n",
992 chain->num_desc);
993 dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
994 off, cstat);
995
996 curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
997 next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
998
999 status = cstat;
1000 do
1001 {
1002 hwd = descr->hwdescr;
1003 off = descr - chain->ring;
1004 status = hwd->dmac_cmd_status;
1005
1006 if (descr == chain->head)
1007 dev_info(dev, "Chain head is at %d, head status=0x%x\n",
1008 off, status);
1009
1010 if (curr_desc == descr->bus_addr)
1011 dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
1012 off, status);
1013
1014 if (next_desc == descr->bus_addr)
1015 dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
1016 off, status);
1017
1018 if (hwd->next_descr_addr == 0)
1019 dev_info(dev, "chain is cut at %d\n", off);
1020
1021 if (cstat != status) {
1022 int from = (chain->num_desc + off - cnt) % chain->num_desc;
1023 int to = (chain->num_desc + off - 1) % chain->num_desc;
1024 dev_info(dev, "Have %d (from %d to %d) descrs "
1025 "with stat=0x%08x\n", cnt, from, to, cstat);
1026 cstat = status;
1027 cnt = 0;
1028 }
1029
1030 cnt ++;
1031 tot ++;
1032 descr = descr->next;
1033 } while (descr != start);
1034
1035 dev_info(dev, "Last %d descrs with stat=0x%08x "
1036 "for a total of %d descrs\n", cnt, cstat, tot);
1037
1038#ifdef DEBUG
1039 /* Now dump the whole ring */
1040 descr = start;
1041 do
1042 {
1043 struct spider_net_hw_descr *hwd = descr->hwdescr;
1044 status = spider_net_get_descr_status(hwd);
1045 cnt = descr - chain->ring;
1046 dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
1047 cnt, status, descr->skb);
1048 dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
1049 descr->bus_addr, hwd->buf_addr, hwd->buf_size);
1050 dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
1051 hwd->next_descr_addr, hwd->result_size,
1052 hwd->valid_size);
1053 dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
1054 hwd->dmac_cmd_status, hwd->data_status,
1055 hwd->data_error);
1056 dev_info(dev, "\n");
1057
1058 descr = descr->next;
1059 } while (descr != start);
1060#endif
1061
1062}
1063
1064/**
1065 * spider_net_resync_head_ptr - Advance head ptr past empty descrs
1066 *
1067 * If the driver fails to keep up and empty the queue, then the
1068 * hardware wil run out of room to put incoming packets. This
1069 * will cause the hardware to skip descrs that are full (instead
1070 * of halting/retrying). Thus, once the driver runs, it wil need
1071 * to "catch up" to where the hardware chain pointer is at.
1072 */
1073static void spider_net_resync_head_ptr(struct spider_net_card *card)
1074{
1075 unsigned long flags;
1076 struct spider_net_descr_chain *chain = &card->rx_chain;
1077 struct spider_net_descr *descr;
1078 int i, status;
1079
1080 /* Advance head pointer past any empty descrs */
1081 descr = chain->head;
1082 status = spider_net_get_descr_status(descr->hwdescr);
1083
1084 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
1085 return;
1086
1087 spin_lock_irqsave(&chain->lock, flags);
1088
1089 descr = chain->head;
1090 status = spider_net_get_descr_status(descr->hwdescr);
1091 for (i=0; i<chain->num_desc; i++) {
1092 if (status != SPIDER_NET_DESCR_CARDOWNED) break;
1093 descr = descr->next;
1094 status = spider_net_get_descr_status(descr->hwdescr);
1095 }
1096 chain->head = descr;
1097
1098 spin_unlock_irqrestore(&chain->lock, flags);
1099}
1100
1101static int spider_net_resync_tail_ptr(struct spider_net_card *card)
1102{
1103 struct spider_net_descr_chain *chain = &card->rx_chain;
1104 struct spider_net_descr *descr;
1105 int i, status;
1106
1107 /* Advance tail pointer past any empty and reaped descrs */
1108 descr = chain->tail;
1109 status = spider_net_get_descr_status(descr->hwdescr);
1110
1111 for (i=0; i<chain->num_desc; i++) {
1112 if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
1113 (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
1114 descr = descr->next;
1115 status = spider_net_get_descr_status(descr->hwdescr);
1116 }
1117 chain->tail = descr;
1118
1119 if ((i == chain->num_desc) || (i == 0))
1120 return 1;
1121 return 0;
1122}
1123
1124/**
1125 * spider_net_decode_one_descr - processes an RX descriptor
1126 * @card: card structure
1127 *
1128 * Returns 1 if a packet has been sent to the stack, otherwise 0.
1129 *
1130 * Processes an RX descriptor by iommu-unmapping the data buffer
1131 * and passing the packet up to the stack. This function is called
1132 * in softirq context, e.g. either bottom half from interrupt or
1133 * NAPI polling context.
1134 */
1135static int
1136spider_net_decode_one_descr(struct spider_net_card *card)
1137{
1138 struct net_device *dev = card->netdev;
1139 struct spider_net_descr_chain *chain = &card->rx_chain;
1140 struct spider_net_descr *descr = chain->tail;
1141 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
1142 u32 hw_buf_addr;
1143 int status;
1144
1145 status = spider_net_get_descr_status(hwdescr);
1146
1147 /* Nothing in the descriptor, or ring must be empty */
1148 if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
1149 (status == SPIDER_NET_DESCR_NOT_IN_USE))
1150 return 0;
1151
1152 /* descriptor definitively used -- move on tail */
1153 chain->tail = descr->next;
1154
1155 /* unmap descriptor */
1156 hw_buf_addr = hwdescr->buf_addr;
1157 hwdescr->buf_addr = 0xffffffff;
1158 pci_unmap_single(card->pdev, hw_buf_addr,
1159 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1160
1161 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1162 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1163 (status == SPIDER_NET_DESCR_FORCE_END) ) {
1164 if (netif_msg_rx_err(card))
1165 dev_err(&dev->dev,
1166 "dropping RX descriptor with state %d\n", status);
1167 dev->stats.rx_dropped++;
1168 goto bad_desc;
1169 }
1170
1171 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1172 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1173 if (netif_msg_rx_err(card))
1174 dev_err(&card->netdev->dev,
1175 "RX descriptor with unknown state %d\n", status);
1176 card->spider_stats.rx_desc_unk_state++;
1177 goto bad_desc;
1178 }
1179
1180 /* The cases we'll throw away the packet immediately */
1181 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1182 if (netif_msg_rx_err(card))
1183 dev_err(&card->netdev->dev,
1184 "error in received descriptor found, "
1185 "data_status=x%08x, data_error=x%08x\n",
1186 hwdescr->data_status, hwdescr->data_error);
1187 goto bad_desc;
1188 }
1189
1190 if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
1191 dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
1192 hwdescr->dmac_cmd_status);
1193 pr_err("buf_addr=x%08x\n", hw_buf_addr);
1194 pr_err("buf_size=x%08x\n", hwdescr->buf_size);
1195 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
1196 pr_err("result_size=x%08x\n", hwdescr->result_size);
1197 pr_err("valid_size=x%08x\n", hwdescr->valid_size);
1198 pr_err("data_status=x%08x\n", hwdescr->data_status);
1199 pr_err("data_error=x%08x\n", hwdescr->data_error);
1200 pr_err("which=%ld\n", descr - card->rx_chain.ring);
1201
1202 card->spider_stats.rx_desc_error++;
1203 goto bad_desc;
1204 }
1205
1206 /* Ok, we've got a packet in descr */
1207 spider_net_pass_skb_up(descr, card);
1208 descr->skb = NULL;
1209 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1210 return 1;
1211
1212bad_desc:
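	/* drop the frame: free any attached skb and hand the descriptor
	 * back to the chain */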
1213 if (netif_msg_rx_err(card))
1214 show_rx_chain(card);
1215 dev_kfree_skb_irq(descr->skb);
1216 descr->skb = NULL;
1217 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1218 return 0;
1219}
1220
1221/**
1222 * spider_net_poll - NAPI poll function called by the stack to return packets
1223 * @napi: napi structure embedded in the card structure
1224 * @budget: number of packets we can pass to the stack at most
1225 *
1226 * Returns the amount of budget consumed. If this is less than @budget,
1227 * NAPI polling is completed and the RX interrupt is re-enabled.
1228 *
1229 * spider_net_poll passes received packets from the RX descriptors up to
1230 * the stack (using netif_receive_skb).
1232 */
1233static int spider_net_poll(struct napi_struct *napi, int budget)
1234{
1235 struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
1236 int packets_done = 0;
1237
1238 while (packets_done < budget) {
1239 if (!spider_net_decode_one_descr(card))
1240 break;
1241
1242 packets_done++;
1243 }
1244
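	/* If an RX interrupt was seen but no packet was found, the driver's
	 * ring pointers may be out of sync with the hardware: resync them
	 * and, if descriptors are still pending, consume the whole budget
	 * so that NAPI keeps polling. */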
1245 if ((packets_done == 0) && (card->num_rx_ints != 0)) {
1246 if (!spider_net_resync_tail_ptr(card))
1247 packets_done = budget;
1248 spider_net_resync_head_ptr(card);
1249 }
1250 card->num_rx_ints = 0;
1251
1252 spider_net_refill_rx_chain(card);
1253 spider_net_enable_rxdmac(card);
1254
1255 spider_net_cleanup_tx_ring(&card->tx_timer);
1256
1257 /* if all packets are in the stack, enable interrupts and return 0 */
1258 /* if not, return 1 */
1259 if (packets_done < budget) {
1260 napi_complete_done(napi, packets_done);
1261 spider_net_rx_irq_on(card);
1262 card->ignore_rx_ramfull = 0;
1263 }
1264
1265 return packets_done;
1266}
1267
1268/**
1269 * spider_net_set_mac - sets the MAC of an interface
1270 * @netdev: interface device structure
1271 * @p: pointer to a sockaddr structure holding the new MAC address
1272 *
1273 * Returns 0 on success, -EADDRNOTAVAIL if the given address is not a
1274 * valid Ethernet address.
1275 */
1276static int
1277spider_net_set_mac(struct net_device *netdev, void *p)
1278{
1279 struct spider_net_card *card = netdev_priv(netdev);
1280 u32 macl, macu, regvalue;
1281 struct sockaddr *addr = p;
1282
1283 if (!is_valid_ether_addr(addr->sa_data))
1284 return -EADDRNOTAVAIL;
1285
1286 memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
1287
1288 /* switch off GMACTPE and GMACRPE */
1289 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1290 regvalue &= ~((1 << 5) | (1 << 6));
1291 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1292
1293 /* write mac */
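	/* dev_addr bytes 0-3 form the upper unicast MAC register,
	 * bytes 4-5 the lower one */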
1294 macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
1295 (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
1296 macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
1297 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
1298 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
1299
1300 /* switch GMACTPE and GMACRPE back on */
1301 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1302 regvalue |= ((1 << 5) | (1 << 6));
1303 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1304
1305 spider_net_set_promisc(card);
1306
1307 return 0;
1308}
1309
1310/**
1311 * spider_net_link_reset - restart autonegotiation after a PHY link interrupt
1312 * @netdev: net device structure
1313 *
1314 * This is called when the PHY_LINK signal is asserted. For the blade this is
1315 * not connected, so we should never get here.
1316 *
1317 */
1318static void
1319spider_net_link_reset(struct net_device *netdev)
1320{
1322	struct spider_net_card *card = netdev_priv(netdev);
1323
1324 del_timer_sync(&card->aneg_timer);
1325
1326 /* clear interrupt, block further interrupts */
1327 spider_net_write_reg(card, SPIDER_NET_GMACST,
1328 spider_net_read_reg(card, SPIDER_NET_GMACST));
1329 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1330
1331 /* reset phy and setup aneg */
1332 card->aneg_count = 0;
1333 card->medium = BCM54XX_COPPER;
1334 spider_net_setup_aneg(card);
1335 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1336
1337}
1338
1339/**
1340 * spider_net_handle_error_irq - handles errors raised by an interrupt
1341 * @card: card structure
1342 * @status_reg: interrupt status register 0 (GHIINT0STS)
 * @error_reg1: interrupt error register 1 (GHIINT1STS)
 * @error_reg2: interrupt error register 2 (GHIINT2STS)
1343 *
1344 * spider_net_handle_error_irq treats or ignores all error conditions
1345 * found when an interrupt is presented
1346 */
1347static void
1348spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1349 u32 error_reg1, u32 error_reg2)
1350{
1351 u32 i;
1352 int show_error = 1;
1353
1354 /* check GHIINT0STS ************************************/
1355 if (status_reg)
1356 for (i = 0; i < 32; i++)
1357 if (status_reg & (1<<i))
1358 switch (i)
1359 {
1360 /* let error_reg1 and error_reg2 evaluation decide, what to do
1361 case SPIDER_NET_PHYINT:
1362 case SPIDER_NET_GMAC2INT:
1363 case SPIDER_NET_GMAC1INT:
1364 case SPIDER_NET_GFIFOINT:
1365 case SPIDER_NET_DMACINT:
1366 case SPIDER_NET_GSYSINT:
1367 break; */
1368
1369 case SPIDER_NET_GIPSINT:
1370 show_error = 0;
1371 break;
1372
1373 case SPIDER_NET_GPWOPCMPINT:
1374 /* PHY write operation completed */
1375 show_error = 0;
1376 break;
1377 case SPIDER_NET_GPROPCMPINT:
1378 /* PHY read operation completed */
1379 /* we don't use semaphores, as we poll for the completion
1380 * of the read operation in spider_net_read_phy. Should take
1381 * about 50 us */
1382 show_error = 0;
1383 break;
1384 case SPIDER_NET_GPWFFINT:
1385 /* PHY command queue full */
1386 if (netif_msg_intr(card))
1387 dev_err(&card->netdev->dev, "PHY write queue full\n");
1388 show_error = 0;
1389 break;
1390
1391 /* case SPIDER_NET_GRMDADRINT: not used. print a message */
1392 /* case SPIDER_NET_GRMARPINT: not used. print a message */
1393 /* case SPIDER_NET_GRMMPINT: not used. print a message */
1394
1395 case SPIDER_NET_GDTDEN0INT:
1396 /* someone has set TX_DMA_EN to 0 */
1397 show_error = 0;
1398 break;
1399
1400 case SPIDER_NET_GDDDEN0INT:
1401 case SPIDER_NET_GDCDEN0INT:
1402 case SPIDER_NET_GDBDEN0INT:
1403 case SPIDER_NET_GDADEN0INT:
1404 /* someone has set RX_DMA_EN to 0 */
1405 show_error = 0;
1406 break;
1407
1408 /* RX interrupts */
1409 case SPIDER_NET_GDDFDCINT:
1410 case SPIDER_NET_GDCFDCINT:
1411 case SPIDER_NET_GDBFDCINT:
1412 case SPIDER_NET_GDAFDCINT:
1413 /* case SPIDER_NET_GDNMINT: not used. print a message */
1414 /* case SPIDER_NET_GCNMINT: not used. print a message */
1415 /* case SPIDER_NET_GBNMINT: not used. print a message */
1416 /* case SPIDER_NET_GANMINT: not used. print a message */
1417 /* case SPIDER_NET_GRFNMINT: not used. print a message */
1418 show_error = 0;
1419 break;
1420
1421 /* TX interrupts */
1422 case SPIDER_NET_GDTFDCINT:
1423 show_error = 0;
1424 break;
1425 case SPIDER_NET_GTTEDINT:
1426 show_error = 0;
1427 break;
1428 case SPIDER_NET_GDTDCEINT:
1429 /* chain end. If a descriptor should be sent, kick off
1430 * tx dma
1431 if (card->tx_chain.tail != card->tx_chain.head)
1432 spider_net_kick_tx_dma(card);
1433 */
1434 show_error = 0;
1435 break;
1436
1437 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
1438 /* case SPIDER_NET_GFREECNTINT: not used. print a message */
1439 }
1440
1441 /* check GHIINT1STS ************************************/
1442 if (error_reg1)
1443 for (i = 0; i < 32; i++)
1444 if (error_reg1 & (1<<i))
1445 switch (i)
1446 {
1447 case SPIDER_NET_GTMFLLINT:
1448				/* TX RAM full may happen during normal operation;
1449				 * no logging is needed. */
1450 show_error = 0;
1451 break;
1452 case SPIDER_NET_GRFDFLLINT:
1453 case SPIDER_NET_GRFCFLLINT:
1454 case SPIDER_NET_GRFBFLLINT:
1455 case SPIDER_NET_GRFAFLLINT:
1456 case SPIDER_NET_GRMFLLINT:
1457 /* Could happen when rx chain is full */
1458 if (card->ignore_rx_ramfull == 0) {
1459 card->ignore_rx_ramfull = 1;
1460 spider_net_resync_head_ptr(card);
1461 spider_net_refill_rx_chain(card);
1462 spider_net_enable_rxdmac(card);
1463				card->num_rx_ints++;
1464 napi_schedule(&card->napi);
1465 }
1466 show_error = 0;
1467 break;
1468
1469 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
1470 case SPIDER_NET_GDTINVDINT:
1471				/* all right, TX from the previous descriptor is OK */
1472 show_error = 0;
1473 break;
1474
1475 /* chain end */
1476 case SPIDER_NET_GDDDCEINT:
1477 case SPIDER_NET_GDCDCEINT:
1478 case SPIDER_NET_GDBDCEINT:
1479 case SPIDER_NET_GDADCEINT:
1480 spider_net_resync_head_ptr(card);
1481 spider_net_refill_rx_chain(card);
1482 spider_net_enable_rxdmac(card);
1483				card->num_rx_ints++;
1484 napi_schedule(&card->napi);
1485 show_error = 0;
1486 break;
1487
1488 /* invalid descriptor */
1489 case SPIDER_NET_GDDINVDINT:
1490 case SPIDER_NET_GDCINVDINT:
1491 case SPIDER_NET_GDBINVDINT:
1492 case SPIDER_NET_GDAINVDINT:
1493 /* Could happen when rx chain is full */
1494 spider_net_resync_head_ptr(card);
1495 spider_net_refill_rx_chain(card);
1496 spider_net_enable_rxdmac(card);
1497				card->num_rx_ints++;
1498 napi_schedule(&card->napi);
1499 show_error = 0;
1500 break;
1501
1502 /* case SPIDER_NET_GDTRSERINT: problem, print a message */
1503 /* case SPIDER_NET_GDDRSERINT: problem, print a message */
1504 /* case SPIDER_NET_GDCRSERINT: problem, print a message */
1505 /* case SPIDER_NET_GDBRSERINT: problem, print a message */
1506 /* case SPIDER_NET_GDARSERINT: problem, print a message */
1507 /* case SPIDER_NET_GDSERINT: problem, print a message */
1508 /* case SPIDER_NET_GDTPTERINT: problem, print a message */
1509 /* case SPIDER_NET_GDDPTERINT: problem, print a message */
1510 /* case SPIDER_NET_GDCPTERINT: problem, print a message */
1511 /* case SPIDER_NET_GDBPTERINT: problem, print a message */
1512 /* case SPIDER_NET_GDAPTERINT: problem, print a message */
1513 default:
1514 show_error = 1;
1515 break;
1516 }
1517
1518 /* check GHIINT2STS ************************************/
1519 if (error_reg2)
1520 for (i = 0; i < 32; i++)
1521 if (error_reg2 & (1<<i))
1522 switch (i)
1523 {
1524 /* there is nothing we can (want to) do at this time. Log a
1525 * message, we can switch on and off the specific values later on
1526 case SPIDER_NET_GPROPERINT:
1527 case SPIDER_NET_GMCTCRSNGINT:
1528 case SPIDER_NET_GMCTLCOLINT:
1529 case SPIDER_NET_GMCTTMOTINT:
1530 case SPIDER_NET_GMCRCAERINT:
1531 case SPIDER_NET_GMCRCALERINT:
1532 case SPIDER_NET_GMCRALNERINT:
1533 case SPIDER_NET_GMCROVRINT:
1534 case SPIDER_NET_GMCRRNTINT:
1535 case SPIDER_NET_GMCRRXERINT:
1536 case SPIDER_NET_GTITCSERINT:
1537 case SPIDER_NET_GTIFMTERINT:
1538 case SPIDER_NET_GTIPKTRVKINT:
1539 case SPIDER_NET_GTISPINGINT:
1540 case SPIDER_NET_GTISADNGINT:
1541 case SPIDER_NET_GTISPDNGINT:
1542 case SPIDER_NET_GRIFMTERINT:
1543 case SPIDER_NET_GRIPKTRVKINT:
1544 case SPIDER_NET_GRISPINGINT:
1545 case SPIDER_NET_GRISADNGINT:
1546 case SPIDER_NET_GRISPDNGINT:
1547 break;
1548 */
1549 default:
1550 break;
1551 }
1552
1553 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
1554 dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
1555 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1556 status_reg, error_reg1, error_reg2);
1557
1558 /* clear interrupt sources */
1559 spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
1560 spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
1561}
1562
1563/**
1564 * spider_net_interrupt - interrupt handler for spider_net
1565 * @irq: interrupt number
1566 * @ptr: pointer to net_device
1567 *
1568 * Returns IRQ_HANDLED if the interrupt was raised by the card, or
1569 * IRQ_NONE if no interrupt from the card was found.
1570 *
1571 * This is the interrupt handler that turns off interrupts for this
1572 * device and makes the stack poll the driver via NAPI.
1573 */
1574static irqreturn_t
1575spider_net_interrupt(int irq, void *ptr)
1576{
1577 struct net_device *netdev = ptr;
1578 struct spider_net_card *card = netdev_priv(netdev);
1579 u32 status_reg, error_reg1, error_reg2;
1580
1581 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
1582 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1583 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1584
1585 if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
1586 !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
1587 !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
1588 return IRQ_NONE;
1589
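	/* RX data has arrived: mask further RX interrupts and let the
	 * NAPI poll routine process the ring */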
1590	if (status_reg & SPIDER_NET_RXINT) {
1591		spider_net_rx_irq_off(card);
1592		napi_schedule(&card->napi);
1593		card->num_rx_ints++;
1594	}
1595 if (status_reg & SPIDER_NET_TXINT)
1596 napi_schedule(&card->napi);
1597
1598 if (status_reg & SPIDER_NET_LINKINT)
1599 spider_net_link_reset(netdev);
1600
1601	if (status_reg & SPIDER_NET_ERRINT)
1602 spider_net_handle_error_irq(card, status_reg,
1603 error_reg1, error_reg2);
1604
1605 /* clear interrupt sources */
1606 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
1607
1608 return IRQ_HANDLED;
1609}
1610
1611#ifdef CONFIG_NET_POLL_CONTROLLER
1612/**
1613 * spider_net_poll_controller - artificial interrupt for netconsole etc.
1614 * @netdev: interface device structure
1615 *
1616 * see Documentation/networking/netconsole.rst
1617 */
1618static void
1619spider_net_poll_controller(struct net_device *netdev)
1620{
1621 disable_irq(netdev->irq);
1622 spider_net_interrupt(netdev->irq, netdev);
1623 enable_irq(netdev->irq);
1624}
1625#endif /* CONFIG_NET_POLL_CONTROLLER */
1626
1627/**
1628 * spider_net_enable_interrupts - enable interrupts
1629 * @card: card structure
1630 *
1631 * spider_net_enable_interrupts enables several interrupts
1632 */
1633static void
1634spider_net_enable_interrupts(struct spider_net_card *card)
1635{
1636 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
1637 SPIDER_NET_INT0_MASK_VALUE);
1638 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
1639 SPIDER_NET_INT1_MASK_VALUE);
1640 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1641 SPIDER_NET_INT2_MASK_VALUE);
1642}
1643
1644/**
1645 * spider_net_disable_interrupts - disable interrupts
1646 * @card: card structure
1647 *
1648 * spider_net_disable_interrupts disables all the interrupts
1649 */
1650static void
1651spider_net_disable_interrupts(struct spider_net_card *card)
1652{
1653 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
1654 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
1655 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
1656 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1657}
1658
1659/**
1660 * spider_net_init_card - initializes the card
1661 * @card: card structure
1662 *
1663 * spider_net_init_card initializes the card so that other registers can
1664 * be used
1665 */
1666static void
1667spider_net_init_card(struct spider_net_card *card)
1668{
1669 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1670 SPIDER_NET_CKRCTRL_STOP_VALUE);
1671
1672 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1673 SPIDER_NET_CKRCTRL_RUN_VALUE);
1674
1675 /* trigger ETOMOD signal */
1676 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1677 spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
1678
1679 spider_net_disable_interrupts(card);
1680}
1681
1682/**
1683 * spider_net_enable_card - enables the card by setting all kinds of regs
1684 * @card: card structure
1685 *
1686 * spider_net_enable_card sets a lot of SMMIO registers to enable the device
1687 */
1688static void
1689spider_net_enable_card(struct spider_net_card *card)
1690{
1691 int i;
1692 /* the following array consists of (register),(value) pairs
1693 * that are set in this function. A register of 0 ends the list */
1694 u32 regs[][2] = {
1695 { SPIDER_NET_GRESUMINTNUM, 0 },
1696 { SPIDER_NET_GREINTNUM, 0 },
1697
1698 /* set interrupt frame number registers */
1699 /* clear the single DMA engine registers first */
1700 { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1701 { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1702 { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1703 { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1704		/* then set what we really need */
1705 { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
1706
1707 /* timer counter registers and stuff */
1708 { SPIDER_NET_GFREECNNUM, 0 },
1709 { SPIDER_NET_GONETIMENUM, 0 },
1710 { SPIDER_NET_GTOUTFRMNUM, 0 },
1711
1712 /* RX mode setting */
1713 { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
1714 /* TX mode setting */
1715 { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
1716 /* IPSEC mode setting */
1717 { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
1718
1719 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1720
1721 { SPIDER_NET_GMRWOLCTRL, 0 },
1722 { SPIDER_NET_GTESTMD, 0x10000000 },
1723 { SPIDER_NET_GTTQMSK, 0x00400040 },
1724
1725 { SPIDER_NET_GMACINTEN, 0 },
1726
1727 /* flow control stuff */
1728 { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
1729 { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
1730
1731 { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
1732 { 0, 0}
1733 };
1734
1735 i = 0;
1736 while (regs[i][0]) {
1737 spider_net_write_reg(card, regs[i][0], regs[i][1]);
1738 i++;
1739 }
1740
1741 /* clear unicast filter table entries 1 to 14 */
1742 for (i = 1; i <= 14; i++) {
1743 spider_net_write_reg(card,
1744 SPIDER_NET_GMRUAFILnR + i * 8,
1745 0x00080000);
1746 spider_net_write_reg(card,
1747 SPIDER_NET_GMRUAFILnR + i * 8 + 4,
1748 0x00000000);
1749 }
1750
1751 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
1752
1753 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
1754
1755 /* set chain tail address for RX chains and
1756 * enable DMA */
1757 spider_net_enable_rxchtails(card);
1758 spider_net_enable_rxdmac(card);
1759
1760 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1761
1762 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1763 SPIDER_NET_LENLMT_VALUE);
1764 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1765 SPIDER_NET_OPMODE_VALUE);
1766
1767 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1768 SPIDER_NET_GDTBSTA);
1769}
1770
1771/**
1772 * spider_net_download_firmware - loads firmware into the adapter
1773 * @card: card structure
1774 * @firmware_ptr: pointer to firmware data
1775 *
1776 * spider_net_download_firmware loads the firmware data into the
1777 * adapter. It assumes the length etc. to be correct.
1778 */
1779static int
1780spider_net_download_firmware(struct spider_net_card *card,
1781 const void *firmware_ptr)
1782{
1783 int sequencer, i;
1784 const u32 *fw_ptr = firmware_ptr;
1785
1786 /* stop sequencers */
1787 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1788 SPIDER_NET_STOP_SEQ_VALUE);
1789
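	/* for each sequencer, reset its program address to zero and stream
	 * in SPIDER_NET_FIRMWARE_SEQWORDS 32-bit program words */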
1790 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1791 sequencer++) {
1792 spider_net_write_reg(card,
1793 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1794 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1795 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1796 sequencer * 8, *fw_ptr);
1797 fw_ptr++;
1798 }
1799 }
1800
1801 if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1802 return -EIO;
1803
1804 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1805 SPIDER_NET_RUN_SEQ_VALUE);
1806
1807 return 0;
1808}
1809
1810/**
1811 * spider_net_init_firmware - reads in firmware parts
1812 * @card: card structure
1813 *
1814 * Returns 0 on success, <0 on failure
1815 *
1816 * spider_net_init_firmware requests the sequencer firmware and performs
1817 * a basic size check. The firmware is downloaded to the adapter and the
1818 * firmware structure is released again before returning.
1819 *
1820 * Firmware format
1821 * ===============
1822 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
1823 * the program for each sequencer. Use the command
1824 * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
1825 * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
1826 * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
1827 *
1828 * to generate spider_fw.bin, if you have sequencer programs with something
1829 * like the following contents for each sequencer:
1830 * <ONE LINE COMMENT>
1831 * <FIRST 4-BYTES-WORD FOR SEQUENCER>
1832 * <SECOND 4-BYTES-WORD FOR SEQUENCER>
1833 * ...
1834 * <1024th 4-BYTES-WORD FOR SEQUENCER>
1835 */
1836static int
1837spider_net_init_firmware(struct spider_net_card *card)
1838{
1839 struct firmware *firmware = NULL;
1840 struct device_node *dn;
1841 const u8 *fw_prop = NULL;
1842 int err = -ENOENT;
1843 int fw_size;
1844
1845 if (request_firmware((const struct firmware **)&firmware,
1846 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1847		if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
1848			if (netif_msg_probe(card))
1849				dev_err(&card->netdev->dev,
1850					"Incorrect size of spidernet firmware in " \
1851					"filesystem. Looking in host firmware...\n");
			release_firmware(firmware);
1852			goto try_host_fw;
1853		}
1854 err = spider_net_download_firmware(card, firmware->data);
1855
1856 release_firmware(firmware);
1857 if (err)
1858 goto try_host_fw;
1859
1860 goto done;
1861 }
1862
1863try_host_fw:
1864 dn = pci_device_to_OF_node(card->pdev);
1865 if (!dn)
1866 goto out_err;
1867
1868 fw_prop = of_get_property(dn, "firmware", &fw_size);
1869 if (!fw_prop)
1870 goto out_err;
1871
1872	if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
1873		if (netif_msg_probe(card))
1874			dev_err(&card->netdev->dev,
1875				"Incorrect size of spidernet firmware in host firmware\n");
1876		goto done;
1877	}
1878
1879 err = spider_net_download_firmware(card, fw_prop);
1880
1881done:
1882 return err;
1883out_err:
1884 if (netif_msg_probe(card))
1885 dev_err(&card->netdev->dev,
1886 "Couldn't find spidernet firmware in filesystem " \
1887 "or host firmware\n");
1888 return err;
1889}
1890
1891/**
1892 * spider_net_open - called upon ifconfig up
1893 * @netdev: interface device structure
1894 *
1895 * returns 0 on success, <0 on failure
1896 *
1897 * spider_net_open allocates all the descriptors and memory needed for
1898 * operation, sets up multicast list and enables interrupts
1899 */
1900int
1901spider_net_open(struct net_device *netdev)
1902{
1903 struct spider_net_card *card = netdev_priv(netdev);
1904 int result;
1905
1906 result = spider_net_init_firmware(card);
1907 if (result)
1908 goto init_firmware_failed;
1909
1910 /* start probing with copper */
1911 card->aneg_count = 0;
1912 card->medium = BCM54XX_COPPER;
1913 spider_net_setup_aneg(card);
1914 if (card->phy.def->phy_id)
1915 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1916
1917 result = spider_net_init_chain(card, &card->tx_chain);
1918 if (result)
1919 goto alloc_tx_failed;
1920 card->low_watermark = NULL;
1921
1922 result = spider_net_init_chain(card, &card->rx_chain);
1923 if (result)
1924 goto alloc_rx_failed;
1925
1926 /* Allocate rx skbs */
1927 result = spider_net_alloc_rx_skbs(card);
1928 if (result)
1929 goto alloc_skbs_failed;
1930
1931 spider_net_set_multi(netdev);
1932
1933 /* further enhancement: setup hw vlan, if needed */
1934
1935 result = -EBUSY;
1936 if (request_irq(netdev->irq, spider_net_interrupt,
1937 IRQF_SHARED, netdev->name, netdev))
1938 goto register_int_failed;
1939
1940 spider_net_enable_card(card);
1941
1942 netif_start_queue(netdev);
1943 netif_carrier_on(netdev);
1944 napi_enable(&card->napi);
1945
1946 spider_net_enable_interrupts(card);
1947
1948 return 0;
1949
1950register_int_failed:
1951 spider_net_free_rx_chain_contents(card);
1952alloc_skbs_failed:
1953 spider_net_free_chain(card, &card->rx_chain);
1954alloc_rx_failed:
1955 spider_net_free_chain(card, &card->tx_chain);
1956alloc_tx_failed:
1957 del_timer_sync(&card->aneg_timer);
1958init_firmware_failed:
1959 return result;
1960}
1961
1962/**
1963 * spider_net_link_phy - autonegotiation/link monitoring timer callback
1964 * @t: timer context used to obtain the card structure
1965 *
1966 */
1967static void spider_net_link_phy(struct timer_list *t)
1968{
1969 struct spider_net_card *card = from_timer(card, t, aneg_timer);
1970 struct mii_phy *phy = &card->phy;
1971
1972	/* if the link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, set up the PHY again */
1973 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
1974
1975 pr_debug("%s: link is down trying to bring it up\n",
1976 card->netdev->name);
1977
1978 switch (card->medium) {
1979 case BCM54XX_COPPER:
1980 /* enable fiber with autonegotiation first */
1981 if (phy->def->ops->enable_fiber)
1982 phy->def->ops->enable_fiber(phy, 1);
1983 card->medium = BCM54XX_FIBER;
1984 break;
1985
1986 case BCM54XX_FIBER:
1987 /* fiber didn't come up, try to disable fiber autoneg */
1988 if (phy->def->ops->enable_fiber)
1989 phy->def->ops->enable_fiber(phy, 0);
1990 card->medium = BCM54XX_UNKNOWN;
1991 break;
1992
1993 case BCM54XX_UNKNOWN:
1994			/* copper, and fiber with and without autonegotiation,
1995			 * have all failed; retry from the beginning */
1996 spider_net_setup_aneg(card);
1997 card->medium = BCM54XX_COPPER;
1998 break;
1999 }
2000
2001 card->aneg_count = 0;
2002 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
2003 return;
2004 }
2005
2006 /* link still not up, try again later */
2007 if (!(phy->def->ops->poll_link(phy))) {
2008 card->aneg_count++;
2009 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
2010 return;
2011 }
2012
2013 /* link came up, get abilities */
2014 phy->def->ops->read_link(phy);
2015
2016 spider_net_write_reg(card, SPIDER_NET_GMACST,
2017 spider_net_read_reg(card, SPIDER_NET_GMACST));
2018 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
2019
2020 if (phy->speed == 1000)
2021 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
2022 else
2023 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
2024
2025 card->aneg_count = 0;
2026
2027 pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
2028 card->netdev->name, phy->speed,
2029 phy->duplex == 1 ? "Full" : "Half",
2030 phy->autoneg == 1 ? "" : "no ");
2031}
2032
2033/**
2034 * spider_net_setup_phy - setup PHY
2035 * @card: card structure
2036 *
2037 * returns 0 on success, <0 on failure
2038 *
2039 * spider_net_setup_phy is used as part of spider_net_probe.
2040 **/
2041static int
2042spider_net_setup_phy(struct spider_net_card *card)
2043{
2044 struct mii_phy *phy = &card->phy;
2045
2046 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
2047 SPIDER_NET_DMASEL_VALUE);
2048 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
2049 SPIDER_NET_PHY_CTRL_VALUE);
2050
2051 phy->dev = card->netdev;
2052 phy->mdio_read = spider_net_read_phy;
2053 phy->mdio_write = spider_net_write_phy;
2054
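	/* scan MII addresses 1..31 for a responding PHY and attach the
	 * first one that sungem_phy_probe recognizes */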
2055 for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
2056 unsigned short id;
2057 id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
2058 if (id != 0x0000 && id != 0xffff) {
2059 if (!sungem_phy_probe(phy, phy->mii_id)) {
2060 pr_info("Found %s.\n", phy->def->name);
2061 break;
2062 }
2063 }
2064 }
2065
2066 return 0;
2067}
2068
2069/**
2070 * spider_net_workaround_rxramfull - work around firmware bug
2071 * @card: card structure
2072 *
2073 * no return value
2074 **/
2075static void
2076spider_net_workaround_rxramfull(struct spider_net_card *card)
2077{
2078 int i, sequencer = 0;
2079
2080 /* cancel reset */
2081 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2082 SPIDER_NET_CKRCTRL_RUN_VALUE);
2083
2084 /* empty sequencer data */
2085 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
2086 sequencer++) {
2087 spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
2088 sequencer * 8, 0x0);
2089 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
2090 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
2091 sequencer * 8, 0x0);
2092 }
2093 }
2094
2095 /* set sequencer operation */
2096 spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
2097
2098 /* reset */
2099 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2100 SPIDER_NET_CKRCTRL_STOP_VALUE);
2101}
2102
2103/**
2104 * spider_net_stop - called upon ifconfig down
2105 * @netdev: interface device structure
2106 *
2107 * always returns 0
2108 */
2109int
2110spider_net_stop(struct net_device *netdev)
2111{
2112 struct spider_net_card *card = netdev_priv(netdev);
2113
2114 napi_disable(&card->napi);
2115 netif_carrier_off(netdev);
2116 netif_stop_queue(netdev);
2117 del_timer_sync(&card->tx_timer);
2118 del_timer_sync(&card->aneg_timer);
2119
2120 spider_net_disable_interrupts(card);
2121
2122 free_irq(netdev->irq, netdev);
2123
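	/* force-end any TX DMA transfer that is still in flight */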
2124 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
2125 SPIDER_NET_DMA_TX_FEND_VALUE);
2126
2127 /* turn off DMA, force end */
2128 spider_net_disable_rxdmac(card);
2129
2130 /* release chains */
2131 spider_net_release_tx_chain(card, 1);
2132 spider_net_free_rx_chain_contents(card);
2133
2134 spider_net_free_chain(card, &card->tx_chain);
2135 spider_net_free_chain(card, &card->rx_chain);
2136
2137 return 0;
2138}
2139
2140/**
2141 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
2142 * function (must not be called in interrupt context)
2143 * @work: work structure embedded in the card structure
2144 *
2145 * Called as a work item when TX hangs; resets the interface (if it is up).
2146 */
2147static void
2148spider_net_tx_timeout_task(struct work_struct *work)
2149{
2150 struct spider_net_card *card =
2151 container_of(work, struct spider_net_card, tx_timeout_task);
2152 struct net_device *netdev = card->netdev;
2153
2154 if (!(netdev->flags & IFF_UP))
2155 goto out;
2156
2157 netif_device_detach(netdev);
2158 spider_net_stop(netdev);
2159
2160 spider_net_workaround_rxramfull(card);
2161 spider_net_init_card(card);
2162
2163 if (spider_net_setup_phy(card))
2164 goto out;
2165
2166 spider_net_open(netdev);
2167 spider_net_kick_tx_dma(card);
2168 netif_device_attach(netdev);
2169
2170out:
2171 atomic_dec(&card->tx_timeout_task_counter);
2172}
2173
2174/**
2175 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
2176 * @netdev: interface device structure
 * @txqueue: index of the hanging TX queue
2177 *
2178 * Called if TX hangs. Schedules a task that resets the interface.
2179 */
2180static void
2181spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2182{
2183 struct spider_net_card *card;
2184
2185 card = netdev_priv(netdev);
2186 atomic_inc(&card->tx_timeout_task_counter);
2187 if (netdev->flags & IFF_UP)
2188 schedule_work(&card->tx_timeout_task);
2189 else
2190 atomic_dec(&card->tx_timeout_task_counter);
2191 card->spider_stats.tx_timeouts++;
2192}
2193
2194static const struct net_device_ops spider_net_ops = {
2195 .ndo_open = spider_net_open,
2196 .ndo_stop = spider_net_stop,
2197 .ndo_start_xmit = spider_net_xmit,
2198 .ndo_set_rx_mode = spider_net_set_multi,
2199 .ndo_set_mac_address = spider_net_set_mac,
2200 .ndo_do_ioctl = spider_net_do_ioctl,
2201 .ndo_tx_timeout = spider_net_tx_timeout,
2202 .ndo_validate_addr = eth_validate_addr,
2203 /* HW VLAN */
2204#ifdef CONFIG_NET_POLL_CONTROLLER
2205 /* poll controller */
2206 .ndo_poll_controller = spider_net_poll_controller,
2207#endif /* CONFIG_NET_POLL_CONTROLLER */
2208};
2209
2210/**
2211 * spider_net_setup_netdev_ops - initialization of net_device operations
2212 * @netdev: net_device structure
2213 *
2214 * fills out function pointers in the net_device structure
2215 */
2216static void
2217spider_net_setup_netdev_ops(struct net_device *netdev)
2218{
2219 netdev->netdev_ops = &spider_net_ops;
2220 netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
2221 /* ethtool ops */
2222 netdev->ethtool_ops = &spider_net_ethtool_ops;
2223}
2224
2225/**
2226 * spider_net_setup_netdev - initialization of net_device
2227 * @card: card structure
2228 *
2229 * Returns 0 on success or <0 on failure
2230 *
2231 * spider_net_setup_netdev initializes the net_device structure
2232 **/
2233static int
2234spider_net_setup_netdev(struct spider_net_card *card)
2235{
2236 int result;
2237 struct net_device *netdev = card->netdev;
2238 struct device_node *dn;
2239 struct sockaddr addr;
2240 const u8 *mac;
2241
2242 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2243
2244 pci_set_drvdata(card->pdev, netdev);
2245
2246 timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
2247 netdev->irq = card->pdev->irq;
2248
2249 card->aneg_count = 0;
2250 timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
2251
2252 netif_napi_add(netdev, &card->napi,
2253 spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
2254
2255 spider_net_setup_netdev_ops(netdev);
2256
2257 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2258 if (SPIDER_NET_RX_CSUM_DEFAULT)
2259 netdev->features |= NETIF_F_RXCSUM;
2260 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
2261 /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2262 * NETIF_F_HW_VLAN_CTAG_FILTER */
2263
2264 /* MTU range: 64 - 2294 */
2265 netdev->min_mtu = SPIDER_NET_MIN_MTU;
2266 netdev->max_mtu = SPIDER_NET_MAX_MTU;
2267
2268 netdev->irq = card->pdev->irq;
2269 card->num_rx_ints = 0;
2270 card->ignore_rx_ramfull = 0;
2271
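	/* read the MAC address from the "local-mac-address" device tree
	 * property and program it into the card */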
2272 dn = pci_device_to_OF_node(card->pdev);
2273 if (!dn)
2274 return -EIO;
2275
2276 mac = of_get_property(dn, "local-mac-address", NULL);
2277 if (!mac)
2278 return -EIO;
2279 memcpy(addr.sa_data, mac, ETH_ALEN);
2280
2281 result = spider_net_set_mac(netdev, &addr);
2282 if ((result) && (netif_msg_probe(card)))
2283 dev_err(&card->netdev->dev,
2284 "Failed to set MAC address: %i\n", result);
2285
2286 result = register_netdev(netdev);
2287 if (result) {
2288 if (netif_msg_probe(card))
2289 dev_err(&card->netdev->dev,
2290 "Couldn't register net_device: %i\n", result);
2291 return result;
2292 }
2293
2294 if (netif_msg_probe(card))
2295 pr_info("Initialized device %s.\n", netdev->name);
2296
2297 return 0;
2298}
2299
2300/**
2301 * spider_net_alloc_card - allocates net_device and card structure
2302 *
2303 * returns the card structure or NULL in case of errors
2304 *
2305 * the card and net_device structures are linked to each other
2306 */
2307static struct spider_net_card *
2308spider_net_alloc_card(void)
2309{
2310 struct net_device *netdev;
2311 struct spider_net_card *card;
2312
2313 netdev = alloc_etherdev(struct_size(card, darray,
2314 tx_descriptors + rx_descriptors));
2315 if (!netdev)
2316 return NULL;
2317
2318 card = netdev_priv(netdev);
2319 card->netdev = netdev;
2320 card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2321 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
2322 init_waitqueue_head(&card->waitq);
2323 atomic_set(&card->tx_timeout_task_counter, 0);
2324
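	/* the descriptor array holds the RX ring first, immediately
	 * followed by the TX ring */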
2325 card->rx_chain.num_desc = rx_descriptors;
2326 card->rx_chain.ring = card->darray;
2327 card->tx_chain.num_desc = tx_descriptors;
2328 card->tx_chain.ring = card->darray + rx_descriptors;
2329
2330 return card;
2331}
2332
2333/**
2334 * spider_net_undo_pci_setup - releases PCI resources
2335 * @card: card structure
2336 *
2337 * spider_net_undo_pci_setup releases the mapped regions
2338 */
2339static void
2340spider_net_undo_pci_setup(struct spider_net_card *card)
2341{
2342 iounmap(card->regs);
2343 pci_release_regions(card->pdev);
2344}
2345
2346/**
2347 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2348 * @pdev: PCI device
2349 *
2350 * Returns the card structure or NULL if any errors occur
2351 *
2352 * spider_net_setup_pci_dev initializes pdev and together with the
2353 * functions called in spider_net_open configures the device so that
2354 * data can be transferred over it.
2355 * The net_device structure is attached to the card structure if the
2356 * function returns without error.
2357 **/
2358static struct spider_net_card *
2359spider_net_setup_pci_dev(struct pci_dev *pdev)
2360{
2361 struct spider_net_card *card;
2362 unsigned long mmio_start, mmio_len;
2363
2364 if (pci_enable_device(pdev)) {
2365 dev_err(&pdev->dev, "Couldn't enable PCI device\n");
2366 return NULL;
2367 }
2368
2369 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2370 dev_err(&pdev->dev,
2371 "Couldn't find proper PCI device base address.\n");
2372 goto out_disable_dev;
2373 }
2374
2375 if (pci_request_regions(pdev, spider_net_driver_name)) {
2376 dev_err(&pdev->dev,
2377 "Couldn't obtain PCI resources, aborting.\n");
2378 goto out_disable_dev;
2379 }
2380
2381 pci_set_master(pdev);
2382
2383 card = spider_net_alloc_card();
2384 if (!card) {
2385 dev_err(&pdev->dev,
2386 "Couldn't allocate net_device structure, aborting.\n");
2387 goto out_release_regions;
2388 }
2389 card->pdev = pdev;
2390
2391 /* fetch base address and length of first resource */
2392 mmio_start = pci_resource_start(pdev, 0);
2393 mmio_len = pci_resource_len(pdev, 0);
2394
2395 card->netdev->mem_start = mmio_start;
2396 card->netdev->mem_end = mmio_start + mmio_len;
2397 card->regs = ioremap(mmio_start, mmio_len);
2398
2399 if (!card->regs) {
2400 dev_err(&pdev->dev,
2401			"Couldn't ioremap the PCI registers, aborting.\n");
2402 goto out_release_regions;
2403 }
2404
2405 return card;
2406
2407out_release_regions:
2408 pci_release_regions(pdev);
2409out_disable_dev:
2410 pci_disable_device(pdev);
2411 return NULL;
2412}
2413
2414/**
2415 * spider_net_probe - initialization of a device
2416 * @pdev: PCI device
2417 * @ent: entry in the device id list
2418 *
2419 * Returns 0 on success, <0 on failure
2420 *
2421 * spider_net_probe initializes pdev and registers a net_device
2422 * structure for it. After that, the device can be ifconfig'ed up
2423 **/
2424static int
2425spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2426{
2427 int err = -EIO;
2428 struct spider_net_card *card;
2429
2430 card = spider_net_setup_pci_dev(pdev);
2431 if (!card)
2432 goto out;
2433
2434 spider_net_workaround_rxramfull(card);
2435 spider_net_init_card(card);
2436
2437 err = spider_net_setup_phy(card);
2438 if (err)
2439 goto out_undo_pci;
2440
2441 err = spider_net_setup_netdev(card);
2442 if (err)
2443 goto out_undo_pci;
2444
2445 return 0;
2446
2447out_undo_pci:
2448 spider_net_undo_pci_setup(card);
2449 free_netdev(card->netdev);
2450out:
2451 return err;
2452}
2453
2454/**
2455 * spider_net_remove - removal of a device
2456 * @pdev: PCI device
2457 *
2458 * spider_net_remove is called to remove the device; it unregisters the
2459 * net_device and switches the card off.
2462 **/
2463static void
2464spider_net_remove(struct pci_dev *pdev)
2465{
2466 struct net_device *netdev;
2467 struct spider_net_card *card;
2468
2469 netdev = pci_get_drvdata(pdev);
2470 card = netdev_priv(netdev);
2471
2472 wait_event(card->waitq,
2473 atomic_read(&card->tx_timeout_task_counter) == 0);
2474
2475 unregister_netdev(netdev);
2476
2477 /* switch off card */
2478 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2479 SPIDER_NET_CKRCTRL_STOP_VALUE);
2480 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2481 SPIDER_NET_CKRCTRL_RUN_VALUE);
2482
2483 spider_net_undo_pci_setup(card);
2484 free_netdev(netdev);
2485}
2486
2487static struct pci_driver spider_net_driver = {
2488 .name = spider_net_driver_name,
2489 .id_table = spider_net_pci_tbl,
2490 .probe = spider_net_probe,
2491 .remove = spider_net_remove
2492};
2493
2494/**
2495 * spider_net_init - init function when the driver is loaded
2496 *
2497 * spider_net_init registers the device driver
2498 */
2499static int __init spider_net_init(void)
2500{
2501 printk(KERN_INFO "Spidernet version %s.\n", VERSION);
2502
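	/* clamp the module parameters to the supported descriptor ranges */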
2503 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2504 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2505 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2506 }
2507 if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
2508 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
2509 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2510 }
2511 if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
2512 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
2513 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2514 }
2515 if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
2516 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
2517 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2518 }
2519
2520 return pci_register_driver(&spider_net_driver);
2521}
2522
2523/**
2524 * spider_net_cleanup - exit function when driver is unloaded
2525 *
2526 * spider_net_cleanup unregisters the device driver
2527 */
2528static void __exit spider_net_cleanup(void)
2529{
2530 pci_unregister_driver(&spider_net_driver);
2531}
2532
2533module_init(spider_net_init);
2534module_exit(spider_net_cleanup);