/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
 * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			ETH_ZLEN
#define B44_MAX_MTU			ETH_DATA_LEN

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
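
/*
 * Worked example of the ring accounting above, using the defaults in
 * this file: with B44_TX_RING_SIZE = 512 and tx_pending at its default
 * of B44_DEF_TX_RING_PENDING = 511, TX_RING_GAP() is 1, so at most 511
 * descriptors are ever outstanding and tx_prod can never wrap onto
 * tx_cons.  NEXT_TX() relies on the ring size being a power of two;
 * "& (B44_TX_RING_SIZE - 1)" implements the wrap-around.
 */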

#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42
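
/*
 * The two header lengths above are the offsets at which a magic
 * packet's payload begins: 42 = 14 (Ethernet) + 20 (IPv4) + 8 (UDP)
 * and 62 = 14 (Ethernet) + 40 (IPv6) + 8 (UDP).
 */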

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
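
/*
 * Example (bit values from the netif_msg enum in <linux/netdevice.h>):
 * to log only RX/TX errors, load the module with
 *
 *	modprobe b44 b44_debug=0xc0	(NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 */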

#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	{},
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}
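
/*
 * Note: dma_desc_sync_size is assigned at module init (outside this
 * excerpt); it is presumably sized to cover at least one struct
 * dma_desc, so each helper above syncs a single descriptor at
 * dma_base + offset.
 */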

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");

		return -ENODEV;
	}
	return 0;
}
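
/*
 * b44_wait_bit() polls in udelay(10) steps, so the "timeout" argument
 * is in units of 10us: the value 100 used by most callers below bounds
 * the wait at roughly one millisecond.
 */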

static inline void __b44_cam_write(struct b44 *bp,
				   const unsigned char *data, int index)
{
	u32 val;

	val = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
				(index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
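
/*
 * CAM entry layout as programmed above: B44_CAM_DATA_LO carries the
 * last four bytes of the MAC address (data[2] in bits 31:24 down to
 * data[5] in bits 7:0) and B44_CAM_DATA_HI the first two bytes plus
 * the valid bit, so one 48-bit address spans both registers.
 */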

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
				 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
				 (phy_addr << MDIO_DATA_PMD_SHIFT) |
				 (reg << MDIO_DATA_RA_SHIFT) |
				 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
				 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
				 (phy_addr << MDIO_DATA_PMD_SHIFT) |
				 (reg << MDIO_DATA_RA_SHIFT) |
				 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
				 (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}
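
/*
 * Careful: the b44_readphy()/b44_writephy() wrappers silently return
 * success when an external PHY is in use, and a skipped read leaves
 * *val untouched -- the internal-PHY paths below rely on this.
 */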

/* miilib interface */
static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
			       int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = bus->priv;
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
				 u16 val)
{
	struct b44 *bp = bus->priv;
	return __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u64 *val;

	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);

	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	u64_stats_update_end(&bp->hw_stats.syncp);
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
	} else {
		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}

static void b44_timer(struct timer_list *t)
{
	struct b44 *bp = from_timer(bp, t, timer);

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;
	unsigned bytes_compl = 0, pkts_compl = 0;

	cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;

		dev_consume_skb_irq(skb);
	}

	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
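
/*
 * Resulting RX buffer layout (RX_PKT_OFFSET = RX_HEADER_LEN + 2, the
 * "30 bytes" mentioned in the comment above):
 *
 *	skb->data ->	+--------------------+
 *			|  struct rx_header  |  <- written back by the chip
 *			+--------------------+
 *			|  2 bytes padding   |
 *			+--------------------+
 *			|  packet data       |  <- chip DMAs at RX_PKT_OFFSET
 *			+--------------------+
 */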

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = napi_alloc_skb(&bp->napi, len);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
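
/*
 * The len == 0 retry loop above copes with the chip raising the RX
 * interrupt before the rx_header length field has hit memory: b44_rx()
 * re-reads the length a few times with udelay(2) in between (roughly
 * ten microseconds total) before giving up and dropping the frame.
 */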

static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		b44_enable_ints(bp);
	}

	return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					 DMA_TO_DEVICE);

		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_consume_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	netdev_sent_queue(dev, skb->len);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}
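
/*
 * Each transmit consumes exactly one descriptor (no NETIF_F_SG yet):
 * SOF and EOF are both set and IOC requests a completion interrupt.
 * The doubled DMATX_PTR write and the trailing readback above are
 * hardware workarounds keyed off B44_FLAG_BUGGY_TXPTR and
 * B44_FLAG_REORDER_BUG respectively.
 */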

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to dma_alloc_coherent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to ssb_dma_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable PHY if we are doing a partial reset;
	 * we are probably going to power down
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags |= B44_FLAG_EXTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it.  This has to be done _after_ we shut down the PHY */
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	else
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	eth_hw_addr_set(dev, addr->sa_data);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too. */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

	netdev_reset_queue(bp->dev);
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	timer_setup(&bp->timer, b44_timer, 0);
	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);

	b44_enable_ints(bp);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		phy_start(dev->phydev);

	netif_start_queue(dev);
out:
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
			     int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++) {
		pmask[len >> 3] |= BIT(len & 7);
		len++;
	}

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				 (j * ETH_ALEN) + k] = macaddr[k];
			pmask[len >> 3] |= BIT(len & 7);
			len++;
		}
	}
	return len - 1;
}
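
/*
 * A magic packet is six 0xff sync bytes followed by sixteen copies of
 * the MAC address.  B44_PATTERN_SIZE is only 128 bytes, so once the
 * encapsulation offset is added just part of that sequence fits; e.g.
 * for the IPv4/UDP case, 42 + 6 sync bytes leaves 80 bytes, i.e. 13
 * whole MAC repetitions plus 2 bytes -- still plenty to match on.
 */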

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern)
		return;

	/* Ipv4 magic packet pattern - pattern 0. */
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {
		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
		      bp->dev->dev_addr[3] << 16 |
		      bp->dev->dev_addr[4] << 8 |
		      bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
		      bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		phy_stop(dev->phydev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static void b44_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *nstat)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&hwstat->syncp);

		/* Convert HW stats into rtnl_link_stats64 stats. */
		nstat->rx_packets = hwstat->rx_pkts;
		nstat->tx_packets = hwstat->tx_pkts;
		nstat->rx_bytes   = hwstat->rx_octets;
		nstat->tx_bytes   = hwstat->tx_octets;
		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
				     hwstat->tx_oversize_pkts +
				     hwstat->tx_underruns +
				     hwstat->tx_excessive_cols +
				     hwstat->tx_late_cols);
		nstat->multicast  = hwstat->rx_multicast_pkts;
		nstat->collisions = hwstat->tx_total_cols;

		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
					   hwstat->rx_undersize);
		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
		nstat->rx_frame_errors  = hwstat->rx_align_errs;
		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
					   hwstat->rx_oversize_pkts +
					   hwstat->rx_missed_pkts +
					   hwstat->rx_crc_align_errs +
					   hwstat->rx_undersize +
					   hwstat->rx_crc_errs +
					   hwstat->rx_align_errs +
					   hwstat->rx_symbol_errs);

		nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
		/* Carrier lost counter seems to be broken for some devices */
		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
	} while (u64_stats_fetch_retry(&hwstat->syncp, start));
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i, num_ents;

	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
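
/*
 * The receive CAM holds 64 entries: entry 0 is the station address
 * (written by __b44_set_mac_addr()), entries 1..N take the multicast
 * list, and the loop above zeroes the remainder so that stale filters
 * cannot match.
 */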
1744
1745static void b44_set_rx_mode(struct net_device *dev)
1746{
1747 struct b44 *bp = netdev_priv(dev);
1748
1749 spin_lock_irq(&bp->lock);
1750 __b44_set_rx_mode(dev);
1751 spin_unlock_irq(&bp->lock);
1752}
1753
1754static u32 b44_get_msglevel(struct net_device *dev)
1755{
1756 struct b44 *bp = netdev_priv(dev);
1757 return bp->msg_enable;
1758}
1759
1760static void b44_set_msglevel(struct net_device *dev, u32 value)
1761{
1762 struct b44 *bp = netdev_priv(dev);
1763 bp->msg_enable = value;
1764}
1765
1766static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1767{
1768 struct b44 *bp = netdev_priv(dev);
1769 struct ssb_bus *bus = bp->sdev->bus;
1770
1771 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1772 switch (bus->bustype) {
1773 case SSB_BUSTYPE_PCI:
1774 strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1775 break;
1776 case SSB_BUSTYPE_SSB:
1777 strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
1778 break;
1779 case SSB_BUSTYPE_PCMCIA:
1780 case SSB_BUSTYPE_SDIO:
1781 WARN_ON(1); /* A device with this bus does not exist. */
1782 break;
1783 }
1784}
1785
1786static int b44_nway_reset(struct net_device *dev)
1787{
1788 struct b44 *bp = netdev_priv(dev);
1789 u32 bmcr;
1790 int r;
1791
1792 spin_lock_irq(&bp->lock);
1793 b44_readphy(bp, MII_BMCR, &bmcr);
1794 b44_readphy(bp, MII_BMCR, &bmcr);
1795 r = -EINVAL;
1796 if (bmcr & BMCR_ANENABLE)
1797 r = b44_writephy(bp, MII_BMCR,
1798 bmcr | BMCR_ANRESTART);
1799 spin_unlock_irq(&bp->lock);
1800
1801 return r;
1802}
1803
1804static int b44_get_link_ksettings(struct net_device *dev,
1805 struct ethtool_link_ksettings *cmd)
1806{
1807 struct b44 *bp = netdev_priv(dev);
1808 u32 supported, advertising;
1809
1810 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1811 BUG_ON(!dev->phydev);
1812 phy_ethtool_ksettings_get(dev->phydev, cmd);
1813
1814 return 0;
1815 }
1816
1817 supported = (SUPPORTED_Autoneg);
1818 supported |= (SUPPORTED_100baseT_Half |
1819 SUPPORTED_100baseT_Full |
1820 SUPPORTED_10baseT_Half |
1821 SUPPORTED_10baseT_Full |
1822 SUPPORTED_MII);
1823
1824 advertising = 0;
1825 if (bp->flags & B44_FLAG_ADV_10HALF)
1826 advertising |= ADVERTISED_10baseT_Half;
1827 if (bp->flags & B44_FLAG_ADV_10FULL)
1828 advertising |= ADVERTISED_10baseT_Full;
1829 if (bp->flags & B44_FLAG_ADV_100HALF)
1830 advertising |= ADVERTISED_100baseT_Half;
1831 if (bp->flags & B44_FLAG_ADV_100FULL)
1832 advertising |= ADVERTISED_100baseT_Full;
1833 advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1834 cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1835 SPEED_100 : SPEED_10;
1836 cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1837 DUPLEX_FULL : DUPLEX_HALF;
1838 cmd->base.port = 0;
1839 cmd->base.phy_address = bp->phy_addr;
1840 cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1841 AUTONEG_DISABLE : AUTONEG_ENABLE;
1842 if (cmd->base.autoneg == AUTONEG_ENABLE)
1843 advertising |= ADVERTISED_Autoneg;
1844
1845 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1846 supported);
1847 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1848 advertising);
1849
1850 if (!netif_running(dev)){
1851 cmd->base.speed = 0;
1852 cmd->base.duplex = 0xff;
1853 }
1854
1855 return 0;
1856}
1857
1858static int b44_set_link_ksettings(struct net_device *dev,
1859 const struct ethtool_link_ksettings *cmd)
1860{
1861 struct b44 *bp = netdev_priv(dev);
1862 u32 speed;
1863 int ret;
1864 u32 advertising;
1865
1866 if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1867 BUG_ON(!dev->phydev);
1868 spin_lock_irq(&bp->lock);
1869 if (netif_running(dev))
1870 b44_setup_phy(bp);
1871
1872 ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
1873
1874 spin_unlock_irq(&bp->lock);
1875
1876 return ret;
1877 }
1878
1879 speed = cmd->base.speed;
1880
1881 ethtool_convert_link_mode_to_legacy_u32(&advertising,
1882 cmd->link_modes.advertising);
1883
1884 /* We do not support gigabit. */
1885 if (cmd->base.autoneg == AUTONEG_ENABLE) {
1886 if (advertising &
1887 (ADVERTISED_1000baseT_Half |
1888 ADVERTISED_1000baseT_Full))
1889 return -EINVAL;
1890 } else if ((speed != SPEED_100 &&
1891 speed != SPEED_10) ||
1892 (cmd->base.duplex != DUPLEX_HALF &&
1893 cmd->base.duplex != DUPLEX_FULL)) {
1894 return -EINVAL;
1895 }
1896
1897 spin_lock_irq(&bp->lock);
1898
1899 if (cmd->base.autoneg == AUTONEG_ENABLE) {
1900 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1901 B44_FLAG_100_BASE_T |
1902 B44_FLAG_FULL_DUPLEX |
1903 B44_FLAG_ADV_10HALF |
1904 B44_FLAG_ADV_10FULL |
1905 B44_FLAG_ADV_100HALF |
1906 B44_FLAG_ADV_100FULL);
1907 if (advertising == 0) {
1908 bp->flags |= (B44_FLAG_ADV_10HALF |
1909 B44_FLAG_ADV_10FULL |
1910 B44_FLAG_ADV_100HALF |
1911 B44_FLAG_ADV_100FULL);
1912 } else {
1913 if (advertising & ADVERTISED_10baseT_Half)
1914 bp->flags |= B44_FLAG_ADV_10HALF;
1915 if (advertising & ADVERTISED_10baseT_Full)
1916 bp->flags |= B44_FLAG_ADV_10FULL;
1917 if (advertising & ADVERTISED_100baseT_Half)
1918 bp->flags |= B44_FLAG_ADV_100HALF;
1919 if (advertising & ADVERTISED_100baseT_Full)
1920 bp->flags |= B44_FLAG_ADV_100FULL;
1921 }
1922 } else {
1923 bp->flags |= B44_FLAG_FORCE_LINK;
1924 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1925 if (speed == SPEED_100)
1926 bp->flags |= B44_FLAG_100_BASE_T;
1927 if (cmd->base.duplex == DUPLEX_FULL)
1928 bp->flags |= B44_FLAG_FULL_DUPLEX;
1929 }
1930
1931 if (netif_running(dev))
1932 b44_setup_phy(bp);
1933
1934 spin_unlock_irq(&bp->lock);
1935
1936 return 0;
1937}
1938
1939static void b44_get_ringparam(struct net_device *dev,
1940 struct ethtool_ringparam *ering,
1941 struct kernel_ethtool_ringparam *kernel_ering,
1942 struct netlink_ext_ack *extack)
1943{
1944 struct b44 *bp = netdev_priv(dev);
1945
1946 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1947 ering->rx_pending = bp->rx_pending;
1948
1949 /* XXX ethtool lacks a tx_max_pending, oops... */
1950}
1951
1952static int b44_set_ringparam(struct net_device *dev,
1953 struct ethtool_ringparam *ering,
1954 struct kernel_ethtool_ringparam *kernel_ering,
1955 struct netlink_ext_ack *extack)
1956{
1957 struct b44 *bp = netdev_priv(dev);
1958
1959 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1960 (ering->rx_mini_pending != 0) ||
1961 (ering->rx_jumbo_pending != 0) ||
1962 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1963 return -EINVAL;
1964
1965 spin_lock_irq(&bp->lock);
1966
1967 bp->rx_pending = ering->rx_pending;
1968 bp->tx_pending = ering->tx_pending;
1969
1970 b44_halt(bp);
1971 b44_init_rings(bp);
1972 b44_init_hw(bp, B44_FULL_RESET);
1973 netif_wake_queue(bp->dev);
1974 spin_unlock_irq(&bp->lock);
1975
1976 b44_enable_ints(bp);
1977
1978 return 0;
1979}
1980
1981static void b44_get_pauseparam(struct net_device *dev,
1982 struct ethtool_pauseparam *epause)
1983{
1984 struct b44 *bp = netdev_priv(dev);
1985
1986 epause->autoneg =
1987 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1988 epause->rx_pause =
1989 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1990 epause->tx_pause =
1991 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1992}
1993
1994static int b44_set_pauseparam(struct net_device *dev,
1995 struct ethtool_pauseparam *epause)
1996{
1997 struct b44 *bp = netdev_priv(dev);
1998
1999 spin_lock_irq(&bp->lock);
2000 if (epause->autoneg)
2001 bp->flags |= B44_FLAG_PAUSE_AUTO;
2002 else
2003 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2004 if (epause->rx_pause)
2005 bp->flags |= B44_FLAG_RX_PAUSE;
2006 else
2007 bp->flags &= ~B44_FLAG_RX_PAUSE;
2008 if (epause->tx_pause)
2009 bp->flags |= B44_FLAG_TX_PAUSE;
2010 else
2011 bp->flags &= ~B44_FLAG_TX_PAUSE;
2012 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2013 b44_halt(bp);
2014 b44_init_rings(bp);
2015 b44_init_hw(bp, B44_FULL_RESET);
2016 } else {
2017 __b44_set_flow_ctrl(bp, bp->flags);
2018 }
2019 spin_unlock_irq(&bp->lock);
2020
2021 b44_enable_ints(bp);
2022
2023 return 0;
2024}
2025
2026static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2027{
2028 switch(stringset) {
2029 case ETH_SS_STATS:
2030 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
2031 break;
2032 }
2033}
2034
2035static int b44_get_sset_count(struct net_device *dev, int sset)
2036{
2037 switch (sset) {
2038 case ETH_SS_STATS:
2039 return ARRAY_SIZE(b44_gstrings);
2040 default:
2041 return -EOPNOTSUPP;
2042 }
2043}
2044
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	u32 i;

	spin_lock_irq(&bp->lock);
	b44_stats_update(bp);
	spin_unlock_irq(&bp->lock);

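	/* Copy out a consistent snapshot: retry the read loop until a
	 * full pass completes without b44_stats_update() writing the
	 * counters in between.
	 */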
	do {
		data_src = &hwstat->tx_good_octets;
		data_dst = data;
		start = u64_stats_fetch_begin(&hwstat->syncp);

		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
			*data_dst++ = *data_src++;

	} while (u64_stats_fetch_retry(&hwstat->syncp, start));
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_link_ksettings	= b44_get_link_ksettings,
	.set_link_ksettings	= b44_set_link_ksettings,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
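	/* An external PHY is driven through phylib; the internal PHY
	 * still goes through the legacy mii_if_info helpers.
	 */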
	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		err = phy_mii_ioctl(dev->phydev, ifr, cmd);
	} else {
		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
	}
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

static int b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

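	/* Core instances beyond the first use the second set of
	 * MAC/PHY parameters (et1*) from the SPROM.
	 */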
	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	eth_hw_addr_set(bp->dev, addr);

	if (!is_valid_ether_addr(bp->dev->dev_addr)) {
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats64	= b44_get_stats64,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

static void b44_adjust_link(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool status_changed = false;

	BUG_ON(!phydev);

	if (bp->old_link != phydev->link) {
		status_changed = true;
		bp->old_link = phydev->link;
	}

	/* reflect duplex change */
	if (phydev->link) {
		if ((phydev->duplex == DUPLEX_HALF) &&
		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
		} else if ((phydev->duplex == DUPLEX_FULL) &&
			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		}
	}

	if (status_changed) {
		u32 val = br32(bp, B44_TX_CTRL);
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			val |= TX_CTRL_DUPLEX;
		else
			val &= ~TX_CTRL_DUPLEX;
		bw32(bp, B44_TX_CTRL, val);
		phy_print_status(phydev);
	}
}

static int b44_register_phy_one(struct b44 *bp)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct mii_bus *mii_bus;
	struct ssb_device *sdev = bp->sdev;
	struct phy_device *phydev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	struct ssb_sprom *sprom = &sdev->bus->sprom;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus) {
		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	mii_bus->priv = bp;
	mii_bus->read = b44_mdio_read_phylib;
	mii_bus->write = b44_mdio_write_phylib;
	mii_bus->name = "b44_eth_mii";
	mii_bus->parent = sdev->dev;
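	/* Probe only the single PHY address read from the SPROM. */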
	mii_bus->phy_mask = ~(1 << bp->phy_addr);
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);

	bp->mii_bus = mii_bus;

	err = mdiobus_register(mii_bus);
	if (err) {
		dev_err(sdev->dev, "failed to register MII bus\n");
		goto err_out_mdiobus;
	}

	if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {

		dev_info(sdev->dev,
			 "could not find PHY at %i, using fixed PHY instead\n",
			 bp->phy_addr);

		bp->phy_addr = 0;
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
			 bp->phy_addr);
	} else {
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
			 bp->phy_addr);
	}

	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		dev_err(sdev->dev, "could not attach PHY at %i\n",
			bp->phy_addr);
		err = PTR_ERR(phydev);
		goto err_out_mdiobus_unregister;
	}

	/* mask with MAC supported features */
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	bp->old_link = 0;
	bp->phy_addr = phydev->mdio.addr;

	phy_attached_info(phydev);

	return 0;

err_out_mdiobus_unregister:
	mdiobus_unregister(mii_bus);

err_out_mdiobus:
	mdiobus_free(mii_bus);

err_out:
	return err;
}

static void b44_unregister_phy_one(struct b44 *bp)
{
	struct net_device *dev = bp->dev;
	struct mii_bus *mii_bus = bp->mii_bus;

	phy_disconnect(dev->phydev);
	mdiobus_unregister(mii_bus);
	mdiobus_free(mii_bus);
}

static int b44_init_one(struct ssb_device *sdev,
			const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);
	u64_stats_init(&bp->hw_stats.syncp);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->min_mtu = B44_MIN_MTU;
	dev->max_mtu = B44_MAX_MTU;
	dev->irq = sdev->irq;
	dev->ethtool_ops = &b44_ethtool_ops;

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

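	/* The 4401 DMA engine can only reach the low 1 GB (30 address
	 * bits); RX/TX buffers that land above that limit are bounced
	 * elsewhere in the driver.
	 */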
	err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30-bit DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Failed to fetch chip invariants, aborting\n");
		goto err_out_powerdown;
	}

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
		err = -ENODEV;
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mdio_read_mii;
	bp->mii_if.mdio_write = b44_mdio_write_mii;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	err = b44_phy_reset(bp);
	if (err < 0) {
		dev_err(sdev->dev, "phy reset failed\n");
		goto err_out_unregister_netdev;
	}

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		err = b44_register_phy_one(bp);
		if (err) {
			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
			goto err_out_unregister_netdev;
		}
	}

	device_set_wakeup_capable(sdev->dev, true);
	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	netif_napi_del(&bp->napi);
	free_netdev(dev);

out:
	return err;
}

static void b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_unregister_phy_one(bp);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	netif_napi_del(&bp->napi);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
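	/* With WOL enabled, bring the MAC back up just far enough
	 * (a partial reset) for the wake-up pattern matcher to run
	 * while the device sleeps.
	 */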
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Set up parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
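	/* Descriptors on the "ring hack" paths are synced one at a time,
	 * so the sync size is rounded up to at least a full cacheline to
	 * keep a partial-line flush from touching the neighbouring
	 * descriptor.
	 */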

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);
1/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
6 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
7 * Copyright (C) 2006 Broadcom Corporation.
8 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
9 *
10 * Distribute under GPL.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/types.h>
19#include <linux/netdevice.h>
20#include <linux/ethtool.h>
21#include <linux/mii.h>
22#include <linux/if_ether.h>
23#include <linux/if_vlan.h>
24#include <linux/etherdevice.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/dma-mapping.h>
30#include <linux/ssb/ssb.h>
31#include <linux/slab.h>
32
33#include <asm/uaccess.h>
34#include <asm/io.h>
35#include <asm/irq.h>
36
37
38#include "b44.h"
39
40#define DRV_MODULE_NAME "b44"
41#define DRV_MODULE_VERSION "2.0"
42#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
43
44#define B44_DEF_MSG_ENABLE \
45 (NETIF_MSG_DRV | \
46 NETIF_MSG_PROBE | \
47 NETIF_MSG_LINK | \
48 NETIF_MSG_TIMER | \
49 NETIF_MSG_IFDOWN | \
50 NETIF_MSG_IFUP | \
51 NETIF_MSG_RX_ERR | \
52 NETIF_MSG_TX_ERR)
53
54/* length of time before we decide the hardware is borked,
55 * and dev->tx_timeout() should be called to fix the problem
56 */
57#define B44_TX_TIMEOUT (5 * HZ)
58
59/* hardware minimum and maximum for a single frame's data payload */
60#define B44_MIN_MTU 60
61#define B44_MAX_MTU 1500
62
63#define B44_RX_RING_SIZE 512
64#define B44_DEF_RX_RING_PENDING 200
65#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
66 B44_RX_RING_SIZE)
67#define B44_TX_RING_SIZE 512
68#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
69#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
70 B44_TX_RING_SIZE)
71
72#define TX_RING_GAP(BP) \
73 (B44_TX_RING_SIZE - (BP)->tx_pending)
74#define TX_BUFFS_AVAIL(BP) \
75 (((BP)->tx_cons <= (BP)->tx_prod) ? \
76 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
77 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
78#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
79
80#define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
81#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
82
83/* minimum number of free TX descriptors required to wake up TX process */
84#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
85
86/* b44 internal pattern match filter info */
87#define B44_PATTERN_BASE 0x400
88#define B44_PATTERN_SIZE 0x80
89#define B44_PMASK_BASE 0x600
90#define B44_PMASK_SIZE 0x10
91#define B44_MAX_PATTERNS 16
92#define B44_ETHIPV6UDP_HLEN 62
93#define B44_ETHIPV4UDP_HLEN 42
94
95MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
96MODULE_DESCRIPTION(DRV_DESCRIPTION);
97MODULE_LICENSE("GPL");
98MODULE_VERSION(DRV_MODULE_VERSION);
99
100static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
101module_param(b44_debug, int, 0);
102MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
103
104
105#ifdef CONFIG_B44_PCI
106static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
109 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
110 { 0 } /* terminate list with empty entry */
111};
112MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
113
114static struct pci_driver b44_pci_driver = {
115 .name = DRV_MODULE_NAME,
116 .id_table = b44_pci_tbl,
117};
118#endif /* CONFIG_B44_PCI */
119
120static const struct ssb_device_id b44_ssb_tbl[] = {
121 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
122 SSB_DEVTABLE_END
123};
124MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
125
126static void b44_halt(struct b44 *);
127static void b44_init_rings(struct b44 *);
128
129#define B44_FULL_RESET 1
130#define B44_FULL_RESET_SKIP_PHY 2
131#define B44_PARTIAL_RESET 3
132#define B44_CHIP_RESET_FULL 4
133#define B44_CHIP_RESET_PARTIAL 5
134
135static void b44_init_hw(struct b44 *, int);
136
137static int dma_desc_sync_size;
138static int instance;
139
140static const char b44_gstrings[][ETH_GSTRING_LEN] = {
141#define _B44(x...) # x,
142B44_STAT_REG_DECLARE
143#undef _B44
144};
145
146static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
147 dma_addr_t dma_base,
148 unsigned long offset,
149 enum dma_data_direction dir)
150{
151 dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
152 dma_desc_sync_size, dir);
153}
154
155static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
156 dma_addr_t dma_base,
157 unsigned long offset,
158 enum dma_data_direction dir)
159{
160 dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
161 dma_desc_sync_size, dir);
162}
163
164static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
165{
166 return ssb_read32(bp->sdev, reg);
167}
168
169static inline void bw32(const struct b44 *bp,
170 unsigned long reg, unsigned long val)
171{
172 ssb_write32(bp->sdev, reg, val);
173}
174
175static int b44_wait_bit(struct b44 *bp, unsigned long reg,
176 u32 bit, unsigned long timeout, const int clear)
177{
178 unsigned long i;
179
180 for (i = 0; i < timeout; i++) {
181 u32 val = br32(bp, reg);
182
183 if (clear && !(val & bit))
184 break;
185 if (!clear && (val & bit))
186 break;
187 udelay(10);
188 }
189 if (i == timeout) {
190 if (net_ratelimit())
191 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
192 bit, reg, clear ? "clear" : "set");
193
194 return -ENODEV;
195 }
196 return 0;
197}
198
199static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
200{
201 u32 val;
202
203 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
204 (index << CAM_CTRL_INDEX_SHIFT)));
205
206 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
207
208 val = br32(bp, B44_CAM_DATA_LO);
209
210 data[2] = (val >> 24) & 0xFF;
211 data[3] = (val >> 16) & 0xFF;
212 data[4] = (val >> 8) & 0xFF;
213 data[5] = (val >> 0) & 0xFF;
214
215 val = br32(bp, B44_CAM_DATA_HI);
216
217 data[0] = (val >> 8) & 0xFF;
218 data[1] = (val >> 0) & 0xFF;
219}
220
221static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
222{
223 u32 val;
224
225 val = ((u32) data[2]) << 24;
226 val |= ((u32) data[3]) << 16;
227 val |= ((u32) data[4]) << 8;
228 val |= ((u32) data[5]) << 0;
229 bw32(bp, B44_CAM_DATA_LO, val);
230 val = (CAM_DATA_HI_VALID |
231 (((u32) data[0]) << 8) |
232 (((u32) data[1]) << 0));
233 bw32(bp, B44_CAM_DATA_HI, val);
234 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
235 (index << CAM_CTRL_INDEX_SHIFT)));
236 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
237}
238
239static inline void __b44_disable_ints(struct b44 *bp)
240{
241 bw32(bp, B44_IMASK, 0);
242}
243
244static void b44_disable_ints(struct b44 *bp)
245{
246 __b44_disable_ints(bp);
247
248 /* Flush posted writes. */
249 br32(bp, B44_IMASK);
250}
251
252static void b44_enable_ints(struct b44 *bp)
253{
254 bw32(bp, B44_IMASK, bp->imask);
255}
256
257static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
258{
259 int err;
260
261 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
262 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
263 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
264 (phy_addr << MDIO_DATA_PMD_SHIFT) |
265 (reg << MDIO_DATA_RA_SHIFT) |
266 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
267 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
268 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
269
270 return err;
271}
272
273static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
274{
275 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
276 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
277 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
278 (phy_addr << MDIO_DATA_PMD_SHIFT) |
279 (reg << MDIO_DATA_RA_SHIFT) |
280 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
281 (val & MDIO_DATA_DATA)));
282 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
283}
284
285static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
286{
287 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
288 return 0;
289
290 return __b44_readphy(bp, bp->phy_addr, reg, val);
291}
292
293static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
294{
295 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
296 return 0;
297
298 return __b44_writephy(bp, bp->phy_addr, reg, val);
299}
300
301/* miilib interface */
302static int b44_mii_read(struct net_device *dev, int phy_id, int location)
303{
304 u32 val;
305 struct b44 *bp = netdev_priv(dev);
306 int rc = __b44_readphy(bp, phy_id, location, &val);
307 if (rc)
308 return 0xffffffff;
309 return val;
310}
311
312static void b44_mii_write(struct net_device *dev, int phy_id, int location,
313 int val)
314{
315 struct b44 *bp = netdev_priv(dev);
316 __b44_writephy(bp, phy_id, location, val);
317}
318
319static int b44_phy_reset(struct b44 *bp)
320{
321 u32 val;
322 int err;
323
324 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
325 return 0;
326 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
327 if (err)
328 return err;
329 udelay(100);
330 err = b44_readphy(bp, MII_BMCR, &val);
331 if (!err) {
332 if (val & BMCR_RESET) {
333 netdev_err(bp->dev, "PHY Reset would not complete\n");
334 err = -ENODEV;
335 }
336 }
337
338 return err;
339}
340
341static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
342{
343 u32 val;
344
345 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
346 bp->flags |= pause_flags;
347
348 val = br32(bp, B44_RXCONFIG);
349 if (pause_flags & B44_FLAG_RX_PAUSE)
350 val |= RXCONFIG_FLOW;
351 else
352 val &= ~RXCONFIG_FLOW;
353 bw32(bp, B44_RXCONFIG, val);
354
355 val = br32(bp, B44_MAC_FLOW);
356 if (pause_flags & B44_FLAG_TX_PAUSE)
357 val |= (MAC_FLOW_PAUSE_ENAB |
358 (0xc0 & MAC_FLOW_RX_HI_WATER));
359 else
360 val &= ~MAC_FLOW_PAUSE_ENAB;
361 bw32(bp, B44_MAC_FLOW, val);
362}
363
364static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
365{
366 u32 pause_enab = 0;
367
368 /* The driver supports only rx pause by default because
369 the b44 mac tx pause mechanism generates excessive
370 pause frames.
371 Use ethtool to turn on b44 tx pause if necessary.
372 */
373 if ((local & ADVERTISE_PAUSE_CAP) &&
374 (local & ADVERTISE_PAUSE_ASYM)){
375 if ((remote & LPA_PAUSE_ASYM) &&
376 !(remote & LPA_PAUSE_CAP))
377 pause_enab |= B44_FLAG_RX_PAUSE;
378 }
379
380 __b44_set_flow_ctrl(bp, pause_enab);
381}
382
383#ifdef CONFIG_BCM47XX
384#include <asm/mach-bcm47xx/nvram.h>
385static void b44_wap54g10_workaround(struct b44 *bp)
386{
387 char buf[20];
388 u32 val;
389 int err;
390
391 /*
392 * workaround for bad hardware design in Linksys WAP54G v1.0
393 * see https://dev.openwrt.org/ticket/146
394 * check and reset bit "isolate"
395 */
396 if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
397 return;
398 if (simple_strtoul(buf, NULL, 0) == 2) {
399 err = __b44_readphy(bp, 0, MII_BMCR, &val);
400 if (err)
401 goto error;
402 if (!(val & BMCR_ISOLATE))
403 return;
404 val &= ~BMCR_ISOLATE;
405 err = __b44_writephy(bp, 0, MII_BMCR, val);
406 if (err)
407 goto error;
408 }
409 return;
410error:
411 pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
412}
413#else
414static inline void b44_wap54g10_workaround(struct b44 *bp)
415{
416}
417#endif
418
419static int b44_setup_phy(struct b44 *bp)
420{
421 u32 val;
422 int err;
423
424 b44_wap54g10_workaround(bp);
425
426 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
427 return 0;
428 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
429 goto out;
430 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
431 val & MII_ALEDCTRL_ALLMSK)) != 0)
432 goto out;
433 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
434 goto out;
435 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
436 val | MII_TLEDCTRL_ENABLE)) != 0)
437 goto out;
438
439 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
440 u32 adv = ADVERTISE_CSMA;
441
442 if (bp->flags & B44_FLAG_ADV_10HALF)
443 adv |= ADVERTISE_10HALF;
444 if (bp->flags & B44_FLAG_ADV_10FULL)
445 adv |= ADVERTISE_10FULL;
446 if (bp->flags & B44_FLAG_ADV_100HALF)
447 adv |= ADVERTISE_100HALF;
448 if (bp->flags & B44_FLAG_ADV_100FULL)
449 adv |= ADVERTISE_100FULL;
450
451 if (bp->flags & B44_FLAG_PAUSE_AUTO)
452 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
453
454 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
455 goto out;
456 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
457 BMCR_ANRESTART))) != 0)
458 goto out;
459 } else {
460 u32 bmcr;
461
462 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
463 goto out;
464 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
465 if (bp->flags & B44_FLAG_100_BASE_T)
466 bmcr |= BMCR_SPEED100;
467 if (bp->flags & B44_FLAG_FULL_DUPLEX)
468 bmcr |= BMCR_FULLDPLX;
469 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
470 goto out;
471
472 /* Since we will not be negotiating there is no safe way
473 * to determine if the link partner supports flow control
474 * or not. So just disable it completely in this case.
475 */
476 b44_set_flow_ctrl(bp, 0, 0);
477 }
478
479out:
480 return err;
481}
482
483static void b44_stats_update(struct b44 *bp)
484{
485 unsigned long reg;
486 u32 *val;
487
488 val = &bp->hw_stats.tx_good_octets;
489 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
490 *val++ += br32(bp, reg);
491 }
492
493 /* Pad */
494 reg += 8*4UL;
495
496 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
497 *val++ += br32(bp, reg);
498 }
499}
500
501static void b44_link_report(struct b44 *bp)
502{
503 if (!netif_carrier_ok(bp->dev)) {
504 netdev_info(bp->dev, "Link is down\n");
505 } else {
506 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
507 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
508 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
509
510 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
511 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
512 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
513 }
514}
515
516static void b44_check_phy(struct b44 *bp)
517{
518 u32 bmsr, aux;
519
520 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
521 bp->flags |= B44_FLAG_100_BASE_T;
522 bp->flags |= B44_FLAG_FULL_DUPLEX;
523 if (!netif_carrier_ok(bp->dev)) {
524 u32 val = br32(bp, B44_TX_CTRL);
525 val |= TX_CTRL_DUPLEX;
526 bw32(bp, B44_TX_CTRL, val);
527 netif_carrier_on(bp->dev);
528 b44_link_report(bp);
529 }
530 return;
531 }
532
533 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
534 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
535 (bmsr != 0xffff)) {
536 if (aux & MII_AUXCTRL_SPEED)
537 bp->flags |= B44_FLAG_100_BASE_T;
538 else
539 bp->flags &= ~B44_FLAG_100_BASE_T;
540 if (aux & MII_AUXCTRL_DUPLEX)
541 bp->flags |= B44_FLAG_FULL_DUPLEX;
542 else
543 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
544
545 if (!netif_carrier_ok(bp->dev) &&
546 (bmsr & BMSR_LSTATUS)) {
547 u32 val = br32(bp, B44_TX_CTRL);
548 u32 local_adv, remote_adv;
549
550 if (bp->flags & B44_FLAG_FULL_DUPLEX)
551 val |= TX_CTRL_DUPLEX;
552 else
553 val &= ~TX_CTRL_DUPLEX;
554 bw32(bp, B44_TX_CTRL, val);
555
556 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
557 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
558 !b44_readphy(bp, MII_LPA, &remote_adv))
559 b44_set_flow_ctrl(bp, local_adv, remote_adv);
560
561 /* Link now up */
562 netif_carrier_on(bp->dev);
563 b44_link_report(bp);
564 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
565 /* Link now down */
566 netif_carrier_off(bp->dev);
567 b44_link_report(bp);
568 }
569
570 if (bmsr & BMSR_RFAULT)
571 netdev_warn(bp->dev, "Remote fault detected in PHY\n");
572 if (bmsr & BMSR_JCD)
573 netdev_warn(bp->dev, "Jabber detected in PHY\n");
574 }
575}
576
577static void b44_timer(unsigned long __opaque)
578{
579 struct b44 *bp = (struct b44 *) __opaque;
580
581 spin_lock_irq(&bp->lock);
582
583 b44_check_phy(bp);
584
585 b44_stats_update(bp);
586
587 spin_unlock_irq(&bp->lock);
588
589 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
590}
591
592static void b44_tx(struct b44 *bp)
593{
594 u32 cur, cons;
595
596 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
597 cur /= sizeof(struct dma_desc);
598
599 /* XXX needs updating when NETIF_F_SG is supported */
600 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
601 struct ring_info *rp = &bp->tx_buffers[cons];
602 struct sk_buff *skb = rp->skb;
603
604 BUG_ON(skb == NULL);
605
606 dma_unmap_single(bp->sdev->dma_dev,
607 rp->mapping,
608 skb->len,
609 DMA_TO_DEVICE);
610 rp->skb = NULL;
611 dev_kfree_skb_irq(skb);
612 }
613
614 bp->tx_cons = cons;
615 if (netif_queue_stopped(bp->dev) &&
616 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
617 netif_wake_queue(bp->dev);
618
619 bw32(bp, B44_GPTIMER, 0);
620}
621
622/* Works like this. This chip writes a 'struct rx_header" 30 bytes
623 * before the DMA address you give it. So we allocate 30 more bytes
624 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
625 * point the chip at 30 bytes past where the rx_header will go.
626 */
627static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
628{
629 struct dma_desc *dp;
630 struct ring_info *src_map, *map;
631 struct rx_header *rh;
632 struct sk_buff *skb;
633 dma_addr_t mapping;
634 int dest_idx;
635 u32 ctrl;
636
637 src_map = NULL;
638 if (src_idx >= 0)
639 src_map = &bp->rx_buffers[src_idx];
640 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
641 map = &bp->rx_buffers[dest_idx];
642 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
643 if (skb == NULL)
644 return -ENOMEM;
645
646 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
647 RX_PKT_BUF_SZ,
648 DMA_FROM_DEVICE);
649
650 /* Hardware bug work-around, the chip is unable to do PCI DMA
651 to/from anything above 1GB :-( */
652 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
653 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
654 /* Sigh... */
655 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
656 dma_unmap_single(bp->sdev->dma_dev, mapping,
657 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
658 dev_kfree_skb_any(skb);
659 skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
660 if (skb == NULL)
661 return -ENOMEM;
662 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
663 RX_PKT_BUF_SZ,
664 DMA_FROM_DEVICE);
665 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
666 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
667 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
668 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
669 dev_kfree_skb_any(skb);
670 return -ENOMEM;
671 }
672 bp->force_copybreak = 1;
673 }
674
675 rh = (struct rx_header *) skb->data;
676
677 rh->len = 0;
678 rh->flags = 0;
679
680 map->skb = skb;
681 map->mapping = mapping;
682
683 if (src_map != NULL)
684 src_map->skb = NULL;
685
686 ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
687 if (dest_idx == (B44_RX_RING_SIZE - 1))
688 ctrl |= DESC_CTRL_EOT;
689
690 dp = &bp->rx_ring[dest_idx];
691 dp->ctrl = cpu_to_le32(ctrl);
692 dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
693
694 if (bp->flags & B44_FLAG_RX_RING_HACK)
695 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
696 dest_idx * sizeof(*dp),
697 DMA_BIDIRECTIONAL);
698
699 return RX_PKT_BUF_SZ;
700}
701
702static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
703{
704 struct dma_desc *src_desc, *dest_desc;
705 struct ring_info *src_map, *dest_map;
706 struct rx_header *rh;
707 int dest_idx;
708 __le32 ctrl;
709
710 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
711 dest_desc = &bp->rx_ring[dest_idx];
712 dest_map = &bp->rx_buffers[dest_idx];
713 src_desc = &bp->rx_ring[src_idx];
714 src_map = &bp->rx_buffers[src_idx];
715
716 dest_map->skb = src_map->skb;
717 rh = (struct rx_header *) src_map->skb->data;
718 rh->len = 0;
719 rh->flags = 0;
720 dest_map->mapping = src_map->mapping;
721
722 if (bp->flags & B44_FLAG_RX_RING_HACK)
723 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
724 src_idx * sizeof(*src_desc),
725 DMA_BIDIRECTIONAL);
726
727 ctrl = src_desc->ctrl;
728 if (dest_idx == (B44_RX_RING_SIZE - 1))
729 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
730 else
731 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
732
733 dest_desc->ctrl = ctrl;
734 dest_desc->addr = src_desc->addr;
735
736 src_map->skb = NULL;
737
738 if (bp->flags & B44_FLAG_RX_RING_HACK)
739 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
740 dest_idx * sizeof(*dest_desc),
741 DMA_BIDIRECTIONAL);
742
743 dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
744 RX_PKT_BUF_SZ,
745 DMA_FROM_DEVICE);
746}
747
748static int b44_rx(struct b44 *bp, int budget)
749{
750 int received;
751 u32 cons, prod;
752
753 received = 0;
754 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
755 prod /= sizeof(struct dma_desc);
756 cons = bp->rx_cons;
757
758 while (cons != prod && budget > 0) {
759 struct ring_info *rp = &bp->rx_buffers[cons];
760 struct sk_buff *skb = rp->skb;
761 dma_addr_t map = rp->mapping;
762 struct rx_header *rh;
763 u16 len;
764
765 dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
766 RX_PKT_BUF_SZ,
767 DMA_FROM_DEVICE);
768 rh = (struct rx_header *) skb->data;
769 len = le16_to_cpu(rh->len);
770 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
771 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
772 drop_it:
773 b44_recycle_rx(bp, cons, bp->rx_prod);
774 drop_it_no_recycle:
775 bp->dev->stats.rx_dropped++;
776 goto next_pkt;
777 }
778
779 if (len == 0) {
780 int i = 0;
781
782 do {
783 udelay(2);
784 barrier();
785 len = le16_to_cpu(rh->len);
786 } while (len == 0 && i++ < 5);
787 if (len == 0)
788 goto drop_it;
789 }
790
791 /* Omit CRC. */
792 len -= 4;
793
794 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
795 int skb_size;
796 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
797 if (skb_size < 0)
798 goto drop_it;
799 dma_unmap_single(bp->sdev->dma_dev, map,
800 skb_size, DMA_FROM_DEVICE);
801 /* Leave out rx_header */
802 skb_put(skb, len + RX_PKT_OFFSET);
803 skb_pull(skb, RX_PKT_OFFSET);
804 } else {
805 struct sk_buff *copy_skb;
806
807 b44_recycle_rx(bp, cons, bp->rx_prod);
808 copy_skb = netdev_alloc_skb(bp->dev, len + 2);
809 if (copy_skb == NULL)
810 goto drop_it_no_recycle;
811
812 skb_reserve(copy_skb, 2);
813 skb_put(copy_skb, len);
814 /* DMA sync done above, copy just the actual packet */
815 skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
816 copy_skb->data, len);
817 skb = copy_skb;
818 }
819 skb_checksum_none_assert(skb);
820 skb->protocol = eth_type_trans(skb, bp->dev);
821 netif_receive_skb(skb);
822 received++;
823 budget--;
824 next_pkt:
825 bp->rx_prod = (bp->rx_prod + 1) &
826 (B44_RX_RING_SIZE - 1);
827 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
828 }
829
830 bp->rx_cons = cons;
831 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
832
833 return received;
834}
835
836static int b44_poll(struct napi_struct *napi, int budget)
837{
838 struct b44 *bp = container_of(napi, struct b44, napi);
839 int work_done;
840 unsigned long flags;
841
842 spin_lock_irqsave(&bp->lock, flags);
843
844 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
845 /* spin_lock(&bp->tx_lock); */
846 b44_tx(bp);
847 /* spin_unlock(&bp->tx_lock); */
848 }
849 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
850 bp->istat &= ~ISTAT_RFO;
851 b44_disable_ints(bp);
852 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
853 b44_init_rings(bp);
854 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
855 netif_wake_queue(bp->dev);
856 }
857
858 spin_unlock_irqrestore(&bp->lock, flags);
859
860 work_done = 0;
861 if (bp->istat & ISTAT_RX)
862 work_done += b44_rx(bp, budget);
863
864 if (bp->istat & ISTAT_ERRORS) {
865 spin_lock_irqsave(&bp->lock, flags);
866 b44_halt(bp);
867 b44_init_rings(bp);
868 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
869 netif_wake_queue(bp->dev);
870 spin_unlock_irqrestore(&bp->lock, flags);
871 work_done = 0;
872 }
873
874 if (work_done < budget) {
875 napi_complete(napi);
876 b44_enable_ints(bp);
877 }
878
879 return work_done;
880}
881
882static irqreturn_t b44_interrupt(int irq, void *dev_id)
883{
884 struct net_device *dev = dev_id;
885 struct b44 *bp = netdev_priv(dev);
886 u32 istat, imask;
887 int handled = 0;
888
889 spin_lock(&bp->lock);
890
891 istat = br32(bp, B44_ISTAT);
892 imask = br32(bp, B44_IMASK);
893
894 /* The interrupt mask register controls which interrupt bits
895 * will actually raise an interrupt to the CPU when set by hw/firmware,
896 * but doesn't mask off the bits.
897 */
898 istat &= imask;
899 if (istat) {
900 handled = 1;
901
902 if (unlikely(!netif_running(dev))) {
903 netdev_info(dev, "late interrupt\n");
904 goto irq_ack;
905 }
906
907 if (napi_schedule_prep(&bp->napi)) {
908 /* NOTE: These writes are posted by the readback of
909 * the ISTAT register below.
910 */
911 bp->istat = istat;
912 __b44_disable_ints(bp);
913 __napi_schedule(&bp->napi);
914 }
915
916irq_ack:
917 bw32(bp, B44_ISTAT, istat);
918 br32(bp, B44_ISTAT);
919 }
920 spin_unlock(&bp->lock);
921 return IRQ_RETVAL(handled);
922}
923
924static void b44_tx_timeout(struct net_device *dev)
925{
926 struct b44 *bp = netdev_priv(dev);
927
928 netdev_err(dev, "transmit timed out, resetting\n");
929
930 spin_lock_irq(&bp->lock);
931
932 b44_halt(bp);
933 b44_init_rings(bp);
934 b44_init_hw(bp, B44_FULL_RESET);
935
936 spin_unlock_irq(&bp->lock);
937
938 b44_enable_ints(bp);
939
940 netif_wake_queue(dev);
941}
942
943static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
944{
945 struct b44 *bp = netdev_priv(dev);
946 int rc = NETDEV_TX_OK;
947 dma_addr_t mapping;
948 u32 len, entry, ctrl;
949 unsigned long flags;
950
951 len = skb->len;
952 spin_lock_irqsave(&bp->lock, flags);
953
954 /* This is a hard error, log it. */
955 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
956 netif_stop_queue(dev);
957 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
958 goto err_out;
959 }
960
961 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
962 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
963 struct sk_buff *bounce_skb;
964
965 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
966 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
967 dma_unmap_single(bp->sdev->dma_dev, mapping, len,
968 DMA_TO_DEVICE);
969
970 bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
971 if (!bounce_skb)
972 goto err_out;
973
974 mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
975 len, DMA_TO_DEVICE);
976 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
977 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
978 dma_unmap_single(bp->sdev->dma_dev, mapping,
979 len, DMA_TO_DEVICE);
980 dev_kfree_skb_any(bounce_skb);
981 goto err_out;
982 }
983
984 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
985 dev_kfree_skb_any(skb);
986 skb = bounce_skb;
987 }
988
989 entry = bp->tx_prod;
990 bp->tx_buffers[entry].skb = skb;
991 bp->tx_buffers[entry].mapping = mapping;
992
993 ctrl = (len & DESC_CTRL_LEN);
994 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
995 if (entry == (B44_TX_RING_SIZE - 1))
996 ctrl |= DESC_CTRL_EOT;
997
998 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
999 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1000
1001 if (bp->flags & B44_FLAG_TX_RING_HACK)
1002 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1003 entry * sizeof(bp->tx_ring[0]),
1004 DMA_TO_DEVICE);
1005
1006 entry = NEXT_TX(entry);
1007
1008 bp->tx_prod = entry;
1009
1010 wmb();
1011
1012 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1013 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1014 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1015 if (bp->flags & B44_FLAG_REORDER_BUG)
1016 br32(bp, B44_DMATX_PTR);
1017
1018 if (TX_BUFFS_AVAIL(bp) < 1)
1019 netif_stop_queue(dev);
1020
1021out_unlock:
1022 spin_unlock_irqrestore(&bp->lock, flags);
1023
1024 return rc;
1025
1026err_out:
1027 rc = NETDEV_TX_BUSY;
1028 goto out_unlock;
1029}
1030
1031static int b44_change_mtu(struct net_device *dev, int new_mtu)
1032{
1033 struct b44 *bp = netdev_priv(dev);
1034
1035 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1036 return -EINVAL;
1037
1038 if (!netif_running(dev)) {
1039 /* We'll just catch it later when the
1040 * device is up'd.
1041 */
1042 dev->mtu = new_mtu;
1043 return 0;
1044 }
1045
1046 spin_lock_irq(&bp->lock);
1047 b44_halt(bp);
1048 dev->mtu = new_mtu;
1049 b44_init_rings(bp);
1050 b44_init_hw(bp, B44_FULL_RESET);
1051 spin_unlock_irq(&bp->lock);
1052
1053 b44_enable_ints(bp);
1054
1055 return 0;
1056}
1057
1058/* Free up pending packets in all rx/tx rings.
1059 *
1060 * The chip has been shut down and the driver detached from
1061 * the networking, so no interrupts or new tx packets will
1062 * end up in the driver. bp->lock is not held and we are not
1063 * in an interrupt context and thus may sleep.
1064 */
1065static void b44_free_rings(struct b44 *bp)
1066{
1067 struct ring_info *rp;
1068 int i;
1069
1070 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1071 rp = &bp->rx_buffers[i];
1072
1073 if (rp->skb == NULL)
1074 continue;
1075 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1076 DMA_FROM_DEVICE);
1077 dev_kfree_skb_any(rp->skb);
1078 rp->skb = NULL;
1079 }
1080
1081 /* XXX needs changes once NETIF_F_SG is set... */
1082 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1083 rp = &bp->tx_buffers[i];
1084
1085 if (rp->skb == NULL)
1086 continue;
1087 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1088 DMA_TO_DEVICE);
1089 dev_kfree_skb_any(rp->skb);
1090 rp->skb = NULL;
1091 }
1092}
1093
1094/* Initialize tx/rx rings for packet processing.
1095 *
1096 * The chip has been shut down and the driver detached from
1097 * the networking, so no interrupts or new tx packets will
1098 * end up in the driver.
1099 */
1100static void b44_init_rings(struct b44 *bp)
1101{
1102 int i;
1103
1104 b44_free_rings(bp);
1105
1106 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1107 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1108
1109 if (bp->flags & B44_FLAG_RX_RING_HACK)
1110 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1111 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1112
1113 if (bp->flags & B44_FLAG_TX_RING_HACK)
1114 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1115 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1116
1117 for (i = 0; i < bp->rx_pending; i++) {
1118 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1119 break;
1120 }
1121}
1122
1123/*
1124 * Must not be invoked with interrupt sources disabled and
1125 * the hardware shutdown down.
1126 */
1127static void b44_free_consistent(struct b44 *bp)
1128{
1129 kfree(bp->rx_buffers);
1130 bp->rx_buffers = NULL;
1131 kfree(bp->tx_buffers);
1132 bp->tx_buffers = NULL;
1133 if (bp->rx_ring) {
1134 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1135 dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1136 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1137 kfree(bp->rx_ring);
1138 } else
1139 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1140 bp->rx_ring, bp->rx_ring_dma);
1141 bp->rx_ring = NULL;
1142 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1143 }
1144 if (bp->tx_ring) {
1145 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1146 dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1147 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1148 kfree(bp->tx_ring);
1149 } else
1150 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1151 bp->tx_ring, bp->tx_ring_dma);
1152 bp->tx_ring = NULL;
1153 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1154 }
1155}
1156
1157/*
1158 * Must not be invoked with interrupt sources disabled and
1159 * the hardware shutdown down. Can sleep.
1160 */
1161static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1162{
1163 int size;
1164
1165 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1166 bp->rx_buffers = kzalloc(size, gfp);
1167 if (!bp->rx_buffers)
1168 goto out_err;
1169
1170 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1171 bp->tx_buffers = kzalloc(size, gfp);
1172 if (!bp->tx_buffers)
1173 goto out_err;
1174
1175 size = DMA_TABLE_BYTES;
1176 bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1177 &bp->rx_ring_dma, gfp);
1178 if (!bp->rx_ring) {
1179 /* Allocation may have failed due to pci_alloc_consistent
1180 insisting on use of GFP_DMA, which is more restrictive
1181 than necessary... */
1182 struct dma_desc *rx_ring;
1183 dma_addr_t rx_ring_dma;
1184
1185 rx_ring = kzalloc(size, gfp);
1186 if (!rx_ring)
1187 goto out_err;
1188
1189 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1190 DMA_TABLE_BYTES,
1191 DMA_BIDIRECTIONAL);
1192
1193 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1194 rx_ring_dma + size > DMA_BIT_MASK(30)) {
1195 kfree(rx_ring);
1196 goto out_err;
1197 }
1198
1199 bp->rx_ring = rx_ring;
1200 bp->rx_ring_dma = rx_ring_dma;
1201 bp->flags |= B44_FLAG_RX_RING_HACK;
1202 }
1203
1204 bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1205 &bp->tx_ring_dma, gfp);
1206 if (!bp->tx_ring) {
1207 /* Allocation may have failed due to ssb_dma_alloc_consistent
1208 insisting on use of GFP_DMA, which is more restrictive
1209 than necessary... */
1210 struct dma_desc *tx_ring;
1211 dma_addr_t tx_ring_dma;
1212
1213 tx_ring = kzalloc(size, gfp);
1214 if (!tx_ring)
1215 goto out_err;
1216
1217 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1218 DMA_TABLE_BYTES,
1219 DMA_TO_DEVICE);
1220
1221 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1222 tx_ring_dma + size > DMA_BIT_MASK(30)) {
1223 kfree(tx_ring);
1224 goto out_err;
1225 }
1226
1227 bp->tx_ring = tx_ring;
1228 bp->tx_ring_dma = tx_ring_dma;
1229 bp->flags |= B44_FLAG_TX_RING_HACK;
1230 }
1231
1232 return 0;
1233
1234out_err:
1235 b44_free_consistent(bp);
1236 return -ENOMEM;
1237}
1238
1239/* bp->lock is held. */
1240static void b44_clear_stats(struct b44 *bp)
1241{
1242 unsigned long reg;
1243
1244 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1245 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1246 br32(bp, reg);
1247 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1248 br32(bp, reg);
1249}
1250
1251/* bp->lock is held. */
1252static void b44_chip_reset(struct b44 *bp, int reset_kind)
1253{
1254 struct ssb_device *sdev = bp->sdev;
1255 bool was_enabled;
1256
1257 was_enabled = ssb_device_is_enabled(bp->sdev);
1258
1259 ssb_device_enable(bp->sdev, 0);
1260 ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1261
1262 if (was_enabled) {
1263 bw32(bp, B44_RCV_LAZY, 0);
1264 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1265 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1266 bw32(bp, B44_DMATX_CTRL, 0);
1267 bp->tx_prod = bp->tx_cons = 0;
1268 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1269 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1270 100, 0);
1271 }
1272 bw32(bp, B44_DMARX_CTRL, 0);
1273 bp->rx_prod = bp->rx_cons = 0;
1274 }
1275
1276 b44_clear_stats(bp);
1277
1278 /*
1279 * Don't enable PHY if we are doing a partial reset
1280 * we are probably going to power down
1281 */
1282 if (reset_kind == B44_CHIP_RESET_PARTIAL)
1283 return;
1284
1285 switch (sdev->bus->bustype) {
1286 case SSB_BUSTYPE_SSB:
1287 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1288 (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1289 B44_MDC_RATIO)
1290 & MDIO_CTRL_MAXF_MASK)));
1291 break;
1292 case SSB_BUSTYPE_PCI:
1293 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1294 (0x0d & MDIO_CTRL_MAXF_MASK)));
1295 break;
1296 case SSB_BUSTYPE_PCMCIA:
1297 case SSB_BUSTYPE_SDIO:
1298 WARN_ON(1); /* A device with this bus does not exist. */
1299 break;
1300 }
1301
1302 br32(bp, B44_MDIO_CTRL);
1303
1304 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1305 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1306 br32(bp, B44_ENET_CTRL);
1307 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1308 } else {
1309 u32 val = br32(bp, B44_DEVCTRL);
1310
1311 if (val & DEVCTRL_EPR) {
1312 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1313 br32(bp, B44_DEVCTRL);
1314 udelay(100);
1315 }
1316 bp->flags |= B44_FLAG_INTERNAL_PHY;
1317 }
1318}
1319
1320/* bp->lock is held. */
1321static void b44_halt(struct b44 *bp)
1322{
1323 b44_disable_ints(bp);
1324 /* reset PHY */
1325 b44_phy_reset(bp);
1326 /* power down PHY */
1327 netdev_info(bp->dev, "powering down PHY\n");
1328 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1329 /* now reset the chip, but without enabling the MAC&PHY
1330 * part of it. This has to be done _after_ we shut down the PHY */
1331 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1332}
1333
1334/* bp->lock is held. */
1335static void __b44_set_mac_addr(struct b44 *bp)
1336{
1337 bw32(bp, B44_CAM_CTRL, 0);
1338 if (!(bp->dev->flags & IFF_PROMISC)) {
1339 u32 val;
1340
1341 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1342 val = br32(bp, B44_CAM_CTRL);
1343 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1344 }
1345}
1346
1347static int b44_set_mac_addr(struct net_device *dev, void *p)
1348{
1349 struct b44 *bp = netdev_priv(dev);
1350 struct sockaddr *addr = p;
1351 u32 val;
1352
1353 if (netif_running(dev))
1354 return -EBUSY;
1355
1356 if (!is_valid_ether_addr(addr->sa_data))
1357 return -EINVAL;
1358
1359 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1360
1361 spin_lock_irq(&bp->lock);
1362
1363 val = br32(bp, B44_RXCONFIG);
1364 if (!(val & RXCONFIG_CAM_ABSENT))
1365 __b44_set_mac_addr(bp);
1366
1367 spin_unlock_irq(&bp->lock);
1368
1369 return 0;
1370}
1371
1372/* Called at device open time to get the chip ready for
1373 * packet processing. Invoked with bp->lock held.
1374 */
1375static void __b44_set_rx_mode(struct net_device *);
1376static void b44_init_hw(struct b44 *bp, int reset_kind)
1377{
1378 u32 val;
1379
1380 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1381 if (reset_kind == B44_FULL_RESET) {
1382 b44_phy_reset(bp);
1383 b44_setup_phy(bp);
1384 }
1385
1386 /* Enable CRC32, set proper LED modes and power on PHY */
1387 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1388 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1389
1390 /* This sets the MAC address too. */
1391 __b44_set_rx_mode(bp->dev);
1392
1393 /* MTU + eth header + possible VLAN tag + struct rx_header */
1394 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1395 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1396
1397 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1398 if (reset_kind == B44_PARTIAL_RESET) {
1399 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1400 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1401 } else {
1402 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1403 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1404 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1405 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1406 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1407
1408 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1409 bp->rx_prod = bp->rx_pending;
1410
1411 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1412 }
1413
1414 val = br32(bp, B44_ENET_CTRL);
1415 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1416}
1417
1418static int b44_open(struct net_device *dev)
1419{
1420 struct b44 *bp = netdev_priv(dev);
1421 int err;
1422
1423 err = b44_alloc_consistent(bp, GFP_KERNEL);
1424 if (err)
1425 goto out;
1426
1427 napi_enable(&bp->napi);
1428
1429 b44_init_rings(bp);
1430 b44_init_hw(bp, B44_FULL_RESET);
1431
1432 b44_check_phy(bp);
1433
1434 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1435 if (unlikely(err < 0)) {
1436 napi_disable(&bp->napi);
1437 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1438 b44_free_rings(bp);
1439 b44_free_consistent(bp);
1440 goto out;
1441 }
1442
1443 init_timer(&bp->timer);
1444 bp->timer.expires = jiffies + HZ;
1445 bp->timer.data = (unsigned long) bp;
1446 bp->timer.function = b44_timer;
1447 add_timer(&bp->timer);
1448
1449 b44_enable_ints(bp);
1450 netif_start_queue(dev);
1451out:
1452 return err;
1453}
1454
1455#ifdef CONFIG_NET_POLL_CONTROLLER
1456/*
1457 * Polling receive - used by netconsole and other diagnostic tools
1458 * to allow network i/o with interrupts disabled.
1459 */
1460static void b44_poll_controller(struct net_device *dev)
1461{
1462 disable_irq(dev->irq);
1463 b44_interrupt(dev->irq, dev);
1464 enable_irq(dev->irq);
1465}
1466#endif
1467
1468static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1469{
1470 u32 i;
1471 u32 *pattern = (u32 *) pp;
1472
1473 for (i = 0; i < bytes; i += sizeof(u32)) {
1474 bw32(bp, B44_FILT_ADDR, table_offset + i);
1475 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1476 }
1477}
1478
1479static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1480{
1481 int magicsync = 6;
1482 int k, j, len = offset;
1483 int ethaddr_bytes = ETH_ALEN;
1484
1485 memset(ppattern + offset, 0xff, magicsync);
1486 for (j = 0; j < magicsync; j++)
1487 set_bit(len++, (unsigned long *) pmask);
1488
1489 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1490 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1491 ethaddr_bytes = ETH_ALEN;
1492 else
1493 ethaddr_bytes = B44_PATTERN_SIZE - len;
1494 if (ethaddr_bytes <=0)
1495 break;
1496 for (k = 0; k< ethaddr_bytes; k++) {
1497 ppattern[offset + magicsync +
1498 (j * ETH_ALEN) + k] = macaddr[k];
1499 set_bit(len++, (unsigned long *) pmask);
1500 }
1501 }
1502 return len - 1;
1503}
1504
1505/* Setup magic packet patterns in the b44 WOL
1506 * pattern matching filter.
1507 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		pr_err("Memory not available for WOL\n");
		return;
	}

	/* IPv4 magic packet pattern - pattern 0. */
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw Ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* IPv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set the patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

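/* Arm the chip for Wake-on-LAN before it is powered down.  Rev B0 and
 * later cores do magic-packet matching in hardware (DEVCTRL_MPM), so
 * programming the station address suffices; older cores approximate it
 * with the pattern filters set up by b44_setup_pseudo_magicp().
 */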
static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {
		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
		      bp->dev->dev_addr[3] << 16 |
		      bp->dev->dev_addr[4] << 8 |
		      bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
		      bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

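/* Fill netdev stats from the hardware MIB counters accumulated in
 * bp->hw_stats by b44_stats_update(); this only converts the most
 * recent snapshot.  Note that nstat->multicast counts *received*
 * multicast frames, hence the RX counter below.
 */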
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes = hwstat->rx_octets;
	nstat->tx_bytes = hwstat->tx_octets;
	nstat->tx_errors = (hwstat->tx_jabber_pkts +
			    hwstat->tx_oversize_pkts +
			    hwstat->tx_underruns +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_late_cols);
	nstat->multicast = hwstat->rx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors = hwstat->rx_align_errs;
	nstat->rx_crc_errors = hwstat->rx_crc_errs;
	nstat->rx_errors = (hwstat->rx_jabber_pkts +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_missed_pkts +
			    hwstat->rx_crc_align_errs +
			    hwstat->rx_undersize +
			    hwstat->rx_crc_errs +
			    hwstat->rx_align_errs +
			    hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

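/* Load the multicast list into the chip's CAM.  Entry 0 holds the
 * unicast station address, so multicast entries start at index 1.
 * Returns the index of the first CAM entry left unused.
 */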
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i, num_ents;

	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[ETH_ALEN] = { 0 };
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

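		/* Zero the remaining CAM entries (the CAM holds 64) and
		 * re-enable the CAM with its new contents.
		 */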
		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
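	/* BMCR is deliberately read twice; the second read is presumably
	 * meant to guard against a stale value from the first.
	 */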
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
				    SPEED_100 : SPEED_10));
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, 0);
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((speed != SPEED_100 &&
		    speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

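/* The counters in struct b44_hw_stats are consecutive u32s declared in
 * the same order as b44_gstrings, so a single pointer walk starting at
 * tx_good_octets copies every exported counter.
 */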
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

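/* Read the board invariants out of the SPROM: on an SSB bus the second
 * and later core instances use the et1 MAC/PHY slots, everything else
 * uses et0.
 */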
static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);

	if (!is_valid_ether_addr(bp->dev->dev_addr)) {
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		err = -ENODEV;
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	if (b44_phy_reset(bp) < 0)
		bp->phy_addr = B44_PHY_ADDR_NO_PHY;

	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}

static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

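/* Power management: on suspend, halt the chip and drop its resources,
 * optionally re-arming Wake-on-LAN; on resume, redo the open-time
 * initialization and re-request the interrupt.
 */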
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

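/* Descriptors are synced to the device in units of dma_desc_sync_size,
 * which must cover both a whole descriptor and at least a full cache
 * line, since partial cache-line syncs are unsafe on some platforms.
 */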
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Set up parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size,
				   sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);