1// SPDX-License-Identifier: GPL-2.0
2/* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
3 * auto carrier detecting ethernet driver. Also known as the
4 * "Happy Meal Ethernet" found on SunSwift SBUS cards.
5 *
6 * Copyright (C) 1996, 1998, 1999, 2002, 2003,
7 * 2006, 2008 David S. Miller (davem@davemloft.net)
8 *
9 * Changes :
10 * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
11 * - port to non-sparc architectures. Tested only on x86 and
12 * only currently works with QFE PCI cards.
13 * - ability to specify the MAC address at module load time by passing this
14 * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
15 */
16
17#include <linux/bitops.h>
18#include <linux/crc32.h>
19#include <linux/delay.h>
20#include <linux/dma-mapping.h>
21#include <linux/errno.h>
22#include <linux/etherdevice.h>
23#include <linux/ethtool.h>
24#include <linux/fcntl.h>
25#include <linux/in.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/ioport.h>
30#include <linux/kernel.h>
31#include <linux/mii.h>
32#include <linux/mm.h>
33#include <linux/module.h>
34#include <linux/netdevice.h>
35#include <linux/of.h>
36#include <linux/of_device.h>
37#include <linux/pci.h>
38#include <linux/platform_device.h>
39#include <linux/random.h>
40#include <linux/skbuff.h>
41#include <linux/slab.h>
42#include <linux/string.h>
43#include <linux/types.h>
44#include <linux/uaccess.h>
45
46#include <asm/byteorder.h>
47#include <asm/dma.h>
48#include <asm/irq.h>
49
50#ifdef CONFIG_SPARC
51#include <asm/auxio.h>
52#include <asm/idprom.h>
53#include <asm/openprom.h>
54#include <asm/oplib.h>
55#include <asm/prom.h>
56#endif
57
58#include "sunhme.h"
59
60#define DRV_NAME "sunhme"
61
62MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
63MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
64MODULE_LICENSE("GPL");
65
66static int macaddr[6];
67
68/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
69module_param_array(macaddr, int, NULL, 0);
70MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
71
72#ifdef CONFIG_SBUS
73static struct quattro *qfe_sbus_list;
74#endif
75
76#ifdef CONFIG_PCI
77static struct quattro *qfe_pci_list;
78#endif
79
80#define hme_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
81#define HMD hme_debug
82
83/* "Auto Switch Debug" aka phy debug */
84#if 1
85#define ASD hme_debug
86#else
87#define ASD(...)
88#endif
89
90#if 0
91struct hme_tx_logent {
92 unsigned int tstamp;
93 int tx_new, tx_old;
94 unsigned int action;
95#define TXLOG_ACTION_IRQ 0x01
96#define TXLOG_ACTION_TXMIT 0x02
97#define TXLOG_ACTION_TBUSY 0x04
98#define TXLOG_ACTION_NBUFS 0x08
99 unsigned int status;
100};
101#define TX_LOG_LEN 128
102static struct hme_tx_logent tx_log[TX_LOG_LEN];
103static int txlog_cur_entry;
104static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
105{
106 struct hme_tx_logent *tlp;
107 unsigned long flags;
108
109 local_irq_save(flags);
110 tlp = &tx_log[txlog_cur_entry];
111 tlp->tstamp = (unsigned int)jiffies;
112 tlp->tx_new = hp->tx_new;
113 tlp->tx_old = hp->tx_old;
114 tlp->action = a;
115 tlp->status = s;
116 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
117 local_irq_restore(flags);
118}
119static __inline__ void tx_dump_log(void)
120{
121 int i, this;
122
123 this = txlog_cur_entry;
124 for (i = 0; i < TX_LOG_LEN; i++) {
125 pr_err("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
126 tx_log[this].tstamp,
127 tx_log[this].tx_new, tx_log[this].tx_old,
128 tx_log[this].action, tx_log[this].status);
129 this = (this + 1) & (TX_LOG_LEN - 1);
130 }
131}
132#else
133#define tx_add_log(hp, a, s)
134#define tx_dump_log()
135#endif
136
137#define DEFAULT_IPG0 16 /* For lance-mode only */
138#define DEFAULT_IPG1 8 /* For all modes */
139#define DEFAULT_IPG2 4 /* For all modes */
140#define DEFAULT_JAMSIZE 4 /* Toe jam */
141
142/* NOTE: In the descriptor writes one _must_ write the address
143 * member _first_. The card must not be allowed to see
144 * the updated descriptor flags until the address is
145 * correct. I've added a write memory barrier between
146 * the two stores so that I can sleep well at night... -DaveM
147 */
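
/* In other words, every descriptor update in the helpers below follows
 * the same three-step pattern:
 *
 *	desc->addr  = mapping;		1) address first
 *	dma_wmb();			2) order the two stores
 *	desc->flags = OWN | size;	3) only now hand it to the card
 */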
148
149#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
150static void sbus_hme_write32(void __iomem *reg, u32 val)
151{
152 sbus_writel(val, reg);
153}
154
155static u32 sbus_hme_read32(void __iomem *reg)
156{
157 return sbus_readl(reg);
158}
159
160static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
161{
162 rxd->rx_addr = (__force hme32)addr;
163 dma_wmb();
164 rxd->rx_flags = (__force hme32)flags;
165}
166
167static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
168{
169 txd->tx_addr = (__force hme32)addr;
170 dma_wmb();
171 txd->tx_flags = (__force hme32)flags;
172}
173
174static u32 sbus_hme_read_desc32(hme32 *p)
175{
176 return (__force u32)*p;
177}
178
179static void pci_hme_write32(void __iomem *reg, u32 val)
180{
181 writel(val, reg);
182}
183
184static u32 pci_hme_read32(void __iomem *reg)
185{
186 return readl(reg);
187}
188
189static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
190{
191 rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
192 dma_wmb();
193 rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
194}
195
196static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
197{
198 txd->tx_addr = (__force hme32)cpu_to_le32(addr);
199 dma_wmb();
200 txd->tx_flags = (__force hme32)cpu_to_le32(flags);
201}
202
203static u32 pci_hme_read_desc32(hme32 *p)
204{
205 return le32_to_cpup((__le32 *)p);
206}
207
208#define hme_write32(__hp, __reg, __val) \
209 ((__hp)->write32((__reg), (__val)))
210#define hme_read32(__hp, __reg) \
211 ((__hp)->read32(__reg))
212#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
213 ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
214#define hme_write_txd(__hp, __txd, __flags, __addr) \
215 ((__hp)->write_txd((__txd), (__flags), (__addr)))
216#define hme_read_desc32(__hp, __p) \
217 ((__hp)->read_desc32(__p))
218#else
219#ifdef CONFIG_SBUS
220/* SBUS only compilation */
221#define hme_write32(__hp, __reg, __val) \
222 sbus_writel((__val), (__reg))
223#define hme_read32(__hp, __reg) \
224 sbus_readl(__reg)
225#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
226do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
227 dma_wmb(); \
228 (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
229} while(0)
230#define hme_write_txd(__hp, __txd, __flags, __addr) \
231do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
232 dma_wmb(); \
233 (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
234} while(0)
235#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
236#else
237/* PCI only compilation */
238#define hme_write32(__hp, __reg, __val) \
239 writel((__val), (__reg))
240#define hme_read32(__hp, __reg) \
241 readl(__reg)
242#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
243do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
244 dma_wmb(); \
245 (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
246} while(0)
247#define hme_write_txd(__hp, __txd, __flags, __addr) \
248do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
249 dma_wmb(); \
250 (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
251} while(0)
252static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
253{
254 return le32_to_cpup((__le32 *)p);
255}
256#endif
257#endif
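
/* Illustrative only: in the combined SBUS+PCI build the accessors above are
 * reached through per-device function pointers, which are assumed to be
 * filled in at probe time with the flavour that matches the bus, roughly:
 *
 *	hp->read32    = pci_hme_read32;		(or sbus_hme_read32)
 *	hp->write32   = pci_hme_write32;
 *	hp->write_rxd = pci_hme_write_rxd;
 *	...
 *
 * so that the rest of the driver can simply say, for example:
 *
 *	hme_write_rxd(hp, rxd,
 *		      RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16),
 *		      mapping);
 */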
258
259
260/* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
261static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
262{
263 hme_write32(hp, tregs + TCVR_BBDATA, bit);
264 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
265 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
266}
267
268#if 0
269static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
270{
271 u32 ret;
272
273 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
274 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
275 ret = hme_read32(hp, tregs + TCVR_CFG);
276 if (internal)
277 ret &= TCV_CFG_MDIO0;
278 else
279 ret &= TCV_CFG_MDIO1;
280
281 return ret;
282}
283#endif
284
285static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
286{
287 u32 retval;
288
289 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
290 udelay(1);
291 retval = hme_read32(hp, tregs + TCVR_CFG);
292 if (internal)
293 retval &= TCV_CFG_MDIO0;
294 else
295 retval &= TCV_CFG_MDIO1;
296 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
297
298 return retval;
299}
300
301#define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */
302
303static int happy_meal_bb_read(struct happy_meal *hp,
304 void __iomem *tregs, int reg)
305{
306 u32 tmp;
307 int retval = 0;
308 int i;
309
310 /* Enable the MIF BitBang outputs. */
311 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
312
313 /* Force BitBang into the idle state. */
314 for (i = 0; i < 32; i++)
315 BB_PUT_BIT(hp, tregs, 1);
316
317 /* Give it the read sequence. */
318 BB_PUT_BIT(hp, tregs, 0);
319 BB_PUT_BIT(hp, tregs, 1);
320 BB_PUT_BIT(hp, tregs, 1);
321 BB_PUT_BIT(hp, tregs, 0);
322
323 /* Give it the PHY address. */
324 tmp = hp->paddr & 0xff;
325 for (i = 4; i >= 0; i--)
326 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
327
328 /* Tell it what register we want to read. */
329 tmp = (reg & 0xff);
330 for (i = 4; i >= 0; i--)
331 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
332
333 /* Close down the MIF BitBang outputs. */
334 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
335
336 /* Now read in the value. */
337 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
338 for (i = 15; i >= 0; i--)
339 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
340 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
341 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
342 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
343 ASD("reg=%d value=%x\n", reg, retval);
344 return retval;
345}
346
347static void happy_meal_bb_write(struct happy_meal *hp,
348 void __iomem *tregs, int reg,
349 unsigned short value)
350{
351 u32 tmp;
352 int i;
353
354 ASD("reg=%d value=%x\n", reg, value);
355
356 /* Enable the MIF BitBang outputs. */
357 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
358
359 /* Force BitBang into the idle state. */
360 for (i = 0; i < 32; i++)
361 BB_PUT_BIT(hp, tregs, 1);
362
363 /* Give it write sequence. */
364 BB_PUT_BIT(hp, tregs, 0);
365 BB_PUT_BIT(hp, tregs, 1);
366 BB_PUT_BIT(hp, tregs, 0);
367 BB_PUT_BIT(hp, tregs, 1);
368
369 /* Give it the PHY address. */
370 tmp = (hp->paddr & 0xff);
371 for (i = 4; i >= 0; i--)
372 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
373
374 /* Tell it what register we will be writing. */
375 tmp = (reg & 0xff);
376 for (i = 4; i >= 0; i--)
377 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
378
379 /* Tell it to become ready for the bits. */
380 BB_PUT_BIT(hp, tregs, 1);
381 BB_PUT_BIT(hp, tregs, 0);
382
383 for (i = 15; i >= 0; i--)
384 BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
385
386 /* Close down the MIF BitBang outputs. */
387 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
388}
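
/* The two routines above just clock a standard IEEE 802.3 clause 22 MDIO
 * management frame out on the MIF bit-bang pins, one bit per BB_PUT_BIT():
 *
 *	<32 ones preamble> <start 01> <opcode: read 10 / write 01>
 *	<5 bit phy address> <5 bit register> <turnaround> <16 data bits>
 *
 * On a read the bus is turned around before the data field so the PHY can
 * drive it, which is why the read path samples with BB_GET_BIT2() instead
 * of driving the turnaround and data bits itself.
 */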
389
390#define TCVR_READ_TRIES 16
391
392static int happy_meal_tcvr_read(struct happy_meal *hp,
393 void __iomem *tregs, int reg)
394{
395 int tries = TCVR_READ_TRIES;
396 int retval;
397
398 if (hp->tcvr_type == none) {
399 ASD("no transceiver, value=TCVR_FAILURE\n");
400 return TCVR_FAILURE;
401 }
402
403 if (!(hp->happy_flags & HFLAG_FENABLE)) {
404 ASD("doing bit bang\n");
405 return happy_meal_bb_read(hp, tregs, reg);
406 }
407
408 hme_write32(hp, tregs + TCVR_FRAME,
409 (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
410 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
411 udelay(20);
412 if (!tries) {
413 netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n");
414 return TCVR_FAILURE;
415 }
416 retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
417 ASD("reg=0x%02x value=%04x\n", reg, retval);
418 return retval;
419}
420
421#define TCVR_WRITE_TRIES 16
422
423static void happy_meal_tcvr_write(struct happy_meal *hp,
424 void __iomem *tregs, int reg,
425 unsigned short value)
426{
427 int tries = TCVR_WRITE_TRIES;
428
429 ASD("reg=0x%02x value=%04x\n", reg, value);
430
431 /* Welcome to Sun Microsystems, can I take your order please? */
432 if (!(hp->happy_flags & HFLAG_FENABLE)) {
433 happy_meal_bb_write(hp, tregs, reg, value);
434 return;
435 }
436
437 /* Would you like fries with that? */
438 hme_write32(hp, tregs + TCVR_FRAME,
439 (FRAME_WRITE | (hp->paddr << 23) |
440 ((reg & 0xff) << 18) | (value & 0xffff)));
441 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
442 udelay(20);
443
444 /* Anything else? */
445 if (!tries)
446 netdev_err(hp->dev, "Aieee, transceiver MIF write bolixed\n");
447
448 /* Fifty-two cents is your change, have a nice day. */
449}
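
/* When frame mode is enabled (HFLAG_FENABLE) the MIF hardware does the MDIO
 * framing for us, and the two routines above just compose a single TCVR_FRAME
 * word. For example, a read of register 0 boils down to roughly:
 *
 *	hme_write32(hp, tregs + TCVR_FRAME,
 *		    FRAME_READ | (hp->paddr << 23) | (0 << 18));
 *	... poll the 0x10000 completion bit ...
 *	value = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
 */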
450
/* Auto negotiation. The scheme is very simple. We have a timer routine
 * that keeps watching the auto negotiation process as it progresses.
 * The DP83840 is first told to start doing its thing, we set up the timer
 * and place the timer state machine in its initial state.
 *
 * Here the timer peeks at the DP83840 status registers at each tick to see
 * if the auto negotiation has completed; we assume here that the DP83840 PHY
 * will time out at some point and just tell us what (didn't) happen. For
 * complete coverage we only allow so many of the ticks at this level to run;
 * when this has expired we print a warning message and try another strategy.
 * This "other" strategy is to force the interface into various speed/duplex
 * configurations and we stop when we see a link-up condition before the
 * maximum number of "peek" ticks have occurred.
 *
 * Once a valid link status has been detected we configure the BigMAC and
 * the rest of the Happy Meal to speak the most efficient protocol we could
 * get a clean link for. The priority for link configurations, highest first,
 * is:
 *	100 Base-T Full Duplex
 *	100 Base-T Half Duplex
 *	10 Base-T Full Duplex
 *	10 Base-T Half Duplex
 *
 * We start a new timer now, after a successful auto negotiation status has
 * been detected. This timer just waits for the link-up bit to get set in
 * the BMSR of the DP83840. When this occurs we print a kernel log message
 * describing the link type in use and the fact that it is up.
 *
 * If a fatal error of some sort is signalled and detected in the interrupt
 * service routine, and the chip is reset, or the link is ifconfig'd down
 * and then back up, this entire process repeats itself all over again.
 */
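
/* A rough sketch of the timer states driven below (the authoritative
 * behaviour is happy_meal_timer() itself):
 *
 *	arbwait  -- autoneg started, poll BMSR for ANEGCOMPLETE; on success
 *	            go to lupwait, on timeout fall back to forced (ltrywait)
 *	lupwait  -- autoneg done, poll BMSR for LSTATUS; once the link is up,
 *	            report it and go to asleep
 *	ltrywait -- a speed/duplex setting was forced, wait for LSTATUS and
 *	            otherwise try_next_permutation() or restart autoneg
 *	asleep   -- link is up (or we gave up), the timer is not re-armed
 */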
483static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
484{
485 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
486
487 /* Downgrade from full to half duplex. Only possible
488 * via ethtool.
489 */
490 if (hp->sw_bmcr & BMCR_FULLDPLX) {
491 hp->sw_bmcr &= ~(BMCR_FULLDPLX);
492 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
493 return 0;
494 }
495
496 /* Downgrade from 100 to 10. */
497 if (hp->sw_bmcr & BMCR_SPEED100) {
498 hp->sw_bmcr &= ~(BMCR_SPEED100);
499 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
500 return 0;
501 }
502
503 /* We've tried everything. */
504 return -1;
505}
506
507static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
508{
509 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
510
511 netdev_info(hp->dev,
512 "Link is up using %s transceiver at %dMb/s, %s Duplex.\n",
513 hp->tcvr_type == external ? "external" : "internal",
514 hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10,
515 hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");
516}
517
518static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
519{
520 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
521
522 netdev_info(hp->dev,
523 "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n",
524 hp->tcvr_type == external ? "external" : "internal",
525 hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10,
526 hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half");
527}
528
529static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
530{
531 int full;
532
533 /* All we care about is making sure the bigmac tx_cfg has a
534 * proper duplex setting.
535 */
536 if (hp->timer_state == arbwait) {
537 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
538 if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
539 goto no_response;
540 if (hp->sw_lpa & LPA_100FULL)
541 full = 1;
542 else if (hp->sw_lpa & LPA_100HALF)
543 full = 0;
544 else if (hp->sw_lpa & LPA_10FULL)
545 full = 1;
546 else
547 full = 0;
548 } else {
549 /* Forcing a link mode. */
550 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
551 if (hp->sw_bmcr & BMCR_FULLDPLX)
552 full = 1;
553 else
554 full = 0;
555 }
556
	/* Before changing other bits in the tx_cfg register, and in
	 * general any of the other TX config registers too, you
	 * must:
	 * 1) Clear Enable
	 * 2) Poll with reads until that bit reads back as zero
	 * 3) Make TX configuration changes
	 * 4) Set Enable once more
	 */
565 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
566 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
567 ~(BIGMAC_TXCFG_ENABLE));
568 while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
569 barrier();
570 if (full) {
571 hp->happy_flags |= HFLAG_FULL;
572 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
573 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
574 BIGMAC_TXCFG_FULLDPLX);
575 } else {
576 hp->happy_flags &= ~(HFLAG_FULL);
577 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
578 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
579 ~(BIGMAC_TXCFG_FULLDPLX));
580 }
581 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
582 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
583 BIGMAC_TXCFG_ENABLE);
584 return 0;
585no_response:
586 return 1;
587}
588
589static int is_lucent_phy(struct happy_meal *hp)
590{
591 void __iomem *tregs = hp->tcvregs;
592 unsigned short mr2, mr3;
593 int ret = 0;
594
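	/* MII registers 2 and 3 are the standard PHY identifier registers
	 * (MII_PHYSID1/MII_PHYSID2); the magic values below are assumed to
	 * match the OUI/model bits of the Lucent PHY found on some HME
	 * variants.
	 */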
595 mr2 = happy_meal_tcvr_read(hp, tregs, 2);
596 mr3 = happy_meal_tcvr_read(hp, tregs, 3);
597 if ((mr2 & 0xffff) == 0x0180 &&
598 ((mr3 & 0xffff) >> 10) == 0x1d)
599 ret = 1;
600
601 return ret;
602}
603
604/* hp->happy_lock must be held */
605static void
606happy_meal_begin_auto_negotiation(struct happy_meal *hp,
607 void __iomem *tregs,
608 const struct ethtool_link_ksettings *ep)
609{
610 int timeout;
611
612 /* Read all of the registers we are interested in now. */
613 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
614 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
615 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
616 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
617
618 /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
619
620 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
621 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
622 /* Advertise everything we can support. */
623 if (hp->sw_bmsr & BMSR_10HALF)
624 hp->sw_advertise |= (ADVERTISE_10HALF);
625 else
626 hp->sw_advertise &= ~(ADVERTISE_10HALF);
627
628 if (hp->sw_bmsr & BMSR_10FULL)
629 hp->sw_advertise |= (ADVERTISE_10FULL);
630 else
631 hp->sw_advertise &= ~(ADVERTISE_10FULL);
632 if (hp->sw_bmsr & BMSR_100HALF)
633 hp->sw_advertise |= (ADVERTISE_100HALF);
634 else
635 hp->sw_advertise &= ~(ADVERTISE_100HALF);
636 if (hp->sw_bmsr & BMSR_100FULL)
637 hp->sw_advertise |= (ADVERTISE_100FULL);
638 else
639 hp->sw_advertise &= ~(ADVERTISE_100FULL);
640 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
641
		/* XXX Currently no Happy Meal cards I know of support 100BaseT4,
		 * XXX and this is because the DP83840 does not support it; changes
		 * XXX would need to be made to the tx/rx logic in the driver as
		 * XXX well, so I completely skip checking for it in the BMSR for now.
		 */
647
648 ASD("Advertising [ %s%s%s%s]\n",
649 hp->sw_advertise & ADVERTISE_10HALF ? "10H " : "",
650 hp->sw_advertise & ADVERTISE_10FULL ? "10F " : "",
651 hp->sw_advertise & ADVERTISE_100HALF ? "100H " : "",
652 hp->sw_advertise & ADVERTISE_100FULL ? "100F " : "");
653
654 /* Enable Auto-Negotiation, this is usually on already... */
655 hp->sw_bmcr |= BMCR_ANENABLE;
656 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
657
658 /* Restart it to make sure it is going. */
659 hp->sw_bmcr |= BMCR_ANRESTART;
660 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
661
662 /* BMCR_ANRESTART self clears when the process has begun. */
663
664 timeout = 64; /* More than enough. */
665 while (--timeout) {
666 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
667 if (!(hp->sw_bmcr & BMCR_ANRESTART))
668 break; /* got it. */
669 udelay(10);
670 }
671 if (!timeout) {
672 netdev_err(hp->dev,
673 "Happy Meal would not start auto negotiation BMCR=0x%04x\n",
674 hp->sw_bmcr);
675 netdev_notice(hp->dev,
676 "Performing force link detection.\n");
677 goto force_link;
678 } else {
679 hp->timer_state = arbwait;
680 }
681 } else {
682force_link:
683 /* Force the link up, trying first a particular mode.
684 * Either we are here at the request of ethtool or
685 * because the Happy Meal would not start to autoneg.
686 */
687
688 /* Disable auto-negotiation in BMCR, enable the duplex and
689 * speed setting, init the timer state machine, and fire it off.
690 */
691 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
692 hp->sw_bmcr = BMCR_SPEED100;
693 } else {
694 if (ep->base.speed == SPEED_100)
695 hp->sw_bmcr = BMCR_SPEED100;
696 else
697 hp->sw_bmcr = 0;
698 if (ep->base.duplex == DUPLEX_FULL)
699 hp->sw_bmcr |= BMCR_FULLDPLX;
700 }
701 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
702
703 if (!is_lucent_phy(hp)) {
			/* OK, it seems we need to disable the transceiver for the
			 * first tick to make sure we get an accurate link state at
			 * the second tick.
			 */
708 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
709 DP83840_CSCONFIG);
710 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
711 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
712 hp->sw_csconfig);
713 }
714 hp->timer_state = ltrywait;
715 }
716
717 hp->timer_ticks = 0;
718 hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
719 add_timer(&hp->happy_timer);
720}
721
722static void happy_meal_timer(struct timer_list *t)
723{
724 struct happy_meal *hp = from_timer(hp, t, happy_timer);
725 void __iomem *tregs = hp->tcvregs;
726 int restart_timer = 0;
727
728 spin_lock_irq(&hp->happy_lock);
729
730 hp->timer_ticks++;
731 switch(hp->timer_state) {
732 case arbwait:
		/* Only allow for 10 ticks (roughly 12 seconds); that is already
		 * much too long to wait for arbitration to complete.
		 */
736 if (hp->timer_ticks >= 10) {
737 /* Enter force mode. */
738 do_force_mode:
739 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
740 netdev_notice(hp->dev,
741 "Auto-Negotiation unsuccessful, trying force link mode\n");
742 hp->sw_bmcr = BMCR_SPEED100;
743 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
744
745 if (!is_lucent_phy(hp)) {
			/* OK, it seems we need to disable the transceiver for the
			 * first tick to make sure we get an accurate link state at
			 * the second tick.
			 */
750 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
751 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
752 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
753 }
754 hp->timer_state = ltrywait;
755 hp->timer_ticks = 0;
756 restart_timer = 1;
757 } else {
758 /* Anything interesting happen? */
759 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
760 if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
761 int ret;
762
763 /* Just what we've been waiting for... */
764 ret = set_happy_link_modes(hp, tregs);
765 if (ret) {
766 /* Ooops, something bad happened, go to force
767 * mode.
768 *
769 * XXX Broken hubs which don't support 802.3u
770 * XXX auto-negotiation make this happen as well.
771 */
772 goto do_force_mode;
773 }
774
775 /* Success, at least so far, advance our state engine. */
776 hp->timer_state = lupwait;
777 restart_timer = 1;
778 } else {
779 restart_timer = 1;
780 }
781 }
782 break;
783
784 case lupwait:
		/* Auto negotiation was successful and we are awaiting a
		 * link up status. I have decided to let this timer run
		 * forever until some sort of error is signalled, reporting
		 * a message to the user at roughly 12 second intervals.
		 */
790 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
791 if (hp->sw_bmsr & BMSR_LSTATUS) {
792 /* Wheee, it's up, display the link mode in use and put
793 * the timer to sleep.
794 */
795 display_link_mode(hp, tregs);
796 hp->timer_state = asleep;
797 restart_timer = 0;
798 } else {
799 if (hp->timer_ticks >= 10) {
800 netdev_notice(hp->dev,
801 "Auto negotiation successful, link still not completely up.\n");
802 hp->timer_ticks = 0;
803 restart_timer = 1;
804 } else {
805 restart_timer = 1;
806 }
807 }
808 break;
809
810 case ltrywait:
811 /* Making the timeout here too long can make it take
812 * annoyingly long to attempt all of the link mode
813 * permutations, but then again this is essentially
814 * error recovery code for the most part.
815 */
816 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
817 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
818 if (hp->timer_ticks == 1) {
819 if (!is_lucent_phy(hp)) {
				/* Disable the transceiver for this tick; we'll
				 * re-enable it next tick, then check link state
				 * on the following tick.
				 */
823 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
824 happy_meal_tcvr_write(hp, tregs,
825 DP83840_CSCONFIG, hp->sw_csconfig);
826 }
827 restart_timer = 1;
828 break;
829 }
830 if (hp->timer_ticks == 2) {
831 if (!is_lucent_phy(hp)) {
832 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
833 happy_meal_tcvr_write(hp, tregs,
834 DP83840_CSCONFIG, hp->sw_csconfig);
835 }
836 restart_timer = 1;
837 break;
838 }
839 if (hp->sw_bmsr & BMSR_LSTATUS) {
840 /* Force mode selection success. */
841 display_forced_link_mode(hp, tregs);
842 set_happy_link_modes(hp, tregs); /* XXX error? then what? */
843 hp->timer_state = asleep;
844 restart_timer = 0;
845 } else {
846 if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
847 int ret;
848
849 ret = try_next_permutation(hp, tregs);
850 if (ret == -1) {
851 /* Aieee, tried them all, reset the
852 * chip and try all over again.
853 */
854
855 /* Let the user know... */
856 netdev_notice(hp->dev,
857 "Link down, cable problem?\n");
858
859 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
860 goto out;
861 }
862 if (!is_lucent_phy(hp)) {
863 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
864 DP83840_CSCONFIG);
865 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
866 happy_meal_tcvr_write(hp, tregs,
867 DP83840_CSCONFIG, hp->sw_csconfig);
868 }
869 hp->timer_ticks = 0;
870 restart_timer = 1;
871 } else {
872 restart_timer = 1;
873 }
874 }
875 break;
876
877 case asleep:
878 default:
		/* Can't happen... */
880 netdev_err(hp->dev,
881 "Aieee, link timer is asleep but we got one anyways!\n");
882 restart_timer = 0;
883 hp->timer_ticks = 0;
884 hp->timer_state = asleep; /* foo on you */
885 break;
886 }
887
888 if (restart_timer) {
889 hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
890 add_timer(&hp->happy_timer);
891 }
892
893out:
894 spin_unlock_irq(&hp->happy_lock);
895}
896
897#define TX_RESET_TRIES 32
898#define RX_RESET_TRIES 32
899
900/* hp->happy_lock must be held */
901static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
902{
903 int tries = TX_RESET_TRIES;
904
905 HMD("reset...\n");
906
907 /* Would you like to try our SMCC Delux? */
908 hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
909 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
910 udelay(20);
911
912 /* Lettuce, tomato, buggy hardware (no extra charge)? */
913 if (!tries)
		netdev_err(hp->dev, "Transmitter BigMac ATTACK!\n");
915
916 /* Take care. */
917 HMD("done\n");
918}
919
920/* hp->happy_lock must be held */
921static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
922{
923 int tries = RX_RESET_TRIES;
924
925 HMD("reset...\n");
926
927 /* We have a special on GNU/Viking hardware bugs today. */
928 hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
929 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
930 udelay(20);
931
932 /* Will that be all? */
933 if (!tries)
934 netdev_err(hp->dev, "Receiver BigMac ATTACK!\n");
935
936 /* Don't forget your vik_1137125_wa. Have a nice day. */
937 HMD("done\n");
938}
939
940#define STOP_TRIES 16
941
942/* hp->happy_lock must be held */
943static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
944{
945 int tries = STOP_TRIES;
946
947 HMD("reset...\n");
948
949 /* We're consolidating our STB products, it's your lucky day. */
950 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
951 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
952 udelay(20);
953
954 /* Come back next week when we are "Sun Microelectronics". */
955 if (!tries)
956 netdev_err(hp->dev, "Fry guys.\n");
957
958 /* Remember: "Different name, same old buggy as shit hardware." */
959 HMD("done\n");
960}
961
962/* hp->happy_lock must be held */
963static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
964{
965 struct net_device_stats *stats = &hp->dev->stats;
966
967 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
968 hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
969
970 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
971 hme_write32(hp, bregs + BMAC_UNALECTR, 0);
972
973 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
974 hme_write32(hp, bregs + BMAC_GLECTR, 0);
975
976 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
977
978 stats->collisions +=
979 (hme_read32(hp, bregs + BMAC_EXCTR) +
980 hme_read32(hp, bregs + BMAC_LTCTR));
981 hme_write32(hp, bregs + BMAC_EXCTR, 0);
982 hme_write32(hp, bregs + BMAC_LTCTR, 0);
983}
984
985/* Only Sun can take such nice parts and fuck up the programming interface
986 * like this. Good job guys...
987 */
988#define TCVR_RESET_TRIES 16 /* It should reset quickly */
989#define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */
990
991/* hp->happy_lock must be held */
992static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
993{
994 u32 tconfig;
995 int result, tries = TCVR_RESET_TRIES;
996
997 tconfig = hme_read32(hp, tregs + TCVR_CFG);
998 ASD("tcfg=%08x\n", tconfig);
999 if (hp->tcvr_type == external) {
1000 hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
1001 hp->tcvr_type = internal;
1002 hp->paddr = TCV_PADDR_ITX;
1003 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1004 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1005 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1006 if (result == TCVR_FAILURE) {
1007 ASD("phyread_fail\n");
1008 return -1;
1009 }
1010 ASD("external: ISOLATE, phyread_ok, PSELECT\n");
1011 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1012 hp->tcvr_type = external;
1013 hp->paddr = TCV_PADDR_ETX;
1014 } else {
1015 if (tconfig & TCV_CFG_MDIO1) {
1016 hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
1017 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1018 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1019 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1020 if (result == TCVR_FAILURE) {
1021 ASD("phyread_fail>\n");
1022 return -1;
1023 }
1024 ASD("internal: PSELECT, ISOLATE, phyread_ok, ~PSELECT\n");
1025 hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
1026 hp->tcvr_type = internal;
1027 hp->paddr = TCV_PADDR_ITX;
1028 }
1029 }
1030
1031 ASD("BMCR_RESET...\n");
1032 happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
1033
1034 while (--tries) {
1035 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1036 if (result == TCVR_FAILURE)
1037 return -1;
1038 hp->sw_bmcr = result;
1039 if (!(result & BMCR_RESET))
1040 break;
1041 udelay(20);
1042 }
1043 if (!tries) {
1044 ASD("BMCR RESET FAILED!\n");
1045 return -1;
1046 }
1047 ASD("RESET_OK\n");
1048
1049 /* Get fresh copies of the PHY registers. */
1050 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1051 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1052 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1053 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1054
1055 ASD("UNISOLATE...\n");
1056 hp->sw_bmcr &= ~(BMCR_ISOLATE);
1057 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1058
1059 tries = TCVR_UNISOLATE_TRIES;
1060 while (--tries) {
1061 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1062 if (result == TCVR_FAILURE)
1063 return -1;
1064 if (!(result & BMCR_ISOLATE))
1065 break;
1066 udelay(20);
1067 }
1068 if (!tries) {
1069 ASD("UNISOLATE FAILED!\n");
1070 return -1;
1071 }
1072 ASD("SUCCESS and CSCONFIG_DFBYPASS\n");
1073 if (!is_lucent_phy(hp)) {
1074 result = happy_meal_tcvr_read(hp, tregs,
1075 DP83840_CSCONFIG);
1076 happy_meal_tcvr_write(hp, tregs,
1077 DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
1078 }
1079 return 0;
1080}
1081
1082/* Figure out whether we have an internal or external transceiver.
1083 *
1084 * hp->happy_lock must be held
1085 */
1086static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
1087{
1088 unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
1089 u32 reread = hme_read32(hp, tregs + TCVR_CFG);
1090
1091 ASD("tcfg=%08lx\n", tconfig);
1092 if (reread & TCV_CFG_MDIO1) {
1093 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1094 hp->paddr = TCV_PADDR_ETX;
1095 hp->tcvr_type = external;
1096 ASD("not polling, external\n");
1097 } else {
1098 if (reread & TCV_CFG_MDIO0) {
1099 hme_write32(hp, tregs + TCVR_CFG,
1100 tconfig & ~(TCV_CFG_PSELECT));
1101 hp->paddr = TCV_PADDR_ITX;
1102 hp->tcvr_type = internal;
1103 ASD("not polling, internal\n");
1104 } else {
1105 netdev_err(hp->dev,
1106 "Transceiver and a coke please.");
1107 hp->tcvr_type = none; /* Grrr... */
1108 ASD("not polling, none\n");
1109 }
1110 }
1111}
1112
1113/* The receive ring buffers are a bit tricky to get right. Here goes...
1114 *
1115 * The buffers we dma into must be 64 byte aligned. So we use a special
1116 * alloc_skb() routine for the happy meal to allocate 64 bytes more than
1117 * we really need.
1118 *
 * We use skb_reserve() to align the data block we get in the skb. We
 * also program the receiver's ERX_CFG register to use an offset of 2.
 * This empirical constant plus the ethernet header size will always
 * leave us with a nicely aligned ip header once we pass things up to
 * the protocol layers.
1124 *
1125 * The numbers work out to:
1126 *
1127 * Max ethernet frame size 1518
1128 * Ethernet header size 14
1129 * Happy Meal base offset 2
1130 *
 * Say a skb data area is at 0xf001b010, and its allocated size is
 * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
1133 *
1134 * First our alloc_skb() routine aligns the data base to a 64 byte
1135 * boundary. We now have 0xf001b040 as our skb data address. We
1136 * plug this into the receive descriptor address.
1137 *
1138 * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
1139 * So now the data we will end up looking at starts at 0xf001b042. When
1140 * the packet arrives, we will check out the size received and subtract
1141 * this from the skb->length. Then we just pass the packet up to the
1142 * protocols as is, and allocate a new skb to replace this slot we have
1143 * just received from.
1144 *
1145 * The ethernet layer will strip the ether header from the front of the
1146 * skb we just sent to it, this leaves us with the ip header sitting
1147 * nicely aligned at 0xf001b050. Also, for tcp and udp packets the
1148 * Happy Meal has even checksummed the tcp/udp data for us. The 16
1149 * bit checksum is obtained from the low bits of the receive descriptor
1150 * flags, thus:
1151 *
1152 * skb->csum = rxd->rx_flags & 0xffff;
1153 * skb->ip_summed = CHECKSUM_COMPLETE;
1154 *
1155 * before sending off the skb to the protocols, and we are good as gold.
1156 */
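
/* For reference, a minimal sketch of the special alloc_skb() helper the
 * comment above refers to (the real happy_meal_alloc_skb() lives elsewhere
 * in this file): it simply over-allocates by 64 bytes and then reserves up
 * to the next 64 byte boundary, roughly:
 *
 *	skb = alloc_skb(length + 64, gfp_flags);
 *	if (skb) {
 *		int offset = (unsigned long)skb->data & 63;
 *
 *		if (offset)
 *			skb_reserve(skb, 64 - offset);
 *	}
 */
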
1157static void happy_meal_clean_rings(struct happy_meal *hp)
1158{
1159 int i;
1160
1161 for (i = 0; i < RX_RING_SIZE; i++) {
1162 if (hp->rx_skbs[i] != NULL) {
1163 struct sk_buff *skb = hp->rx_skbs[i];
1164 struct happy_meal_rxd *rxd;
1165 u32 dma_addr;
1166
1167 rxd = &hp->happy_block->happy_meal_rxd[i];
1168 dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
1169 dma_unmap_single(hp->dma_dev, dma_addr,
1170 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1171 dev_kfree_skb_any(skb);
1172 hp->rx_skbs[i] = NULL;
1173 }
1174 }
1175
1176 for (i = 0; i < TX_RING_SIZE; i++) {
1177 if (hp->tx_skbs[i] != NULL) {
1178 struct sk_buff *skb = hp->tx_skbs[i];
1179 struct happy_meal_txd *txd;
1180 u32 dma_addr;
1181 int frag;
1182
1183 hp->tx_skbs[i] = NULL;
1184
1185 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1186 txd = &hp->happy_block->happy_meal_txd[i];
1187 dma_addr = hme_read_desc32(hp, &txd->tx_addr);
1188 if (!frag)
1189 dma_unmap_single(hp->dma_dev, dma_addr,
1190 (hme_read_desc32(hp, &txd->tx_flags)
1191 & TXFLAG_SIZE),
1192 DMA_TO_DEVICE);
1193 else
1194 dma_unmap_page(hp->dma_dev, dma_addr,
1195 (hme_read_desc32(hp, &txd->tx_flags)
1196 & TXFLAG_SIZE),
1197 DMA_TO_DEVICE);
1198
1199 if (frag != skb_shinfo(skb)->nr_frags)
1200 i++;
1201 }
1202
1203 dev_kfree_skb_any(skb);
1204 }
1205 }
1206}
1207
1208/* hp->happy_lock must be held */
1209static void happy_meal_init_rings(struct happy_meal *hp)
1210{
1211 struct hmeal_init_block *hb = hp->happy_block;
1212 int i;
1213
1214 HMD("counters to zero\n");
1215 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1216
1217 /* Free any skippy bufs left around in the rings. */
1218 happy_meal_clean_rings(hp);
1219
1220 /* Now get new skippy bufs for the receive ring. */
1221 HMD("init rxring\n");
1222 for (i = 0; i < RX_RING_SIZE; i++) {
1223 struct sk_buff *skb;
1224 u32 mapping;
1225
1226 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1227 if (!skb) {
1228 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1229 continue;
1230 }
1231 hp->rx_skbs[i] = skb;
1232
1233 /* Because we reserve afterwards. */
1234 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1235 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1236 DMA_FROM_DEVICE);
1237 if (dma_mapping_error(hp->dma_dev, mapping)) {
1238 dev_kfree_skb_any(skb);
1239 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1240 continue;
1241 }
1242 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1243 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1244 mapping);
1245 skb_reserve(skb, RX_OFFSET);
1246 }
1247
1248 HMD("init txring\n");
1249 for (i = 0; i < TX_RING_SIZE; i++)
1250 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1251
1252 HMD("done\n");
1253}
1254
1255/* hp->happy_lock must be held */
1256static int happy_meal_init(struct happy_meal *hp)
1257{
1258 const unsigned char *e = &hp->dev->dev_addr[0];
1259 void __iomem *gregs = hp->gregs;
1260 void __iomem *etxregs = hp->etxregs;
1261 void __iomem *erxregs = hp->erxregs;
1262 void __iomem *bregs = hp->bigmacregs;
1263 void __iomem *tregs = hp->tcvregs;
1264 const char *bursts = "64";
1265 u32 regtmp, rxcfg;
1266
1267 /* If auto-negotiation timer is running, kill it. */
1268 del_timer(&hp->happy_timer);
1269
1270 HMD("happy_flags[%08x]\n", hp->happy_flags);
1271 if (!(hp->happy_flags & HFLAG_INIT)) {
1272 HMD("set HFLAG_INIT\n");
1273 hp->happy_flags |= HFLAG_INIT;
1274 happy_meal_get_counters(hp, bregs);
1275 }
1276
1277 /* Stop transmitter and receiver. */
1278 HMD("to happy_meal_stop\n");
1279 happy_meal_stop(hp, gregs);
1280
1281 /* Alloc and reset the tx/rx descriptor chains. */
1282 HMD("to happy_meal_init_rings\n");
1283 happy_meal_init_rings(hp);
1284
1285 /* See if we can enable the MIF frame on this card to speak to the DP83840. */
1286 if (hp->happy_flags & HFLAG_FENABLE) {
1287 HMD("use frame old[%08x]\n",
1288 hme_read32(hp, tregs + TCVR_CFG));
1289 hme_write32(hp, tregs + TCVR_CFG,
1290 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1291 } else {
1292 HMD("use bitbang old[%08x]\n",
1293 hme_read32(hp, tregs + TCVR_CFG));
1294 hme_write32(hp, tregs + TCVR_CFG,
1295 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1296 }
1297
1298 /* Check the state of the transceiver. */
1299 HMD("to happy_meal_transceiver_check\n");
1300 happy_meal_transceiver_check(hp, tregs);
1301
1302 /* Put the Big Mac into a sane state. */
1303 switch(hp->tcvr_type) {
1304 case none:
1305 /* Cannot operate if we don't know the transceiver type! */
1306 HMD("AAIEEE no transceiver type, EAGAIN\n");
1307 return -EAGAIN;
1308
1309 case internal:
1310 /* Using the MII buffers. */
1311 HMD("internal, using MII\n");
1312 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1313 break;
1314
1315 case external:
1316 /* Not using the MII, disable it. */
1317 HMD("external, disable MII\n");
1318 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1319 break;
1320 }
1321
1322 if (happy_meal_tcvr_reset(hp, tregs))
1323 return -EAGAIN;
1324
1325 /* Reset the Happy Meal Big Mac transceiver and the receiver. */
1326 HMD("tx/rx reset\n");
1327 happy_meal_tx_reset(hp, bregs);
1328 happy_meal_rx_reset(hp, bregs);
1329
1330 /* Set jam size and inter-packet gaps to reasonable defaults. */
1331 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1332 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1333 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1334
1335 /* Load up the MAC address and random seed. */
1336
	/* The docs recommend using the 10 LSBs of our MAC here. */
1338 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1339
1340 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1341 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1342 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
1343
1344 if ((hp->dev->flags & IFF_ALLMULTI) ||
1345 (netdev_mc_count(hp->dev) > 64)) {
1346 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1347 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1348 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1349 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1350 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1351 u16 hash_table[4];
1352 struct netdev_hw_addr *ha;
1353 u32 crc;
1354
1355 memset(hash_table, 0, sizeof(hash_table));
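		/* Standard 64-bit multicast hash filter: the top 6 bits of the
		 * little-endian CRC32 of each address select one bit in the
		 * 4 x 16-bit hash table registers. For example, a CRC whose top
		 * 6 bits are 0x25 sets bit 5 of hash_table[2].
		 */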
1356 netdev_for_each_mc_addr(ha, hp->dev) {
1357 crc = ether_crc_le(6, ha->addr);
1358 crc >>= 26;
1359 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1360 }
1361 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1362 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1363 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1364 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1365 } else {
1366 hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1367 hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1368 hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1369 hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1370 }
1371
1372 /* Set the RX and TX ring ptrs. */
1373 HMD("ring ptrs rxr[%08x] txr[%08x]\n",
1374 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1375 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1376 hme_write32(hp, erxregs + ERX_RING,
1377 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1378 hme_write32(hp, etxregs + ETX_RING,
1379 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1380
1381 /* Parity issues in the ERX unit of some HME revisions can cause some
1382 * registers to not be written unless their parity is even. Detect such
1383 * lost writes and simply rewrite with a low bit set (which will be ignored
1384 * since the rxring needs to be 2K aligned).
1385 */
1386 if (hme_read32(hp, erxregs + ERX_RING) !=
1387 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1388 hme_write32(hp, erxregs + ERX_RING,
1389 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1390 | 0x4);
1391
1392 /* Set the supported burst sizes. */
1393#ifndef CONFIG_SPARC
1394 /* It is always PCI and can handle 64byte bursts. */
1395 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1396#else
1397 if ((hp->happy_bursts & DMA_BURST64) &&
1398 ((hp->happy_flags & HFLAG_PCI) != 0
1399#ifdef CONFIG_SBUS
1400 || sbus_can_burst64()
1401#endif
1402 || 0)) {
1403 u32 gcfg = GREG_CFG_BURST64;
1404
1405 /* I have no idea if I should set the extended
1406 * transfer mode bit for Cheerio, so for now I
1407 * do not. -DaveM
1408 */
1409#ifdef CONFIG_SBUS
1410 if ((hp->happy_flags & HFLAG_PCI) == 0) {
1411 struct platform_device *op = hp->happy_dev;
1412 if (sbus_can_dma_64bit()) {
1413 sbus_set_sbus64(&op->dev,
1414 hp->happy_bursts);
1415 gcfg |= GREG_CFG_64BIT;
1416 }
1417 }
1418#endif
1419
1420 bursts = "64";
1421 hme_write32(hp, gregs + GREG_CFG, gcfg);
1422 } else if (hp->happy_bursts & DMA_BURST32) {
1423 bursts = "32";
1424 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1425 } else if (hp->happy_bursts & DMA_BURST16) {
1426 bursts = "16";
1427 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1428 } else {
1429 bursts = "XXX";
1430 hme_write32(hp, gregs + GREG_CFG, 0);
1431 }
1432#endif /* CONFIG_SPARC */
1433
1434 HMD("old[%08x] bursts<%s>\n",
1435 hme_read32(hp, gregs + GREG_CFG), bursts);
1436
1437 /* Turn off interrupts we do not want to hear. */
1438 hme_write32(hp, gregs + GREG_IMASK,
1439 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1440 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1441
1442 /* Set the transmit ring buffer size. */
1443 HMD("tx rsize=%d oreg[%08x]\n", (int)TX_RING_SIZE,
1444 hme_read32(hp, etxregs + ETX_RSIZE));
1445 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1446
1447 /* Enable transmitter DVMA. */
1448 HMD("tx dma enable old[%08x]\n", hme_read32(hp, etxregs + ETX_CFG));
1449 hme_write32(hp, etxregs + ETX_CFG,
1450 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
1451
	/* This chip really rots: sometimes when you write to the receiver's
	 * control registers, not all the bits get there properly. I cannot
	 * think of a sane way to provide complete coverage for this hardware
	 * bug yet.
	 */
1457 HMD("erx regs bug old[%08x]\n",
1458 hme_read32(hp, erxregs + ERX_CFG));
1459 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1460 regtmp = hme_read32(hp, erxregs + ERX_CFG);
1461 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1462 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1463 netdev_err(hp->dev,
1464 "Eieee, rx config register gets greasy fries.\n");
1465 netdev_err(hp->dev,
1466 "Trying to set %08x, reread gives %08x\n",
1467 ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1468 /* XXX Should return failure here... */
1469 }
1470
1471 /* Enable Big Mac hash table filter. */
1472 HMD("enable hash rx_cfg_old[%08x]\n",
1473 hme_read32(hp, bregs + BMAC_RXCFG));
1474 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1475 if (hp->dev->flags & IFF_PROMISC)
1476 rxcfg |= BIGMAC_RXCFG_PMISC;
1477 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1478
1479 /* Let the bits settle in the chip. */
1480 udelay(10);
1481
1482 /* Ok, configure the Big Mac transmitter. */
1483 HMD("BIGMAC init\n");
1484 regtmp = 0;
1485 if (hp->happy_flags & HFLAG_FULL)
1486 regtmp |= BIGMAC_TXCFG_FULLDPLX;
1487
1488 /* Don't turn on the "don't give up" bit for now. It could cause hme
1489 * to deadlock with the PHY if a Jabber occurs.
1490 */
1491 hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
1492
1493 /* Give up after 16 TX attempts. */
1494 hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1495
1496 /* Enable the output drivers no matter what. */
1497 regtmp = BIGMAC_XCFG_ODENABLE;
1498
1499 /* If card can do lance mode, enable it. */
1500 if (hp->happy_flags & HFLAG_LANCE)
1501 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1502
1503 /* Disable the MII buffers if using external transceiver. */
1504 if (hp->tcvr_type == external)
1505 regtmp |= BIGMAC_XCFG_MIIDISAB;
1506
1507 HMD("XIF config old[%08x]\n", hme_read32(hp, bregs + BMAC_XIFCFG));
1508 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1509
1510 /* Start things up. */
1511 HMD("tx old[%08x] and rx [%08x] ON!\n",
1512 hme_read32(hp, bregs + BMAC_TXCFG),
1513 hme_read32(hp, bregs + BMAC_RXCFG));
1514
1515 /* Set larger TX/RX size to allow for 802.1q */
1516 hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
1517 hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
1518
1519 hme_write32(hp, bregs + BMAC_TXCFG,
1520 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1521 hme_write32(hp, bregs + BMAC_RXCFG,
1522 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1523
1524 /* Get the autonegotiation started, and the watch timer ticking. */
1525 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1526
1527 /* Success. */
1528 return 0;
1529}
1530
1531/* hp->happy_lock must be held */
1532static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1533{
1534 void __iomem *tregs = hp->tcvregs;
1535 void __iomem *bregs = hp->bigmacregs;
1536 void __iomem *gregs = hp->gregs;
1537
1538 happy_meal_stop(hp, gregs);
1539 if (hp->happy_flags & HFLAG_FENABLE)
1540 hme_write32(hp, tregs + TCVR_CFG,
1541 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1542 else
1543 hme_write32(hp, tregs + TCVR_CFG,
1544 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1545 happy_meal_transceiver_check(hp, tregs);
1546 switch(hp->tcvr_type) {
1547 case none:
1548 return;
1549 case internal:
1550 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1551 break;
1552 case external:
1553 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1554 break;
1555 }
1556 if (happy_meal_tcvr_reset(hp, tregs))
1557 return;
1558
1559 /* Latch PHY registers as of now. */
1560 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1561 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1562
1563 /* Advertise everything we can support. */
1564 if (hp->sw_bmsr & BMSR_10HALF)
1565 hp->sw_advertise |= (ADVERTISE_10HALF);
1566 else
1567 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1568
1569 if (hp->sw_bmsr & BMSR_10FULL)
1570 hp->sw_advertise |= (ADVERTISE_10FULL);
1571 else
1572 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1573 if (hp->sw_bmsr & BMSR_100HALF)
1574 hp->sw_advertise |= (ADVERTISE_100HALF);
1575 else
1576 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1577 if (hp->sw_bmsr & BMSR_100FULL)
1578 hp->sw_advertise |= (ADVERTISE_100FULL);
1579 else
1580 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1581
1582 /* Update the PHY advertisement register. */
1583 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1584}
1585
1586/* Once status is latched (by happy_meal_interrupt) it is cleared by
1587 * the hardware, so we cannot re-read it and get a correct value.
1588 *
1589 * hp->happy_lock must be held
1590 */
1591static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1592{
1593 int reset = 0;
1594
1595 /* Only print messages for non-counter related interrupts. */
1596 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1597 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1598 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1599 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1600 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1601 GREG_STAT_SLVPERR))
1602 netdev_err(hp->dev,
1603 "Error interrupt for happy meal, status = %08x\n",
1604 status);
1605
1606 if (status & GREG_STAT_RFIFOVF) {
1607 /* Receive FIFO overflow is harmless and the hardware will take
1608 care of it, just some packets are lost. Who cares. */
1609 netdev_dbg(hp->dev, "Happy Meal receive FIFO overflow.\n");
1610 }
1611
1612 if (status & GREG_STAT_STSTERR) {
1613 /* BigMAC SQE link test failed. */
1614 netdev_err(hp->dev, "Happy Meal BigMAC SQE test failed.\n");
1615 reset = 1;
1616 }
1617
1618 if (status & GREG_STAT_TFIFO_UND) {
1619 /* Transmit FIFO underrun, again DMA error likely. */
1620 netdev_err(hp->dev,
1621 "Happy Meal transmitter FIFO underrun, DMA error.\n");
1622 reset = 1;
1623 }
1624
1625 if (status & GREG_STAT_MAXPKTERR) {
1626 /* Driver error, tried to transmit something larger
1627 * than ethernet max mtu.
1628 */
1629 netdev_err(hp->dev, "Happy Meal MAX Packet size error.\n");
1630 reset = 1;
1631 }
1632
1633 if (status & GREG_STAT_NORXD) {
1634 /* This is harmless, it just means the system is
1635 * quite loaded and the incoming packet rate was
1636 * faster than the interrupt handler could keep up
1637 * with.
1638 */
1639 netdev_info(hp->dev,
1640 "Happy Meal out of receive descriptors, packet dropped.\n");
1641 }
1642
1643 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1644 /* All sorts of DMA receive errors. */
1645 netdev_err(hp->dev, "Happy Meal rx DMA errors [ %s%s%s]\n",
1646 status & GREG_STAT_RXERR ? "GenericError " : "",
1647 status & GREG_STAT_RXPERR ? "ParityError " : "",
1648 status & GREG_STAT_RXTERR ? "RxTagBotch " : "");
1649 reset = 1;
1650 }
1651
1652 if (status & GREG_STAT_EOPERR) {
1653 /* Driver bug, didn't set EOP bit in tx descriptor given
1654 * to the happy meal.
1655 */
1656 netdev_err(hp->dev,
1657 "EOP not set in happy meal transmit descriptor!\n");
1658 reset = 1;
1659 }
1660
1661 if (status & GREG_STAT_MIFIRQ) {
1662 /* MIF signalled an interrupt, were we polling it? */
1663 netdev_err(hp->dev, "Happy Meal MIF interrupt.\n");
1664 }
1665
1666 if (status &
1667 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1668 /* All sorts of transmit DMA errors. */
1669 netdev_err(hp->dev, "Happy Meal tx DMA errors [ %s%s%s%s]\n",
1670 status & GREG_STAT_TXEACK ? "GenericError " : "",
1671 status & GREG_STAT_TXLERR ? "LateError " : "",
1672 status & GREG_STAT_TXPERR ? "ParityError " : "",
1673 status & GREG_STAT_TXTERR ? "TagBotch " : "");
1674 reset = 1;
1675 }
1676
1677 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
		/* Bus or parity error when the cpu accessed happy meal registers
		 * or its internal FIFOs. Should never see this.
		 */
1681 netdev_err(hp->dev,
1682 "Happy Meal register access SBUS slave (%s) error.\n",
1683 (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1684 reset = 1;
1685 }
1686
1687 if (reset) {
1688 netdev_notice(hp->dev, "Resetting...\n");
1689 happy_meal_init(hp);
1690 return 1;
1691 }
1692 return 0;
1693}
1694
1695/* hp->happy_lock must be held */
1696static void happy_meal_tx(struct happy_meal *hp)
1697{
1698 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1699 struct happy_meal_txd *this;
1700 struct net_device *dev = hp->dev;
1701 int elem;
1702
1703 elem = hp->tx_old;
1704 while (elem != hp->tx_new) {
1705 struct sk_buff *skb;
1706 u32 flags, dma_addr, dma_len;
1707 int frag;
1708
1709 netdev_vdbg(hp->dev, "TX[%d]\n", elem);
1710 this = &txbase[elem];
1711 flags = hme_read_desc32(hp, &this->tx_flags);
1712 if (flags & TXFLAG_OWN)
1713 break;
1714 skb = hp->tx_skbs[elem];
1715 if (skb_shinfo(skb)->nr_frags) {
1716 int last;
1717
1718 last = elem + skb_shinfo(skb)->nr_frags;
1719 last &= (TX_RING_SIZE - 1);
1720 flags = hme_read_desc32(hp, &txbase[last].tx_flags);
1721 if (flags & TXFLAG_OWN)
1722 break;
1723 }
1724 hp->tx_skbs[elem] = NULL;
1725 dev->stats.tx_bytes += skb->len;
1726
1727 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1728 dma_addr = hme_read_desc32(hp, &this->tx_addr);
1729 dma_len = hme_read_desc32(hp, &this->tx_flags);
1730
1731 dma_len &= TXFLAG_SIZE;
1732 if (!frag)
1733 dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1734 else
1735 dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1736
1737 elem = NEXT_TX(elem);
1738 this = &txbase[elem];
1739 }
1740
1741 dev_consume_skb_irq(skb);
1742 dev->stats.tx_packets++;
1743 }
1744 hp->tx_old = elem;
1745
1746 if (netif_queue_stopped(dev) &&
1747 TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
1748 netif_wake_queue(dev);
1749}
1750
/* Originally I used to handle the allocation failure by just giving back
 * that one ring buffer to the happy meal. The problem is that usually when
 * that condition is triggered, the happy meal expects you to do something
 * reasonable with all of the packets it has DMA'd in. So now I just drop
 * the entire ring when we cannot get a new skb and give them all back to
 * the happy meal; maybe things will be "happier" now.
 *
 * hp->happy_lock must be held
 */
1760static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1761{
1762 struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
1763 struct happy_meal_rxd *this;
1764 int elem = hp->rx_new, drops = 0;
1765 u32 flags;
1766
1767 this = &rxbase[elem];
1768 while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
1769 struct sk_buff *skb;
1770 int len = flags >> 16;
1771 u16 csum = flags & RXFLAG_CSUM;
1772 u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
1773
1774 /* Check for errors. */
1775 if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
1776 netdev_vdbg(dev, "RX[%d ERR(%08x)]", elem, flags);
1777 dev->stats.rx_errors++;
1778 if (len < ETH_ZLEN)
1779 dev->stats.rx_length_errors++;
1780 if (len & (RXFLAG_OVERFLOW >> 16)) {
1781 dev->stats.rx_over_errors++;
1782 dev->stats.rx_fifo_errors++;
1783 }
1784
1785 /* Return it to the Happy meal. */
1786 drop_it:
1787 dev->stats.rx_dropped++;
1788 hme_write_rxd(hp, this,
1789 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1790 dma_addr);
1791 goto next;
1792 }
1793 skb = hp->rx_skbs[elem];
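		/* Two strategies: for large frames, hand the current buffer up the
		 * stack and map a fresh replacement into the ring; for small frames,
		 * copy into a new skb and recycle the existing DMA buffer.
		 */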
1794 if (len > RX_COPY_THRESHOLD) {
1795 struct sk_buff *new_skb;
1796 u32 mapping;
1797
1798 /* Now refill the entry, if we can. */
1799 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1800 if (new_skb == NULL) {
1801 drops++;
1802 goto drop_it;
1803 }
1804 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1805 mapping = dma_map_single(hp->dma_dev, new_skb->data,
1806 RX_BUF_ALLOC_SIZE,
1807 DMA_FROM_DEVICE);
1808 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
1809 dev_kfree_skb_any(new_skb);
1810 drops++;
1811 goto drop_it;
1812 }
1813
1814 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1815 hp->rx_skbs[elem] = new_skb;
1816 hme_write_rxd(hp, this,
1817 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1818 mapping);
1819 skb_reserve(new_skb, RX_OFFSET);
1820
1821 /* Trim the original skb for the netif. */
1822 skb_trim(skb, len);
1823 } else {
1824 struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
1825
1826 if (copy_skb == NULL) {
1827 drops++;
1828 goto drop_it;
1829 }
1830
1831 skb_reserve(copy_skb, 2);
1832 skb_put(copy_skb, len);
1833 dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1834 skb_copy_from_linear_data(skb, copy_skb->data, len);
1835 dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1836 /* Reuse original ring buffer. */
1837 hme_write_rxd(hp, this,
1838 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1839 dma_addr);
1840
1841 skb = copy_skb;
1842 }
1843
1844		/* This card is _fucking_ hot: it checksums the whole packet in hardware. */
1845 skb->csum = csum_unfold(~(__force __sum16)htons(csum));
1846 skb->ip_summed = CHECKSUM_COMPLETE;
1847
1848 netdev_vdbg(dev, "RX[%d len=%d csum=%4x]", elem, len, csum);
1849 skb->protocol = eth_type_trans(skb, dev);
1850 netif_rx(skb);
1851
1852 dev->stats.rx_packets++;
1853 dev->stats.rx_bytes += len;
1854 next:
1855 elem = NEXT_RX(elem);
1856 this = &rxbase[elem];
1857 }
1858 hp->rx_new = elem;
1859 if (drops)
1860		netdev_info(hp->dev, "Memory squeeze, dropping packets.\n");
1861}
1862
1863static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
1864{
1865 struct net_device *dev = dev_id;
1866 struct happy_meal *hp = netdev_priv(dev);
1867 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
1868
1869 HMD("status=%08x\n", happy_status);
1870 if (!happy_status)
1871 return IRQ_NONE;
1872
1873 spin_lock(&hp->happy_lock);
1874
1875 if (happy_status & GREG_STAT_ERRORS) {
1876 if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
1877 goto out;
1878 }
1879
1880 if (happy_status & GREG_STAT_TXALL)
1881 happy_meal_tx(hp);
1882
1883 if (happy_status & GREG_STAT_RXTOHOST)
1884 happy_meal_rx(hp, dev);
1885
1886 HMD("done\n");
1887out:
1888 spin_unlock(&hp->happy_lock);
1889
1890 return IRQ_HANDLED;
1891}
1892
1893static int happy_meal_open(struct net_device *dev)
1894{
1895 struct happy_meal *hp = netdev_priv(dev);
1896 int res;
1897
1898 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
1899 dev->name, dev);
1900 if (res) {
1901 netdev_err(dev, "Can't order irq %d to go.\n", hp->irq);
1902 return res;
1903 }
1904
1905 HMD("to happy_meal_init\n");
1906
1907 spin_lock_irq(&hp->happy_lock);
1908 res = happy_meal_init(hp);
1909 spin_unlock_irq(&hp->happy_lock);
1910
1911 if (res)
1912 free_irq(hp->irq, dev);
1913 return res;
1914}
1915
1916static int happy_meal_close(struct net_device *dev)
1917{
1918 struct happy_meal *hp = netdev_priv(dev);
1919
1920 spin_lock_irq(&hp->happy_lock);
1921 happy_meal_stop(hp, hp->gregs);
1922 happy_meal_clean_rings(hp);
1923
1924 /* If auto-negotiation timer is running, kill it. */
1925 del_timer(&hp->happy_timer);
1926
1927 spin_unlock_irq(&hp->happy_lock);
1928
1929 free_irq(hp->irq, dev);
1930
1931 return 0;
1932}
1933
1934static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
1935{
1936 struct happy_meal *hp = netdev_priv(dev);
1937
1938 netdev_err(dev, "transmit timed out, resetting\n");
1939 tx_dump_log();
1940 netdev_err(dev, "Happy Status %08x TX[%08x:%08x]\n",
1941 hme_read32(hp, hp->gregs + GREG_STAT),
1942 hme_read32(hp, hp->etxregs + ETX_CFG),
1943 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
1944
1945 spin_lock_irq(&hp->happy_lock);
1946 happy_meal_init(hp);
1947 spin_unlock_irq(&hp->happy_lock);
1948
1949 netif_wake_queue(dev);
1950}
1951
1952static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
1953 u32 first_len, u32 first_entry, u32 entry)
1954{
1955 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1956
1957 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
1958
1959 first_entry = NEXT_TX(first_entry);
1960 while (first_entry != entry) {
1961 struct happy_meal_txd *this = &txbase[first_entry];
1962 u32 addr, len;
1963
1964 addr = hme_read_desc32(hp, &this->tx_addr);
1965 len = hme_read_desc32(hp, &this->tx_flags);
1966 len &= TXFLAG_SIZE;
1967		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);

		/* Advance to the next mapped descriptor, otherwise this loop never terminates. */
		first_entry = NEXT_TX(first_entry);
1968	}
1969}
1970
1971static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
1972 struct net_device *dev)
1973{
1974 struct happy_meal *hp = netdev_priv(dev);
1975 int entry;
1976 u32 tx_flags;
1977
1978 tx_flags = TXFLAG_OWN;
1979 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1980 const u32 csum_start_off = skb_checksum_start_offset(skb);
1981 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
1982
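		/* Hardware checksum offload: the descriptor flags carry the offset
		 * where checksumming starts (CSBUFBEGIN) and the offset where the
		 * result is to be stuffed (CSLOCATION).
		 */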
1983 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
1984 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
1985 ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
1986 }
1987
1988 spin_lock_irq(&hp->happy_lock);
1989
1990 if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
1991 netif_stop_queue(dev);
1992 spin_unlock_irq(&hp->happy_lock);
1993 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
1994 return NETDEV_TX_BUSY;
1995 }
1996
1997 entry = hp->tx_new;
1998 netdev_vdbg(dev, "SX<l[%d]e[%d]>\n", skb->len, entry);
1999 hp->tx_skbs[entry] = skb;
2000
2001 if (skb_shinfo(skb)->nr_frags == 0) {
2002 u32 mapping, len;
2003
2004 len = skb->len;
2005 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2006 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2007 goto out_dma_error;
2008 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2009 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2010 (tx_flags | (len & TXFLAG_SIZE)),
2011 mapping);
2012 entry = NEXT_TX(entry);
2013 } else {
2014 u32 first_len, first_mapping;
2015 int frag, first_entry = entry;
2016
2017 /* We must give this initial chunk to the device last.
2018 * Otherwise we could race with the device.
2019 */
2020 first_len = skb_headlen(skb);
2021 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2022 DMA_TO_DEVICE);
2023 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2024 goto out_dma_error;
2025 entry = NEXT_TX(entry);
2026
2027 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
2028 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
2029 u32 len, mapping, this_txflags;
2030
2031 len = skb_frag_size(this_frag);
2032 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2033 0, len, DMA_TO_DEVICE);
2034 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2035 unmap_partial_tx_skb(hp, first_mapping, first_len,
2036 first_entry, entry);
2037 goto out_dma_error;
2038 }
2039 this_txflags = tx_flags;
2040 if (frag == skb_shinfo(skb)->nr_frags - 1)
2041 this_txflags |= TXFLAG_EOP;
2042 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2043 (this_txflags | (len & TXFLAG_SIZE)),
2044 mapping);
2045 entry = NEXT_TX(entry);
2046 }
2047 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
2048 (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
2049 first_mapping);
2050 }
2051
2052 hp->tx_new = entry;
2053
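	/* Keep room for a maximally fragmented skb before accepting more packets. */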
2054 if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
2055 netif_stop_queue(dev);
2056
2057 /* Get it going. */
2058 hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
2059
2060 spin_unlock_irq(&hp->happy_lock);
2061
2062 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2063 return NETDEV_TX_OK;
2064
2065out_dma_error:
2066 hp->tx_skbs[hp->tx_new] = NULL;
2067 spin_unlock_irq(&hp->happy_lock);
2068
2069 dev_kfree_skb_any(skb);
2070 dev->stats.tx_dropped++;
2071 return NETDEV_TX_OK;
2072}
2073
2074static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2075{
2076 struct happy_meal *hp = netdev_priv(dev);
2077
2078 spin_lock_irq(&hp->happy_lock);
2079 happy_meal_get_counters(hp, hp->bigmacregs);
2080 spin_unlock_irq(&hp->happy_lock);
2081
2082 return &dev->stats;
2083}
2084
2085static void happy_meal_set_multicast(struct net_device *dev)
2086{
2087 struct happy_meal *hp = netdev_priv(dev);
2088 void __iomem *bregs = hp->bigmacregs;
2089 struct netdev_hw_addr *ha;
2090 u32 crc;
2091
2092 spin_lock_irq(&hp->happy_lock);
2093
2094 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2095 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2096 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2097 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2098 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2099 } else if (dev->flags & IFF_PROMISC) {
2100 hme_write32(hp, bregs + BMAC_RXCFG,
2101 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2102 } else {
2103 u16 hash_table[4];
2104
2105 memset(hash_table, 0, sizeof(hash_table));
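		/* 64-entry multicast hash filter: the top six bits of the
		 * little-endian CRC select one bit across the four 16-bit
		 * BMAC_HTABLE registers.
		 */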
2106 netdev_for_each_mc_addr(ha, dev) {
2107 crc = ether_crc_le(6, ha->addr);
2108 crc >>= 26;
2109 hash_table[crc >> 4] |= 1 << (crc & 0xf);
2110 }
2111 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2112 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2113 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2114 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2115 }
2116
2117 spin_unlock_irq(&hp->happy_lock);
2118}
2119
2120/* Ethtool support... */
2121static int hme_get_link_ksettings(struct net_device *dev,
2122 struct ethtool_link_ksettings *cmd)
2123{
2124 struct happy_meal *hp = netdev_priv(dev);
2125 u32 speed;
2126 u32 supported;
2127
2128 supported =
2129 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2130 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2131 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2132
2133 /* XXX hardcoded stuff for now */
2134 cmd->base.port = PORT_TP; /* XXX no MII support */
2135 cmd->base.phy_address = 0; /* XXX fixed PHYAD */
2136
2137 /* Record PHY settings. */
2138 spin_lock_irq(&hp->happy_lock);
2139 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2140 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2141 spin_unlock_irq(&hp->happy_lock);
2142
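	/* With autoneg enabled, derive speed and duplex from the link partner
	 * ability bits just read; otherwise report whatever was forced in BMCR.
	 */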
2143 if (hp->sw_bmcr & BMCR_ANENABLE) {
2144 cmd->base.autoneg = AUTONEG_ENABLE;
2145 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2146 SPEED_100 : SPEED_10);
2147 if (speed == SPEED_100)
2148 cmd->base.duplex =
2149 (hp->sw_lpa & (LPA_100FULL)) ?
2150 DUPLEX_FULL : DUPLEX_HALF;
2151 else
2152 cmd->base.duplex =
2153 (hp->sw_lpa & (LPA_10FULL)) ?
2154 DUPLEX_FULL : DUPLEX_HALF;
2155 } else {
2156 cmd->base.autoneg = AUTONEG_DISABLE;
2157 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2158 cmd->base.duplex =
2159 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2160 DUPLEX_FULL : DUPLEX_HALF;
2161 }
2162 cmd->base.speed = speed;
2163 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2164 supported);
2165
2166 return 0;
2167}
2168
2169static int hme_set_link_ksettings(struct net_device *dev,
2170 const struct ethtool_link_ksettings *cmd)
2171{
2172 struct happy_meal *hp = netdev_priv(dev);
2173
2174 /* Verify the settings we care about. */
2175 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2176 cmd->base.autoneg != AUTONEG_DISABLE)
2177 return -EINVAL;
2178 if (cmd->base.autoneg == AUTONEG_DISABLE &&
2179 ((cmd->base.speed != SPEED_100 &&
2180 cmd->base.speed != SPEED_10) ||
2181 (cmd->base.duplex != DUPLEX_HALF &&
2182 cmd->base.duplex != DUPLEX_FULL)))
2183 return -EINVAL;
2184
2185 /* Ok, do it to it. */
2186 spin_lock_irq(&hp->happy_lock);
2187 del_timer(&hp->happy_timer);
2188 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2189 spin_unlock_irq(&hp->happy_lock);
2190
2191 return 0;
2192}
2193
2194static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2195{
2196 struct happy_meal *hp = netdev_priv(dev);
2197
2198 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
2199 if (hp->happy_flags & HFLAG_PCI) {
2200 struct pci_dev *pdev = hp->happy_dev;
2201 strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
2202 }
2203#ifdef CONFIG_SBUS
2204 else {
2205 const struct linux_prom_registers *regs;
2206 struct platform_device *op = hp->happy_dev;
2207 regs = of_get_property(op->dev.of_node, "regs", NULL);
2208 if (regs)
2209 snprintf(info->bus_info, sizeof(info->bus_info),
2210 "SBUS:%d",
2211 regs->which_io);
2212 }
2213#endif
2214}
2215
2216static u32 hme_get_link(struct net_device *dev)
2217{
2218 struct happy_meal *hp = netdev_priv(dev);
2219
2220 spin_lock_irq(&hp->happy_lock);
2221	hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
2222 spin_unlock_irq(&hp->happy_lock);
2223
2224 return hp->sw_bmsr & BMSR_LSTATUS;
2225}
2226
2227static const struct ethtool_ops hme_ethtool_ops = {
2228 .get_drvinfo = hme_get_drvinfo,
2229 .get_link = hme_get_link,
2230 .get_link_ksettings = hme_get_link_ksettings,
2231 .set_link_ksettings = hme_set_link_ksettings,
2232};
2233
2234#ifdef CONFIG_SBUS
2235/* Given a happy meal sbus device, find its quattro parent.
2236 * If none exists, allocate and return a new one.
2237 *
2238 * Return NULL on failure.
2239 */
2240static struct quattro *quattro_sbus_find(struct platform_device *child)
2241{
2242 struct device *parent = child->dev.parent;
2243 struct platform_device *op;
2244 struct quattro *qp;
2245
2246 op = to_platform_device(parent);
2247 qp = platform_get_drvdata(op);
2248 if (qp)
2249 return qp;
2250
2251 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2252 if (!qp)
2253 return NULL;
2254
2255 qp->quattro_dev = child;
2256 qp->next = qfe_sbus_list;
2257 qfe_sbus_list = qp;
2258
2259 platform_set_drvdata(op, qp);
2260 return qp;
2261}
2262#endif /* CONFIG_SBUS */
2263
2264#ifdef CONFIG_PCI
2265static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2266{
2267 int i;
2268 struct pci_dev *bdev = pdev->bus->self;
2269 struct quattro *qp;
2270
2271 if (!bdev)
2272 return ERR_PTR(-ENODEV);
2273
2274 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2275 struct pci_dev *qpdev = qp->quattro_dev;
2276
2277 if (qpdev == bdev)
2278 return qp;
2279 }
2280
2281 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2282 if (!qp)
2283 return ERR_PTR(-ENOMEM);
2284
2285 for (i = 0; i < 4; i++)
2286 qp->happy_meals[i] = NULL;
2287
2288 qp->quattro_dev = bdev;
2289 qp->next = qfe_pci_list;
2290 qfe_pci_list = qp;
2291
2292 /* No range tricks necessary on PCI. */
2293 qp->nranges = 0;
2294 return qp;
2295}
2296#endif /* CONFIG_PCI */
2297
2298static const struct net_device_ops hme_netdev_ops = {
2299 .ndo_open = happy_meal_open,
2300 .ndo_stop = happy_meal_close,
2301 .ndo_start_xmit = happy_meal_start_xmit,
2302 .ndo_tx_timeout = happy_meal_tx_timeout,
2303 .ndo_get_stats = happy_meal_get_stats,
2304 .ndo_set_rx_mode = happy_meal_set_multicast,
2305 .ndo_set_mac_address = eth_mac_addr,
2306 .ndo_validate_addr = eth_validate_addr,
2307};
2308
2309#ifdef CONFIG_PCI
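/* A Quattro (QFE) card presents as four HME chips behind a DEC 21153
 * PCI-PCI bridge; detect that topology.
 */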
2310static int is_quattro_p(struct pci_dev *pdev)
2311{
2312 struct pci_dev *busdev = pdev->bus->self;
2313 struct pci_dev *this_pdev;
2314 int n_hmes;
2315
2316 if (!busdev || busdev->vendor != PCI_VENDOR_ID_DEC ||
2317 busdev->device != PCI_DEVICE_ID_DEC_21153)
2318 return 0;
2319
2320 n_hmes = 0;
2321 list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
2322 if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
2323 this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
2324 n_hmes++;
2325 }
2326
2327 if (n_hmes != 4)
2328 return 0;
2329
2330 return 1;
2331}
2332
2333/* Fetch MAC address from vital product data of PCI ROM. */
2334static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
2335{
2336 int this_offset;
2337
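	/* Scan the expansion ROM for the VPD 'NA' (network address) keyword
	 * with a six byte payload; the MAC address bytes follow the match.
	 */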
2338 for (this_offset = 0x20; this_offset < len; this_offset++) {
2339 void __iomem *p = rom_base + this_offset;
2340
2341 if (readb(p + 0) != 0x90 ||
2342 readb(p + 1) != 0x00 ||
2343 readb(p + 2) != 0x09 ||
2344 readb(p + 3) != 0x4e ||
2345 readb(p + 4) != 0x41 ||
2346 readb(p + 5) != 0x06)
2347 continue;
2348
2349 this_offset += 6;
2350 p += 6;
2351
2352 if (index == 0) {
2353 for (int i = 0; i < 6; i++)
2354 dev_addr[i] = readb(p + i);
2355 return 1;
2356 }
2357 index--;
2358 }
2359 return 0;
2360}
2361
2362static void __maybe_unused get_hme_mac_nonsparc(struct pci_dev *pdev,
2363 unsigned char *dev_addr)
2364{
2365 void __iomem *p;
2366 size_t size;
2367
2368 p = pci_map_rom(pdev, &size);
2369 if (p) {
2370 int index = 0;
2371 int found;
2372
2373 if (is_quattro_p(pdev))
2374 index = PCI_SLOT(pdev->devfn);
2375
2376 found = readb(p) == 0x55 &&
2377 readb(p + 1) == 0xaa &&
2378 find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
2379 pci_unmap_rom(pdev, p);
2380 if (found)
2381 return;
2382 }
2383
2384 /* Sun MAC prefix then 3 random bytes. */
2385 dev_addr[0] = 0x08;
2386 dev_addr[1] = 0x00;
2387 dev_addr[2] = 0x20;
2388 get_random_bytes(&dev_addr[3], 3);
2389}
2390#endif
2391
2392static void happy_meal_addr_init(struct happy_meal *hp,
2393 struct device_node *dp, int qfe_slot)
2394{
2395 int i;
2396
2397 for (i = 0; i < 6; i++) {
2398 if (macaddr[i] != 0)
2399 break;
2400 }
2401
2402 if (i < 6) { /* a mac address was given */
2403 u8 addr[ETH_ALEN];
2404
2405 for (i = 0; i < 6; i++)
2406 addr[i] = macaddr[i];
2407 eth_hw_addr_set(hp->dev, addr);
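		/* Bump the last octet so the next device probed gets a distinct address. */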
2408 macaddr[5]++;
2409 } else {
2410#ifdef CONFIG_SPARC
2411 const unsigned char *addr;
2412 int len;
2413
2414		/* If the user did not specify a MAC address, prefer the
2415		 * Quattro "local-mac-address" property...
2416 */
2417 if (qfe_slot != -1) {
2418 addr = of_get_property(dp, "local-mac-address", &len);
2419 if (addr && len == 6) {
2420 eth_hw_addr_set(hp->dev, addr);
2421 return;
2422 }
2423 }
2424
2425 eth_hw_addr_set(hp->dev, idprom->id_ethaddr);
2426#else
2427 u8 addr[ETH_ALEN];
2428
2429 get_hme_mac_nonsparc(hp->happy_dev, addr);
2430 eth_hw_addr_set(hp->dev, addr);
2431#endif
2432 }
2433}
2434
2435static int happy_meal_common_probe(struct happy_meal *hp,
2436 struct device_node *dp)
2437{
2438 struct net_device *dev = hp->dev;
2439 int err;
2440
2441#ifdef CONFIG_SPARC
2442 hp->hm_revision = of_getintprop_default(dp, "hm-rev", hp->hm_revision);
2443#endif
2444
2445 /* Now enable the feature flags we can. */
2446 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
2447 hp->happy_flags |= HFLAG_20_21;
2448 else if (hp->hm_revision != 0xa0)
2449 hp->happy_flags |= HFLAG_NOT_A0;
2450
2451 hp->happy_block = dmam_alloc_coherent(hp->dma_dev, PAGE_SIZE,
2452 &hp->hblock_dvma, GFP_KERNEL);
2453 if (!hp->happy_block)
2454 return -ENOMEM;
2455
2456 /* Force check of the link first time we are brought up. */
2457 hp->linkcheck = 0;
2458
2459 /* Force timer state to 'asleep' with count of zero. */
2460 hp->timer_state = asleep;
2461 hp->timer_ticks = 0;
2462
2463 timer_setup(&hp->happy_timer, happy_meal_timer, 0);
2464
2465 dev->netdev_ops = &hme_netdev_ops;
2466 dev->watchdog_timeo = 5 * HZ;
2467 dev->ethtool_ops = &hme_ethtool_ops;
2468
2469 /* Happy Meal can do it all... */
2470 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2471 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2472
2474 /* Grrr, Happy Meal comes up by default not advertising
2475 * full duplex 100baseT capabilities, fix this.
2476 */
2477 spin_lock_irq(&hp->happy_lock);
2478 happy_meal_set_initial_advertisement(hp);
2479 spin_unlock_irq(&hp->happy_lock);
2480
2481 err = devm_register_netdev(hp->dma_dev, dev);
2482 if (err)
2483 dev_err(hp->dma_dev, "Cannot register net device, aborting.\n");
2484 return err;
2485}
2486
2487#ifdef CONFIG_SBUS
2488static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2489{
2490 struct device_node *dp = op->dev.of_node, *sbus_dp;
2491 struct quattro *qp = NULL;
2492 struct happy_meal *hp;
2493 struct net_device *dev;
2494 int qfe_slot = -1;
2495 int err;
2496
2497 sbus_dp = op->dev.parent->of_node;
2498
2499 /* We can match PCI devices too, do not accept those here. */
2500 if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi"))
2501 return -ENODEV;
2502
2503 if (is_qfe) {
2504 qp = quattro_sbus_find(op);
2505 if (qp == NULL)
2506 return -ENODEV;
2507 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2508 if (qp->happy_meals[qfe_slot] == NULL)
2509 break;
2510 if (qfe_slot == 4)
2511 return -ENODEV;
2512 }
2513
2514 dev = devm_alloc_etherdev(&op->dev, sizeof(struct happy_meal));
2515 if (!dev)
2516 return -ENOMEM;
2517 SET_NETDEV_DEV(dev, &op->dev);
2518
2519 hp = netdev_priv(dev);
2520 hp->dev = dev;
2521 hp->happy_dev = op;
2522 hp->dma_dev = &op->dev;
2523 happy_meal_addr_init(hp, dp, qfe_slot);
2524
2525 spin_lock_init(&hp->happy_lock);
2526
2527 if (qp != NULL) {
2528 hp->qfe_parent = qp;
2529 hp->qfe_ent = qfe_slot;
2530 qp->happy_meals[qfe_slot] = dev;
2531 }
2532
2533 hp->gregs = devm_platform_ioremap_resource(op, 0);
2534 if (IS_ERR(hp->gregs)) {
2535 dev_err(&op->dev, "Cannot map global registers.\n");
2536 err = PTR_ERR(hp->gregs);
2537 goto err_out_clear_quattro;
2538 }
2539
2540 hp->etxregs = devm_platform_ioremap_resource(op, 1);
2541 if (IS_ERR(hp->etxregs)) {
2542 dev_err(&op->dev, "Cannot map MAC TX registers.\n");
2543 err = PTR_ERR(hp->etxregs);
2544 goto err_out_clear_quattro;
2545 }
2546
2547 hp->erxregs = devm_platform_ioremap_resource(op, 2);
2548 if (IS_ERR(hp->erxregs)) {
2549 dev_err(&op->dev, "Cannot map MAC RX registers.\n");
2550 err = PTR_ERR(hp->erxregs);
2551 goto err_out_clear_quattro;
2552 }
2553
2554 hp->bigmacregs = devm_platform_ioremap_resource(op, 3);
2555 if (IS_ERR(hp->bigmacregs)) {
2556 dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
2557 err = PTR_ERR(hp->bigmacregs);
2558 goto err_out_clear_quattro;
2559 }
2560
2561 hp->tcvregs = devm_platform_ioremap_resource(op, 4);
2562 if (IS_ERR(hp->tcvregs)) {
2563 dev_err(&op->dev, "Cannot map TCVR registers.\n");
2564 err = PTR_ERR(hp->tcvregs);
2565 goto err_out_clear_quattro;
2566 }
2567
2568 hp->hm_revision = 0xa0;
2569
2570 if (qp != NULL)
2571 hp->happy_flags |= HFLAG_QUATTRO;
2572
2573 hp->irq = op->archdata.irqs[0];
2574
2575 /* Get the supported DVMA burst sizes from our Happy SBUS. */
2576 hp->happy_bursts = of_getintprop_default(sbus_dp,
2577 "burst-sizes", 0x00);
2578
2579#ifdef CONFIG_PCI
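	/* The hp->read32()/write32() indirect accessors are only compiled when
	 * both SBUS and PCI support are enabled; single-bus builds use the
	 * hme_read32()/hme_write32() macros directly, hence this guard.
	 */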
2580 /* Hook up SBUS register/descriptor accessors. */
2581 hp->read_desc32 = sbus_hme_read_desc32;
2582 hp->write_txd = sbus_hme_write_txd;
2583 hp->write_rxd = sbus_hme_write_rxd;
2584 hp->read32 = sbus_hme_read32;
2585 hp->write32 = sbus_hme_write32;
2586#endif
2587
2588 err = happy_meal_common_probe(hp, dp);
2589 if (err)
2590 goto err_out_clear_quattro;
2591
2592 platform_set_drvdata(op, hp);
2593
2594 if (qfe_slot != -1)
2595 netdev_info(dev,
2596 "Quattro HME slot %d (SBUS) 10/100baseT Ethernet %pM\n",
2597 qfe_slot, dev->dev_addr);
2598 else
2599 netdev_info(dev, "HAPPY MEAL (SBUS) 10/100baseT Ethernet %pM\n",
2600 dev->dev_addr);
2601
2602 return 0;
2603
2604err_out_clear_quattro:
2605 if (qp)
2606 qp->happy_meals[qfe_slot] = NULL;
2607 return err;
2608}
2609#endif
2610
2611#ifdef CONFIG_PCI
2612static int happy_meal_pci_probe(struct pci_dev *pdev,
2613 const struct pci_device_id *ent)
2614{
2615 struct device_node *dp = NULL;
2616 struct quattro *qp = NULL;
2617 struct happy_meal *hp;
2618 struct net_device *dev;
2619 void __iomem *hpreg_base;
2620 struct resource *hpreg_res;
2621 char prom_name[64];
2622 int qfe_slot = -1;
2623 int err = -ENODEV;
2624
2625 /* Now make sure pci_dev cookie is there. */
2626#ifdef CONFIG_SPARC
2627 dp = pci_device_to_OF_node(pdev);
2628 snprintf(prom_name, sizeof(prom_name), "%pOFn", dp);
2629#else
2630 if (is_quattro_p(pdev))
2631 strcpy(prom_name, "SUNW,qfe");
2632 else
2633 strcpy(prom_name, "SUNW,hme");
2634#endif
2635
2636 err = pcim_enable_device(pdev);
2637 if (err)
2638 return err;
2639 pci_set_master(pdev);
2640
2641 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
2642 qp = quattro_pci_find(pdev);
2643 if (IS_ERR(qp))
2644 return PTR_ERR(qp);
2645
2646 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2647 if (!qp->happy_meals[qfe_slot])
2648 break;
2649
2650 if (qfe_slot == 4)
2651 return -ENODEV;
2652 }
2653
2654 dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct happy_meal));
2655 if (!dev)
2656 return -ENOMEM;
2657 SET_NETDEV_DEV(dev, &pdev->dev);
2658
2659 hp = netdev_priv(dev);
2660 hp->dev = dev;
2661 hp->happy_dev = pdev;
2662 hp->dma_dev = &pdev->dev;
2663
2664 spin_lock_init(&hp->happy_lock);
2665
2666 if (qp != NULL) {
2667 hp->qfe_parent = qp;
2668 hp->qfe_ent = qfe_slot;
2669 qp->happy_meals[qfe_slot] = dev;
2670 }
2671
2672 err = -EINVAL;
2673 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
2674 dev_err(&pdev->dev,
2675 "Cannot find proper PCI device base address.\n");
2676 goto err_out_clear_quattro;
2677 }
2678
2679 hpreg_res = devm_request_mem_region(&pdev->dev,
2680 pci_resource_start(pdev, 0),
2681 pci_resource_len(pdev, 0),
2682 DRV_NAME);
2683 if (!hpreg_res) {
2684 err = -EBUSY;
2685 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
2686 goto err_out_clear_quattro;
2687 }
2688
2689 hpreg_base = pcim_iomap(pdev, 0, 0x8000);
2690 if (!hpreg_base) {
2691 err = -ENOMEM;
2692 dev_err(&pdev->dev, "Unable to remap card memory.\n");
2693 goto err_out_clear_quattro;
2694 }
2695
2696 happy_meal_addr_init(hp, dp, qfe_slot);
2697
2698 /* Layout registers. */
2699 hp->gregs = (hpreg_base + 0x0000UL);
2700 hp->etxregs = (hpreg_base + 0x2000UL);
2701 hp->erxregs = (hpreg_base + 0x4000UL);
2702 hp->bigmacregs = (hpreg_base + 0x6000UL);
2703 hp->tcvregs = (hpreg_base + 0x7000UL);
2704
2705 if (IS_ENABLED(CONFIG_SPARC))
2706 hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
2707 else
2708 hp->hm_revision = 0x20;
2709
2710 if (qp != NULL)
2711 hp->happy_flags |= HFLAG_QUATTRO;
2712
2713 /* And of course, indicate this is PCI. */
2714 hp->happy_flags |= HFLAG_PCI;
2715
2716#ifdef CONFIG_SPARC
2717 /* Assume PCI happy meals can handle all burst sizes. */
2718 hp->happy_bursts = DMA_BURSTBITS;
2719#endif
2720 hp->irq = pdev->irq;
2721
2722#ifdef CONFIG_SBUS
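	/* As in the SBUS probe, the indirect accessors exist only in dual SBUS+PCI builds. */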
2723 /* Hook up PCI register/descriptor accessors. */
2724 hp->read_desc32 = pci_hme_read_desc32;
2725 hp->write_txd = pci_hme_write_txd;
2726 hp->write_rxd = pci_hme_write_rxd;
2727 hp->read32 = pci_hme_read32;
2728 hp->write32 = pci_hme_write32;
2729#endif
2730
2731 err = happy_meal_common_probe(hp, dp);
2732 if (err)
2733 goto err_out_clear_quattro;
2734
2735 pci_set_drvdata(pdev, hp);
2736
2737 if (!qfe_slot) {
2738 struct pci_dev *qpdev = qp->quattro_dev;
2739
2740 prom_name[0] = 0;
2741 if (!strncmp(dev->name, "eth", 3)) {
2742 int i = simple_strtoul(dev->name + 3, NULL, 10);
2743 sprintf(prom_name, "-%d", i + 3);
2744 }
2745 netdev_info(dev,
2746 "%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet bridge %04x.%04x\n",
2747 prom_name, qpdev->vendor, qpdev->device);
2748 }
2749
2750 if (qfe_slot != -1)
2751 netdev_info(dev,
2752 "Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
2753 qfe_slot, dev->dev_addr);
2754 else
2755 netdev_info(dev,
2756 "HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet %pM\n",
2757 dev->dev_addr);
2758
2759 return 0;
2760
2761err_out_clear_quattro:
2762 if (qp != NULL)
2763 qp->happy_meals[qfe_slot] = NULL;
2764 return err;
2765}
2766
2767static const struct pci_device_id happymeal_pci_ids[] = {
2768 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
2769 { } /* Terminating entry */
2770};
2771
2772MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
2773
2774static struct pci_driver hme_pci_driver = {
2775 .name = "hme",
2776 .id_table = happymeal_pci_ids,
2777 .probe = happy_meal_pci_probe,
2778};
2779
2780static int __init happy_meal_pci_init(void)
2781{
2782 return pci_register_driver(&hme_pci_driver);
2783}
2784
2785static void happy_meal_pci_exit(void)
2786{
2787 pci_unregister_driver(&hme_pci_driver);
2788
2789 while (qfe_pci_list) {
2790 struct quattro *qfe = qfe_pci_list;
2791 struct quattro *next = qfe->next;
2792
2793 kfree(qfe);
2794
2795 qfe_pci_list = next;
2796 }
2797}
2798
2799#endif
2800
2801#ifdef CONFIG_SBUS
2802static const struct of_device_id hme_sbus_match[];
2803static int hme_sbus_probe(struct platform_device *op)
2804{
2805 const struct of_device_id *match;
2806 struct device_node *dp = op->dev.of_node;
2807 const char *model = of_get_property(dp, "model", NULL);
2808 int is_qfe;
2809
2810 match = of_match_device(hme_sbus_match, &op->dev);
2811 if (!match)
2812 return -EINVAL;
2813 is_qfe = (match->data != NULL);
2814
2815 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
2816 is_qfe = 1;
2817
2818 return happy_meal_sbus_probe_one(op, is_qfe);
2819}
2820
2821static const struct of_device_id hme_sbus_match[] = {
2822 {
2823 .name = "SUNW,hme",
2824 },
2825 {
2826 .name = "SUNW,qfe",
2827 .data = (void *) 1,
2828 },
2829 {
2830 .name = "qfe",
2831 .data = (void *) 1,
2832 },
2833 {},
2834};
2835
2836MODULE_DEVICE_TABLE(of, hme_sbus_match);
2837
2838static struct platform_driver hme_sbus_driver = {
2839 .driver = {
2840 .name = "hme",
2841 .of_match_table = hme_sbus_match,
2842 },
2843 .probe = hme_sbus_probe,
2844};
2845
2846static int __init happy_meal_sbus_init(void)
2847{
2848 return platform_driver_register(&hme_sbus_driver);
2849}
2850
2851static void happy_meal_sbus_exit(void)
2852{
2853 platform_driver_unregister(&hme_sbus_driver);
2854
2855 while (qfe_sbus_list) {
2856 struct quattro *qfe = qfe_sbus_list;
2857 struct quattro *next = qfe->next;
2858
2859 kfree(qfe);
2860
2861 qfe_sbus_list = next;
2862 }
2863}
2864#endif
2865
2866static int __init happy_meal_probe(void)
2867{
2868 int err = 0;
2869
2870#ifdef CONFIG_SBUS
2871 err = happy_meal_sbus_init();
2872#endif
2873#ifdef CONFIG_PCI
2874 if (!err) {
2875 err = happy_meal_pci_init();
2876#ifdef CONFIG_SBUS
2877 if (err)
2878 happy_meal_sbus_exit();
2879#endif
2880 }
2881#endif
2882
2883 return err;
2884}
2885
2886
2887static void __exit happy_meal_exit(void)
2888{
2889#ifdef CONFIG_SBUS
2890 happy_meal_sbus_exit();
2891#endif
2892#ifdef CONFIG_PCI
2893 happy_meal_pci_exit();
2894#endif
2895}
2896
2897module_init(happy_meal_probe);
2898module_exit(happy_meal_exit);
1// SPDX-License-Identifier: GPL-2.0
2/* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
3 * auto carrier detecting ethernet driver. Also known as the
4 * "Happy Meal Ethernet" found on SunSwift SBUS cards.
5 *
6 * Copyright (C) 1996, 1998, 1999, 2002, 2003,
7 * 2006, 2008 David S. Miller (davem@davemloft.net)
8 *
9 * Changes :
10 * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
11 * - port to non-sparc architectures. Tested only on x86 and
12 * only currently works with QFE PCI cards.
13 * - ability to specify the MAC address at module load time by passing this
14 * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/fcntl.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/in.h>
24#include <linux/slab.h>
25#include <linux/string.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ethtool.h>
29#include <linux/mii.h>
30#include <linux/crc32.h>
31#include <linux/random.h>
32#include <linux/errno.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/mm.h>
37#include <linux/bitops.h>
38#include <linux/dma-mapping.h>
39
40#include <asm/io.h>
41#include <asm/dma.h>
42#include <asm/byteorder.h>
43
44#ifdef CONFIG_SPARC
45#include <linux/of.h>
46#include <linux/of_device.h>
47#include <asm/idprom.h>
48#include <asm/openprom.h>
49#include <asm/oplib.h>
50#include <asm/prom.h>
51#include <asm/auxio.h>
52#endif
53#include <linux/uaccess.h>
54
55#include <asm/irq.h>
56
57#ifdef CONFIG_PCI
58#include <linux/pci.h>
59#endif
60
61#include "sunhme.h"
62
63#define DRV_NAME "sunhme"
64
65MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
66MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
67MODULE_LICENSE("GPL");
68
69static int macaddr[6];
70
71/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
72module_param_array(macaddr, int, NULL, 0);
73MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
74
75#ifdef CONFIG_SBUS
76static struct quattro *qfe_sbus_list;
77#endif
78
79#ifdef CONFIG_PCI
80static struct quattro *qfe_pci_list;
81#endif
82
83#define hme_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
84#define HMD hme_debug
85
86/* "Auto Switch Debug" aka phy debug */
87#if 1
88#define ASD hme_debug
89#else
90#define ASD(...)
91#endif
92
93#if 0
94struct hme_tx_logent {
95 unsigned int tstamp;
96 int tx_new, tx_old;
97 unsigned int action;
98#define TXLOG_ACTION_IRQ 0x01
99#define TXLOG_ACTION_TXMIT 0x02
100#define TXLOG_ACTION_TBUSY 0x04
101#define TXLOG_ACTION_NBUFS 0x08
102 unsigned int status;
103};
104#define TX_LOG_LEN 128
105static struct hme_tx_logent tx_log[TX_LOG_LEN];
106static int txlog_cur_entry;
107static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
108{
109 struct hme_tx_logent *tlp;
110 unsigned long flags;
111
112 local_irq_save(flags);
113 tlp = &tx_log[txlog_cur_entry];
114 tlp->tstamp = (unsigned int)jiffies;
115 tlp->tx_new = hp->tx_new;
116 tlp->tx_old = hp->tx_old;
117 tlp->action = a;
118 tlp->status = s;
119 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
120 local_irq_restore(flags);
121}
122static __inline__ void tx_dump_log(void)
123{
124 int i, this;
125
126 this = txlog_cur_entry;
127 for (i = 0; i < TX_LOG_LEN; i++) {
128 pr_err("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
129 tx_log[this].tstamp,
130 tx_log[this].tx_new, tx_log[this].tx_old,
131 tx_log[this].action, tx_log[this].status);
132 this = (this + 1) & (TX_LOG_LEN - 1);
133 }
134}
135#else
136#define tx_add_log(hp, a, s)
137#define tx_dump_log()
138#endif
139
140#define DEFAULT_IPG0 16 /* For lance-mode only */
141#define DEFAULT_IPG1 8 /* For all modes */
142#define DEFAULT_IPG2 4 /* For all modes */
143#define DEFAULT_JAMSIZE 4 /* Toe jam */
144
145/* NOTE: In the descriptor writes one _must_ write the address
146 * member _first_. The card must not be allowed to see
147 * the updated descriptor flags until the address is
148 * correct. I've added a write memory barrier between
149 * the two stores so that I can sleep well at night... -DaveM
150 */
151
152#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
153static void sbus_hme_write32(void __iomem *reg, u32 val)
154{
155 sbus_writel(val, reg);
156}
157
158static u32 sbus_hme_read32(void __iomem *reg)
159{
160 return sbus_readl(reg);
161}
162
163static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
164{
165 rxd->rx_addr = (__force hme32)addr;
166 dma_wmb();
167 rxd->rx_flags = (__force hme32)flags;
168}
169
170static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
171{
172 txd->tx_addr = (__force hme32)addr;
173 dma_wmb();
174 txd->tx_flags = (__force hme32)flags;
175}
176
177static u32 sbus_hme_read_desc32(hme32 *p)
178{
179 return (__force u32)*p;
180}
181
182static void pci_hme_write32(void __iomem *reg, u32 val)
183{
184 writel(val, reg);
185}
186
187static u32 pci_hme_read32(void __iomem *reg)
188{
189 return readl(reg);
190}
191
192static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
193{
194 rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
195 dma_wmb();
196 rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
197}
198
199static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
200{
201 txd->tx_addr = (__force hme32)cpu_to_le32(addr);
202 dma_wmb();
203 txd->tx_flags = (__force hme32)cpu_to_le32(flags);
204}
205
206static u32 pci_hme_read_desc32(hme32 *p)
207{
208 return le32_to_cpup((__le32 *)p);
209}
210
211#define hme_write32(__hp, __reg, __val) \
212 ((__hp)->write32((__reg), (__val)))
213#define hme_read32(__hp, __reg) \
214 ((__hp)->read32(__reg))
215#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
216 ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
217#define hme_write_txd(__hp, __txd, __flags, __addr) \
218 ((__hp)->write_txd((__txd), (__flags), (__addr)))
219#define hme_read_desc32(__hp, __p) \
220 ((__hp)->read_desc32(__p))
221#else
222#ifdef CONFIG_SBUS
223/* SBUS only compilation */
224#define hme_write32(__hp, __reg, __val) \
225 sbus_writel((__val), (__reg))
226#define hme_read32(__hp, __reg) \
227 sbus_readl(__reg)
228#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
229do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
230 dma_wmb(); \
231 (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
232} while(0)
233#define hme_write_txd(__hp, __txd, __flags, __addr) \
234do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
235 dma_wmb(); \
236 (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
237} while(0)
238#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
239#else
240/* PCI only compilation */
241#define hme_write32(__hp, __reg, __val) \
242 writel((__val), (__reg))
243#define hme_read32(__hp, __reg) \
244 readl(__reg)
245#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
246do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
247 dma_wmb(); \
248 (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
249} while(0)
250#define hme_write_txd(__hp, __txd, __flags, __addr) \
251do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
252 dma_wmb(); \
253 (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
254} while(0)
255static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
256{
257 return le32_to_cpup((__le32 *)p);
258}
259#endif
260#endif
261
262
263/* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
264static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
265{
266 hme_write32(hp, tregs + TCVR_BBDATA, bit);
267 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
268 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
269}
270
271#if 0
272static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
273{
274 u32 ret;
275
276 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
277 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
278 ret = hme_read32(hp, tregs + TCVR_CFG);
279 if (internal)
280 ret &= TCV_CFG_MDIO0;
281 else
282 ret &= TCV_CFG_MDIO1;
283
284 return ret;
285}
286#endif
287
288static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
289{
290 u32 retval;
291
292 hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
293 udelay(1);
294 retval = hme_read32(hp, tregs + TCVR_CFG);
295 if (internal)
296 retval &= TCV_CFG_MDIO0;
297 else
298 retval &= TCV_CFG_MDIO1;
299 hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
300
301 return retval;
302}
303
304#define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */
305
306static int happy_meal_bb_read(struct happy_meal *hp,
307 void __iomem *tregs, int reg)
308{
309 u32 tmp;
310 int retval = 0;
311 int i;
312
313 /* Enable the MIF BitBang outputs. */
314 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
315
316 /* Force BitBang into the idle state. */
317 for (i = 0; i < 32; i++)
318 BB_PUT_BIT(hp, tregs, 1);
319
320 /* Give it the read sequence. */
321 BB_PUT_BIT(hp, tregs, 0);
322 BB_PUT_BIT(hp, tregs, 1);
323 BB_PUT_BIT(hp, tregs, 1);
324 BB_PUT_BIT(hp, tregs, 0);
325
326 /* Give it the PHY address. */
327 tmp = hp->paddr & 0xff;
328 for (i = 4; i >= 0; i--)
329 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
330
331 /* Tell it what register we want to read. */
332 tmp = (reg & 0xff);
333 for (i = 4; i >= 0; i--)
334 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
335
336 /* Close down the MIF BitBang outputs. */
337 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
338
339 /* Now read in the value. */
340 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
341 for (i = 15; i >= 0; i--)
342 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
343 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
344 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
345 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
346 ASD("reg=%d value=%x\n", reg, retval);
347 return retval;
348}
349
350static void happy_meal_bb_write(struct happy_meal *hp,
351 void __iomem *tregs, int reg,
352 unsigned short value)
353{
354 u32 tmp;
355 int i;
356
357 ASD("reg=%d value=%x\n", reg, value);
358
359 /* Enable the MIF BitBang outputs. */
360 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
361
362 /* Force BitBang into the idle state. */
363 for (i = 0; i < 32; i++)
364 BB_PUT_BIT(hp, tregs, 1);
365
366 /* Give it write sequence. */
367 BB_PUT_BIT(hp, tregs, 0);
368 BB_PUT_BIT(hp, tregs, 1);
369 BB_PUT_BIT(hp, tregs, 0);
370 BB_PUT_BIT(hp, tregs, 1);
371
372 /* Give it the PHY address. */
373 tmp = (hp->paddr & 0xff);
374 for (i = 4; i >= 0; i--)
375 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
376
377 /* Tell it what register we will be writing. */
378 tmp = (reg & 0xff);
379 for (i = 4; i >= 0; i--)
380 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
381
382 /* Tell it to become ready for the bits. */
383 BB_PUT_BIT(hp, tregs, 1);
384 BB_PUT_BIT(hp, tregs, 0);
385
386 for (i = 15; i >= 0; i--)
387 BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
388
389 /* Close down the MIF BitBang outputs. */
390 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
391}
392
393#define TCVR_READ_TRIES 16
394
395static int happy_meal_tcvr_read(struct happy_meal *hp,
396 void __iomem *tregs, int reg)
397{
398 int tries = TCVR_READ_TRIES;
399 int retval;
400
401 if (hp->tcvr_type == none) {
402 ASD("no transceiver, value=TCVR_FAILURE\n");
403 return TCVR_FAILURE;
404 }
405
406 if (!(hp->happy_flags & HFLAG_FENABLE)) {
407 ASD("doing bit bang\n");
408 return happy_meal_bb_read(hp, tregs, reg);
409 }
410
411 hme_write32(hp, tregs + TCVR_FRAME,
412 (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
413 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
414 udelay(20);
415 if (!tries) {
416 netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n");
417 return TCVR_FAILURE;
418 }
419 retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
420 ASD("reg=0x%02x value=%04x\n", reg, retval);
421 return retval;
422}
423
424#define TCVR_WRITE_TRIES 16
425
426static void happy_meal_tcvr_write(struct happy_meal *hp,
427 void __iomem *tregs, int reg,
428 unsigned short value)
429{
430 int tries = TCVR_WRITE_TRIES;
431
432 ASD("reg=0x%02x value=%04x\n", reg, value);
433
434 /* Welcome to Sun Microsystems, can I take your order please? */
435 if (!(hp->happy_flags & HFLAG_FENABLE)) {
436 happy_meal_bb_write(hp, tregs, reg, value);
437 return;
438 }
439
440 /* Would you like fries with that? */
441 hme_write32(hp, tregs + TCVR_FRAME,
442 (FRAME_WRITE | (hp->paddr << 23) |
443 ((reg & 0xff) << 18) | (value & 0xffff)));
444 while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
445 udelay(20);
446
447 /* Anything else? */
448 if (!tries)
449 netdev_err(hp->dev, "Aieee, transceiver MIF write bolixed\n");
450
451 /* Fifty-two cents is your change, have a nice day. */
452}
453
454/* Auto negotiation. The scheme is very simple. We have a timer routine
455 * that keeps watching the auto negotiation process as it progresses.
456 * The DP83840 is first told to start doing it's thing, we set up the time
457 * and place the timer state machine in it's initial state.
458 *
459 * Here the timer peeks at the DP83840 status registers at each click to see
460 * if the auto negotiation has completed, we assume here that the DP83840 PHY
461 * will time out at some point and just tell us what (didn't) happen. For
462 * complete coverage we only allow so many of the ticks at this level to run,
463 * when this has expired we print a warning message and try another strategy.
464 * This "other" strategy is to force the interface into various speed/duplex
465 * configurations and we stop when we see a link-up condition before the
466 * maximum number of "peek" ticks have occurred.
467 *
468 * Once a valid link status has been detected we configure the BigMAC and
469 * the rest of the Happy Meal to speak the most efficient protocol we could
470 * get a clean link for. The priority for link configurations, highest first
471 * is:
472 * 100 Base-T Full Duplex
473 * 100 Base-T Half Duplex
474 * 10 Base-T Full Duplex
475 * 10 Base-T Half Duplex
476 *
477 * We start a new timer now, after a successful auto negotiation status has
478 * been detected. This timer just waits for the link-up bit to get set in
479 * the BMCR of the DP83840. When this occurs we print a kernel log message
480 * describing the link type in use and the fact that it is up.
481 *
482 * If a fatal error of some sort is signalled and detected in the interrupt
483 * service routine, and the chip is reset, or the link is ifconfig'd down
484 * and then back up, this entire process repeats itself all over again.
485 */
486static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
487{
488 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
489
490 /* Downgrade from full to half duplex. Only possible
491 * via ethtool.
492 */
493 if (hp->sw_bmcr & BMCR_FULLDPLX) {
494 hp->sw_bmcr &= ~(BMCR_FULLDPLX);
495 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
496 return 0;
497 }
498
499 /* Downgrade from 100 to 10. */
500 if (hp->sw_bmcr & BMCR_SPEED100) {
501 hp->sw_bmcr &= ~(BMCR_SPEED100);
502 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
503 return 0;
504 }
505
506 /* We've tried everything. */
507 return -1;
508}
509
510static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
511{
512 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
513
514 netdev_info(hp->dev,
515 "Link is up using %s transceiver at %dMb/s, %s Duplex.\n",
516 hp->tcvr_type == external ? "external" : "internal",
517 hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10,
518 hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");
519}
520
521static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
522{
523 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
524
525 netdev_info(hp->dev,
526 "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n",
527 hp->tcvr_type == external ? "external" : "internal",
528 hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10,
529 hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half");
530}
531
532static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
533{
534 int full;
535
536 /* All we care about is making sure the bigmac tx_cfg has a
537 * proper duplex setting.
538 */
539 if (hp->timer_state == arbwait) {
540 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
541 if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
542 goto no_response;
543 if (hp->sw_lpa & LPA_100FULL)
544 full = 1;
545 else if (hp->sw_lpa & LPA_100HALF)
546 full = 0;
547 else if (hp->sw_lpa & LPA_10FULL)
548 full = 1;
549 else
550 full = 0;
551 } else {
552 /* Forcing a link mode. */
553 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
554 if (hp->sw_bmcr & BMCR_FULLDPLX)
555 full = 1;
556 else
557 full = 0;
558 }
559
560 /* Before changing other bits in the tx_cfg register, and in
561 * general any of other the TX config registers too, you
562 * must:
563 * 1) Clear Enable
564 * 2) Poll with reads until that bit reads back as zero
565 * 3) Make TX configuration changes
566 * 4) Set Enable once more
567 */
568 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
569 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
570 ~(BIGMAC_TXCFG_ENABLE));
571 while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
572 barrier();
573 if (full) {
574 hp->happy_flags |= HFLAG_FULL;
575 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
576 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
577 BIGMAC_TXCFG_FULLDPLX);
578 } else {
579 hp->happy_flags &= ~(HFLAG_FULL);
580 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
581 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
582 ~(BIGMAC_TXCFG_FULLDPLX));
583 }
584 hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
585 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
586 BIGMAC_TXCFG_ENABLE);
587 return 0;
588no_response:
589 return 1;
590}
591
592static int happy_meal_init(struct happy_meal *hp);
593
594static int is_lucent_phy(struct happy_meal *hp)
595{
596 void __iomem *tregs = hp->tcvregs;
597 unsigned short mr2, mr3;
598 int ret = 0;
599
600 mr2 = happy_meal_tcvr_read(hp, tregs, 2);
601 mr3 = happy_meal_tcvr_read(hp, tregs, 3);
602 if ((mr2 & 0xffff) == 0x0180 &&
603 ((mr3 & 0xffff) >> 10) == 0x1d)
604 ret = 1;
605
606 return ret;
607}
608
609static void happy_meal_timer(struct timer_list *t)
610{
611 struct happy_meal *hp = from_timer(hp, t, happy_timer);
612 void __iomem *tregs = hp->tcvregs;
613 int restart_timer = 0;
614
615 spin_lock_irq(&hp->happy_lock);
616
617 hp->timer_ticks++;
618 switch(hp->timer_state) {
619 case arbwait:
620 /* Only allow for 5 ticks, thats 10 seconds and much too
621 * long to wait for arbitration to complete.
622 */
623 if (hp->timer_ticks >= 10) {
624 /* Enter force mode. */
625 do_force_mode:
626 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
627 netdev_notice(hp->dev,
628 "Auto-Negotiation unsuccessful, trying force link mode\n");
629 hp->sw_bmcr = BMCR_SPEED100;
630 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
631
632 if (!is_lucent_phy(hp)) {
633 /* OK, seems we need do disable the transceiver for the first
634 * tick to make sure we get an accurate link state at the
635 * second tick.
636 */
637 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
638 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
639 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
640 }
641 hp->timer_state = ltrywait;
642 hp->timer_ticks = 0;
643 restart_timer = 1;
644 } else {
645 /* Anything interesting happen? */
646 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
647 if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
648 int ret;
649
650 /* Just what we've been waiting for... */
651 ret = set_happy_link_modes(hp, tregs);
652 if (ret) {
653 /* Ooops, something bad happened, go to force
654 * mode.
655 *
656 * XXX Broken hubs which don't support 802.3u
657 * XXX auto-negotiation make this happen as well.
658 */
659 goto do_force_mode;
660 }
661
662 /* Success, at least so far, advance our state engine. */
663 hp->timer_state = lupwait;
664 restart_timer = 1;
665 } else {
666 restart_timer = 1;
667 }
668 }
669 break;
670
671 case lupwait:
672 /* Auto negotiation was successful and we are awaiting a
673 * link up status. I have decided to let this timer run
674 * forever until some sort of error is signalled, reporting
675 * a message to the user at 10 second intervals.
676 */
677 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
678 if (hp->sw_bmsr & BMSR_LSTATUS) {
679 /* Wheee, it's up, display the link mode in use and put
680 * the timer to sleep.
681 */
682 display_link_mode(hp, tregs);
683 hp->timer_state = asleep;
684 restart_timer = 0;
685 } else {
686 if (hp->timer_ticks >= 10) {
687 netdev_notice(hp->dev,
688 "Auto negotiation successful, link still not completely up.\n");
689 hp->timer_ticks = 0;
690 restart_timer = 1;
691 } else {
692 restart_timer = 1;
693 }
694 }
695 break;
696
697 case ltrywait:
698 /* Making the timeout here too long can make it take
699 * annoyingly long to attempt all of the link mode
700 * permutations, but then again this is essentially
701 * error recovery code for the most part.
702 */
703 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
704 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
705 if (hp->timer_ticks == 1) {
706 if (!is_lucent_phy(hp)) {
707 /* Re-enable transceiver, we'll re-enable the transceiver next
708 * tick, then check link state on the following tick.
709 */
710 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
711 happy_meal_tcvr_write(hp, tregs,
712 DP83840_CSCONFIG, hp->sw_csconfig);
713 }
714 restart_timer = 1;
715 break;
716 }
717 if (hp->timer_ticks == 2) {
718 if (!is_lucent_phy(hp)) {
719 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
720 happy_meal_tcvr_write(hp, tregs,
721 DP83840_CSCONFIG, hp->sw_csconfig);
722 }
723 restart_timer = 1;
724 break;
725 }
726 if (hp->sw_bmsr & BMSR_LSTATUS) {
727 /* Force mode selection success. */
728 display_forced_link_mode(hp, tregs);
729 set_happy_link_modes(hp, tregs); /* XXX error? then what? */
730 hp->timer_state = asleep;
731 restart_timer = 0;
732 } else {
733 if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
734 int ret;
735
736 ret = try_next_permutation(hp, tregs);
737 if (ret == -1) {
738 /* Aieee, tried them all, reset the
739 * chip and try all over again.
740 */
741
742 /* Let the user know... */
743 netdev_notice(hp->dev,
744 "Link down, cable problem?\n");
745
746 ret = happy_meal_init(hp);
747 if (ret) {
748 /* ho hum... */
749 netdev_err(hp->dev,
750 "Error, cannot re-init the Happy Meal.\n");
751 }
752 goto out;
753 }
754 if (!is_lucent_phy(hp)) {
755 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
756 DP83840_CSCONFIG);
757 hp->sw_csconfig |= CSCONFIG_TCVDISAB;
758 happy_meal_tcvr_write(hp, tregs,
759 DP83840_CSCONFIG, hp->sw_csconfig);
760 }
761 hp->timer_ticks = 0;
762 restart_timer = 1;
763 } else {
764 restart_timer = 1;
765 }
766 }
767 break;
768
769 case asleep:
770 default:
771 /* Can't happens.... */
772 netdev_err(hp->dev,
773 "Aieee, link timer is asleep but we got one anyways!\n");
774 restart_timer = 0;
775 hp->timer_ticks = 0;
776 hp->timer_state = asleep; /* foo on you */
777 break;
778 }
779
780 if (restart_timer) {
781 hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
782 add_timer(&hp->happy_timer);
783 }
784
785out:
786 spin_unlock_irq(&hp->happy_lock);
787}
788
789#define TX_RESET_TRIES 32
790#define RX_RESET_TRIES 32
791
792/* hp->happy_lock must be held */
793static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
794{
795 int tries = TX_RESET_TRIES;
796
797 HMD("reset...\n");
798
799 /* Would you like to try our SMCC Delux? */
800 hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
801 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
802 udelay(20);
803
804 /* Lettuce, tomato, buggy hardware (no extra charge)? */
805 if (!tries)
806 netdev_err(hp->dev, "Transceiver BigMac ATTACK!");
807
808 /* Take care. */
809 HMD("done\n");
810}
811
812/* hp->happy_lock must be held */
813static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
814{
815 int tries = RX_RESET_TRIES;
816
817 HMD("reset...\n");
818
819 /* We have a special on GNU/Viking hardware bugs today. */
820 hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
821 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
822 udelay(20);
823
824 /* Will that be all? */
825 if (!tries)
826 netdev_err(hp->dev, "Receiver BigMac ATTACK!\n");
827
828 /* Don't forget your vik_1137125_wa. Have a nice day. */
829 HMD("done\n");
830}
831
832#define STOP_TRIES 16
833
834/* hp->happy_lock must be held */
835static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
836{
837 int tries = STOP_TRIES;
838
839 HMD("reset...\n");
840
841 /* We're consolidating our STB products, it's your lucky day. */
842 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
843 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
844 udelay(20);
845
846 /* Come back next week when we are "Sun Microelectronics". */
847 if (!tries)
848 netdev_err(hp->dev, "Fry guys.\n");
849
850 /* Remember: "Different name, same old buggy as shit hardware." */
851 HMD("done\n");
852}
853
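/* Fold the BigMAC error counters into the generic netdev statistics,
 * then clear the hardware counters so the next pass starts from zero.
 */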
854/* hp->happy_lock must be held */
855static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
856{
857 struct net_device_stats *stats = &hp->dev->stats;
858
859 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
860 hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
861
862 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
863 hme_write32(hp, bregs + BMAC_UNALECTR, 0);
864
865 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
866 hme_write32(hp, bregs + BMAC_GLECTR, 0);
867
868 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
869
870 stats->collisions +=
871 (hme_read32(hp, bregs + BMAC_EXCTR) +
872 hme_read32(hp, bregs + BMAC_LTCTR));
873 hme_write32(hp, bregs + BMAC_EXCTR, 0);
874 hme_write32(hp, bregs + BMAC_LTCTR, 0);
875}
876
877/* hp->happy_lock must be held */
878static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
879{
	/* If polling is disabled or not currently active, nothing to do. */
881 if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
882 (HFLAG_POLLENABLE | HFLAG_POLL)) {
883 ASD("not polling, return\n");
884 return;
885 }
886
887 /* Shut up the MIF. */
888 ASD("were polling, mif ints off, polling off\n");
889 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
890
891 /* Turn off polling. */
892 hme_write32(hp, tregs + TCVR_CFG,
893 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
894
895 /* We are no longer polling. */
896 hp->happy_flags &= ~(HFLAG_POLL);
897
898 /* Let the bits set. */
899 udelay(200);
900 ASD("done\n");
901}
902
903/* Only Sun can take such nice parts and fuck up the programming interface
904 * like this. Good job guys...
905 */
906#define TCVR_RESET_TRIES 16 /* It should reset quickly */
907#define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */
908
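/* Select the internal or external transceiver, issue a PHY reset,
 * take the PHY out of isolation, and latch fresh copies of the basic
 * MII registers.  Returns 0 on success and -1 if the PHY cannot be
 * read or the reset never completes.
 */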
909/* hp->happy_lock must be held */
910static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
911{
912 u32 tconfig;
913 int result, tries = TCVR_RESET_TRIES;
914
915 tconfig = hme_read32(hp, tregs + TCVR_CFG);
916 ASD("tcfg=%08x\n", tconfig);
917 if (hp->tcvr_type == external) {
918 hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
919 hp->tcvr_type = internal;
920 hp->paddr = TCV_PADDR_ITX;
921 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
922 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
923 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
924 if (result == TCVR_FAILURE) {
925 ASD("phyread_fail\n");
926 return -1;
927 }
928 ASD("external: ISOLATE, phyread_ok, PSELECT\n");
929 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
930 hp->tcvr_type = external;
931 hp->paddr = TCV_PADDR_ETX;
932 } else {
933 if (tconfig & TCV_CFG_MDIO1) {
934 hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
935 happy_meal_tcvr_write(hp, tregs, MII_BMCR,
936 (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
937 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
938 if (result == TCVR_FAILURE) {
939 ASD("phyread_fail>\n");
940 return -1;
941 }
942 ASD("internal: PSELECT, ISOLATE, phyread_ok, ~PSELECT\n");
943 hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
944 hp->tcvr_type = internal;
945 hp->paddr = TCV_PADDR_ITX;
946 }
947 }
948
949 ASD("BMCR_RESET...\n");
950 happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
951
952 while (--tries) {
953 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
954 if (result == TCVR_FAILURE)
955 return -1;
956 hp->sw_bmcr = result;
957 if (!(result & BMCR_RESET))
958 break;
959 udelay(20);
960 }
961 if (!tries) {
962 ASD("BMCR RESET FAILED!\n");
963 return -1;
964 }
965 ASD("RESET_OK\n");
966
967 /* Get fresh copies of the PHY registers. */
968 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
969 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
970 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
971 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
972
973 ASD("UNISOLATE...\n");
974 hp->sw_bmcr &= ~(BMCR_ISOLATE);
975 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
976
977 tries = TCVR_UNISOLATE_TRIES;
978 while (--tries) {
979 result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
980 if (result == TCVR_FAILURE)
981 return -1;
982 if (!(result & BMCR_ISOLATE))
983 break;
984 udelay(20);
985 }
986 if (!tries) {
987 ASD("UNISOLATE FAILED!\n");
988 return -1;
989 }
990 ASD("SUCCESS and CSCONFIG_DFBYPASS\n");
991 if (!is_lucent_phy(hp)) {
992 result = happy_meal_tcvr_read(hp, tregs,
993 DP83840_CSCONFIG);
994 happy_meal_tcvr_write(hp, tregs,
995 DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
996 }
997 return 0;
998}
999
1000/* Figure out whether we have an internal or external transceiver.
1001 *
1002 * hp->happy_lock must be held
1003 */
1004static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
1005{
1006 unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
1007
1008 ASD("tcfg=%08lx\n", tconfig);
1009 if (hp->happy_flags & HFLAG_POLL) {
1010 /* If we are polling, we must stop to get the transceiver type. */
1011 if (hp->tcvr_type == internal) {
1012 if (tconfig & TCV_CFG_MDIO1) {
1013 happy_meal_poll_stop(hp, tregs);
1014 hp->paddr = TCV_PADDR_ETX;
1015 hp->tcvr_type = external;
1016 tconfig &= ~(TCV_CFG_PENABLE);
1017 tconfig |= TCV_CFG_PSELECT;
1018 hme_write32(hp, tregs + TCVR_CFG, tconfig);
1019 ASD("poll stop, internal->external\n");
1020 }
1021 } else {
1022 if (hp->tcvr_type == external) {
1023 if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
1024 happy_meal_poll_stop(hp, tregs);
1025 hp->paddr = TCV_PADDR_ITX;
1026 hp->tcvr_type = internal;
1027 hme_write32(hp, tregs + TCVR_CFG,
1028 hme_read32(hp, tregs + TCVR_CFG) &
1029 ~(TCV_CFG_PSELECT));
1030 ASD("poll stop, external->internal\n");
1031 }
1032 } else {
1033 ASD("polling, none\n");
1034 }
1035 }
1036 } else {
1037 u32 reread = hme_read32(hp, tregs + TCVR_CFG);
1038
1039 /* Else we can just work off of the MDIO bits. */
1040 if (reread & TCV_CFG_MDIO1) {
1041 hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1042 hp->paddr = TCV_PADDR_ETX;
1043 hp->tcvr_type = external;
1044 ASD("not polling, external\n");
1045 } else {
1046 if (reread & TCV_CFG_MDIO0) {
1047 hme_write32(hp, tregs + TCVR_CFG,
1048 tconfig & ~(TCV_CFG_PSELECT));
1049 hp->paddr = TCV_PADDR_ITX;
1050 hp->tcvr_type = internal;
1051 ASD("not polling, internal\n");
1052 } else {
1053 netdev_err(hp->dev,
					   "Transceiver and a coke please.\n");
1055 hp->tcvr_type = none; /* Grrr... */
1056 ASD("not polling, none\n");
1057 }
1058 }
1059 }
1060}
1061
1062/* The receive ring buffers are a bit tricky to get right. Here goes...
1063 *
1064 * The buffers we dma into must be 64 byte aligned. So we use a special
1065 * alloc_skb() routine for the happy meal to allocate 64 bytes more than
1066 * we really need.
1067 *
1068 * We use skb_reserve() to align the data block we get in the skb. We
 * also program the erxregs->cfg register to use an offset of 2. This
 * empirical constant plus the ethernet header size will always leave
1071 * us with a nicely aligned ip header once we pass things up to the
1072 * protocol layers.
1073 *
1074 * The numbers work out to:
1075 *
1076 * Max ethernet frame size 1518
1077 * Ethernet header size 14
1078 * Happy Meal base offset 2
1079 *
1080 * Say a skb data area is at 0xf001b010, and its size alloced is
1081 * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
1082 *
1083 * First our alloc_skb() routine aligns the data base to a 64 byte
1084 * boundary. We now have 0xf001b040 as our skb data address. We
1085 * plug this into the receive descriptor address.
1086 *
1087 * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
1088 * So now the data we will end up looking at starts at 0xf001b042. When
1089 * the packet arrives, we will check out the size received and subtract
 * this from the skb->len. Then we just pass the packet up to the
1091 * protocols as is, and allocate a new skb to replace this slot we have
1092 * just received from.
1093 *
1094 * The ethernet layer will strip the ether header from the front of the
1095 * skb we just sent to it, this leaves us with the ip header sitting
1096 * nicely aligned at 0xf001b050. Also, for tcp and udp packets the
1097 * Happy Meal has even checksummed the tcp/udp data for us. The 16
1098 * bit checksum is obtained from the low bits of the receive descriptor
1099 * flags, thus:
1100 *
1101 * skb->csum = rxd->rx_flags & 0xffff;
1102 * skb->ip_summed = CHECKSUM_COMPLETE;
1103 *
1104 * before sending off the skb to the protocols, and we are good as gold.
1105 */
1106static void happy_meal_clean_rings(struct happy_meal *hp)
1107{
1108 int i;
1109
1110 for (i = 0; i < RX_RING_SIZE; i++) {
1111 if (hp->rx_skbs[i] != NULL) {
1112 struct sk_buff *skb = hp->rx_skbs[i];
1113 struct happy_meal_rxd *rxd;
1114 u32 dma_addr;
1115
1116 rxd = &hp->happy_block->happy_meal_rxd[i];
1117 dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
1118 dma_unmap_single(hp->dma_dev, dma_addr,
1119 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1120 dev_kfree_skb_any(skb);
1121 hp->rx_skbs[i] = NULL;
1122 }
1123 }
1124
1125 for (i = 0; i < TX_RING_SIZE; i++) {
1126 if (hp->tx_skbs[i] != NULL) {
1127 struct sk_buff *skb = hp->tx_skbs[i];
1128 struct happy_meal_txd *txd;
1129 u32 dma_addr;
1130 int frag;
1131
1132 hp->tx_skbs[i] = NULL;
1133
1134 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1135 txd = &hp->happy_block->happy_meal_txd[i];
1136 dma_addr = hme_read_desc32(hp, &txd->tx_addr);
1137 if (!frag)
1138 dma_unmap_single(hp->dma_dev, dma_addr,
1139 (hme_read_desc32(hp, &txd->tx_flags)
1140 & TXFLAG_SIZE),
1141 DMA_TO_DEVICE);
1142 else
1143 dma_unmap_page(hp->dma_dev, dma_addr,
1144 (hme_read_desc32(hp, &txd->tx_flags)
1145 & TXFLAG_SIZE),
1146 DMA_TO_DEVICE);
1147
1148 if (frag != skb_shinfo(skb)->nr_frags)
1149 i++;
1150 }
1151
1152 dev_kfree_skb_any(skb);
1153 }
1154 }
1155}
1156
1157/* hp->happy_lock must be held */
1158static void happy_meal_init_rings(struct happy_meal *hp)
1159{
1160 struct hmeal_init_block *hb = hp->happy_block;
1161 int i;
1162
1163 HMD("counters to zero\n");
1164 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1165
1166 /* Free any skippy bufs left around in the rings. */
1167 happy_meal_clean_rings(hp);
1168
1169 /* Now get new skippy bufs for the receive ring. */
1170 HMD("init rxring\n");
1171 for (i = 0; i < RX_RING_SIZE; i++) {
1172 struct sk_buff *skb;
1173 u32 mapping;
1174
1175 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1176 if (!skb) {
1177 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1178 continue;
1179 }
1180 hp->rx_skbs[i] = skb;
1181
1182 /* Because we reserve afterwards. */
1183 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1184 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1185 DMA_FROM_DEVICE);
1186 if (dma_mapping_error(hp->dma_dev, mapping)) {
1187 dev_kfree_skb_any(skb);
1188 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1189 continue;
1190 }
1191 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1192 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1193 mapping);
1194 skb_reserve(skb, RX_OFFSET);
1195 }
1196
1197 HMD("init txring\n");
1198 for (i = 0; i < TX_RING_SIZE; i++)
1199 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1200
1201 HMD("done\n");
1202}
1203
1204/* hp->happy_lock must be held */
1205static void
1206happy_meal_begin_auto_negotiation(struct happy_meal *hp,
1207 void __iomem *tregs,
1208 const struct ethtool_link_ksettings *ep)
1209{
1210 int timeout;
1211
1212 /* Read all of the registers we are interested in now. */
1213 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1214 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1215 hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1216 hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1217
1218 /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
1219
1220 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1221 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1222 /* Advertise everything we can support. */
1223 if (hp->sw_bmsr & BMSR_10HALF)
1224 hp->sw_advertise |= (ADVERTISE_10HALF);
1225 else
1226 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1227
1228 if (hp->sw_bmsr & BMSR_10FULL)
1229 hp->sw_advertise |= (ADVERTISE_10FULL);
1230 else
1231 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1232 if (hp->sw_bmsr & BMSR_100HALF)
1233 hp->sw_advertise |= (ADVERTISE_100HALF);
1234 else
1235 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1236 if (hp->sw_bmsr & BMSR_100FULL)
1237 hp->sw_advertise |= (ADVERTISE_100FULL);
1238 else
1239 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1240 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1241
		/* XXX Currently no Happy Meal cards I know of support 100BaseT4,
1243 * XXX and this is because the DP83840 does not support it, changes
1244 * XXX would need to be made to the tx/rx logic in the driver as well
1245 * XXX so I completely skip checking for it in the BMSR for now.
1246 */
1247
1248 ASD("Advertising [ %s%s%s%s]\n",
1249 hp->sw_advertise & ADVERTISE_10HALF ? "10H " : "",
1250 hp->sw_advertise & ADVERTISE_10FULL ? "10F " : "",
1251 hp->sw_advertise & ADVERTISE_100HALF ? "100H " : "",
1252 hp->sw_advertise & ADVERTISE_100FULL ? "100F " : "");
1253
1254 /* Enable Auto-Negotiation, this is usually on already... */
1255 hp->sw_bmcr |= BMCR_ANENABLE;
1256 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1257
1258 /* Restart it to make sure it is going. */
1259 hp->sw_bmcr |= BMCR_ANRESTART;
1260 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1261
1262 /* BMCR_ANRESTART self clears when the process has begun. */
1263
1264 timeout = 64; /* More than enough. */
1265 while (--timeout) {
1266 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1267 if (!(hp->sw_bmcr & BMCR_ANRESTART))
1268 break; /* got it. */
1269 udelay(10);
1270 }
1271 if (!timeout) {
1272 netdev_err(hp->dev,
1273 "Happy Meal would not start auto negotiation BMCR=0x%04x\n",
1274 hp->sw_bmcr);
1275 netdev_notice(hp->dev,
1276 "Performing force link detection.\n");
1277 goto force_link;
1278 } else {
1279 hp->timer_state = arbwait;
1280 }
1281 } else {
1282force_link:
1283 /* Force the link up, trying first a particular mode.
1284 * Either we are here at the request of ethtool or
1285 * because the Happy Meal would not start to autoneg.
1286 */
1287
1288 /* Disable auto-negotiation in BMCR, enable the duplex and
1289 * speed setting, init the timer state machine, and fire it off.
1290 */
1291 if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
1292 hp->sw_bmcr = BMCR_SPEED100;
1293 } else {
1294 if (ep->base.speed == SPEED_100)
1295 hp->sw_bmcr = BMCR_SPEED100;
1296 else
1297 hp->sw_bmcr = 0;
1298 if (ep->base.duplex == DUPLEX_FULL)
1299 hp->sw_bmcr |= BMCR_FULLDPLX;
1300 }
1301 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1302
1303 if (!is_lucent_phy(hp)) {
			/* OK, it seems we need to disable the transceiver for the first
1305 * tick to make sure we get an accurate link state at the
1306 * second tick.
1307 */
1308 hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
1309 DP83840_CSCONFIG);
1310 hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
1311 happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
1312 hp->sw_csconfig);
1313 }
1314 hp->timer_state = ltrywait;
1315 }
1316
1317 hp->timer_ticks = 0;
1318 hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
1319 add_timer(&hp->happy_timer);
1320}
1321
1322/* hp->happy_lock must be held */
1323static int happy_meal_init(struct happy_meal *hp)
1324{
1325 const unsigned char *e = &hp->dev->dev_addr[0];
1326 void __iomem *gregs = hp->gregs;
1327 void __iomem *etxregs = hp->etxregs;
1328 void __iomem *erxregs = hp->erxregs;
1329 void __iomem *bregs = hp->bigmacregs;
1330 void __iomem *tregs = hp->tcvregs;
1331 const char *bursts = "64";
1332 u32 regtmp, rxcfg;
1333
1334 /* If auto-negotiation timer is running, kill it. */
1335 del_timer(&hp->happy_timer);
1336
1337 HMD("happy_flags[%08x]\n", hp->happy_flags);
1338 if (!(hp->happy_flags & HFLAG_INIT)) {
1339 HMD("set HFLAG_INIT\n");
1340 hp->happy_flags |= HFLAG_INIT;
1341 happy_meal_get_counters(hp, bregs);
1342 }
1343
1344 /* Stop polling. */
1345 HMD("to happy_meal_poll_stop\n");
1346 happy_meal_poll_stop(hp, tregs);
1347
1348 /* Stop transmitter and receiver. */
1349 HMD("to happy_meal_stop\n");
1350 happy_meal_stop(hp, gregs);
1351
1352 /* Alloc and reset the tx/rx descriptor chains. */
1353 HMD("to happy_meal_init_rings\n");
1354 happy_meal_init_rings(hp);
1355
1356 /* Shut up the MIF. */
1357 HMD("Disable all MIF irqs (old[%08x])\n",
1358 hme_read32(hp, tregs + TCVR_IMASK));
1359 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1360
1361 /* See if we can enable the MIF frame on this card to speak to the DP83840. */
1362 if (hp->happy_flags & HFLAG_FENABLE) {
1363 HMD("use frame old[%08x]\n",
1364 hme_read32(hp, tregs + TCVR_CFG));
1365 hme_write32(hp, tregs + TCVR_CFG,
1366 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1367 } else {
1368 HMD("use bitbang old[%08x]\n",
1369 hme_read32(hp, tregs + TCVR_CFG));
1370 hme_write32(hp, tregs + TCVR_CFG,
1371 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1372 }
1373
1374 /* Check the state of the transceiver. */
1375 HMD("to happy_meal_transceiver_check\n");
1376 happy_meal_transceiver_check(hp, tregs);
1377
1378 /* Put the Big Mac into a sane state. */
1379 switch(hp->tcvr_type) {
1380 case none:
1381 /* Cannot operate if we don't know the transceiver type! */
1382 HMD("AAIEEE no transceiver type, EAGAIN\n");
1383 return -EAGAIN;
1384
1385 case internal:
1386 /* Using the MII buffers. */
1387 HMD("internal, using MII\n");
1388 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1389 break;
1390
1391 case external:
1392 /* Not using the MII, disable it. */
1393 HMD("external, disable MII\n");
1394 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1395 break;
1396 }
1397
1398 if (happy_meal_tcvr_reset(hp, tregs))
1399 return -EAGAIN;
1400
1401 /* Reset the Happy Meal Big Mac transceiver and the receiver. */
1402 HMD("tx/rx reset\n");
1403 happy_meal_tx_reset(hp, bregs);
1404 happy_meal_rx_reset(hp, bregs);
1405
1406 /* Set jam size and inter-packet gaps to reasonable defaults. */
1407 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1408 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1409 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1410
1411 /* Load up the MAC address and random seed. */
1412
	/* The docs recommend using the 10 LSBs of our MAC here. */
1414 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1415
1416 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1417 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1418 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
1419
1420 if ((hp->dev->flags & IFF_ALLMULTI) ||
1421 (netdev_mc_count(hp->dev) > 64)) {
1422 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1423 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1424 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1425 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1426 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1427 u16 hash_table[4];
1428 struct netdev_hw_addr *ha;
1429 u32 crc;
1430
1431 memset(hash_table, 0, sizeof(hash_table));
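		/* 64-entry hash filter: the top 6 bits of the little-endian
		 * CRC-32 of each multicast address pick one bit out of the
		 * four 16-bit HTABLE registers (upper two bits select the
		 * register, lower four select the bit within it).
		 */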
1432 netdev_for_each_mc_addr(ha, hp->dev) {
1433 crc = ether_crc_le(6, ha->addr);
1434 crc >>= 26;
1435 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1436 }
1437 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1438 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1439 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1440 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1441 } else {
1442 hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1443 hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1444 hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1445 hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1446 }
1447
1448 /* Set the RX and TX ring ptrs. */
1449 HMD("ring ptrs rxr[%08x] txr[%08x]\n",
1450 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1451 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1452 hme_write32(hp, erxregs + ERX_RING,
1453 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1454 hme_write32(hp, etxregs + ETX_RING,
1455 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1456
1457 /* Parity issues in the ERX unit of some HME revisions can cause some
1458 * registers to not be written unless their parity is even. Detect such
1459 * lost writes and simply rewrite with a low bit set (which will be ignored
1460 * since the rxring needs to be 2K aligned).
1461 */
1462 if (hme_read32(hp, erxregs + ERX_RING) !=
1463 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1464 hme_write32(hp, erxregs + ERX_RING,
1465 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1466 | 0x4);
1467
1468 /* Set the supported burst sizes. */
1469#ifndef CONFIG_SPARC
1470 /* It is always PCI and can handle 64byte bursts. */
1471 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1472#else
1473 if ((hp->happy_bursts & DMA_BURST64) &&
1474 ((hp->happy_flags & HFLAG_PCI) != 0
1475#ifdef CONFIG_SBUS
1476 || sbus_can_burst64()
1477#endif
1478 || 0)) {
1479 u32 gcfg = GREG_CFG_BURST64;
1480
1481 /* I have no idea if I should set the extended
1482 * transfer mode bit for Cheerio, so for now I
1483 * do not. -DaveM
1484 */
1485#ifdef CONFIG_SBUS
1486 if ((hp->happy_flags & HFLAG_PCI) == 0) {
1487 struct platform_device *op = hp->happy_dev;
1488 if (sbus_can_dma_64bit()) {
1489 sbus_set_sbus64(&op->dev,
1490 hp->happy_bursts);
1491 gcfg |= GREG_CFG_64BIT;
1492 }
1493 }
1494#endif
1495
1496 bursts = "64";
1497 hme_write32(hp, gregs + GREG_CFG, gcfg);
1498 } else if (hp->happy_bursts & DMA_BURST32) {
1499 bursts = "32";
1500 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1501 } else if (hp->happy_bursts & DMA_BURST16) {
1502 bursts = "16";
1503 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1504 } else {
1505 bursts = "XXX";
1506 hme_write32(hp, gregs + GREG_CFG, 0);
1507 }
1508#endif /* CONFIG_SPARC */
1509
1510 HMD("old[%08x] bursts<%s>\n",
1511 hme_read32(hp, gregs + GREG_CFG), bursts);
1512
1513 /* Turn off interrupts we do not want to hear. */
1514 hme_write32(hp, gregs + GREG_IMASK,
1515 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1516 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1517
1518 /* Set the transmit ring buffer size. */
1519 HMD("tx rsize=%d oreg[%08x]\n", (int)TX_RING_SIZE,
1520 hme_read32(hp, etxregs + ETX_RSIZE));
1521 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1522
1523 /* Enable transmitter DVMA. */
1524 HMD("tx dma enable old[%08x]\n", hme_read32(hp, etxregs + ETX_CFG));
1525 hme_write32(hp, etxregs + ETX_CFG,
1526 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
1527
	/* This chip really rots: sometimes when you write to the receiver's
	 * control registers, not all the bits get there
1530 * properly. I cannot think of a sane way to provide complete
1531 * coverage for this hardware bug yet.
1532 */
1533 HMD("erx regs bug old[%08x]\n",
1534 hme_read32(hp, erxregs + ERX_CFG));
1535 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1536 regtmp = hme_read32(hp, erxregs + ERX_CFG);
1537 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1538 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1539 netdev_err(hp->dev,
1540 "Eieee, rx config register gets greasy fries.\n");
1541 netdev_err(hp->dev,
1542 "Trying to set %08x, reread gives %08x\n",
1543 ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1544 /* XXX Should return failure here... */
1545 }
1546
1547 /* Enable Big Mac hash table filter. */
1548 HMD("enable hash rx_cfg_old[%08x]\n",
1549 hme_read32(hp, bregs + BMAC_RXCFG));
1550 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1551 if (hp->dev->flags & IFF_PROMISC)
1552 rxcfg |= BIGMAC_RXCFG_PMISC;
1553 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1554
1555 /* Let the bits settle in the chip. */
1556 udelay(10);
1557
1558 /* Ok, configure the Big Mac transmitter. */
1559 HMD("BIGMAC init\n");
1560 regtmp = 0;
1561 if (hp->happy_flags & HFLAG_FULL)
1562 regtmp |= BIGMAC_TXCFG_FULLDPLX;
1563
1564 /* Don't turn on the "don't give up" bit for now. It could cause hme
1565 * to deadlock with the PHY if a Jabber occurs.
1566 */
1567 hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
1568
1569 /* Give up after 16 TX attempts. */
1570 hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1571
1572 /* Enable the output drivers no matter what. */
1573 regtmp = BIGMAC_XCFG_ODENABLE;
1574
1575 /* If card can do lance mode, enable it. */
1576 if (hp->happy_flags & HFLAG_LANCE)
1577 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1578
1579 /* Disable the MII buffers if using external transceiver. */
1580 if (hp->tcvr_type == external)
1581 regtmp |= BIGMAC_XCFG_MIIDISAB;
1582
1583 HMD("XIF config old[%08x]\n", hme_read32(hp, bregs + BMAC_XIFCFG));
1584 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1585
1586 /* Start things up. */
1587 HMD("tx old[%08x] and rx [%08x] ON!\n",
1588 hme_read32(hp, bregs + BMAC_TXCFG),
1589 hme_read32(hp, bregs + BMAC_RXCFG));
1590
1591 /* Set larger TX/RX size to allow for 802.1q */
1592 hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
1593 hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
1594
1595 hme_write32(hp, bregs + BMAC_TXCFG,
1596 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1597 hme_write32(hp, bregs + BMAC_RXCFG,
1598 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1599
1600 /* Get the autonegotiation started, and the watch timer ticking. */
1601 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1602
1603 /* Success. */
1604 return 0;
1605}
1606
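/* Bring the transceiver up just far enough to program the MII
 * advertisement register with every 10/100 mode the PHY reports it
 * supports.
 */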
1607/* hp->happy_lock must be held */
1608static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1609{
1610 void __iomem *tregs = hp->tcvregs;
1611 void __iomem *bregs = hp->bigmacregs;
1612 void __iomem *gregs = hp->gregs;
1613
1614 happy_meal_stop(hp, gregs);
1615 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1616 if (hp->happy_flags & HFLAG_FENABLE)
1617 hme_write32(hp, tregs + TCVR_CFG,
1618 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1619 else
1620 hme_write32(hp, tregs + TCVR_CFG,
1621 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1622 happy_meal_transceiver_check(hp, tregs);
1623 switch(hp->tcvr_type) {
1624 case none:
1625 return;
1626 case internal:
1627 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1628 break;
1629 case external:
1630 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1631 break;
1632 }
1633 if (happy_meal_tcvr_reset(hp, tregs))
1634 return;
1635
1636 /* Latch PHY registers as of now. */
1637 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1638 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1639
1640 /* Advertise everything we can support. */
1641 if (hp->sw_bmsr & BMSR_10HALF)
1642 hp->sw_advertise |= (ADVERTISE_10HALF);
1643 else
1644 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1645
1646 if (hp->sw_bmsr & BMSR_10FULL)
1647 hp->sw_advertise |= (ADVERTISE_10FULL);
1648 else
1649 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1650 if (hp->sw_bmsr & BMSR_100HALF)
1651 hp->sw_advertise |= (ADVERTISE_100HALF);
1652 else
1653 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1654 if (hp->sw_bmsr & BMSR_100FULL)
1655 hp->sw_advertise |= (ADVERTISE_100FULL);
1656 else
1657 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1658
1659 /* Update the PHY advertisement register. */
1660 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1661}
1662
1663/* Once status is latched (by happy_meal_interrupt) it is cleared by
1664 * the hardware, so we cannot re-read it and get a correct value.
1665 *
1666 * hp->happy_lock must be held
1667 */
1668static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1669{
1670 int reset = 0;
1671
1672 /* Only print messages for non-counter related interrupts. */
1673 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1674 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1675 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1676 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1677 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1678 GREG_STAT_SLVPERR))
1679 netdev_err(hp->dev,
1680 "Error interrupt for happy meal, status = %08x\n",
1681 status);
1682
1683 if (status & GREG_STAT_RFIFOVF) {
1684 /* Receive FIFO overflow is harmless and the hardware will take
1685 care of it, just some packets are lost. Who cares. */
1686 netdev_dbg(hp->dev, "Happy Meal receive FIFO overflow.\n");
1687 }
1688
1689 if (status & GREG_STAT_STSTERR) {
1690 /* BigMAC SQE link test failed. */
1691 netdev_err(hp->dev, "Happy Meal BigMAC SQE test failed.\n");
1692 reset = 1;
1693 }
1694
1695 if (status & GREG_STAT_TFIFO_UND) {
1696 /* Transmit FIFO underrun, again DMA error likely. */
1697 netdev_err(hp->dev,
1698 "Happy Meal transmitter FIFO underrun, DMA error.\n");
1699 reset = 1;
1700 }
1701
1702 if (status & GREG_STAT_MAXPKTERR) {
1703 /* Driver error, tried to transmit something larger
1704 * than ethernet max mtu.
1705 */
1706 netdev_err(hp->dev, "Happy Meal MAX Packet size error.\n");
1707 reset = 1;
1708 }
1709
1710 if (status & GREG_STAT_NORXD) {
1711 /* This is harmless, it just means the system is
1712 * quite loaded and the incoming packet rate was
1713 * faster than the interrupt handler could keep up
1714 * with.
1715 */
1716 netdev_info(hp->dev,
1717 "Happy Meal out of receive descriptors, packet dropped.\n");
1718 }
1719
1720 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1721 /* All sorts of DMA receive errors. */
1722 netdev_err(hp->dev, "Happy Meal rx DMA errors [ %s%s%s]\n",
1723 status & GREG_STAT_RXERR ? "GenericError " : "",
1724 status & GREG_STAT_RXPERR ? "ParityError " : "",
1725 status & GREG_STAT_RXTERR ? "RxTagBotch " : "");
1726 reset = 1;
1727 }
1728
1729 if (status & GREG_STAT_EOPERR) {
1730 /* Driver bug, didn't set EOP bit in tx descriptor given
1731 * to the happy meal.
1732 */
1733 netdev_err(hp->dev,
1734 "EOP not set in happy meal transmit descriptor!\n");
1735 reset = 1;
1736 }
1737
1738 if (status & GREG_STAT_MIFIRQ) {
1739 /* MIF signalled an interrupt, were we polling it? */
1740 netdev_err(hp->dev, "Happy Meal MIF interrupt.\n");
1741 }
1742
1743 if (status &
1744 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1745 /* All sorts of transmit DMA errors. */
1746 netdev_err(hp->dev, "Happy Meal tx DMA errors [ %s%s%s%s]\n",
1747 status & GREG_STAT_TXEACK ? "GenericError " : "",
1748 status & GREG_STAT_TXLERR ? "LateError " : "",
1749 status & GREG_STAT_TXPERR ? "ParityError " : "",
1750 status & GREG_STAT_TXTERR ? "TagBotch " : "");
1751 reset = 1;
1752 }
1753
1754 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
		/* Bus or parity error when the CPU accessed happy meal registers
		 * or its internal FIFOs. Should never see this.
1757 */
1758 netdev_err(hp->dev,
1759 "Happy Meal register access SBUS slave (%s) error.\n",
1760 (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1761 reset = 1;
1762 }
1763
1764 if (reset) {
1765 netdev_notice(hp->dev, "Resetting...\n");
1766 happy_meal_init(hp);
1767 return 1;
1768 }
1769 return 0;
1770}
1771
1772/* hp->happy_lock must be held */
1773static void happy_meal_mif_interrupt(struct happy_meal *hp)
1774{
1775 void __iomem *tregs = hp->tcvregs;
1776
1777 netdev_info(hp->dev, "Link status change.\n");
1778 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1779 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
1780
1781 /* Use the fastest transmission protocol possible. */
1782 if (hp->sw_lpa & LPA_100FULL) {
1783 netdev_info(hp->dev, "Switching to 100Mbps at full duplex.\n");
1784 hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
1785 } else if (hp->sw_lpa & LPA_100HALF) {
		netdev_info(hp->dev, "Switching to 100Mbps at half duplex.\n");
1787 hp->sw_bmcr |= BMCR_SPEED100;
1788 } else if (hp->sw_lpa & LPA_10FULL) {
		netdev_info(hp->dev, "Switching to 10Mbps at full duplex.\n");
1790 hp->sw_bmcr |= BMCR_FULLDPLX;
1791 } else {
1792 netdev_info(hp->dev, "Using 10Mbps at half duplex.\n");
1793 }
1794 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1795
1796 /* Finally stop polling and shut up the MIF. */
1797 happy_meal_poll_stop(hp, tregs);
1798}
1799
1800/* hp->happy_lock must be held */
1801static void happy_meal_tx(struct happy_meal *hp)
1802{
1803 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1804 struct happy_meal_txd *this;
1805 struct net_device *dev = hp->dev;
1806 int elem;
1807
1808 elem = hp->tx_old;
1809 while (elem != hp->tx_new) {
1810 struct sk_buff *skb;
1811 u32 flags, dma_addr, dma_len;
1812 int frag;
1813
1814 netdev_vdbg(hp->dev, "TX[%d]\n", elem);
1815 this = &txbase[elem];
1816 flags = hme_read_desc32(hp, &this->tx_flags);
1817 if (flags & TXFLAG_OWN)
1818 break;
1819 skb = hp->tx_skbs[elem];
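		/* For a multi-descriptor skb, only reclaim it once the chip
		 * has also released the descriptor of its last fragment.
		 */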
1820 if (skb_shinfo(skb)->nr_frags) {
1821 int last;
1822
1823 last = elem + skb_shinfo(skb)->nr_frags;
1824 last &= (TX_RING_SIZE - 1);
1825 flags = hme_read_desc32(hp, &txbase[last].tx_flags);
1826 if (flags & TXFLAG_OWN)
1827 break;
1828 }
1829 hp->tx_skbs[elem] = NULL;
1830 dev->stats.tx_bytes += skb->len;
1831
1832 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1833 dma_addr = hme_read_desc32(hp, &this->tx_addr);
1834 dma_len = hme_read_desc32(hp, &this->tx_flags);
1835
1836 dma_len &= TXFLAG_SIZE;
1837 if (!frag)
1838 dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1839 else
1840 dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1841
1842 elem = NEXT_TX(elem);
1843 this = &txbase[elem];
1844 }
1845
1846 dev_consume_skb_irq(skb);
1847 dev->stats.tx_packets++;
1848 }
1849 hp->tx_old = elem;
1850
1851 if (netif_queue_stopped(dev) &&
1852 TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
1853 netif_wake_queue(dev);
1854}
1855
/* Originally I used to handle the allocation failure by just giving back
1857 * that one ring buffer to the happy meal. Problem is that usually when that
1858 * condition is triggered, the happy meal expects you to do something reasonable
1859 * with all of the packets it has DMA'd in. So now I just drop the entire
1860 * ring when we cannot get a new skb and give them all back to the happy meal,
1861 * maybe things will be "happier" now.
1862 *
1863 * hp->happy_lock must be held
1864 */
1865static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1866{
1867 struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
1868 struct happy_meal_rxd *this;
1869 int elem = hp->rx_new, drops = 0;
1870 u32 flags;
1871
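	/* Walk the RX ring until we hit a descriptor still owned by the chip. */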
1872 this = &rxbase[elem];
1873 while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
1874 struct sk_buff *skb;
1875 int len = flags >> 16;
1876 u16 csum = flags & RXFLAG_CSUM;
1877 u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
1878
1879 /* Check for errors. */
1880 if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
1881 netdev_vdbg(dev, "RX[%d ERR(%08x)]", elem, flags);
1882 dev->stats.rx_errors++;
1883 if (len < ETH_ZLEN)
1884 dev->stats.rx_length_errors++;
1885 if (len & (RXFLAG_OVERFLOW >> 16)) {
1886 dev->stats.rx_over_errors++;
1887 dev->stats.rx_fifo_errors++;
1888 }
1889
1890 /* Return it to the Happy meal. */
1891 drop_it:
1892 dev->stats.rx_dropped++;
1893 hme_write_rxd(hp, this,
1894 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1895 dma_addr);
1896 goto next;
1897 }
1898 skb = hp->rx_skbs[elem];
1899 if (len > RX_COPY_THRESHOLD) {
1900 struct sk_buff *new_skb;
1901 u32 mapping;
1902
1903 /* Now refill the entry, if we can. */
1904 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1905 if (new_skb == NULL) {
1906 drops++;
1907 goto drop_it;
1908 }
1909 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1910 mapping = dma_map_single(hp->dma_dev, new_skb->data,
1911 RX_BUF_ALLOC_SIZE,
1912 DMA_FROM_DEVICE);
1913 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
1914 dev_kfree_skb_any(new_skb);
1915 drops++;
1916 goto drop_it;
1917 }
1918
1919 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1920 hp->rx_skbs[elem] = new_skb;
1921 hme_write_rxd(hp, this,
1922 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1923 mapping);
1924 skb_reserve(new_skb, RX_OFFSET);
1925
1926 /* Trim the original skb for the netif. */
1927 skb_trim(skb, len);
1928 } else {
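			/* Short frame: copy it into a freshly allocated skb
			 * and leave the original DMA buffer in place in the
			 * ring.
			 */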
1929 struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
1930
1931 if (copy_skb == NULL) {
1932 drops++;
1933 goto drop_it;
1934 }
1935
1936 skb_reserve(copy_skb, 2);
1937 skb_put(copy_skb, len);
1938 dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1939 skb_copy_from_linear_data(skb, copy_skb->data, len);
1940 dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1941 /* Reuse original ring buffer. */
1942 hme_write_rxd(hp, this,
1943 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1944 dma_addr);
1945
1946 skb = copy_skb;
1947 }
1948
1949 /* This card is _fucking_ hot... */
1950 skb->csum = csum_unfold(~(__force __sum16)htons(csum));
1951 skb->ip_summed = CHECKSUM_COMPLETE;
1952
1953 netdev_vdbg(dev, "RX[%d len=%d csum=%4x]", elem, len, csum);
1954 skb->protocol = eth_type_trans(skb, dev);
1955 netif_rx(skb);
1956
1957 dev->stats.rx_packets++;
1958 dev->stats.rx_bytes += len;
1959 next:
1960 elem = NEXT_RX(elem);
1961 this = &rxbase[elem];
1962 }
1963 hp->rx_new = elem;
1964 if (drops)
1965 netdev_info(hp->dev, "Memory squeeze, deferring packet.\n");
1966}
1967
1968static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
1969{
1970 struct net_device *dev = dev_id;
1971 struct happy_meal *hp = netdev_priv(dev);
1972 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
1973
1974 HMD("status=%08x\n", happy_status);
1975
1976 spin_lock(&hp->happy_lock);
1977
1978 if (happy_status & GREG_STAT_ERRORS) {
1979 if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
1980 goto out;
1981 }
1982
1983 if (happy_status & GREG_STAT_MIFIRQ)
1984 happy_meal_mif_interrupt(hp);
1985
1986 if (happy_status & GREG_STAT_TXALL)
1987 happy_meal_tx(hp);
1988
1989 if (happy_status & GREG_STAT_RXTOHOST)
1990 happy_meal_rx(hp, dev);
1991
1992 HMD("done\n");
1993out:
1994 spin_unlock(&hp->happy_lock);
1995
1996 return IRQ_HANDLED;
1997}
1998
1999#ifdef CONFIG_SBUS
2000static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
2001{
2002 struct quattro *qp = (struct quattro *) cookie;
2003 int i;
2004
2005 for (i = 0; i < 4; i++) {
2006 struct net_device *dev = qp->happy_meals[i];
2007 struct happy_meal *hp = netdev_priv(dev);
2008 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2009
2010 HMD("status=%08x\n", happy_status);
2011
2012 if (!(happy_status & (GREG_STAT_ERRORS |
2013 GREG_STAT_MIFIRQ |
2014 GREG_STAT_TXALL |
2015 GREG_STAT_RXTOHOST)))
2016 continue;
2017
2018 spin_lock(&hp->happy_lock);
2019
2020 if (happy_status & GREG_STAT_ERRORS)
2021 if (happy_meal_is_not_so_happy(hp, happy_status))
2022 goto next;
2023
2024 if (happy_status & GREG_STAT_MIFIRQ)
2025 happy_meal_mif_interrupt(hp);
2026
2027 if (happy_status & GREG_STAT_TXALL)
2028 happy_meal_tx(hp);
2029
2030 if (happy_status & GREG_STAT_RXTOHOST)
2031 happy_meal_rx(hp, dev);
2032
2033 next:
2034 spin_unlock(&hp->happy_lock);
2035 }
2036 HMD("done\n");
2037
2038 return IRQ_HANDLED;
2039}
2040#endif
2041
2042static int happy_meal_open(struct net_device *dev)
2043{
2044 struct happy_meal *hp = netdev_priv(dev);
2045 int res;
2046
2047 /* On SBUS Quattro QFE cards, all hme interrupts are concentrated
	 * into a single source, whose handler we register at probe time.
2049 */
2050 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2051 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2052 dev->name, dev);
2053 if (res) {
2054 HMD("EAGAIN\n");
2055 netdev_err(dev, "Can't order irq %d to go.\n", hp->irq);
2056
2057 return -EAGAIN;
2058 }
2059 }
2060
2061 HMD("to happy_meal_init\n");
2062
2063 spin_lock_irq(&hp->happy_lock);
2064 res = happy_meal_init(hp);
2065 spin_unlock_irq(&hp->happy_lock);
2066
2067 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2068 free_irq(hp->irq, dev);
2069 return res;
2070}
2071
2072static int happy_meal_close(struct net_device *dev)
2073{
2074 struct happy_meal *hp = netdev_priv(dev);
2075
2076 spin_lock_irq(&hp->happy_lock);
2077 happy_meal_stop(hp, hp->gregs);
2078 happy_meal_clean_rings(hp);
2079
2080 /* If auto-negotiation timer is running, kill it. */
2081 del_timer(&hp->happy_timer);
2082
2083 spin_unlock_irq(&hp->happy_lock);
2084
2085 /* On Quattro QFE cards, all hme interrupts are concentrated
	 * into a single source, whose handler we register at probe
	 * time and never unregister.
2088 */
2089 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2090 free_irq(hp->irq, dev);
2091
2092 return 0;
2093}
2094
2095static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
2096{
2097 struct happy_meal *hp = netdev_priv(dev);
2098
2099 netdev_err(dev, "transmit timed out, resetting\n");
2100 tx_dump_log();
2101 netdev_err(dev, "Happy Status %08x TX[%08x:%08x]\n",
2102 hme_read32(hp, hp->gregs + GREG_STAT),
2103 hme_read32(hp, hp->etxregs + ETX_CFG),
2104 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
2105
2106 spin_lock_irq(&hp->happy_lock);
2107 happy_meal_init(hp);
2108 spin_unlock_irq(&hp->happy_lock);
2109
2110 netif_wake_queue(dev);
2111}
2112
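/* Unwind the DMA mappings already made for a partially mapped TX skb
 * (the linear head plus any fragments mapped so far) when a later
 * fragment's mapping fails.  hp->happy_lock must be held.
 */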
2113static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2114 u32 first_len, u32 first_entry, u32 entry)
2115{
2116 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2117
2118 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2119
2120 first_entry = NEXT_TX(first_entry);
2121 while (first_entry != entry) {
2122 struct happy_meal_txd *this = &txbase[first_entry];
2123 u32 addr, len;
2124
2125 addr = hme_read_desc32(hp, &this->tx_addr);
2126 len = hme_read_desc32(hp, &this->tx_flags);
2127 len &= TXFLAG_SIZE;
		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);

		first_entry = NEXT_TX(first_entry);
	}
2130}
2131
2132static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2133 struct net_device *dev)
2134{
2135 struct happy_meal *hp = netdev_priv(dev);
2136 int entry;
2137 u32 tx_flags;
2138
2139 tx_flags = TXFLAG_OWN;
2140 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2141 const u32 csum_start_off = skb_checksum_start_offset(skb);
2142 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
2143
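		/* Ask the chip to compute the checksum: pack the offset where
		 * the sum starts and the offset where the result is stuffed
		 * into the descriptor flags word.
		 */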
2144 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
2145 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
2146 ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
2147 }
2148
2149 spin_lock_irq(&hp->happy_lock);
2150
2151 if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
2152 netif_stop_queue(dev);
2153 spin_unlock_irq(&hp->happy_lock);
2154 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2155 return NETDEV_TX_BUSY;
2156 }
2157
2158 entry = hp->tx_new;
2159 netdev_vdbg(dev, "SX<l[%d]e[%d]>\n", skb->len, entry);
2160 hp->tx_skbs[entry] = skb;
2161
2162 if (skb_shinfo(skb)->nr_frags == 0) {
2163 u32 mapping, len;
2164
2165 len = skb->len;
2166 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2167 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2168 goto out_dma_error;
2169 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2170 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2171 (tx_flags | (len & TXFLAG_SIZE)),
2172 mapping);
2173 entry = NEXT_TX(entry);
2174 } else {
2175 u32 first_len, first_mapping;
2176 int frag, first_entry = entry;
2177
2178 /* We must give this initial chunk to the device last.
2179 * Otherwise we could race with the device.
2180 */
2181 first_len = skb_headlen(skb);
2182 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2183 DMA_TO_DEVICE);
2184 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2185 goto out_dma_error;
2186 entry = NEXT_TX(entry);
2187
2188 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
2189 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
2190 u32 len, mapping, this_txflags;
2191
2192 len = skb_frag_size(this_frag);
2193 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2194 0, len, DMA_TO_DEVICE);
2195 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2196 unmap_partial_tx_skb(hp, first_mapping, first_len,
2197 first_entry, entry);
2198 goto out_dma_error;
2199 }
2200 this_txflags = tx_flags;
2201 if (frag == skb_shinfo(skb)->nr_frags - 1)
2202 this_txflags |= TXFLAG_EOP;
2203 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2204 (this_txflags | (len & TXFLAG_SIZE)),
2205 mapping);
2206 entry = NEXT_TX(entry);
2207 }
2208 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
2209 (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
2210 first_mapping);
2211 }
2212
2213 hp->tx_new = entry;
2214
2215 if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
2216 netif_stop_queue(dev);
2217
2218 /* Get it going. */
2219 hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
2220
2221 spin_unlock_irq(&hp->happy_lock);
2222
2223 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2224 return NETDEV_TX_OK;
2225
2226out_dma_error:
2227 hp->tx_skbs[hp->tx_new] = NULL;
2228 spin_unlock_irq(&hp->happy_lock);
2229
2230 dev_kfree_skb_any(skb);
2231 dev->stats.tx_dropped++;
2232 return NETDEV_TX_OK;
2233}
2234
2235static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2236{
2237 struct happy_meal *hp = netdev_priv(dev);
2238
2239 spin_lock_irq(&hp->happy_lock);
2240 happy_meal_get_counters(hp, hp->bigmacregs);
2241 spin_unlock_irq(&hp->happy_lock);
2242
2243 return &dev->stats;
2244}
2245
2246static void happy_meal_set_multicast(struct net_device *dev)
2247{
2248 struct happy_meal *hp = netdev_priv(dev);
2249 void __iomem *bregs = hp->bigmacregs;
2250 struct netdev_hw_addr *ha;
2251 u32 crc;
2252
2253 spin_lock_irq(&hp->happy_lock);
2254
2255 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2256 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2257 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2258 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2259 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2260 } else if (dev->flags & IFF_PROMISC) {
2261 hme_write32(hp, bregs + BMAC_RXCFG,
2262 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2263 } else {
2264 u16 hash_table[4];
2265
2266 memset(hash_table, 0, sizeof(hash_table));
2267 netdev_for_each_mc_addr(ha, dev) {
2268 crc = ether_crc_le(6, ha->addr);
2269 crc >>= 26;
2270 hash_table[crc >> 4] |= 1 << (crc & 0xf);
2271 }
2272 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2273 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2274 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2275 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2276 }
2277
2278 spin_unlock_irq(&hp->happy_lock);
2279}
2280
2281/* Ethtool support... */
2282static int hme_get_link_ksettings(struct net_device *dev,
2283 struct ethtool_link_ksettings *cmd)
2284{
2285 struct happy_meal *hp = netdev_priv(dev);
2286 u32 speed;
2287 u32 supported;
2288
2289 supported =
2290 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2291 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2292 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2293
2294 /* XXX hardcoded stuff for now */
2295 cmd->base.port = PORT_TP; /* XXX no MII support */
2296 cmd->base.phy_address = 0; /* XXX fixed PHYAD */
2297
2298 /* Record PHY settings. */
2299 spin_lock_irq(&hp->happy_lock);
2300 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2301 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2302 spin_unlock_irq(&hp->happy_lock);
2303
2304 if (hp->sw_bmcr & BMCR_ANENABLE) {
2305 cmd->base.autoneg = AUTONEG_ENABLE;
2306 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2307 SPEED_100 : SPEED_10);
2308 if (speed == SPEED_100)
2309 cmd->base.duplex =
2310 (hp->sw_lpa & (LPA_100FULL)) ?
2311 DUPLEX_FULL : DUPLEX_HALF;
2312 else
2313 cmd->base.duplex =
2314 (hp->sw_lpa & (LPA_10FULL)) ?
2315 DUPLEX_FULL : DUPLEX_HALF;
2316 } else {
2317 cmd->base.autoneg = AUTONEG_DISABLE;
2318 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2319 cmd->base.duplex =
2320 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2321 DUPLEX_FULL : DUPLEX_HALF;
2322 }
2323 cmd->base.speed = speed;
2324 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2325 supported);
2326
2327 return 0;
2328}
2329
2330static int hme_set_link_ksettings(struct net_device *dev,
2331 const struct ethtool_link_ksettings *cmd)
2332{
2333 struct happy_meal *hp = netdev_priv(dev);
2334
2335 /* Verify the settings we care about. */
2336 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2337 cmd->base.autoneg != AUTONEG_DISABLE)
2338 return -EINVAL;
2339 if (cmd->base.autoneg == AUTONEG_DISABLE &&
2340 ((cmd->base.speed != SPEED_100 &&
2341 cmd->base.speed != SPEED_10) ||
2342 (cmd->base.duplex != DUPLEX_HALF &&
2343 cmd->base.duplex != DUPLEX_FULL)))
2344 return -EINVAL;
2345
2346 /* Ok, do it to it. */
2347 spin_lock_irq(&hp->happy_lock);
2348 del_timer(&hp->happy_timer);
2349 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2350 spin_unlock_irq(&hp->happy_lock);
2351
2352 return 0;
2353}
2354
2355static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2356{
2357 struct happy_meal *hp = netdev_priv(dev);
2358
2359 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
2360 if (hp->happy_flags & HFLAG_PCI) {
2361 struct pci_dev *pdev = hp->happy_dev;
2362 strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
2363 }
2364#ifdef CONFIG_SBUS
2365 else {
2366 const struct linux_prom_registers *regs;
2367 struct platform_device *op = hp->happy_dev;
2368 regs = of_get_property(op->dev.of_node, "regs", NULL);
2369 if (regs)
2370 snprintf(info->bus_info, sizeof(info->bus_info),
2371 "SBUS:%d",
2372 regs->which_io);
2373 }
2374#endif
2375}
2376
2377static u32 hme_get_link(struct net_device *dev)
2378{
2379 struct happy_meal *hp = netdev_priv(dev);
2380
2381 spin_lock_irq(&hp->happy_lock);
	hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
2383 spin_unlock_irq(&hp->happy_lock);
2384
2385 return hp->sw_bmsr & BMSR_LSTATUS;
2386}
2387
2388static const struct ethtool_ops hme_ethtool_ops = {
2389 .get_drvinfo = hme_get_drvinfo,
2390 .get_link = hme_get_link,
2391 .get_link_ksettings = hme_get_link_ksettings,
2392 .set_link_ksettings = hme_set_link_ksettings,
2393};
2394
2395#ifdef CONFIG_SBUS
/* Given a happy meal sbus device, find its quattro parent.
2397 * If none exist, allocate and return a new one.
2398 *
2399 * Return NULL on failure.
2400 */
2401static struct quattro *quattro_sbus_find(struct platform_device *child)
2402{
2403 struct device *parent = child->dev.parent;
2404 struct platform_device *op;
2405 struct quattro *qp;
2406
2407 op = to_platform_device(parent);
2408 qp = platform_get_drvdata(op);
2409 if (qp)
2410 return qp;
2411
2412 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2413 if (!qp)
2414 return NULL;
2415
2416 qp->quattro_dev = child;
2417 qp->next = qfe_sbus_list;
2418 qfe_sbus_list = qp;
2419
2420 platform_set_drvdata(op, qp);
2421 return qp;
2422}
2423
2424/* After all quattro cards have been probed, we call these functions
2425 * to register the IRQ handlers for the cards that have been
 * successfully probed and skip the cards that failed to initialize.
2427 */
2428static int __init quattro_sbus_register_irqs(void)
2429{
2430 struct quattro *qp;
2431
2432 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2433 struct platform_device *op = qp->quattro_dev;
2434 int err, qfe_slot, skip = 0;
2435
2436 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2437 if (!qp->happy_meals[qfe_slot])
2438 skip = 1;
2439 }
2440 if (skip)
2441 continue;
2442
2443 err = request_irq(op->archdata.irqs[0],
2444 quattro_sbus_interrupt,
2445 IRQF_SHARED, "Quattro",
2446 qp);
2447 if (err != 0) {
2448 dev_err(&op->dev,
2449 "Quattro HME: IRQ registration error %d.\n",
2450 err);
2451 return err;
2452 }
2453 }
2454
2455 return 0;
2456}
2457
2458static void quattro_sbus_free_irqs(void)
2459{
2460 struct quattro *qp;
2461
2462 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2463 struct platform_device *op = qp->quattro_dev;
2464 int qfe_slot, skip = 0;
2465
2466 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
2467 if (!qp->happy_meals[qfe_slot])
2468 skip = 1;
2469 }
2470 if (skip)
2471 continue;
2472
2473 free_irq(op->archdata.irqs[0], qp);
2474 }
2475}
2476#endif /* CONFIG_SBUS */
2477
2478#ifdef CONFIG_PCI
2479static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2480{
2481 int i;
2482 struct pci_dev *bdev = pdev->bus->self;
2483 struct quattro *qp;
2484
2485 if (!bdev)
2486 return ERR_PTR(-ENODEV);
2487
2488 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2489 struct pci_dev *qpdev = qp->quattro_dev;
2490
2491 if (qpdev == bdev)
2492 return qp;
2493 }
2494
2495 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2496 if (!qp)
2497 return ERR_PTR(-ENOMEM);
2498
2499 for (i = 0; i < 4; i++)
2500 qp->happy_meals[i] = NULL;
2501
2502 qp->quattro_dev = bdev;
2503 qp->next = qfe_pci_list;
2504 qfe_pci_list = qp;
2505
2506 /* No range tricks necessary on PCI. */
2507 qp->nranges = 0;
2508 return qp;
2509}
2510#endif /* CONFIG_PCI */
2511
2512static const struct net_device_ops hme_netdev_ops = {
2513 .ndo_open = happy_meal_open,
2514 .ndo_stop = happy_meal_close,
2515 .ndo_start_xmit = happy_meal_start_xmit,
2516 .ndo_tx_timeout = happy_meal_tx_timeout,
2517 .ndo_get_stats = happy_meal_get_stats,
2518 .ndo_set_rx_mode = happy_meal_set_multicast,
2519 .ndo_set_mac_address = eth_mac_addr,
2520 .ndo_validate_addr = eth_validate_addr,
2521};
2522
2523#ifdef CONFIG_SBUS
2524static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2525{
2526 struct device_node *dp = op->dev.of_node, *sbus_dp;
2527 struct quattro *qp = NULL;
2528 struct happy_meal *hp;
2529 struct net_device *dev;
2530 int i, qfe_slot = -1;
2531 u8 addr[ETH_ALEN];
2532 int err = -ENODEV;
2533
2534 sbus_dp = op->dev.parent->of_node;
2535
2536 /* We can match PCI devices too, do not accept those here. */
2537 if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi"))
2538 return err;
2539
2540 if (is_qfe) {
2541 qp = quattro_sbus_find(op);
2542 if (qp == NULL)
2543 goto err_out;
2544 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2545 if (qp->happy_meals[qfe_slot] == NULL)
2546 break;
2547 if (qfe_slot == 4)
2548 goto err_out;
2549 }
2550
2551 err = -ENOMEM;
2552 dev = alloc_etherdev(sizeof(struct happy_meal));
2553 if (!dev)
2554 goto err_out;
2555 SET_NETDEV_DEV(dev, &op->dev);
2556
	/* If the user did not specify a MAC address explicitly, use
2558 * the Quattro local-mac-address property...
2559 */
2560 for (i = 0; i < 6; i++) {
2561 if (macaddr[i] != 0)
2562 break;
2563 }
2564 if (i < 6) { /* a mac address was given */
2565 for (i = 0; i < 6; i++)
2566 addr[i] = macaddr[i];
2567 eth_hw_addr_set(dev, addr);
2568 macaddr[5]++;
2569 } else {
2570 const unsigned char *addr;
2571 int len;
2572
2573 addr = of_get_property(dp, "local-mac-address", &len);
2574
2575 if (qfe_slot != -1 && addr && len == ETH_ALEN)
2576 eth_hw_addr_set(dev, addr);
2577 else
2578 eth_hw_addr_set(dev, idprom->id_ethaddr);
2579 }
2580
2581 hp = netdev_priv(dev);
2582
2583 hp->happy_dev = op;
2584 hp->dma_dev = &op->dev;
2585
2586 spin_lock_init(&hp->happy_lock);
2587
2588 err = -ENODEV;
2589 if (qp != NULL) {
2590 hp->qfe_parent = qp;
2591 hp->qfe_ent = qfe_slot;
2592 qp->happy_meals[qfe_slot] = dev;
2593 }
2594
2595 hp->gregs = of_ioremap(&op->resource[0], 0,
2596 GREG_REG_SIZE, "HME Global Regs");
2597 if (!hp->gregs) {
2598 dev_err(&op->dev, "Cannot map global registers.\n");
2599 goto err_out_free_netdev;
2600 }
2601
2602 hp->etxregs = of_ioremap(&op->resource[1], 0,
2603 ETX_REG_SIZE, "HME TX Regs");
2604 if (!hp->etxregs) {
2605 dev_err(&op->dev, "Cannot map MAC TX registers.\n");
2606 goto err_out_iounmap;
2607 }
2608
2609 hp->erxregs = of_ioremap(&op->resource[2], 0,
2610 ERX_REG_SIZE, "HME RX Regs");
2611 if (!hp->erxregs) {
2612 dev_err(&op->dev, "Cannot map MAC RX registers.\n");
2613 goto err_out_iounmap;
2614 }
2615
2616 hp->bigmacregs = of_ioremap(&op->resource[3], 0,
2617 BMAC_REG_SIZE, "HME BIGMAC Regs");
2618 if (!hp->bigmacregs) {
2619 dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
2620 goto err_out_iounmap;
2621 }
2622
2623 hp->tcvregs = of_ioremap(&op->resource[4], 0,
				 TCVR_REG_SIZE, "HME Transceiver Regs");
	if (!hp->tcvregs) {
		dev_err(&op->dev, "Cannot map TCVR registers.\n");
		goto err_out_iounmap;
	}

	hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
	if (hp->hm_revision == 0xff)
		hp->hm_revision = 0xa0;

	/* Now enable the feature flags we can. */
	if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
		hp->happy_flags = HFLAG_20_21;
	else if (hp->hm_revision != 0xa0)
		hp->happy_flags = HFLAG_NOT_A0;

	if (qp != NULL)
		hp->happy_flags |= HFLAG_QUATTRO;

	/* Get the supported DVMA burst sizes from our Happy SBUS. */
	hp->happy_bursts = of_getintprop_default(sbus_dp,
						 "burst-sizes", 0x00);

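	/* One page of DMA-coherent memory holds the shared TX/RX
	 * descriptor rings (the init block).
	 */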
	hp->happy_block = dma_alloc_coherent(hp->dma_dev,
					     PAGE_SIZE,
					     &hp->hblock_dvma,
					     GFP_ATOMIC);
	err = -ENOMEM;
	if (!hp->happy_block)
		goto err_out_iounmap;

	/* Force check of the link first time we are brought up. */
	hp->linkcheck = 0;

	/* Force timer state to 'asleep' with count of zero. */
	hp->timer_state = asleep;
	hp->timer_ticks = 0;

	timer_setup(&hp->happy_timer, happy_meal_timer, 0);

	hp->dev = dev;
	dev->netdev_ops = &hme_netdev_ops;
	dev->watchdog_timeo = 5*HZ;
	dev->ethtool_ops = &hme_ethtool_ops;

	/* Happy Meal can do it all... */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;

	hp->irq = op->archdata.irqs[0];

#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Hook up SBUS register/descriptor accessors. */
	hp->read_desc32 = sbus_hme_read_desc32;
	hp->write_txd = sbus_hme_write_txd;
	hp->write_rxd = sbus_hme_write_rxd;
	hp->read32 = sbus_hme_read32;
	hp->write32 = sbus_hme_write32;
#endif

	/* Grrr, Happy Meal comes up by default not advertising
	 * full duplex 100baseT capabilities, fix this.
	 */
	spin_lock_irq(&hp->happy_lock);
	happy_meal_set_initial_advertisement(hp);
	spin_unlock_irq(&hp->happy_lock);

	err = register_netdev(hp->dev);
	if (err) {
		dev_err(&op->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_coherent;
	}

	platform_set_drvdata(op, hp);

	if (qfe_slot != -1)
		netdev_info(dev,
			    "Quattro HME slot %d (SBUS) 10/100baseT Ethernet %pM\n",
			    qfe_slot, dev->dev_addr);
	else
		netdev_info(dev, "HAPPY MEAL (SBUS) 10/100baseT Ethernet %pM\n",
			    dev->dev_addr);

	return 0;

err_out_free_coherent:
	dma_free_coherent(hp->dma_dev,
			  PAGE_SIZE,
			  hp->happy_block,
			  hp->hblock_dvma);

err_out_iounmap:
	if (hp->gregs)
		of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
	if (hp->etxregs)
		of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
	if (hp->erxregs)
		of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
	if (hp->bigmacregs)
		of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
	if (hp->tcvregs)
		of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);

	if (qp)
		qp->happy_meals[qfe_slot] = NULL;

err_out_free_netdev:
	free_netdev(dev);

err_out:
	return err;
}
#endif

#ifdef CONFIG_PCI
#ifndef CONFIG_SPARC
static int is_quattro_p(struct pci_dev *pdev)
{
	struct pci_dev *busdev = pdev->bus->self;
	struct pci_dev *this_pdev;
	int n_hmes;

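	/* A Quattro (QFE) card presents four HME functions behind a
	 * DEC 21153 PCI-PCI bridge; anything else is a single HME.
	 */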
	if (busdev == NULL ||
	    busdev->vendor != PCI_VENDOR_ID_DEC ||
	    busdev->device != PCI_DEVICE_ID_DEC_21153)
		return 0;

	n_hmes = 0;
	list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
		if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
		    this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
			n_hmes++;
	}

	if (n_hmes != 4)
		return 0;

	return 1;
}

/* Fetch MAC address from vital product data of PCI ROM. */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;

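		/* Look for a VPD-R resource (tag 0x90, 9-byte payload)
		 * carrying the "NA" keyword with a 6-byte value: the
		 * card's network (MAC) address.
		 */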
		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		if (index == 0) {
			int i;

			for (i = 0; i < 6; i++)
				dev_addr[i] = readb(p + i);
			return 1;
		}
		index--;
	}
	return 0;
}

static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int index = 0;
		int found;

		if (is_quattro_p(pdev))
			index = PCI_SLOT(pdev->devfn);

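		/* Only trust the VPD if the expansion ROM starts with the
		 * standard 0x55 0xAA signature.
		 */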
		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(&dev_addr[3], 3);
}
#endif /* !(CONFIG_SPARC) */

static int happy_meal_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct quattro *qp = NULL;
#ifdef CONFIG_SPARC
	struct device_node *dp;
#endif
	struct happy_meal *hp;
	struct net_device *dev;
	void __iomem *hpreg_base;
	struct resource *hpreg_res;
	int i, qfe_slot = -1;
	char prom_name[64];
	u8 addr[ETH_ALEN];
	int err;

	/* Now make sure pci_dev cookie is there. */
#ifdef CONFIG_SPARC
	dp = pci_device_to_OF_node(pdev);
	snprintf(prom_name, sizeof(prom_name), "%pOFn", dp);
#else
	if (is_quattro_p(pdev))
		strcpy(prom_name, "SUNW,qfe");
	else
		strcpy(prom_name, "SUNW,hme");
#endif

	err = pcim_enable_device(pdev);
	if (err)
		goto err_out;
	pci_set_master(pdev);

	if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
		qp = quattro_pci_find(pdev);
		if (IS_ERR(qp)) {
			err = PTR_ERR(qp);
			goto err_out;
		}

		for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
			if (!qp->happy_meals[qfe_slot])
				break;

		if (qfe_slot == 4) {
			err = -ENODEV;
			goto err_out;
		}
	}

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct happy_meal));
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	hp = netdev_priv(dev);

	hp->happy_dev = pdev;
	hp->dma_dev = &pdev->dev;

	spin_lock_init(&hp->happy_lock);

	if (qp != NULL) {
		hp->qfe_parent = qp;
		hp->qfe_ent = qfe_slot;
		qp->happy_meals[qfe_slot] = dev;
	}

	err = -EINVAL;
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		dev_err(&pdev->dev,
			"Cannot find proper PCI device base address.\n");
		goto err_out_clear_quattro;
	}

	hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0),
					pci_resource_len(pdev, 0), DRV_NAME);
	if (!hpreg_res) {
		err = -EBUSY;
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_clear_quattro;
	}

	hpreg_base = pcim_iomap(pdev, 0, 0x8000);
	if (!hpreg_base) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Unable to remap card memory.\n");
		goto err_out_clear_quattro;
	}

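	/* Use a MAC address given via the macaddr= module parameter, if
	 * any, bumping the last octet so each port gets a unique address;
	 * otherwise fall back to firmware properties or ROM VPD.
	 */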
	for (i = 0; i < 6; i++) {
		if (macaddr[i] != 0)
			break;
	}
	if (i < 6) { /* a mac address was given */
		for (i = 0; i < 6; i++)
			addr[i] = macaddr[i];
		eth_hw_addr_set(dev, addr);
		macaddr[5]++;
	} else {
#ifdef CONFIG_SPARC
		const unsigned char *addr;
		int len;

		if (qfe_slot != -1 &&
		    (addr = of_get_property(dp, "local-mac-address", &len))
			!= NULL &&
		    len == 6) {
			eth_hw_addr_set(dev, addr);
		} else {
			eth_hw_addr_set(dev, idprom->id_ethaddr);
		}
#else
		u8 addr[ETH_ALEN];

		get_hme_mac_nonsparc(pdev, addr);
		eth_hw_addr_set(dev, addr);
#endif
	}

	/* Layout registers. */
	hp->gregs      = (hpreg_base + 0x0000UL);
	hp->etxregs    = (hpreg_base + 0x2000UL);
	hp->erxregs    = (hpreg_base + 0x4000UL);
	hp->bigmacregs = (hpreg_base + 0x6000UL);
	hp->tcvregs    = (hpreg_base + 0x7000UL);

#ifdef CONFIG_SPARC
	hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
	if (hp->hm_revision == 0xff)
		hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
#else
	/* Assume a revision of 0x20; this works on non-sparc hosts. */
	hp->hm_revision = 0x20;
#endif

	/* Now enable the feature flags we can. */
	if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
		hp->happy_flags = HFLAG_20_21;
	else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
		hp->happy_flags = HFLAG_NOT_A0;

	if (qp != NULL)
		hp->happy_flags |= HFLAG_QUATTRO;

	/* And of course, indicate this is PCI. */
	hp->happy_flags |= HFLAG_PCI;

#ifdef CONFIG_SPARC
	/* Assume PCI happy meals can handle all burst sizes. */
	hp->happy_bursts = DMA_BURSTBITS;
#endif

	hp->happy_block = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
					      &hp->hblock_dvma, GFP_KERNEL);
	if (!hp->happy_block) {
		err = -ENOMEM;
		goto err_out_clear_quattro;
	}

	hp->linkcheck = 0;
	hp->timer_state = asleep;
	hp->timer_ticks = 0;

	timer_setup(&hp->happy_timer, happy_meal_timer, 0);

	hp->irq = pdev->irq;
	hp->dev = dev;
	dev->netdev_ops = &hme_netdev_ops;
	dev->watchdog_timeo = 5*HZ;
	dev->ethtool_ops = &hme_ethtool_ops;

	/* Happy Meal can do it all... */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;

#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Hook up PCI register/descriptor accessors. */
	hp->read_desc32 = pci_hme_read_desc32;
	hp->write_txd = pci_hme_write_txd;
	hp->write_rxd = pci_hme_write_rxd;
	hp->read32 = pci_hme_read32;
	hp->write32 = pci_hme_write32;
#endif

	/* Grrr, Happy Meal comes up by default not advertising
	 * full duplex 100baseT capabilities, fix this.
	 */
	spin_lock_irq(&hp->happy_lock);
	happy_meal_set_initial_advertisement(hp);
	spin_unlock_irq(&hp->happy_lock);

	err = devm_register_netdev(&pdev->dev, dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_clear_quattro;
	}

	pci_set_drvdata(pdev, hp);

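	/* Report the Quattro bridge details only once, from the port in
	 * QFE slot 0.
	 */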
	if (!qfe_slot) {
		struct pci_dev *qpdev = qp->quattro_dev;

		prom_name[0] = 0;
		if (!strncmp(dev->name, "eth", 3)) {
			int i = simple_strtoul(dev->name + 3, NULL, 10);
			sprintf(prom_name, "-%d", i + 3);
		}
		netdev_info(dev,
			    "%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet bridge %04x.%04x\n",
			    prom_name, qpdev->vendor, qpdev->device);
	}

	if (qfe_slot != -1)
		netdev_info(dev,
			    "Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
			    qfe_slot, dev->dev_addr);
	else
		netdev_info(dev,
			    "HAPPY MEAL (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
			    dev->dev_addr);

	return 0;

err_out_clear_quattro:
	if (qp != NULL)
		qp->happy_meals[qfe_slot] = NULL;

err_out:
	return err;
}

static const struct pci_device_id happymeal_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
	{ }			/* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);

static struct pci_driver hme_pci_driver = {
	.name		= "hme",
	.id_table	= happymeal_pci_ids,
	.probe		= happy_meal_pci_probe,
};

static int __init happy_meal_pci_init(void)
{
	return pci_register_driver(&hme_pci_driver);
}

static void happy_meal_pci_exit(void)
{
	pci_unregister_driver(&hme_pci_driver);

	while (qfe_pci_list) {
		struct quattro *qfe = qfe_pci_list;
		struct quattro *next = qfe->next;

		kfree(qfe);

		qfe_pci_list = next;
	}
}

#endif

#ifdef CONFIG_SBUS
static const struct of_device_id hme_sbus_match[];
static int hme_sbus_probe(struct platform_device *op)
{
	const struct of_device_id *match;
	struct device_node *dp = op->dev.of_node;
	const char *model = of_get_property(dp, "model", NULL);
	int is_qfe;

	match = of_match_device(hme_sbus_match, &op->dev);
	if (!match)
		return -EINVAL;
	is_qfe = (match->data != NULL);

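	/* Some QFE ports show up as plain "SUNW,hme" nodes whose "model"
	 * property identifies the Quattro variant.
	 */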
	if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
		is_qfe = 1;

	return happy_meal_sbus_probe_one(op, is_qfe);
}

static int hme_sbus_remove(struct platform_device *op)
{
	struct happy_meal *hp = platform_get_drvdata(op);
	struct net_device *net_dev = hp->dev;

	unregister_netdev(net_dev);

	/* XXX qfe parent interrupt... */

	of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
	of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
	of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
	of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
	of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
	dma_free_coherent(hp->dma_dev,
			  PAGE_SIZE,
			  hp->happy_block,
			  hp->hblock_dvma);

	free_netdev(net_dev);

	return 0;
}

static const struct of_device_id hme_sbus_match[] = {
	{
		.name = "SUNW,hme",
	},
	{
		.name = "SUNW,qfe",
		.data = (void *) 1,
	},
	{
		.name = "qfe",
		.data = (void *) 1,
	},
	{},
};

MODULE_DEVICE_TABLE(of, hme_sbus_match);

static struct platform_driver hme_sbus_driver = {
	.driver = {
		.name = "hme",
		.of_match_table = hme_sbus_match,
	},
	.probe		= hme_sbus_probe,
	.remove		= hme_sbus_remove,
};

static int __init happy_meal_sbus_init(void)
{
	int err;

	err = platform_driver_register(&hme_sbus_driver);
	if (!err)
		err = quattro_sbus_register_irqs();

	return err;
}

static void happy_meal_sbus_exit(void)
{
	platform_driver_unregister(&hme_sbus_driver);
	quattro_sbus_free_irqs();

	while (qfe_sbus_list) {
		struct quattro *qfe = qfe_sbus_list;
		struct quattro *next = qfe->next;

		kfree(qfe);

		qfe_sbus_list = next;
	}
}
#endif

static int __init happy_meal_probe(void)
{
	int err = 0;

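	/* Register the SBUS driver first; if PCI registration then fails,
	 * unwind the SBUS registration so the module load fails cleanly.
	 */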
#ifdef CONFIG_SBUS
	err = happy_meal_sbus_init();
#endif
#ifdef CONFIG_PCI
	if (!err) {
		err = happy_meal_pci_init();
#ifdef CONFIG_SBUS
		if (err)
			happy_meal_sbus_exit();
#endif
	}
#endif

	return err;
}

static void __exit happy_meal_exit(void)
{
#ifdef CONFIG_SBUS
	happy_meal_sbus_exit();
#endif
#ifdef CONFIG_PCI
	happy_meal_pci_exit();
#endif
}

module_init(happy_meal_probe);
module_exit(happy_meal_exit);