/*
 * Driver for the Macintosh 68K onboard MACE controller with PSC
 * driven DMA. The MACE driver code is derived from mace.c. The
 * Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 1996 Paul Mackerras.
 * Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 * Copyright (C) 2007 Finn Thain
 *
 * Converted to DMA API, converted to unified driver model,
 * sync'd some routines with mace.c and fixed various bugs.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

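/*
 * Ring geometry: the *_ORDER values below are log2 of the ring lengths,
 * giving one transmit buffer and eight receive buffers of
 * MACE_BUFF_SIZE (2 KiB) each.
 */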
#define N_TX_BUFF_ORDER 0
#define N_TX_RING (1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER 3
#define N_RX_RING (1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT HZ

#define MACE_BUFF_SIZE 0x800

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV 0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE (void *)(0x50F1C000)
#define MACE_PROM (void *)(0x50F08001)

struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};

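/*
 * Layout of each receive ring buffer as the DMA engine fills it in: a
 * small status header followed by the frame data. Going by its use in
 * mace_dma_rx_frame(), rcvcnt holds the low 8 bits of the received byte
 * count and the low nibble of rcvsts holds the high 4 bits alongside
 * the RS_* status flags; rntpc and rcvcc appear to mirror the MACE
 * runt-packet and receive-collision counters.
 */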
struct mace_frame {
	u8 rcvcnt;
	u8 pad1;
	u8 rcvsts;
	u8 pad2;
	u8 rntpc;
	u8 pad3;
	u8 rcvcc;
	u8 pad4;
	u32 pad5;
	u32 pad6;
	u8 data[1];
	/* And frame continues.. */
};

#define PRIV_BYTES sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

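/* 'set' selects one of the two PSC register sets: 0x00 or 0x10 */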
static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}

/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

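	/* prime both PSC register sets so receive DMA can alternate between them */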
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open = mace_open,
	.ndo_stop = mace_close,
	.ndo_start_xmit = mace_xmit_start,
	.ndo_tx_timeout = mace_tx_timeout,
	.ndo_set_rx_mode = mace_set_multicast,
	.ndo_set_mac_address = mace_set_address,
	.ndo_validate_addr = eth_validate_addr,
};

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

static int mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	int err;

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out in a strange boundary and the
	 * bits are reversed.
	 */

	addr = MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops = &mace_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	pr_info("Onboard MACE, hardware address %pM, chip revision 0x%04X\n",
		dev->dev_addr, mp->chipid);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT;	/* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL)
		goto out1;

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL)
		goto out2;

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;
out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx */
	mb->imr = 0xFF;		/* disable all irqs */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

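	/* flip to the other PSC register set for the next transmit */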
	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}

static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;	/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc;	/* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir;	/* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after a xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO)
			dev->stats.rx_fifo_errors++;
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = netdev_alloc_skb(dev, frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		skb_put_data(skb, mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000)) return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

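		/*
		 * The driver treats LEN as counting down from N_RX_RING as
		 * buffers are filled, so N_RX_RING - left is one past the
		 * last buffer to process.
		 */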
		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");

static int mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
			  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe = mace_probe,
	.remove = mac_mace_device_remove,
	.driver = {
		.name = mac_mace_string,
	},
};

module_platform_driver(mac_mace_driver);