1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
8 * (c) Copyright 2000, 2001 Red Hat Inc
9 *
10 * Development of this driver was funded by Equiinet Ltd
11 * http://www.equiinet.com
12 *
13 * ChangeLog:
14 *
15 * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
16 * unification of all the Z85x30 asynchronous drivers for real.
17 *
18 * DMA now uses get_free_page as kmalloc buffers may span a 64K
19 * boundary.
20 *
21 * Modified for SMP safety and SMP locking by Alan Cox
22 * <alan@lxorguk.ukuu.org.uk>
23 *
24 * Performance
25 *
26 * Z85230:
27 * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
28 * X.25 is not unrealistic on all machines. DMA mode can in theory
29 * handle T1/E1 quite nicely. In practice the limit seems to be about
30 * 512Kbit->1Mbit depending on motherboard.
31 *
32 * Z85C30:
33 * 64K will take DMA, 9600 baud X.25 should be ok.
34 *
35 * Z8530:
36 * Synchronous mode without DMA is unlikely to pass about 2400 baud.
37 */
38
39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/mm.h>
44#include <linux/net.h>
45#include <linux/skbuff.h>
46#include <linux/netdevice.h>
47#include <linux/if_arp.h>
48#include <linux/delay.h>
49#include <linux/hdlc.h>
50#include <linux/ioport.h>
51#include <linux/init.h>
52#include <linux/gfp.h>
53#include <asm/dma.h>
54#include <asm/io.h>
55#define RT_LOCK
56#define RT_UNLOCK
57#include <linux/spinlock.h>
58
59#include "z85230.h"
60
61
62/**
63 * z8530_read_port - Architecture specific interface function
64 * @p: port to read
65 *
66 * Provided port access methods. The Comtrol SV11 requires no delays
67 * between accesses and uses PC I/O. Some drivers may need a 5uS delay
68 *
69 * In the longer term this should become an architecture specific
70 * section so that this can become a generic driver interface for all
71 * platforms. For now we only handle PC I/O ports with or without the
72 * dread 5uS sanity delay.
73 *
74 * The caller must hold sufficient locks to avoid violating the horrible
75 * 5uS delay rule.
76 */
77
78static inline int z8530_read_port(unsigned long p)
79{
80 u8 r=inb(Z8530_PORT_OF(p));
81 if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
82 udelay(5);
83 return r;
84}
85
86/**
87 * z8530_write_port - Architecture specific interface function
88 * @p: port to write
89 * @d: value to write
90 *
91 * Write a value to a port with delays if need be. Note that the
92 * caller must hold locks to avoid read/writes from other contexts
93 * violating the 5uS rule
94 *
95 * In the longer term this should become an architecture specific
96 * section so that this can become a generic driver interface for all
97 * platforms. For now we only handle PC I/O ports with or without the
98 * dread 5uS sanity delay.
99 */
100
101
102static inline void z8530_write_port(unsigned long p, u8 d)
103{
104 outb(d,Z8530_PORT_OF(p));
105 if(p&Z8530_PORT_SLEEP)
106 udelay(5);
107}
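/*
 * Illustrative sketch (not part of this file): a board driver that needs
 * the 5uS access delay passes its port values with the Z8530_PORT_SLEEP
 * flag (see z85230.h) OR'ed in, so the two helpers above add udelay(5)
 * after every access. The iobase value and register offsets below are
 * hypothetical and board specific.
 *
 *	slow_card->sync.chanA.ctrlio = (iobase + 0) | Z8530_PORT_SLEEP;
 *	slow_card->sync.chanA.dataio = (iobase + 1) | Z8530_PORT_SLEEP;
 */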
108
109
110
111static void z8530_rx_done(struct z8530_channel *c);
112static void z8530_tx_done(struct z8530_channel *c);
113
114
115/**
116 * read_zsreg - Read a register from a Z85230
117 * @c: Z8530 channel to read from (2 per chip)
118 * @reg: Register to read
119 * FIXME: Use a spinlock.
120 *
121 * Most of the Z8530 registers are indexed off the control registers.
122 * A read is done by writing to the control register and reading the
123 * register back. The caller must hold the lock
124 */
125
126static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
127{
128 if(reg)
129 z8530_write_port(c->ctrlio, reg);
130 return z8530_read_port(c->ctrlio);
131}
132
133/**
134 * read_zsdata - Read the data port of a Z8530 channel
135 * @c: The Z8530 channel to read the data port from
136 *
137 * The data port provides fast access to some things. We still
138 * have all the 5uS delays to worry about.
139 */
140
141static inline u8 read_zsdata(struct z8530_channel *c)
142{
143 u8 r;
144 r=z8530_read_port(c->dataio);
145 return r;
146}
147
148/**
149 * write_zsreg - Write to a Z8530 channel register
150 * @c: The Z8530 channel
151 * @reg: Register number
152 * @val: Value to write
153 *
154 * Write a value to an indexed register. The caller must hold the lock
155 * to honour the irritating delay rules. We know about register 0
156 * being fast to access.
157 *
158 * Assumes c->lock is held.
159 */
160static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
161{
162 if(reg)
163 z8530_write_port(c->ctrlio, reg);
164 z8530_write_port(c->ctrlio, val);
165
166}
167
168/**
169 * write_zsctrl - Write to a Z8530 control register
170 * @c: The Z8530 channel
171 * @val: Value to write
172 *
173 * Write directly to the control register on the Z8530
174 */
175
176static inline void write_zsctrl(struct z8530_channel *c, u8 val)
177{
178 z8530_write_port(c->ctrlio, val);
179}
180
181/**
182 * write_zsdata - Write to a Z8530 data register
183 * @c: The Z8530 channel
184 * @val: Value to write
185 *
186 * Write directly to the data register on the Z8530
187 */
188
189
190static inline void write_zsdata(struct z8530_channel *c, u8 val)
191{
192 z8530_write_port(c->dataio, val);
193}
194
195/*
196 * Register loading parameters for a dead port
197 */
198
199u8 z8530_dead_port[]=
200{
201 255
202};
203
204EXPORT_SYMBOL(z8530_dead_port);
205
206/*
207 * Register loading parameters for currently supported circuit types
208 */
209
210
211/*
212 * Data clocked by telco end. This is the correct data for the UK
213 * "kilostream" service, and most other similar services.
214 */
215
216u8 z8530_hdlc_kilostream[]=
217{
218 4, SYNC_ENAB|SDLC|X1CLK,
219 2, 0, /* No vector */
220 1, 0,
221 3, ENT_HM|RxCRC_ENAB|Rx8,
222 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
223 9, 0, /* Disable interrupts */
224 6, 0xFF,
225 7, FLAG,
226 10, ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
227 11, TCTRxCP,
228 14, DISDPLL,
229 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
230 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
231 9, NV|MIE|NORESET,
232 255
233};
234
235EXPORT_SYMBOL(z8530_hdlc_kilostream);
236
237/*
238 * As above but for enhanced chips.
239 */
240
241u8 z8530_hdlc_kilostream_85230[]=
242{
243 4, SYNC_ENAB|SDLC|X1CLK,
244 2, 0, /* No vector */
245 1, 0,
246 3, ENT_HM|RxCRC_ENAB|Rx8,
247 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
248 9, 0, /* Disable interrupts */
249 6, 0xFF,
250 7, FLAG,
251 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
252 11, TCTRxCP,
253 14, DISDPLL,
254 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
255 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
256 9, NV|MIE|NORESET,
257 23, 3, /* Extended mode AUTO TX and EOM*/
258
259 255
260};
261
262EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
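/*
 * Illustrative sketch (not part of this file): these tables are flat
 * register/value pairs consumed by z8530_channel_load() further down. A
 * register number above 0x0F selects the "prime" registers of the
 * enhanced parts, and 255 ends the table. A hypothetical minimal table
 * built from the same bit definitions would look like:
 *
 *	static u8 my_port_setup[] = {
 *		4, SYNC_ENAB | SDLC | X1CLK,
 *		3, ENT_HM | RxCRC_ENAB | Rx8,
 *		5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
 *		9, NV | MIE | NORESET,
 *		255
 *	};
 */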
263
264/**
265 * z8530_flush_fifo - Flush on chip RX FIFO
266 * @c: Channel to flush
267 *
268 * Flush the receive FIFO. There is no specific option for this, we
269 * blindly read bytes and discard them. Reading when there is no data
270 * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
271 *
272 * All locking is handled for the caller. On return data may still be
273 * present if it arrived during the flush.
274 */
275
276static void z8530_flush_fifo(struct z8530_channel *c)
277{
278 read_zsreg(c, R1);
279 read_zsreg(c, R1);
280 read_zsreg(c, R1);
281 read_zsreg(c, R1);
282 if(c->dev->type==Z85230)
283 {
284 read_zsreg(c, R1);
285 read_zsreg(c, R1);
286 read_zsreg(c, R1);
287 read_zsreg(c, R1);
288 }
289}
290
291/**
292 * z8530_rtsdtr - Control the outgoing DTR/RTS line
293 * @c: The Z8530 channel to control
294 * @set: 1 to set, 0 to clear
295 *
296 * Sets or clears DTR/RTS on the requested line. All locking is handled
297 * by the caller. For now we assume all boards use the actual RTS/DTR
298 * on the chip. Apparently one or two don't. We'll scream about them
299 * later.
300 */
301
302static void z8530_rtsdtr(struct z8530_channel *c, int set)
303{
304 if (set)
305 c->regs[5] |= (RTS | DTR);
306 else
307 c->regs[5] &= ~(RTS | DTR);
308 write_zsreg(c, R5, c->regs[5]);
309}
310
311/**
312 * z8530_rx - Handle a PIO receive event
313 * @c: Z8530 channel to process
314 *
315 * Receive handler for receiving in PIO mode. This is much like the
316 * async one but not quite the same or as complex
317 *
318 * Note: It's intended that this handler can easily be separated from
319 * the main code to run realtime. That'll be needed for some machines
320 * (eg to ever clock 64kbits on a sparc ;)).
321 *
322 * The RT_LOCK macros don't do anything now. Keep the code covered
323 * by them as short as possible in all circumstances - clocks cost
324 * baud. The interrupt handler is assumed to be atomic w.r.t.
325 * other code - this is true in the RT case too.
326 *
327 * We only cover the sync cases for this. If you want 2Mbit async
328 * do it yourself but consider medical assistance first. This non DMA
329 * synchronous mode is portable code. The DMA mode assumes PCI like
330 * ISA DMA
331 *
332 * Called with the device lock held
333 */
334
335static void z8530_rx(struct z8530_channel *c)
336{
337 u8 ch,stat;
338
339 while(1)
340 {
341 /* FIFO empty ? */
342 if(!(read_zsreg(c, R0)&1))
343 break;
344 ch=read_zsdata(c);
345 stat=read_zsreg(c, R1);
346
347 /*
348 * Overrun ?
349 */
350 if(c->count < c->max)
351 {
352 *c->dptr++=ch;
353 c->count++;
354 }
355
356 if(stat&END_FR)
357 {
358
359 /*
360 * Error ?
361 */
362 if(stat&(Rx_OVR|CRC_ERR))
363 {
364 /* Rewind the buffer and return */
365 if(c->skb)
366 c->dptr=c->skb->data;
367 c->count=0;
368 if(stat&Rx_OVR)
369 {
370 pr_warn("%s: overrun\n", c->dev->name);
371 c->rx_overrun++;
372 }
373 if(stat&CRC_ERR)
374 {
375 c->rx_crc_err++;
376 /* printk("crc error\n"); */
377 }
378 /* Shove the frame upstream */
379 }
380 else
381 {
382 /*
383 * Drop the lock for RX processing, or
384 * there are deadlocks
385 */
386 z8530_rx_done(c);
387 write_zsctrl(c, RES_Rx_CRC);
388 }
389 }
390 }
391 /*
392 * Clear irq
393 */
394 write_zsctrl(c, ERR_RES);
395 write_zsctrl(c, RES_H_IUS);
396}
397
398
399/**
400 * z8530_tx - Handle a PIO transmit event
401 * @c: Z8530 channel to process
402 *
403 * Z8530 transmit interrupt handler for the PIO mode. The basic
404 * idea is to attempt to keep the FIFO fed. We fill as many bytes
405 * in as possible, it's quite possible that we won't keep up with the
406 * data rate otherwise.
407 */
408
409static void z8530_tx(struct z8530_channel *c)
410{
411 while(c->txcount) {
412 /* FIFO full ? */
413 if(!(read_zsreg(c, R0)&4))
414 return;
415 c->txcount--;
416 /*
417 * Shovel out the byte
418 */
419 write_zsreg(c, R8, *c->tx_ptr++);
420 write_zsctrl(c, RES_H_IUS);
421 /* We are about to underflow */
422 if(c->txcount==0)
423 {
424 write_zsctrl(c, RES_EOM_L);
425 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
426 }
427 }
428
429
430 /*
431 * End of frame TX - fire another one
432 */
433
434 write_zsctrl(c, RES_Tx_P);
435
436 z8530_tx_done(c);
437 write_zsctrl(c, RES_H_IUS);
438}
439
440/**
441 * z8530_status - Handle a PIO status exception
442 * @chan: Z8530 channel to process
443 *
444 * A status event occurred in PIO synchronous mode. There are several
445 * reasons the chip will bother us here. A transmit underrun means we
446 * failed to feed the chip fast enough and just broke a packet. A DCD
447 * change is a line up or down.
448 */
449
450static void z8530_status(struct z8530_channel *chan)
451{
452 u8 status, altered;
453
454 status = read_zsreg(chan, R0);
455 altered = chan->status ^ status;
456
457 chan->status = status;
458
459 if (status & TxEOM) {
460/* printk("%s: Tx underrun.\n", chan->dev->name); */
461 chan->netdevice->stats.tx_fifo_errors++;
462 write_zsctrl(chan, ERR_RES);
463 z8530_tx_done(chan);
464 }
465
466 if (altered & chan->dcdcheck)
467 {
468 if (status & chan->dcdcheck) {
469 pr_info("%s: DCD raised\n", chan->dev->name);
470 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
471 if (chan->netdevice)
472 netif_carrier_on(chan->netdevice);
473 } else {
474 pr_info("%s: DCD lost\n", chan->dev->name);
475 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
476 z8530_flush_fifo(chan);
477 if (chan->netdevice)
478 netif_carrier_off(chan->netdevice);
479 }
480
481 }
482 write_zsctrl(chan, RES_EXT_INT);
483 write_zsctrl(chan, RES_H_IUS);
484}
485
486struct z8530_irqhandler z8530_sync =
487{
488 .rx = z8530_rx,
489 .tx = z8530_tx,
490 .status = z8530_status,
491};
492
493EXPORT_SYMBOL(z8530_sync);
494
495/**
496 * z8530_dma_rx - Handle a DMA RX event
497 * @chan: Channel to handle
498 *
499 * Non bus mastering DMA interfaces for the Z8x30 devices. This
500 * is really pretty PC specific. The DMA mode means that most receive
501 * events are handled by the DMA hardware. We get a kick here only if
502 * a frame ended.
503 */
504
505static void z8530_dma_rx(struct z8530_channel *chan)
506{
507 if(chan->rxdma_on)
508 {
509 /* Special condition check only */
510 u8 status;
511
512 read_zsreg(chan, R7);
513 read_zsreg(chan, R6);
514
515 status=read_zsreg(chan, R1);
516
517 if(status&END_FR)
518 {
519 z8530_rx_done(chan); /* Fire up the next one */
520 }
521 write_zsctrl(chan, ERR_RES);
522 write_zsctrl(chan, RES_H_IUS);
523 }
524 else
525 {
526 /* DMA is off right now, drain the slow way */
527 z8530_rx(chan);
528 }
529}
530
531/**
532 * z8530_dma_tx - Handle a DMA TX event
533 * @chan: The Z8530 channel to handle
534 *
535 * We have received an interrupt while doing DMA transmissions. It
536 * shouldn't happen. Scream loudly if it does.
537 */
538
539static void z8530_dma_tx(struct z8530_channel *chan)
540{
541 if(!chan->dma_tx)
542 {
543 pr_warn("Hey who turned the DMA off?\n");
544 z8530_tx(chan);
545 return;
546 }
547 /* This shouldn't occur in DMA mode */
548 pr_err("DMA tx - bogus event!\n");
549 z8530_tx(chan);
550}
551
552/**
553 * z8530_dma_status - Handle a DMA status exception
554 * @chan: Z8530 channel to process
555 *
556 * A status event occurred on the Z8530. We receive these for two reasons
557 * when in DMA mode. Firstly if we finished a packet transfer we get one
558 * and kick the next packet out. Secondly we may see a DCD change.
559 *
560 */
561
562static void z8530_dma_status(struct z8530_channel *chan)
563{
564 u8 status, altered;
565
566 status=read_zsreg(chan, R0);
567 altered=chan->status^status;
568
569 chan->status=status;
570
571
572 if(chan->dma_tx)
573 {
574 if(status&TxEOM)
575 {
576 unsigned long flags;
577
578 flags=claim_dma_lock();
579 disable_dma(chan->txdma);
580 clear_dma_ff(chan->txdma);
581 chan->txdma_on=0;
582 release_dma_lock(flags);
583 z8530_tx_done(chan);
584 }
585 }
586
587 if (altered & chan->dcdcheck)
588 {
589 if (status & chan->dcdcheck) {
590 pr_info("%s: DCD raised\n", chan->dev->name);
591 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
592 if (chan->netdevice)
593 netif_carrier_on(chan->netdevice);
594 } else {
595 pr_info("%s: DCD lost\n", chan->dev->name);
596 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
597 z8530_flush_fifo(chan);
598 if (chan->netdevice)
599 netif_carrier_off(chan->netdevice);
600 }
601 }
602
603 write_zsctrl(chan, RES_EXT_INT);
604 write_zsctrl(chan, RES_H_IUS);
605}
606
607static struct z8530_irqhandler z8530_dma_sync = {
608 .rx = z8530_dma_rx,
609 .tx = z8530_dma_tx,
610 .status = z8530_dma_status,
611};
612
613static struct z8530_irqhandler z8530_txdma_sync = {
614 .rx = z8530_rx,
615 .tx = z8530_dma_tx,
616 .status = z8530_dma_status,
617};
618
619/**
620 * z8530_rx_clear - Handle RX events from a stopped chip
621 * @c: Z8530 channel to shut up
622 *
623 * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
624 * For machines with PCI Z85x30 cards, or level triggered interrupts
625 * (eg the MacII) we must clear the interrupt cause or die.
626 */
627
628
629static void z8530_rx_clear(struct z8530_channel *c)
630{
631 /*
632 * Data and status bytes
633 */
634 u8 stat;
635
636 read_zsdata(c);
637 stat=read_zsreg(c, R1);
638
639 if(stat&END_FR)
640 write_zsctrl(c, RES_Rx_CRC);
641 /*
642 * Clear irq
643 */
644 write_zsctrl(c, ERR_RES);
645 write_zsctrl(c, RES_H_IUS);
646}
647
648/**
649 * z8530_tx_clear - Handle TX events from a stopped chip
650 * @c: Z8530 channel to shut up
651 *
652 * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
653 * For machines with PCI Z85x30 cards, or level triggered interrupts
654 * (eg the MacII) we must clear the interrupt cause or die.
655 */
656
657static void z8530_tx_clear(struct z8530_channel *c)
658{
659 write_zsctrl(c, RES_Tx_P);
660 write_zsctrl(c, RES_H_IUS);
661}
662
663/**
664 * z8530_status_clear - Handle status events from a stopped chip
665 * @chan: Z8530 channel to shut up
666 *
667 * Status interrupt vectors for a Z8530 that is in 'parked' mode.
668 * For machines with PCI Z85x30 cards, or level triggered interrupts
669 * (eg the MacII) we must clear the interrupt cause or die.
670 */
671
672static void z8530_status_clear(struct z8530_channel *chan)
673{
674 u8 status=read_zsreg(chan, R0);
675 if(status&TxEOM)
676 write_zsctrl(chan, ERR_RES);
677 write_zsctrl(chan, RES_EXT_INT);
678 write_zsctrl(chan, RES_H_IUS);
679}
680
681struct z8530_irqhandler z8530_nop=
682{
683 .rx = z8530_rx_clear,
684 .tx = z8530_tx_clear,
685 .status = z8530_status_clear,
686};
687
688
689EXPORT_SYMBOL(z8530_nop);
690
691/**
692 * z8530_interrupt - Handle an interrupt from a Z8530
693 * @irq: Interrupt number
694 * @dev_id: The Z8530 device that is interrupting.
695 *
696 * A Z85[2]30 device has stuck its hand in the air for attention.
697 * We scan both the channels on the chip for events and then call
698 * the channel specific call backs for each channel that has events.
699 * We have to use callback functions because the two channels can be
700 * in different modes.
701 *
702 * Locking is done for the handlers. Note that locking is done
703 * at the chip level (the 5uS delay issue is per chip not per
704 * channel). c->lock for both channels points to dev->lock
705 */
706
707irqreturn_t z8530_interrupt(int irq, void *dev_id)
708{
709 struct z8530_dev *dev=dev_id;
710 u8 uninitialized_var(intr);
711 static volatile int locker=0;
712 int work=0;
713 struct z8530_irqhandler *irqs;
714
715 if(locker)
716 {
717 pr_err("IRQ re-enter\n");
718 return IRQ_NONE;
719 }
720 locker=1;
721
722 spin_lock(&dev->lock);
723
724 while(++work<5000)
725 {
726
727 intr = read_zsreg(&dev->chanA, R3);
728 if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
729 break;
730
731 /* This holds the IRQ status. On the 8530 you must read it from chan
732 A even though it applies to the whole chip */
733
734 /* Now walk the chip and see what it is wanting - it may be
735 an IRQ for someone else remember */
736
737 irqs=dev->chanA.irqs;
738
739 if(intr & (CHARxIP|CHATxIP|CHAEXT))
740 {
741 if(intr&CHARxIP)
742 irqs->rx(&dev->chanA);
743 if(intr&CHATxIP)
744 irqs->tx(&dev->chanA);
745 if(intr&CHAEXT)
746 irqs->status(&dev->chanA);
747 }
748
749 irqs=dev->chanB.irqs;
750
751 if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
752 {
753 if(intr&CHBRxIP)
754 irqs->rx(&dev->chanB);
755 if(intr&CHBTxIP)
756 irqs->tx(&dev->chanB);
757 if(intr&CHBEXT)
758 irqs->status(&dev->chanB);
759 }
760 }
761 spin_unlock(&dev->lock);
762 if(work==5000)
763 pr_err("%s: interrupt jammed - abort(0x%X)!\n",
764 dev->name, intr);
765 /* Ok all done */
766 locker=0;
767 return IRQ_HANDLED;
768}
769
770EXPORT_SYMBOL(z8530_interrupt);
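/*
 * Illustrative sketch (not part of this file): a board driver hands this
 * handler straight to request_irq() with its struct z8530_dev as the
 * cookie. The card/name values are hypothetical, and IRQF_SHARED is only
 * right for boards whose interrupt line really can be shared.
 *
 *	if (request_irq(card->sync.irq, z8530_interrupt, IRQF_SHARED,
 *			"example-sync", &card->sync))
 *		goto err_out;
 */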
771
772static const u8 reg_init[16]=
773{
774 0,0,0,0,
775 0,0,0,0,
776 0,0,0,0,
777 0x55,0,0,0
778};
779
780
781/**
782 * z8530_sync_open - Open a Z8530 channel for PIO
783 * @dev: The network interface we are using
784 * @c: The Z8530 channel to open in synchronous PIO mode
785 *
786 * Switch a Z8530 into synchronous mode without DMA assist. We
787 * raise the RTS/DTR and commence network operation.
788 */
789
790int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
791{
792 unsigned long flags;
793
794 spin_lock_irqsave(c->lock, flags);
795
796 c->sync = 1;
797 c->mtu = dev->mtu+64;
798 c->count = 0;
799 c->skb = NULL;
800 c->skb2 = NULL;
801 c->irqs = &z8530_sync;
802
803 /* This loads the double buffer up */
804 z8530_rx_done(c); /* Load the frame ring */
805 z8530_rx_done(c); /* Load the backup frame */
806 z8530_rtsdtr(c,1);
807 c->dma_tx = 0;
808 c->regs[R1]|=TxINT_ENAB;
809 write_zsreg(c, R1, c->regs[R1]);
810 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
811
812 spin_unlock_irqrestore(c->lock, flags);
813 return 0;
814}
815
816
817EXPORT_SYMBOL(z8530_sync_open);
818
819/**
820 * z8530_sync_close - Close a PIO Z8530 channel
821 * @dev: Network device to close
822 * @c: Z8530 channel to disassociate and move to idle
823 *
824 * Close down a Z8530 interface and switch its interrupt handlers
825 * to discard future events.
826 */
827
828int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
829{
830 u8 chk;
831 unsigned long flags;
832
833 spin_lock_irqsave(c->lock, flags);
834 c->irqs = &z8530_nop;
835 c->max = 0;
836 c->sync = 0;
837
838 chk=read_zsreg(c,R0);
839 write_zsreg(c, R3, c->regs[R3]);
840 z8530_rtsdtr(c,0);
841
842 spin_unlock_irqrestore(c->lock, flags);
843 return 0;
844}
845
846EXPORT_SYMBOL(z8530_sync_close);
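/*
 * Illustrative sketch (not part of this file): a PIO mode card driver
 * pairs these calls in its net_device open/stop methods. The example
 * assumes the generic HDLC layer, so the private data is reached through
 * dev_to_hdlc(); the example_card type and function names are hypothetical.
 *
 *	static int example_open(struct net_device *d)
 *	{
 *		struct example_card *card = dev_to_hdlc(d)->priv;
 *
 *		if (z8530_sync_open(d, &card->sync.chanA))
 *			return -EBUSY;
 *		z8530_channel_load(&card->sync.chanA, z8530_hdlc_kilostream);
 *		netif_start_queue(d);
 *		return 0;
 *	}
 *
 *	static int example_close(struct net_device *d)
 *	{
 *		struct example_card *card = dev_to_hdlc(d)->priv;
 *
 *		netif_stop_queue(d);
 *		z8530_sync_close(d, &card->sync.chanA);
 *		return 0;
 *	}
 */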
847
848/**
849 * z8530_sync_dma_open - Open a Z8530 for DMA I/O
850 * @dev: The network device to attach
851 * @c: The Z8530 channel to configure in sync DMA mode.
852 *
853 * Set up a Z85x30 device for synchronous DMA in both directions. Two
854 * ISA DMA channels must be available for this to work. We assume ISA
855 * DMA driven I/O and PC limits on access.
856 */
857
858int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
859{
860 unsigned long cflags, dflags;
861
862 c->sync = 1;
863 c->mtu = dev->mtu+64;
864 c->count = 0;
865 c->skb = NULL;
866 c->skb2 = NULL;
867 /*
868 * Load the DMA interfaces up
869 */
870 c->rxdma_on = 0;
871 c->txdma_on = 0;
872
873 /*
874 * Allocate the DMA flip buffers. Limit by page size.
875 * Everyone runs 1500 mtu or less on wan links so this
876 * should be fine.
877 */
878
879 if(c->mtu > PAGE_SIZE/2)
880 return -EMSGSIZE;
881
882 c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
883 if(c->rx_buf[0]==NULL)
884 return -ENOBUFS;
885 c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
886
887 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
888 if(c->tx_dma_buf[0]==NULL)
889 {
890 free_page((unsigned long)c->rx_buf[0]);
891 c->rx_buf[0]=NULL;
892 return -ENOBUFS;
893 }
894 c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
895
896 c->tx_dma_used=0;
897 c->dma_tx = 1;
898 c->dma_num=0;
899 c->dma_ready=1;
900
901 /*
902 * Enable DMA control mode
903 */
904
905 spin_lock_irqsave(c->lock, cflags);
906
907 /*
908 * TX DMA via DIR/REQ
909 */
910
911 c->regs[R14]|= DTRREQ;
912 write_zsreg(c, R14, c->regs[R14]);
913
914 c->regs[R1]&= ~TxINT_ENAB;
915 write_zsreg(c, R1, c->regs[R1]);
916
917 /*
918 * RX DMA via W/Req
919 */
920
921 c->regs[R1]|= WT_FN_RDYFN;
922 c->regs[R1]|= WT_RDY_RT;
923 c->regs[R1]|= INT_ERR_Rx;
924 c->regs[R1]&= ~TxINT_ENAB;
925 write_zsreg(c, R1, c->regs[R1]);
926 c->regs[R1]|= WT_RDY_ENAB;
927 write_zsreg(c, R1, c->regs[R1]);
928
929 /*
930 * DMA interrupts
931 */
932
933 /*
934 * Set up the DMA configuration
935 */
936
937 dflags=claim_dma_lock();
938
939 disable_dma(c->rxdma);
940 clear_dma_ff(c->rxdma);
941 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
942 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
943 set_dma_count(c->rxdma, c->mtu);
944 enable_dma(c->rxdma);
945
946 disable_dma(c->txdma);
947 clear_dma_ff(c->txdma);
948 set_dma_mode(c->txdma, DMA_MODE_WRITE);
949 disable_dma(c->txdma);
950
951 release_dma_lock(dflags);
952
953 /*
954 * Select the DMA interrupt handlers
955 */
956
957 c->rxdma_on = 1;
958 c->txdma_on = 1;
959 c->tx_dma_used = 1;
960
961 c->irqs = &z8530_dma_sync;
962 z8530_rtsdtr(c,1);
963 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
964
965 spin_unlock_irqrestore(c->lock, cflags);
966
967 return 0;
968}
969
970EXPORT_SYMBOL(z8530_sync_dma_open);
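/*
 * Illustrative sketch (not part of this file): the ISA DMA channels are
 * owned by the caller. A board driver claims them with request_dma() and
 * fills in c->rxdma / c->txdma before calling z8530_sync_dma_open(). The
 * rxdma/txdma numbers below are hypothetical module parameters.
 *
 *	if (request_dma(rxdma, "example rx") == 0 &&
 *	    request_dma(txdma, "example tx") == 0) {
 *		card->sync.chanA.rxdma = rxdma;
 *		card->sync.chanA.txdma = txdma;
 *		err = z8530_sync_dma_open(d, &card->sync.chanA);
 *	}
 */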
971
972/**
973 * z8530_sync_dma_close - Close down DMA I/O
974 * @dev: Network device to detach
975 * @c: Z8530 channel to move into discard mode
976 *
977 * Shut down a DMA mode synchronous interface. Halt the DMA, and
978 * free the buffers.
979 */
980
981int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
982{
983 u8 chk;
984 unsigned long flags;
985
986 c->irqs = &z8530_nop;
987 c->max = 0;
988 c->sync = 0;
989
990 /*
991 * Disable the PC DMA channels
992 */
993
994 flags=claim_dma_lock();
995 disable_dma(c->rxdma);
996 clear_dma_ff(c->rxdma);
997
998 c->rxdma_on = 0;
999
1000 disable_dma(c->txdma);
1001 clear_dma_ff(c->txdma);
1002 release_dma_lock(flags);
1003
1004 c->txdma_on = 0;
1005 c->tx_dma_used = 0;
1006
1007 spin_lock_irqsave(c->lock, flags);
1008
1009 /*
1010 * Disable DMA control mode
1011 */
1012
1013 c->regs[R1]&= ~WT_RDY_ENAB;
1014 write_zsreg(c, R1, c->regs[R1]);
1015 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1016 c->regs[R1]|= INT_ALL_Rx;
1017 write_zsreg(c, R1, c->regs[R1]);
1018 c->regs[R14]&= ~DTRREQ;
1019 write_zsreg(c, R14, c->regs[R14]);
1020
1021 if(c->rx_buf[0])
1022 {
1023 free_page((unsigned long)c->rx_buf[0]);
1024 c->rx_buf[0]=NULL;
1025 }
1026 if(c->tx_dma_buf[0])
1027 {
1028 free_page((unsigned long)c->tx_dma_buf[0]);
1029 c->tx_dma_buf[0]=NULL;
1030 }
1031 chk=read_zsreg(c,R0);
1032 write_zsreg(c, R3, c->regs[R3]);
1033 z8530_rtsdtr(c,0);
1034
1035 spin_unlock_irqrestore(c->lock, flags);
1036
1037 return 0;
1038}
1039
1040EXPORT_SYMBOL(z8530_sync_dma_close);
1041
1042/**
1043 * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1044 * @dev: The network device to attach
1045 * @c: The Z8530 channel to configure in sync DMA mode.
1046 *
1047 * Set up a Z85x30 device for synchronous DMA transmission. One
1048 * ISA DMA channel must be available for this to work. The receive
1049 * side is run in PIO mode, but then it has the bigger FIFO.
1050 */
1051
1052int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1053{
1054 unsigned long cflags, dflags;
1055
1056 printk("Opening sync interface for TX-DMA\n");
1057 c->sync = 1;
1058 c->mtu = dev->mtu+64;
1059 c->count = 0;
1060 c->skb = NULL;
1061 c->skb2 = NULL;
1062
1063 /*
1064 * Allocate the DMA flip buffers. Limit by page size.
1065 * Everyone runs 1500 mtu or less on wan links so this
1066 * should be fine.
1067 */
1068
1069 if(c->mtu > PAGE_SIZE/2)
1070 return -EMSGSIZE;
1071
1072 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1073 if(c->tx_dma_buf[0]==NULL)
1074 return -ENOBUFS;
1075
1076 c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1077
1078
1079 spin_lock_irqsave(c->lock, cflags);
1080
1081 /*
1082 * Load the PIO receive ring
1083 */
1084
1085 z8530_rx_done(c);
1086 z8530_rx_done(c);
1087
1088 /*
1089 * Load the DMA interfaces up
1090 */
1091
1092 c->rxdma_on = 0;
1093 c->txdma_on = 0;
1094
1095 c->tx_dma_used=0;
1096 c->dma_num=0;
1097 c->dma_ready=1;
1098 c->dma_tx = 1;
1099
1100 /*
1101 * Enable DMA control mode
1102 */
1103
1104 /*
1105 * TX DMA via DIR/REQ
1106 */
1107 c->regs[R14]|= DTRREQ;
1108 write_zsreg(c, R14, c->regs[R14]);
1109
1110 c->regs[R1]&= ~TxINT_ENAB;
1111 write_zsreg(c, R1, c->regs[R1]);
1112
1113 /*
1114 * Set up the DMA configuration
1115 */
1116
1117 dflags = claim_dma_lock();
1118
1119 disable_dma(c->txdma);
1120 clear_dma_ff(c->txdma);
1121 set_dma_mode(c->txdma, DMA_MODE_WRITE);
1122 disable_dma(c->txdma);
1123
1124 release_dma_lock(dflags);
1125
1126 /*
1127 * Select the DMA interrupt handlers
1128 */
1129
1130 c->rxdma_on = 0;
1131 c->txdma_on = 1;
1132 c->tx_dma_used = 1;
1133
1134 c->irqs = &z8530_txdma_sync;
1135 z8530_rtsdtr(c,1);
1136 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1137 spin_unlock_irqrestore(c->lock, cflags);
1138
1139 return 0;
1140}
1141
1142EXPORT_SYMBOL(z8530_sync_txdma_open);
1143
1144/**
1145 * z8530_sync_txdma_close - Close down a TX driven DMA channel
1146 * @dev: Network device to detach
1147 * @c: Z8530 channel to move into discard mode
1148 *
1149 * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1150 * and free the buffers.
1151 */
1152
1153int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1154{
1155 unsigned long dflags, cflags;
1156 u8 chk;
1157
1158
1159 spin_lock_irqsave(c->lock, cflags);
1160
1161 c->irqs = &z8530_nop;
1162 c->max = 0;
1163 c->sync = 0;
1164
1165 /*
1166 * Disable the PC DMA channels
1167 */
1168
1169 dflags = claim_dma_lock();
1170
1171 disable_dma(c->txdma);
1172 clear_dma_ff(c->txdma);
1173 c->txdma_on = 0;
1174 c->tx_dma_used = 0;
1175
1176 release_dma_lock(dflags);
1177
1178 /*
1179 * Disable DMA control mode
1180 */
1181
1182 c->regs[R1]&= ~WT_RDY_ENAB;
1183 write_zsreg(c, R1, c->regs[R1]);
1184 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1185 c->regs[R1]|= INT_ALL_Rx;
1186 write_zsreg(c, R1, c->regs[R1]);
1187 c->regs[R14]&= ~DTRREQ;
1188 write_zsreg(c, R14, c->regs[R14]);
1189
1190 if(c->tx_dma_buf[0])
1191 {
1192 free_page((unsigned long)c->tx_dma_buf[0]);
1193 c->tx_dma_buf[0]=NULL;
1194 }
1195 chk=read_zsreg(c,R0);
1196 write_zsreg(c, R3, c->regs[R3]);
1197 z8530_rtsdtr(c,0);
1198
1199 spin_unlock_irqrestore(c->lock, cflags);
1200 return 0;
1201}
1202
1203
1204EXPORT_SYMBOL(z8530_sync_txdma_close);
1205
1206
1207/*
1208 * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1209 * it exists...
1210 */
1211
1212static const char *z8530_type_name[]={
1213 "Z8530",
1214 "Z85C30",
1215 "Z85230"
1216};
1217
1218/**
1219 * z8530_describe - Uniformly describe a Z8530 port
1220 * @dev: Z8530 device to describe
1221 * @mapping: string holding mapping type (eg "I/O" or "Mem")
1222 * @io: the port value in question
1223 *
1224 * Describe a Z8530 in a standard format. We must pass the I/O as
1225 * the port offset isn't predictable. The main reason for this function
1226 * is to try and get a common format of report.
1227 */
1228
1229void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1230{
1231 pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
1232 dev->name,
1233 z8530_type_name[dev->type],
1234 mapping,
1235 Z8530_PORT_OF(io),
1236 dev->irq);
1237}
1238
1239EXPORT_SYMBOL(z8530_describe);
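/*
 * Illustrative sketch (not part of this file): a probe routine reports
 * the chip it found with a single call, e.g.
 *
 *	z8530_describe(&card->sync, "I/O", iobase);
 *
 * which prints something like "example0: Z85230 found at I/O 0x238, IRQ 5"
 * using the dev->name the board driver filled in and the type detected by
 * z8530_init(). The iobase value is hypothetical.
 */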
1240
1241/*
1242 * Locked operation part of the z8530 init code
1243 */
1244
1245static inline int do_z8530_init(struct z8530_dev *dev)
1246{
1247 /* NOP the interrupt handlers first - we might get a
1248 floating IRQ transition when we reset the chip */
1249 dev->chanA.irqs=&z8530_nop;
1250 dev->chanB.irqs=&z8530_nop;
1251 dev->chanA.dcdcheck=DCD;
1252 dev->chanB.dcdcheck=DCD;
1253
1254 /* Reset the chip */
1255 write_zsreg(&dev->chanA, R9, 0xC0);
1256 udelay(200);
1257 /* Now check it's valid */
1258 write_zsreg(&dev->chanA, R12, 0xAA);
1259 if(read_zsreg(&dev->chanA, R12)!=0xAA)
1260 return -ENODEV;
1261 write_zsreg(&dev->chanA, R12, 0x55);
1262 if(read_zsreg(&dev->chanA, R12)!=0x55)
1263 return -ENODEV;
1264
1265 dev->type=Z8530;
1266
1267 /*
1268 * See the application note.
1269 */
1270
1271 write_zsreg(&dev->chanA, R15, 0x01);
1272
1273 /*
1274 * If we can set the low bit of R15 then
1275 * the chip is enhanced.
1276 */
1277
1278 if(read_zsreg(&dev->chanA, R15)==0x01)
1279 {
1280 /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1281 /* Put a char in the fifo */
1282 write_zsreg(&dev->chanA, R8, 0);
1283 if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1284 dev->type = Z85230; /* Has a FIFO */
1285 else
1286 dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
1287 }
1288
1289 /*
1290 * The code assumes R7' and friends are
1291 * off. Use write_zsext() for these and keep
1292 * this bit clear.
1293 */
1294
1295 write_zsreg(&dev->chanA, R15, 0);
1296
1297 /*
1298 * At this point it looks like the chip is behaving
1299 */
1300
1301 memcpy(dev->chanA.regs, reg_init, 16);
1302 memcpy(dev->chanB.regs, reg_init ,16);
1303
1304 return 0;
1305}
1306
1307/**
1308 * z8530_init - Initialise a Z8530 device
1309 * @dev: Z8530 device to initialise.
1310 *
1311 * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1312 * is present, identify the type and then program it to hopefully
1313 * keep quiet and behave. This matters a lot, a Z8530 in the wrong
1314 * state will sometimes get into stupid modes generating 10kHz
1315 * interrupt streams and the like.
1316 *
1317 * We set the interrupt handler up to discard any events, in case
1318 * we get them during reset or setup.
1319 *
1320 * Return 0 for success, or a negative value indicating the problem
1321 * in errno form.
1322 */
1323
1324int z8530_init(struct z8530_dev *dev)
1325{
1326 unsigned long flags;
1327 int ret;
1328
1329 /* Set up the chip level lock */
1330 spin_lock_init(&dev->lock);
1331 dev->chanA.lock = &dev->lock;
1332 dev->chanB.lock = &dev->lock;
1333
1334 spin_lock_irqsave(&dev->lock, flags);
1335 ret = do_z8530_init(dev);
1336 spin_unlock_irqrestore(&dev->lock, flags);
1337
1338 return ret;
1339}
1340
1341
1342EXPORT_SYMBOL(z8530_init);
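/*
 * Illustrative sketch (not part of this file): a minimal probe sequence.
 * The port layout is hypothetical and board specific; error unwinding is
 * collapsed into a single label.
 *
 *	card->sync.irq = irq;
 *	card->sync.chanA.ctrlio = iobase;
 *	card->sync.chanA.dataio = iobase + 1;
 *	card->sync.chanA.dev = &card->sync;
 *	card->sync.chanB.ctrlio = iobase + 2;
 *	card->sync.chanB.dataio = iobase + 3;
 *	card->sync.chanB.dev = &card->sync;
 *	card->sync.chanA.irqs = &z8530_nop;
 *	card->sync.chanB.irqs = &z8530_nop;
 *
 *	if (request_irq(irq, z8530_interrupt, 0, "example-sync", &card->sync))
 *		goto err_free;
 *	if (z8530_init(&card->sync)) {
 *		free_irq(irq, &card->sync);
 *		goto err_free;
 *	}
 */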
1343
1344/**
1345 * z8530_shutdown - Shutdown a Z8530 device
1346 * @dev: The Z8530 chip to shutdown
1347 *
1348 * We set the interrupt handlers to silence any interrupts. We then
1349 * reset the chip and wait 100uS to be sure the reset completed. Just
1350 * in case the caller then tries to do stuff.
1351 *
1352 * This is called without the lock held
1353 */
1354
1355int z8530_shutdown(struct z8530_dev *dev)
1356{
1357 unsigned long flags;
1358 /* Reset the chip */
1359
1360 spin_lock_irqsave(&dev->lock, flags);
1361 dev->chanA.irqs=&z8530_nop;
1362 dev->chanB.irqs=&z8530_nop;
1363 write_zsreg(&dev->chanA, R9, 0xC0);
1364 /* We must lock the udelay, the chip is offlimits here */
1365 udelay(100);
1366 spin_unlock_irqrestore(&dev->lock, flags);
1367 return 0;
1368}
1369
1370EXPORT_SYMBOL(z8530_shutdown);
1371
1372/**
1373 * z8530_channel_load - Load channel data
1374 * @c: Z8530 channel to configure
1375 * @rtable: table of register, value pairs
1376 * FIXME: ioctl to allow user uploaded tables
1377 *
1378 * Load a Z8530 channel up from the system data. We use +16 to
1379 * indicate the "prime" registers. The value 255 terminates the
1380 * table.
1381 */
1382
1383int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1384{
1385 unsigned long flags;
1386
1387 spin_lock_irqsave(c->lock, flags);
1388
1389 while(*rtable!=255)
1390 {
1391 int reg=*rtable++;
1392 if(reg>0x0F)
1393 write_zsreg(c, R15, c->regs[15]|1);
1394 write_zsreg(c, reg&0x0F, *rtable);
1395 if(reg>0x0F)
1396 write_zsreg(c, R15, c->regs[15]&~1);
1397 c->regs[reg]=*rtable++;
1398 }
1399 c->rx_function=z8530_null_rx;
1400 c->skb=NULL;
1401 c->tx_skb=NULL;
1402 c->tx_next_skb=NULL;
1403 c->mtu=1500;
1404 c->max=0;
1405 c->count=0;
1406 c->status=read_zsreg(c, R0);
1407 c->sync=1;
1408 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1409
1410 spin_unlock_irqrestore(c->lock, flags);
1411 return 0;
1412}
1413
1414EXPORT_SYMBOL(z8530_channel_load);
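/*
 * Illustrative sketch (not part of this file): after z8530_init() has
 * identified the part, a board driver picks the matching table and parks
 * whichever channel it does not use:
 *
 *	if (card->sync.type == Z85230)
 *		z8530_channel_load(&card->sync.chanA,
 *				   z8530_hdlc_kilostream_85230);
 *	else
 *		z8530_channel_load(&card->sync.chanA, z8530_hdlc_kilostream);
 *	z8530_channel_load(&card->sync.chanB, z8530_dead_port);
 */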
1415
1416
1417/**
1418 * z8530_tx_begin - Begin packet transmission
1419 * @c: The Z8530 channel to kick
1420 *
1421 * This is the speed sensitive side of transmission. If we are called
1422 * and no buffer is being transmitted we commence the next buffer. If
1423 * nothing is queued we idle the sync.
1424 *
1425 * Note: We are handling this code path in the interrupt path, keep it
1426 * fast or bad things will happen.
1427 *
1428 * Called with the lock held.
1429 */
1430
1431static void z8530_tx_begin(struct z8530_channel *c)
1432{
1433 unsigned long flags;
1434 if(c->tx_skb)
1435 return;
1436
1437 c->tx_skb=c->tx_next_skb;
1438 c->tx_next_skb=NULL;
1439 c->tx_ptr=c->tx_next_ptr;
1440
1441 if(c->tx_skb==NULL)
1442 {
1443 /* Idle on */
1444 if(c->dma_tx)
1445 {
1446 flags=claim_dma_lock();
1447 disable_dma(c->txdma);
1448 /*
1449 * Check if we crapped out.
1450 */
1451 if (get_dma_residue(c->txdma))
1452 {
1453 c->netdevice->stats.tx_dropped++;
1454 c->netdevice->stats.tx_fifo_errors++;
1455 }
1456 release_dma_lock(flags);
1457 }
1458 c->txcount=0;
1459 }
1460 else
1461 {
1462 c->txcount=c->tx_skb->len;
1463
1464
1465 if(c->dma_tx)
1466 {
1467 /*
1468 * FIXME. DMA is broken for the original 8530,
1469 * on the older parts we need to set a flag and
1470 * wait for a further TX interrupt to fire this
1471 * stage off
1472 */
1473
1474 flags=claim_dma_lock();
1475 disable_dma(c->txdma);
1476
1477 /*
1478 * These two are needed by the 8530/85C30
1479 * and must be issued when idling.
1480 */
1481
1482 if(c->dev->type!=Z85230)
1483 {
1484 write_zsctrl(c, RES_Tx_CRC);
1485 write_zsctrl(c, RES_EOM_L);
1486 }
1487 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1488 clear_dma_ff(c->txdma);
1489 set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1490 set_dma_count(c->txdma, c->txcount);
1491 enable_dma(c->txdma);
1492 release_dma_lock(flags);
1493 write_zsctrl(c, RES_EOM_L);
1494 write_zsreg(c, R5, c->regs[R5]|TxENAB);
1495 }
1496 else
1497 {
1498
1499 /* ABUNDER off */
1500 write_zsreg(c, R10, c->regs[10]);
1501 write_zsctrl(c, RES_Tx_CRC);
1502
1503 while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1504 {
1505 write_zsreg(c, R8, *c->tx_ptr++);
1506 c->txcount--;
1507 }
1508
1509 }
1510 }
1511 /*
1512 * Since we emptied tx_skb we can ask for more
1513 */
1514 netif_wake_queue(c->netdevice);
1515}
1516
1517/**
1518 * z8530_tx_done - TX complete callback
1519 * @c: The channel that completed a transmit.
1520 *
1521 * This is called when we complete a packet send. We wake the queue,
1522 * start the next packet going and then free the buffer of the existing
1523 * packet. This code is fairly timing sensitive.
1524 *
1525 * Called with the register lock held.
1526 */
1527
1528static void z8530_tx_done(struct z8530_channel *c)
1529{
1530 struct sk_buff *skb;
1531
1532 /* Actually this can happen.*/
1533 if (c->tx_skb == NULL)
1534 return;
1535
1536 skb = c->tx_skb;
1537 c->tx_skb = NULL;
1538 z8530_tx_begin(c);
1539 c->netdevice->stats.tx_packets++;
1540 c->netdevice->stats.tx_bytes += skb->len;
1541 dev_kfree_skb_irq(skb);
1542}
1543
1544/**
1545 * z8530_null_rx - Discard a packet
1546 * @c: The channel the packet arrived on
1547 * @skb: The buffer
1548 *
1549 * We point the receive handler at this function when idle. Instead
1550 * of processing the frames we get to throw them away.
1551 */
1552
1553void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1554{
1555 dev_kfree_skb_any(skb);
1556}
1557
1558EXPORT_SYMBOL(z8530_null_rx);
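/*
 * Illustrative sketch (not part of this file): once the interface is up
 * a board driver points c->rx_function at its own handler. With the
 * generic HDLC layer that handler can be as small as this; the name is
 * hypothetical and the skb_trim() assumes the trailing CRC is unwanted.
 *
 *	static void example_rx(struct z8530_channel *c, struct sk_buff *skb)
 *	{
 *		skb_trim(skb, skb->len - 2);
 *		skb->protocol = hdlc_type_trans(skb, c->netdevice);
 *		skb_reset_mac_header(skb);
 *		skb->dev = c->netdevice;
 *		netif_rx(skb);
 *	}
 */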
1559
1560/**
1561 * z8530_rx_done - Receive completion callback
1562 * @c: The channel that completed a receive
1563 *
1564 * A new packet is complete. Our goal here is to get back into receive
1565 * mode as fast as possible. On the Z85230 we could change to using
1566 * ESCC mode, but on the older chips we have no choice. We flip to the
1567 * new buffer immediately in DMA mode so that the DMA of the next
1568 * frame can occur while we are copying the previous buffer to an sk_buff
1569 *
1570 * Called with the lock held
1571 */
1572
1573static void z8530_rx_done(struct z8530_channel *c)
1574{
1575 struct sk_buff *skb;
1576 int ct;
1577
1578 /*
1579 * Is our receive engine in DMA mode
1580 */
1581
1582 if(c->rxdma_on)
1583 {
1584 /*
1585 * Save the ready state and the buffer currently
1586 * being used as the DMA target
1587 */
1588
1589 int ready=c->dma_ready;
1590 unsigned char *rxb=c->rx_buf[c->dma_num];
1591 unsigned long flags;
1592
1593 /*
1594 * Complete this DMA. Necessary to find the length
1595 */
1596
1597 flags=claim_dma_lock();
1598
1599 disable_dma(c->rxdma);
1600 clear_dma_ff(c->rxdma);
1601 c->rxdma_on=0;
1602 ct=c->mtu-get_dma_residue(c->rxdma);
1603 if(ct<0)
1604 ct=2; /* Shit happens.. */
1605 c->dma_ready=0;
1606
1607 /*
1608 * Normal case: the other slot is free, start the next DMA
1609 * into it immediately.
1610 */
1611
1612 if(ready)
1613 {
1614 c->dma_num^=1;
1615 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1616 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1617 set_dma_count(c->rxdma, c->mtu);
1618 c->rxdma_on = 1;
1619 enable_dma(c->rxdma);
1620 /* Stop any frames that we missed the head of
1621 from passing */
1622 write_zsreg(c, R0, RES_Rx_CRC);
1623 }
1624 else
1625 /* Can't occur as we don't re-enable the DMA irq until
1626 after the flip is done */
1627 netdev_warn(c->netdevice, "DMA flip overrun!\n");
1628
1629 release_dma_lock(flags);
1630
1631 /*
1632 * Shove the old buffer into an sk_buff. We can't DMA
1633 * directly into one on a PC - it might be above the 16Mb
1634 * boundary. Optimisation - we could check to see if we
1635 * can avoid the copy. Optimisation 2 - make the memcpy
1636 * a copychecksum.
1637 */
1638
1639 skb = dev_alloc_skb(ct);
1640 if (skb == NULL) {
1641 c->netdevice->stats.rx_dropped++;
1642 netdev_warn(c->netdevice, "Memory squeeze\n");
1643 } else {
1644 skb_put(skb, ct);
1645 skb_copy_to_linear_data(skb, rxb, ct);
1646 c->netdevice->stats.rx_packets++;
1647 c->netdevice->stats.rx_bytes += ct;
1648 }
1649 c->dma_ready = 1;
1650 } else {
1651 RT_LOCK;
1652 skb = c->skb;
1653
1654 /*
1655 * The game we play for non DMA is similar. We want to
1656 * get the controller set up for the next packet as fast
1657 * as possible. We potentially only have one byte + the
1658 * fifo length for this. Thus we want to flip to the new
1659 * buffer and then mess around copying and allocating
1660 * things. For the current case it doesn't matter but
1661 * if you build a system where the sync irq isn't blocked
1662 * by the kernel IRQ disable then you need only block the
1663 * sync IRQ for the RT_LOCK area.
1664 *
1665 */
1666 ct=c->count;
1667
1668 c->skb = c->skb2;
1669 c->count = 0;
1670 c->max = c->mtu;
1671 if (c->skb) {
1672 c->dptr = c->skb->data;
1673 c->max = c->mtu;
1674 } else {
1675 c->count = 0;
1676 c->max = 0;
1677 }
1678 RT_UNLOCK;
1679
1680 c->skb2 = dev_alloc_skb(c->mtu);
1681 if (c->skb2 == NULL)
1682 netdev_warn(c->netdevice, "memory squeeze\n");
1683 else
1684 skb_put(c->skb2, c->mtu);
1685 c->netdevice->stats.rx_packets++;
1686 c->netdevice->stats.rx_bytes += ct;
1687 }
1688 /*
1689 * If we received a frame we must now process it.
1690 */
1691 if (skb) {
1692 skb_trim(skb, ct);
1693 c->rx_function(c, skb);
1694 } else {
1695 c->netdevice->stats.rx_dropped++;
1696 netdev_err(c->netdevice, "Lost a frame\n");
1697 }
1698}
1699
1700/**
1701 * spans_boundary - Check a packet can be ISA DMA'd
1702 * @skb: The buffer to check
1703 *
1704 * Returns true if the buffer crosses a DMA boundary on a PC. The poor
1705 * thing can only DMA within a 64K block not across the edges of it.
1706 */
1707
1708static inline int spans_boundary(struct sk_buff *skb)
1709{
1710 unsigned long a=(unsigned long)skb->data;
1711 a^=(a+skb->len);
1712 if(a&0x00010000) /* If the 64K bit is different.. */
1713 return 1;
1714 return 0;
1715}
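/*
 * Worked example (not part of this file): a frame at 0xFFF8 of length
 * 0x10 ends at 0x10008, so bit 16 differs between start and end,
 * a ^ (a + len) has 0x00010000 set and the frame is bounced through the
 * flip buffer. The same frame starting at 0x20000 stays inside one 64K
 * page and can be DMA'd in place.
 */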
1716
1717/**
1718 * z8530_queue_xmit - Queue a packet
1719 * @c: The channel to use
1720 * @skb: The packet to kick down the channel
1721 *
1722 * Queue a packet for transmission. Because we have rather
1723 * hard to hit interrupt latencies for the Z85230 per packet
1724 * even in DMA mode we do the flip to DMA buffer if needed here
1725 * not in the IRQ.
1726 *
1727 * Called from the network code. The lock is not held at this
1728 * point.
1729 */
1730
1731netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1732{
1733 unsigned long flags;
1734
1735 netif_stop_queue(c->netdevice);
1736 if(c->tx_next_skb)
1737 return NETDEV_TX_BUSY;
1738
1739
1740 /* PC SPECIFIC - DMA limits */
1741
1742 /*
1743 * If we will DMA the transmit and it's gone over the ISA bus
1744 * limit, then copy to the flip buffer
1745 */
1746
1747 if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1748 {
1749 /*
1750 * Send the flip buffer, and flip the flippy bit.
1751 * We don't care which is used when just so long as
1752 * we never use the same buffer twice in a row. Since
1753 * only one buffer can be going out at a time the other
1754 * has to be safe.
1755 */
1756 c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1757 c->tx_dma_used^=1; /* Flip temp buffer */
1758 skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
1759 }
1760 else
1761 c->tx_next_ptr=skb->data;
1762 RT_LOCK;
1763 c->tx_next_skb=skb;
1764 RT_UNLOCK;
1765
1766 spin_lock_irqsave(c->lock, flags);
1767 z8530_tx_begin(c);
1768 spin_unlock_irqrestore(c->lock, flags);
1769
1770 return NETDEV_TX_OK;
1771}
1772
1773EXPORT_SYMBOL(z8530_queue_xmit);
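/*
 * Illustrative sketch (not part of this file): the board driver's
 * ndo_start_xmit just relays to the channel that owns the interface.
 * Generic HDLC private data layout assumed; names are hypothetical.
 *
 *	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
 *					      struct net_device *d)
 *	{
 *		struct example_card *card = dev_to_hdlc(d)->priv;
 *
 *		return z8530_queue_xmit(&card->sync.chanA, skb);
 *	}
 */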
1774
1775/*
1776 * Module support
1777 */
1778static const char banner[] __initconst =
1779 KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1780
1781static int __init z85230_init_driver(void)
1782{
1783 printk(banner);
1784 return 0;
1785}
1786module_init(z85230_init_driver);
1787
1788static void __exit z85230_cleanup_driver(void)
1789{
1790}
1791module_exit(z85230_cleanup_driver);
1792
1793MODULE_AUTHOR("Red Hat Inc.");
1794MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1795MODULE_LICENSE("GPL");
614
615/**
616 * z8530_rx_clear - Handle RX events from a stopped chip
617 * @c: Z8530 channel to shut up
618 *
619 * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
620 * For machines with PCI Z85x30 cards, or level triggered interrupts
621 * (eg the MacII) we must clear the interrupt cause or die.
622 */
623
624
625static void z8530_rx_clear(struct z8530_channel *c)
626{
627 /*
628 * Data and status bytes
629 */
630 u8 stat;
631
632 read_zsdata(c);
633 stat=read_zsreg(c, R1);
634
635 if(stat&END_FR)
636 write_zsctrl(c, RES_Rx_CRC);
637 /*
638 * Clear irq
639 */
640 write_zsctrl(c, ERR_RES);
641 write_zsctrl(c, RES_H_IUS);
642}
643
644/**
645 * z8530_tx_clear - Handle TX events from a stopped chip
646 * @c: Z8530 channel to shut up
647 *
648 * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
649 * For machines with PCI Z85x30 cards, or level triggered interrupts
650 * (eg the MacII) we must clear the interrupt cause or die.
651 */
652
653static void z8530_tx_clear(struct z8530_channel *c)
654{
655 write_zsctrl(c, RES_Tx_P);
656 write_zsctrl(c, RES_H_IUS);
657}
658
659/**
660 * z8530_status_clear - Handle status events from a stopped chip
661 * @chan: Z8530 channel to shut up
662 *
663 * Status interrupt vectors for a Z8530 that is in 'parked' mode.
664 * For machines with PCI Z85x30 cards, or level triggered interrupts
665 * (eg the MacII) we must clear the interrupt cause or die.
666 */
667
668static void z8530_status_clear(struct z8530_channel *chan)
669{
670 u8 status=read_zsreg(chan, R0);
671 if(status&TxEOM)
672 write_zsctrl(chan, ERR_RES);
673 write_zsctrl(chan, RES_EXT_INT);
674 write_zsctrl(chan, RES_H_IUS);
675}
676
677struct z8530_irqhandler z8530_nop = {
678 .rx = z8530_rx_clear,
679 .tx = z8530_tx_clear,
680 .status = z8530_status_clear,
681};
682
683
684EXPORT_SYMBOL(z8530_nop);
685
686/**
687 * z8530_interrupt - Handle an interrupt from a Z8530
688 * @irq: Interrupt number
689 * @dev_id: The Z8530 device that is interrupting.
690 *
691 * A Z85[2]30 device has stuck its hand in the air for attention.
692 * We scan both the channels on the chip for events and then call
693 * the channel specific call backs for each channel that has events.
694 * We have to use callback functions because the two channels can be
695 * in different modes.
696 *
697 * Locking is done for the handlers. Note that locking is done
698 * at the chip level (the 5uS delay issue is per chip not per
699 * channel). c->lock for both channels points to dev->lock
700 */
701
702irqreturn_t z8530_interrupt(int irq, void *dev_id)
703{
704 struct z8530_dev *dev=dev_id;
705 u8 intr;
706 static volatile int locker=0;
707 int work=0;
708 struct z8530_irqhandler *irqs;
709
710 if(locker)
711 {
712 pr_err("IRQ re-enter\n");
713 return IRQ_NONE;
714 }
715 locker=1;
716
717 spin_lock(&dev->lock);
718
	while(++work<5000)
	{
		/* R3 on channel A holds the IRQ status for the whole
		   chip - it must be read from channel A even though it
		   covers both channels */

		intr = read_zsreg(&dev->chanA, R3);
		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
			break;

		/* Now walk the chip and see what it wants - remember it
		   may be an IRQ for someone else */

		irqs=dev->chanA.irqs;
734 if(intr & (CHARxIP|CHATxIP|CHAEXT))
735 {
736 if(intr&CHARxIP)
737 irqs->rx(&dev->chanA);
738 if(intr&CHATxIP)
739 irqs->tx(&dev->chanA);
740 if(intr&CHAEXT)
741 irqs->status(&dev->chanA);
742 }
743
744 irqs=dev->chanB.irqs;
745
746 if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
747 {
748 if(intr&CHBRxIP)
749 irqs->rx(&dev->chanB);
750 if(intr&CHBTxIP)
751 irqs->tx(&dev->chanB);
752 if(intr&CHBEXT)
753 irqs->status(&dev->chanB);
754 }
755 }
756 spin_unlock(&dev->lock);
757 if(work==5000)
758 pr_err("%s: interrupt jammed - abort(0x%X)!\n",
759 dev->name, intr);
760 /* Ok all done */
761 locker=0;
762 return IRQ_HANDLED;
763}
764
765EXPORT_SYMBOL(z8530_interrupt);
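
/*
 * Usage sketch (illustrative, not part of this driver): a card driver
 * is expected to pass its struct z8530_dev as dev_id when it requests
 * the IRQ, so the handler above can reach both channels. The names
 * "mycard" and "mycard_attach_irq" below are hypothetical.
 *
 *	static int mycard_attach_irq(struct z8530_dev *z, int irq)
 *	{
 *		int err;
 *
 *		err = request_irq(irq, z8530_interrupt, IRQF_SHARED,
 *				  "mycard", z);
 *		if (err)
 *			return err;
 *		z->irq = irq;
 *		return 0;
 *	}
 *
 * Teardown uses free_irq(irq, z) with the same dev_id.
 */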
766
767static const u8 reg_init[16]=
768{
769 0,0,0,0,
770 0,0,0,0,
771 0,0,0,0,
772 0x55,0,0,0
773};
774
775
776/**
777 * z8530_sync_open - Open a Z8530 channel for PIO
778 * @dev: The network interface we are using
779 * @c: The Z8530 channel to open in synchronous PIO mode
780 *
781 * Switch a Z8530 into synchronous mode without DMA assist. We
782 * raise the RTS/DTR and commence network operation.
783 */
784
785int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
786{
787 unsigned long flags;
788
789 spin_lock_irqsave(c->lock, flags);
790
791 c->sync = 1;
792 c->mtu = dev->mtu+64;
793 c->count = 0;
794 c->skb = NULL;
795 c->skb2 = NULL;
796 c->irqs = &z8530_sync;
797
798 /* This loads the double buffer up */
799 z8530_rx_done(c); /* Load the frame ring */
800 z8530_rx_done(c); /* Load the backup frame */
801 z8530_rtsdtr(c,1);
802 c->dma_tx = 0;
803 c->regs[R1]|=TxINT_ENAB;
804 write_zsreg(c, R1, c->regs[R1]);
805 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
806
807 spin_unlock_irqrestore(c->lock, flags);
808 return 0;
809}
810
811
812EXPORT_SYMBOL(z8530_sync_open);
813
814/**
815 * z8530_sync_close - Close a PIO Z8530 channel
816 * @dev: Network device to close
817 * @c: Z8530 channel to disassociate and move to idle
818 *
819 * Close down a Z8530 interface and switch its interrupt handlers
820 * to discard future events.
821 */
822
823int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
824{
825 u8 chk;
826 unsigned long flags;
827
828 spin_lock_irqsave(c->lock, flags);
829 c->irqs = &z8530_nop;
830 c->max = 0;
831 c->sync = 0;
832
833 chk=read_zsreg(c,R0);
834 write_zsreg(c, R3, c->regs[R3]);
835 z8530_rtsdtr(c,0);
836
837 spin_unlock_irqrestore(c->lock, flags);
838 return 0;
839}
840
841EXPORT_SYMBOL(z8530_sync_close);
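
/*
 * Usage sketch (illustrative only): a card driver typically pairs
 * z8530_sync_open() and z8530_sync_close() in its net_device open and
 * stop callbacks. The "mycard" names, the use of ml_priv and of the
 * hdlc helpers are assumptions about the caller, not requirements of
 * this core.
 *
 *	static int mycard_open(struct net_device *d)
 *	{
 *		struct z8530_channel *ch = d->ml_priv;
 *		int err = z8530_sync_open(d, ch);
 *
 *		if (err)
 *			return err;
 *		err = hdlc_open(d);
 *		if (err) {
 *			z8530_sync_close(d, ch);
 *			return err;
 *		}
 *		netif_start_queue(d);
 *		return 0;
 *	}
 */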
842
843/**
844 * z8530_sync_dma_open - Open a Z8530 for DMA I/O
845 * @dev: The network device to attach
846 * @c: The Z8530 channel to configure in sync DMA mode.
847 *
848 * Set up a Z85x30 device for synchronous DMA in both directions. Two
849 * ISA DMA channels must be available for this to work. We assume ISA
850 * DMA driven I/O and PC limits on access.
851 */
852
853int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
854{
855 unsigned long cflags, dflags;
856
857 c->sync = 1;
858 c->mtu = dev->mtu+64;
859 c->count = 0;
860 c->skb = NULL;
861 c->skb2 = NULL;
862 /*
863 * Load the DMA interfaces up
864 */
865 c->rxdma_on = 0;
866 c->txdma_on = 0;
867
868 /*
869 * Allocate the DMA flip buffers. Limit by page size.
870 * Everyone runs 1500 mtu or less on wan links so this
871 * should be fine.
872 */
873
874 if(c->mtu > PAGE_SIZE/2)
875 return -EMSGSIZE;
876
877 c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
878 if(c->rx_buf[0]==NULL)
879 return -ENOBUFS;
880 c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
881
882 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
883 if(c->tx_dma_buf[0]==NULL)
884 {
885 free_page((unsigned long)c->rx_buf[0]);
886 c->rx_buf[0]=NULL;
887 return -ENOBUFS;
888 }
889 c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
890
891 c->tx_dma_used=0;
892 c->dma_tx = 1;
893 c->dma_num=0;
894 c->dma_ready=1;
895
896 /*
897 * Enable DMA control mode
898 */
899
900 spin_lock_irqsave(c->lock, cflags);
901
902 /*
903 * TX DMA via DIR/REQ
904 */
905
906 c->regs[R14]|= DTRREQ;
907 write_zsreg(c, R14, c->regs[R14]);
908
909 c->regs[R1]&= ~TxINT_ENAB;
910 write_zsreg(c, R1, c->regs[R1]);
911
912 /*
913 * RX DMA via W/Req
914 */
915
916 c->regs[R1]|= WT_FN_RDYFN;
917 c->regs[R1]|= WT_RDY_RT;
918 c->regs[R1]|= INT_ERR_Rx;
919 c->regs[R1]&= ~TxINT_ENAB;
920 write_zsreg(c, R1, c->regs[R1]);
921 c->regs[R1]|= WT_RDY_ENAB;
922 write_zsreg(c, R1, c->regs[R1]);
923
924 /*
925 * DMA interrupts
926 */
927
928 /*
929 * Set up the DMA configuration
930 */
931
932 dflags=claim_dma_lock();
933
934 disable_dma(c->rxdma);
935 clear_dma_ff(c->rxdma);
936 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
937 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
938 set_dma_count(c->rxdma, c->mtu);
939 enable_dma(c->rxdma);
940
941 disable_dma(c->txdma);
942 clear_dma_ff(c->txdma);
943 set_dma_mode(c->txdma, DMA_MODE_WRITE);
944 disable_dma(c->txdma);
945
946 release_dma_lock(dflags);
947
948 /*
949 * Select the DMA interrupt handlers
950 */
951
952 c->rxdma_on = 1;
953 c->txdma_on = 1;
954 c->tx_dma_used = 1;
955
956 c->irqs = &z8530_dma_sync;
957 z8530_rtsdtr(c,1);
958 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
959
960 spin_unlock_irqrestore(c->lock, cflags);
961
962 return 0;
963}
964
965EXPORT_SYMBOL(z8530_sync_dma_open);
966
967/**
968 * z8530_sync_dma_close - Close down DMA I/O
969 * @dev: Network device to detach
970 * @c: Z8530 channel to move into discard mode
971 *
972 * Shut down a DMA mode synchronous interface. Halt the DMA, and
973 * free the buffers.
974 */
975
976int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
977{
978 u8 chk;
979 unsigned long flags;
980
981 c->irqs = &z8530_nop;
982 c->max = 0;
983 c->sync = 0;
984
985 /*
986 * Disable the PC DMA channels
987 */
988
989 flags=claim_dma_lock();
990 disable_dma(c->rxdma);
991 clear_dma_ff(c->rxdma);
992
993 c->rxdma_on = 0;
994
995 disable_dma(c->txdma);
996 clear_dma_ff(c->txdma);
997 release_dma_lock(flags);
998
999 c->txdma_on = 0;
1000 c->tx_dma_used = 0;
1001
1002 spin_lock_irqsave(c->lock, flags);
1003
1004 /*
1005 * Disable DMA control mode
1006 */
1007
1008 c->regs[R1]&= ~WT_RDY_ENAB;
1009 write_zsreg(c, R1, c->regs[R1]);
1010 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1011 c->regs[R1]|= INT_ALL_Rx;
1012 write_zsreg(c, R1, c->regs[R1]);
1013 c->regs[R14]&= ~DTRREQ;
1014 write_zsreg(c, R14, c->regs[R14]);
1015
1016 if(c->rx_buf[0])
1017 {
1018 free_page((unsigned long)c->rx_buf[0]);
1019 c->rx_buf[0]=NULL;
1020 }
1021 if(c->tx_dma_buf[0])
1022 {
1023 free_page((unsigned long)c->tx_dma_buf[0]);
1024 c->tx_dma_buf[0]=NULL;
1025 }
1026 chk=read_zsreg(c,R0);
1027 write_zsreg(c, R3, c->regs[R3]);
1028 z8530_rtsdtr(c,0);
1029
1030 spin_unlock_irqrestore(c->lock, flags);
1031
1032 return 0;
1033}
1034
1035EXPORT_SYMBOL(z8530_sync_dma_close);
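
/*
 * Usage sketch (illustrative only): before calling
 * z8530_sync_dma_open() the caller must claim the two ISA DMA channels
 * and record them in the z8530_channel. The DMA numbers and the
 * "mycard" names here are hypothetical.
 *
 *	static int mycard_open_dma(struct net_device *d,
 *				   struct z8530_channel *ch,
 *				   int rxdma, int txdma)
 *	{
 *		int err;
 *
 *		if (request_dma(rxdma, "mycard rx"))
 *			return -EBUSY;
 *		if (request_dma(txdma, "mycard tx")) {
 *			free_dma(rxdma);
 *			return -EBUSY;
 *		}
 *		ch->rxdma = rxdma;
 *		ch->txdma = txdma;
 *		err = z8530_sync_dma_open(d, ch);
 *		if (err) {
 *			free_dma(txdma);
 *			free_dma(rxdma);
 *		}
 *		return err;
 *	}
 *
 * z8530_sync_dma_close() stops the transfers; the card driver is still
 * responsible for calling free_dma() on both channels afterwards.
 */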
1036
1037/**
1038 * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1039 * @dev: The network device to attach
1040 * @c: The Z8530 channel to configure in sync DMA mode.
1041 *
1042 * Set up a Z85x30 device for synchronous DMA transmission. One
1043 * ISA DMA channel must be available for this to work. The receive
1044 * side is run in PIO mode, but then it has the bigger FIFO.
1045 */
1046
1047int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1048{
1049 unsigned long cflags, dflags;
1050
	pr_debug("Opening sync interface for TX-DMA\n");
1052 c->sync = 1;
1053 c->mtu = dev->mtu+64;
1054 c->count = 0;
1055 c->skb = NULL;
1056 c->skb2 = NULL;
1057
1058 /*
1059 * Allocate the DMA flip buffers. Limit by page size.
1060 * Everyone runs 1500 mtu or less on wan links so this
1061 * should be fine.
1062 */
1063
1064 if(c->mtu > PAGE_SIZE/2)
1065 return -EMSGSIZE;
1066
1067 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1068 if(c->tx_dma_buf[0]==NULL)
1069 return -ENOBUFS;
1070
1071 c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1072
1073
1074 spin_lock_irqsave(c->lock, cflags);
1075
1076 /*
1077 * Load the PIO receive ring
1078 */
1079
1080 z8530_rx_done(c);
1081 z8530_rx_done(c);
1082
1083 /*
1084 * Load the DMA interfaces up
1085 */
1086
1087 c->rxdma_on = 0;
1088 c->txdma_on = 0;
1089
1090 c->tx_dma_used=0;
1091 c->dma_num=0;
1092 c->dma_ready=1;
1093 c->dma_tx = 1;
1094
1095 /*
1096 * Enable DMA control mode
1097 */
1098
1099 /*
1100 * TX DMA via DIR/REQ
1101 */
1102 c->regs[R14]|= DTRREQ;
1103 write_zsreg(c, R14, c->regs[R14]);
1104
1105 c->regs[R1]&= ~TxINT_ENAB;
1106 write_zsreg(c, R1, c->regs[R1]);
1107
1108 /*
1109 * Set up the DMA configuration
1110 */
1111
1112 dflags = claim_dma_lock();
1113
1114 disable_dma(c->txdma);
1115 clear_dma_ff(c->txdma);
1116 set_dma_mode(c->txdma, DMA_MODE_WRITE);
1117 disable_dma(c->txdma);
1118
1119 release_dma_lock(dflags);
1120
1121 /*
1122 * Select the DMA interrupt handlers
1123 */
1124
1125 c->rxdma_on = 0;
1126 c->txdma_on = 1;
1127 c->tx_dma_used = 1;
1128
1129 c->irqs = &z8530_txdma_sync;
1130 z8530_rtsdtr(c,1);
1131 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1132 spin_unlock_irqrestore(c->lock, cflags);
1133
1134 return 0;
1135}
1136
1137EXPORT_SYMBOL(z8530_sync_txdma_open);
1138
1139/**
1140 * z8530_sync_txdma_close - Close down a TX driven DMA channel
1141 * @dev: Network device to detach
1142 * @c: Z8530 channel to move into discard mode
1143 *
1144 * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1145 * and free the buffers.
1146 */
1147
1148int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1149{
1150 unsigned long dflags, cflags;
1151 u8 chk;
1152
1153
1154 spin_lock_irqsave(c->lock, cflags);
1155
1156 c->irqs = &z8530_nop;
1157 c->max = 0;
1158 c->sync = 0;
1159
1160 /*
1161 * Disable the PC DMA channels
1162 */
1163
1164 dflags = claim_dma_lock();
1165
1166 disable_dma(c->txdma);
1167 clear_dma_ff(c->txdma);
1168 c->txdma_on = 0;
1169 c->tx_dma_used = 0;
1170
1171 release_dma_lock(dflags);
1172
1173 /*
1174 * Disable DMA control mode
1175 */
1176
1177 c->regs[R1]&= ~WT_RDY_ENAB;
1178 write_zsreg(c, R1, c->regs[R1]);
1179 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1180 c->regs[R1]|= INT_ALL_Rx;
1181 write_zsreg(c, R1, c->regs[R1]);
1182 c->regs[R14]&= ~DTRREQ;
1183 write_zsreg(c, R14, c->regs[R14]);
1184
1185 if(c->tx_dma_buf[0])
1186 {
1187 free_page((unsigned long)c->tx_dma_buf[0]);
1188 c->tx_dma_buf[0]=NULL;
1189 }
1190 chk=read_zsreg(c,R0);
1191 write_zsreg(c, R3, c->regs[R3]);
1192 z8530_rtsdtr(c,0);
1193
1194 spin_unlock_irqrestore(c->lock, cflags);
1195 return 0;
1196}
1197
1198
1199EXPORT_SYMBOL(z8530_sync_txdma_close);
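
/*
 * Usage sketch (illustrative only): the TX-only DMA mode is set up
 * like the full DMA case above, except that only the transmit side
 * needs an ISA DMA channel; receive stays in PIO. "txdma", "ch" and
 * "d" are hypothetical caller-side names.
 *
 *	if (request_dma(txdma, "mycard tx"))
 *		return -EBUSY;
 *	ch->txdma = txdma;
 *	err = z8530_sync_txdma_open(d, ch);
 *	if (err)
 *		free_dma(txdma);
 */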
1200
1201
1202/*
1203 * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1204 * it exists...
1205 */
1206
1207static const char *z8530_type_name[]={
1208 "Z8530",
1209 "Z85C30",
1210 "Z85230"
1211};
1212
1213/**
1214 * z8530_describe - Uniformly describe a Z8530 port
1215 * @dev: Z8530 device to describe
1216 * @mapping: string holding mapping type (eg "I/O" or "Mem")
1217 * @io: the port value in question
1218 *
 * Describe a Z8530 in a standard format. We must pass the I/O address
 * as the port offset isn't predictable. The main reason for this
 * function is to try to get a common format of report.
 */
1223
1224void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1225{
1226 pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
1227 dev->name,
1228 z8530_type_name[dev->type],
1229 mapping,
1230 Z8530_PORT_OF(io),
1231 dev->irq);
1232}
1233
1234EXPORT_SYMBOL(z8530_describe);
1235
1236/*
1237 * Locked operation part of the z8530 init code
1238 */
1239
1240static inline int do_z8530_init(struct z8530_dev *dev)
1241{
1242 /* NOP the interrupt handlers first - we might get a
1243 floating IRQ transition when we reset the chip */
1244 dev->chanA.irqs=&z8530_nop;
1245 dev->chanB.irqs=&z8530_nop;
1246 dev->chanA.dcdcheck=DCD;
1247 dev->chanB.dcdcheck=DCD;
1248
1249 /* Reset the chip */
1250 write_zsreg(&dev->chanA, R9, 0xC0);
1251 udelay(200);
	/* Now check it's valid */
1253 write_zsreg(&dev->chanA, R12, 0xAA);
1254 if(read_zsreg(&dev->chanA, R12)!=0xAA)
1255 return -ENODEV;
1256 write_zsreg(&dev->chanA, R12, 0x55);
1257 if(read_zsreg(&dev->chanA, R12)!=0x55)
1258 return -ENODEV;
1259
1260 dev->type=Z8530;
1261
1262 /*
1263 * See the application note.
1264 */
1265
1266 write_zsreg(&dev->chanA, R15, 0x01);
1267
1268 /*
1269 * If we can set the low bit of R15 then
1270 * the chip is enhanced.
1271 */
1272
1273 if(read_zsreg(&dev->chanA, R15)==0x01)
1274 {
1275 /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1276 /* Put a char in the fifo */
1277 write_zsreg(&dev->chanA, R8, 0);
1278 if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1279 dev->type = Z85230; /* Has a FIFO */
1280 else
1281 dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
1282 }
1283
1284 /*
1285 * The code assumes R7' and friends are
1286 * off. Use write_zsext() for these and keep
1287 * this bit clear.
1288 */
1289
1290 write_zsreg(&dev->chanA, R15, 0);
1291
1292 /*
1293 * At this point it looks like the chip is behaving
1294 */
1295
1296 memcpy(dev->chanA.regs, reg_init, 16);
1297 memcpy(dev->chanB.regs, reg_init ,16);
1298
1299 return 0;
1300}
1301
/**
 * z8530_init - Initialise a Z8530 device
 * @dev: Z8530 device to initialise.
 *
 * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
 * is present, identify the type and then program it to hopefully
 * keep quiet and behave. This matters a lot: a Z8530 in the wrong
 * state will sometimes get into stupid modes generating 10KHz
 * interrupt streams and the like.
 *
 * We set the interrupt handler up to discard any events, in case
 * we get them during reset or setup.
 *
 * Return 0 for success, or a negative value indicating the problem
 * in errno form.
 */
1318
1319int z8530_init(struct z8530_dev *dev)
1320{
1321 unsigned long flags;
1322 int ret;
1323
1324 /* Set up the chip level lock */
1325 spin_lock_init(&dev->lock);
1326 dev->chanA.lock = &dev->lock;
1327 dev->chanB.lock = &dev->lock;
1328
1329 spin_lock_irqsave(&dev->lock, flags);
1330 ret = do_z8530_init(dev);
1331 spin_unlock_irqrestore(&dev->lock, flags);
1332
1333 return ret;
1334}
1335
1336
1337EXPORT_SYMBOL(z8530_init);
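
/*
 * Usage sketch (illustrative only): at probe time a card driver fills
 * in the channel port addresses and back pointers from z85230.h, then
 * calls z8530_init() and reports the device. The "iobase" variable,
 * the +1/+3 register offsets and the "mycard" name are hypothetical
 * and board specific.
 *
 *	z->chanA.ctrlio = iobase + 1;
 *	z->chanA.dataio = iobase + 3;
 *	z->chanB.ctrlio = iobase;
 *	z->chanB.dataio = iobase + 2;
 *	z->chanA.dev = z;
 *	z->chanB.dev = z;
 *	z->name = "mycard";
 *	if (z8530_init(z)) {
 *		pr_err("mycard: Z8530 not found at 0x%lX\n", iobase);
 *		return -ENODEV;
 *	}
 *	z8530_describe(z, "I/O", iobase);
 */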
1338
1339/**
1340 * z8530_shutdown - Shutdown a Z8530 device
1341 * @dev: The Z8530 chip to shutdown
1342 *
 * We set the interrupt handlers to silence any interrupts. We then
 * reset the chip and wait 100uS to be sure the reset has completed,
 * in case the caller then tries to do stuff.
 *
 * This is called without the lock held.
 */
1349
1350int z8530_shutdown(struct z8530_dev *dev)
1351{
1352 unsigned long flags;
1353 /* Reset the chip */
1354
1355 spin_lock_irqsave(&dev->lock, flags);
1356 dev->chanA.irqs=&z8530_nop;
1357 dev->chanB.irqs=&z8530_nop;
1358 write_zsreg(&dev->chanA, R9, 0xC0);
	/* We must hold the lock over the udelay - the chip is off limits here */
1360 udelay(100);
1361 spin_unlock_irqrestore(&dev->lock, flags);
1362 return 0;
1363}
1364
1365EXPORT_SYMBOL(z8530_shutdown);
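
/*
 * Usage sketch (illustrative only): teardown mirrors probe - quiesce
 * the chip before the IRQ and I/O region go away. The region size and
 * the names used here are hypothetical.
 *
 *	z8530_shutdown(z);
 *	free_irq(z->irq, z);
 *	release_region(iobase, 8);
 */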
1366
1367/**
1368 * z8530_channel_load - Load channel data
1369 * @c: Z8530 channel to configure
1370 * @rtable: table of register, value pairs
1371 * FIXME: ioctl to allow user uploaded tables
1372 *
1373 * Load a Z8530 channel up from the system data. We use +16 to
1374 * indicate the "prime" registers. The value 255 terminates the
1375 * table.
1376 */
1377
1378int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1379{
1380 unsigned long flags;
1381
1382 spin_lock_irqsave(c->lock, flags);
1383
1384 while(*rtable!=255)
1385 {
1386 int reg=*rtable++;
1387 if(reg>0x0F)
1388 write_zsreg(c, R15, c->regs[15]|1);
1389 write_zsreg(c, reg&0x0F, *rtable);
1390 if(reg>0x0F)
1391 write_zsreg(c, R15, c->regs[15]&~1);
1392 c->regs[reg]=*rtable++;
1393 }
1394 c->rx_function=z8530_null_rx;
1395 c->skb=NULL;
1396 c->tx_skb=NULL;
1397 c->tx_next_skb=NULL;
1398 c->mtu=1500;
1399 c->max=0;
1400 c->count=0;
1401 c->status=read_zsreg(c, R0);
1402 c->sync=1;
1403 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1404
1405 spin_unlock_irqrestore(c->lock, flags);
1406 return 0;
1407}
1408
1409EXPORT_SYMBOL(z8530_channel_load);
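
/*
 * Usage sketch (illustrative only): a load table is a flat list of
 * register, value pairs terminated by 255, with 16 added to a register
 * number to select the "prime" bank via R15. The table below only
 * shows the format - the values are illustrative, not a recommended
 * configuration, and "mycard_sync" and "z" are hypothetical names.
 *
 *	static u8 mycard_sync[] = {
 *		4,	SYNC_ENAB | SDLC | X1CLK,
 *		1,	0,
 *		3,	ENT_HM | RxCRC_ENAB | Rx8,
 *		5,	TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
 *		9,	NV | MIE | NORESET,
 *		255
 *	};
 *
 *	z8530_channel_load(&z->chanA, mycard_sync);
 */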
1410
1411
1412/**
1413 * z8530_tx_begin - Begin packet transmission
1414 * @c: The Z8530 channel to kick
1415 *
1416 * This is the speed sensitive side of transmission. If we are called
1417 * and no buffer is being transmitted we commence the next buffer. If
1418 * nothing is queued we idle the sync.
1419 *
1420 * Note: We are handling this code path in the interrupt path, keep it
1421 * fast or bad things will happen.
1422 *
1423 * Called with the lock held.
1424 */
1425
1426static void z8530_tx_begin(struct z8530_channel *c)
1427{
1428 unsigned long flags;
1429 if(c->tx_skb)
1430 return;
1431
1432 c->tx_skb=c->tx_next_skb;
1433 c->tx_next_skb=NULL;
1434 c->tx_ptr=c->tx_next_ptr;
1435
1436 if(c->tx_skb==NULL)
1437 {
1438 /* Idle on */
1439 if(c->dma_tx)
1440 {
1441 flags=claim_dma_lock();
1442 disable_dma(c->txdma);
1443 /*
1444 * Check if we crapped out.
1445 */
1446 if (get_dma_residue(c->txdma))
1447 {
1448 c->netdevice->stats.tx_dropped++;
1449 c->netdevice->stats.tx_fifo_errors++;
1450 }
1451 release_dma_lock(flags);
1452 }
1453 c->txcount=0;
1454 }
1455 else
1456 {
1457 c->txcount=c->tx_skb->len;
1458
1459
1460 if(c->dma_tx)
1461 {
1462 /*
1463 * FIXME. DMA is broken for the original 8530,
1464 * on the older parts we need to set a flag and
1465 * wait for a further TX interrupt to fire this
1466 * stage off
1467 */
1468
1469 flags=claim_dma_lock();
1470 disable_dma(c->txdma);
1471
1472 /*
1473 * These two are needed by the 8530/85C30
1474 * and must be issued when idling.
1475 */
1476
1477 if(c->dev->type!=Z85230)
1478 {
1479 write_zsctrl(c, RES_Tx_CRC);
1480 write_zsctrl(c, RES_EOM_L);
1481 }
1482 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1483 clear_dma_ff(c->txdma);
1484 set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1485 set_dma_count(c->txdma, c->txcount);
1486 enable_dma(c->txdma);
1487 release_dma_lock(flags);
1488 write_zsctrl(c, RES_EOM_L);
1489 write_zsreg(c, R5, c->regs[R5]|TxENAB);
1490 }
1491 else
1492 {
1493
1494 /* ABUNDER off */
1495 write_zsreg(c, R10, c->regs[10]);
1496 write_zsctrl(c, RES_Tx_CRC);
1497
1498 while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1499 {
1500 write_zsreg(c, R8, *c->tx_ptr++);
1501 c->txcount--;
1502 }
1503
1504 }
1505 }
1506 /*
1507 * Since we emptied tx_skb we can ask for more
1508 */
1509 netif_wake_queue(c->netdevice);
1510}
1511
1512/**
1513 * z8530_tx_done - TX complete callback
1514 * @c: The channel that completed a transmit.
1515 *
1516 * This is called when we complete a packet send. We wake the queue,
1517 * start the next packet going and then free the buffer of the existing
1518 * packet. This code is fairly timing sensitive.
1519 *
1520 * Called with the register lock held.
1521 */
1522
1523static void z8530_tx_done(struct z8530_channel *c)
1524{
1525 struct sk_buff *skb;
1526
1527 /* Actually this can happen.*/
1528 if (c->tx_skb == NULL)
1529 return;
1530
1531 skb = c->tx_skb;
1532 c->tx_skb = NULL;
1533 z8530_tx_begin(c);
1534 c->netdevice->stats.tx_packets++;
1535 c->netdevice->stats.tx_bytes += skb->len;
1536 dev_consume_skb_irq(skb);
1537}
1538
1539/**
1540 * z8530_null_rx - Discard a packet
1541 * @c: The channel the packet arrived on
1542 * @skb: The buffer
1543 *
1544 * We point the receive handler at this function when idle. Instead
1545 * of processing the frames we get to throw them away.
1546 */
1547
1548void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1549{
1550 dev_kfree_skb_any(skb);
1551}
1552
1553EXPORT_SYMBOL(z8530_null_rx);
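
/*
 * Usage sketch (illustrative only): a card driver replaces the null
 * handler with its own hook that pushes completed frames into the
 * networking stack, installed before the channel is opened, e.g.
 * dev->chanA.rx_function = mycard_rx. Card drivers usually trim the
 * two trailing CRC bytes before handing the frame up. The hdlc
 * helpers and the "mycard" name are assumptions about the caller.
 *
 *	static void mycard_rx(struct z8530_channel *c, struct sk_buff *skb)
 *	{
 *		struct net_device *d = c->netdevice;
 *
 *		skb_trim(skb, skb->len - 2);
 *		skb->protocol = hdlc_type_trans(skb, d);
 *		skb_reset_mac_header(skb);
 *		skb->dev = d;
 *		netif_rx(skb);
 *	}
 */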
1554
1555/**
1556 * z8530_rx_done - Receive completion callback
1557 * @c: The channel that completed a receive
1558 *
1559 * A new packet is complete. Our goal here is to get back into receive
1560 * mode as fast as possible. On the Z85230 we could change to using
1561 * ESCC mode, but on the older chips we have no choice. We flip to the
1562 * new buffer immediately in DMA mode so that the DMA of the next
1563 * frame can occur while we are copying the previous buffer to an sk_buff
1564 *
1565 * Called with the lock held
1566 */
1567
1568static void z8530_rx_done(struct z8530_channel *c)
1569{
1570 struct sk_buff *skb;
1571 int ct;
1572
1573 /*
1574 * Is our receive engine in DMA mode
1575 */
1576
1577 if(c->rxdma_on)
1578 {
1579 /*
1580 * Save the ready state and the buffer currently
1581 * being used as the DMA target
1582 */
1583
1584 int ready=c->dma_ready;
1585 unsigned char *rxb=c->rx_buf[c->dma_num];
1586 unsigned long flags;
1587
1588 /*
1589 * Complete this DMA. Necessary to find the length
1590 */
1591
1592 flags=claim_dma_lock();
1593
1594 disable_dma(c->rxdma);
1595 clear_dma_ff(c->rxdma);
1596 c->rxdma_on=0;
1597 ct=c->mtu-get_dma_residue(c->rxdma);
1598 if(ct<0)
1599 ct=2; /* Shit happens.. */
1600 c->dma_ready=0;
1601
1602 /*
1603 * Normal case: the other slot is free, start the next DMA
1604 * into it immediately.
1605 */
1606
1607 if(ready)
1608 {
1609 c->dma_num^=1;
1610 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1611 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1612 set_dma_count(c->rxdma, c->mtu);
1613 c->rxdma_on = 1;
1614 enable_dma(c->rxdma);
1615 /* Stop any frames that we missed the head of
1616 from passing */
1617 write_zsreg(c, R0, RES_Rx_CRC);
1618 }
		else
			/* Can't occur as we don't re-enable the DMA irq
			   until after the flip is done */
			netdev_warn(c->netdevice, "DMA flip overrun!\n");
1623
1624 release_dma_lock(flags);
1625
1626 /*
1627 * Shove the old buffer into an sk_buff. We can't DMA
1628 * directly into one on a PC - it might be above the 16Mb
1629 * boundary. Optimisation - we could check to see if we
1630 * can avoid the copy. Optimisation 2 - make the memcpy
1631 * a copychecksum.
1632 */
1633
1634 skb = dev_alloc_skb(ct);
1635 if (skb == NULL) {
1636 c->netdevice->stats.rx_dropped++;
1637 netdev_warn(c->netdevice, "Memory squeeze\n");
1638 } else {
1639 skb_put(skb, ct);
1640 skb_copy_to_linear_data(skb, rxb, ct);
1641 c->netdevice->stats.rx_packets++;
1642 c->netdevice->stats.rx_bytes += ct;
1643 }
1644 c->dma_ready = 1;
1645 } else {
1646 RT_LOCK;
1647 skb = c->skb;
1648
1649 /*
1650 * The game we play for non DMA is similar. We want to
1651 * get the controller set up for the next packet as fast
1652 * as possible. We potentially only have one byte + the
1653 * fifo length for this. Thus we want to flip to the new
1654 * buffer and then mess around copying and allocating
1655 * things. For the current case it doesn't matter but
1656 * if you build a system where the sync irq isn't blocked
1657 * by the kernel IRQ disable then you need only block the
1658 * sync IRQ for the RT_LOCK area.
1659 *
1660 */
1661 ct=c->count;
1662
		c->skb = c->skb2;
		c->count = 0;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->max = 0;
		}
1673 RT_UNLOCK;
1674
1675 c->skb2 = dev_alloc_skb(c->mtu);
1676 if (c->skb2 == NULL)
1677 netdev_warn(c->netdevice, "memory squeeze\n");
1678 else
1679 skb_put(c->skb2, c->mtu);
1680 c->netdevice->stats.rx_packets++;
1681 c->netdevice->stats.rx_bytes += ct;
1682 }
1683 /*
1684 * If we received a frame we must now process it.
1685 */
1686 if (skb) {
1687 skb_trim(skb, ct);
1688 c->rx_function(c, skb);
1689 } else {
1690 c->netdevice->stats.rx_dropped++;
1691 netdev_err(c->netdevice, "Lost a frame\n");
1692 }
1693}
1694
1695/**
1696 * spans_boundary - Check a packet can be ISA DMA'd
1697 * @skb: The buffer to check
1698 *
 * Returns true if the buffer crosses a 64K DMA boundary on a PC. The
 * poor thing can only DMA within a 64K block, not across the edges
 * of it.
1701 */
1702
1703static inline int spans_boundary(struct sk_buff *skb)
1704{
1705 unsigned long a=(unsigned long)skb->data;
1706 a^=(a+skb->len);
1707 if(a&0x00010000) /* If the 64K bit is different.. */
1708 return 1;
1709 return 0;
1710}
1711
1712/**
1713 * z8530_queue_xmit - Queue a packet
1714 * @c: The channel to use
1715 * @skb: The packet to kick down the channel
1716 *
 * Queue a packet for transmission. Because the Z85230 leaves us with
 * per-packet interrupt latencies that are rather hard to meet even
 * in DMA mode, we do the copy into the DMA flip buffer here, if it
 * is needed, rather than in the IRQ handler.
1721 *
1722 * Called from the network code. The lock is not held at this
1723 * point.
1724 */
1725
1726netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1727{
1728 unsigned long flags;
1729
1730 netif_stop_queue(c->netdevice);
1731 if(c->tx_next_skb)
1732 return NETDEV_TX_BUSY;
1733
1734
1735 /* PC SPECIFIC - DMA limits */
1736
	/*
	 * If we will DMA the transmit and the buffer lies above the
	 * 16MB ISA limit or crosses a 64K boundary, then copy it to
	 * the flip buffer
	 */
1741
1742 if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1743 {
1744 /*
1745 * Send the flip buffer, and flip the flippy bit.
1746 * We don't care which is used when just so long as
1747 * we never use the same buffer twice in a row. Since
1748 * only one buffer can be going out at a time the other
1749 * has to be safe.
1750 */
1751 c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1752 c->tx_dma_used^=1; /* Flip temp buffer */
1753 skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
1754 }
1755 else
1756 c->tx_next_ptr=skb->data;
1757 RT_LOCK;
1758 c->tx_next_skb=skb;
1759 RT_UNLOCK;
1760
1761 spin_lock_irqsave(c->lock, flags);
1762 z8530_tx_begin(c);
1763 spin_unlock_irqrestore(c->lock, flags);
1764
1765 return NETDEV_TX_OK;
1766}
1767
1768EXPORT_SYMBOL(z8530_queue_xmit);
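
/*
 * Usage sketch (illustrative only): the network stack does not call
 * this directly; a card driver wraps it in its ndo_start_xmit hook.
 * The use of ml_priv and the "mycard" name are assumptions about the
 * caller.
 *
 *	static netdev_tx_t mycard_start_xmit(struct sk_buff *skb,
 *					     struct net_device *d)
 *	{
 *		struct z8530_channel *ch = d->ml_priv;
 *
 *		return z8530_queue_xmit(ch, skb);
 *	}
 */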
1769
1770/*
1771 * Module support
1772 */
1773static const char banner[] __initconst =
1774 KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1775
1776static int __init z85230_init_driver(void)
1777{
1778 printk(banner);
1779 return 0;
1780}
1781module_init(z85230_init_driver);
1782
1783static void __exit z85230_cleanup_driver(void)
1784{
1785}
1786module_exit(z85230_cleanup_driver);
1787
1788MODULE_AUTHOR("Red Hat Inc.");
1789MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1790MODULE_LICENSE("GPL");