1/*********************************************************************
2 *
3 * Filename: w83977af_ir.c
4 * Version: 1.0
5 * Description: FIR driver for the Winbond W83977AF Super I/O chip
6 * Status: Experimental.
7 * Author: Paul VanderSpek
8 * Created at: Wed Nov 4 11:46:16 1998
9 * Modified at: Fri Jan 28 12:10:59 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 * Copyright (c) 1998-1999 Rebel.com
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
21 * warranty for any of this software. This material is provided "AS-IS"
22 * and at no charge.
23 *
24 * If you find bugs in this file, it's very likely that the same bug
25 * will also be in pc87108.c, since the implementations are quite
26 * similar.
27 *
28 * Notice that all functions that need to access the chip in _any_
29 * way must save the BSR register on entry and restore it on exit.
30 * It is _very_ important to follow this policy!
31 *
32 * __u8 bank;
33 *
34 * bank = inb( iobase+BSR);
35 *
36 * do_your_stuff_here();
37 *
38 * outb( bank, iobase+BSR);
39 *
40 ********************************************************************/
41
42#include <linux/module.h>
43#include <linux/kernel.h>
44#include <linux/types.h>
45#include <linux/skbuff.h>
46#include <linux/netdevice.h>
47#include <linux/ioport.h>
48#include <linux/delay.h>
49#include <linux/init.h>
50#include <linux/interrupt.h>
51#include <linux/rtnetlink.h>
52#include <linux/dma-mapping.h>
53#include <linux/gfp.h>
54
55#include <asm/io.h>
56#include <asm/dma.h>
57#include <asm/byteorder.h>
58
59#include <net/irda/irda.h>
60#include <net/irda/wrapper.h>
61#include <net/irda/irda_device.h>
62#include "w83977af.h"
63#include "w83977af_ir.h"
64
65#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
66#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
67#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
68#endif
69#define CONFIG_USE_W977_PNP /* Currently needed */
70#define PIO_MAX_SPEED 115200
71
72static char *driver_name = "w83977af_ir";
73static int qos_mtt_bits = 0x07; /* 1 ms or more */
74
75#define CHIP_IO_EXTENT 8
76
77static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
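/* Entries of ~0 mean "not configured": w83977af_init() only probes
 * addresses below 2000, so the first such entry ends the scan. */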
78#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
79static unsigned int irq[] = { 6, 0, 0, 0 };
80#else
81static unsigned int irq[] = { 11, 0, 0, 0 };
82#endif
83static unsigned int dma[] = { 1, 0, 0, 0 };
84static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
85static unsigned int efio = W977_EFIO_BASE;
86
87static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
88
89/* Some prototypes */
90static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
91 unsigned int dma);
92static int w83977af_close(struct w83977af_ir *self);
93static int w83977af_probe(int iobase, int irq, int dma);
94static int w83977af_dma_receive(struct w83977af_ir *self);
95static int w83977af_dma_receive_complete(struct w83977af_ir *self);
96static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
97 struct net_device *dev);
98static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
99static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
100static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
101static int w83977af_is_receiving(struct w83977af_ir *self);
102
103static int w83977af_net_open(struct net_device *dev);
104static int w83977af_net_close(struct net_device *dev);
105static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
106
107/*
108 * Function w83977af_init ()
109 *
110 * Initialize chip. Just try to find out how many chips we are dealing with
111 * and where they are
112 */
113static int __init w83977af_init(void)
114{
115 int i;
116
117 IRDA_DEBUG(0, "%s()\n", __func__ );
118
119 for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
120 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
121 return 0;
122 }
123 return -ENODEV;
124}
125
126/*
127 * Function w83977af_cleanup ()
128 *
129 * Close all configured chips
130 *
131 */
132static void __exit w83977af_cleanup(void)
133{
134 int i;
135
136 IRDA_DEBUG(4, "%s()\n", __func__ );
137
138 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
139 if (dev_self[i])
140 w83977af_close(dev_self[i]);
141 }
142}
143
144static const struct net_device_ops w83977_netdev_ops = {
145 .ndo_open = w83977af_net_open,
146 .ndo_stop = w83977af_net_close,
147 .ndo_start_xmit = w83977af_hard_xmit,
148 .ndo_do_ioctl = w83977af_net_ioctl,
149};
150
151/*
152 * Function w83977af_open (i, iobase, irq, dma)
153 *
154 * Open driver instance
155 *
156 */
157static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
158 unsigned int dma)
159{
160 struct net_device *dev;
161 struct w83977af_ir *self;
162 int err;
163
164 IRDA_DEBUG(0, "%s()\n", __func__ );
165
166 /* Lock the port that we need */
167 if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
168 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
169 __func__ , iobase);
170 return -ENODEV;
171 }
172
173 if (w83977af_probe(iobase, irq, dma) == -1) {
174 err = -1;
175 goto err_out;
176 }
177 /*
178 * Allocate new instance of the driver
179 */
180 dev = alloc_irdadev(sizeof(struct w83977af_ir));
181 if (dev == NULL) {
182 printk( KERN_ERR "IrDA: Can't allocate memory for "
183 "IrDA control block!\n");
184 err = -ENOMEM;
185 goto err_out;
186 }
187
188 self = netdev_priv(dev);
189 spin_lock_init(&self->lock);
190
191
192 /* Initialize IO */
193 self->io.fir_base = iobase;
194 self->io.irq = irq;
195 self->io.fir_ext = CHIP_IO_EXTENT;
196 self->io.dma = dma;
197 self->io.fifo_size = 32;
198
199 /* Initialize QoS for this device */
200 irda_init_max_qos_capabilies(&self->qos);
201
202	/* The only value we must override is the baudrate */
203
204 /* FIXME: The HP HDLS-1100 does not support 1152000! */
205 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
206 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
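	/* The << 8 presumably moves the 4 Mb/s capability bit into the second
	 * byte of the 16 bit baud_rate field used during IrLAP negotiation. */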
207
208 /* The HP HDLS-1100 needs 1 ms according to the specs */
209 self->qos.min_turn_time.bits = qos_mtt_bits;
210 irda_qos_bits_to_value(&self->qos);
211
212 /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
213 self->rx_buff.truesize = 14384;
214 self->tx_buff.truesize = 4000;
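	/* Worked example of the formula above: a 2048 byte max receive frame
	 * (set up in w83977af_probe) and a presumed IrDA window of 7 frames
	 * give (2048 + 6) * 7 + 6 = 14384, which is rx_buff.truesize. */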
215
216 /* Allocate memory if needed */
217 self->rx_buff.head =
218 dma_alloc_coherent(NULL, self->rx_buff.truesize,
219 &self->rx_buff_dma, GFP_KERNEL);
220 if (self->rx_buff.head == NULL) {
221 err = -ENOMEM;
222 goto err_out1;
223 }
224
225 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
226
227 self->tx_buff.head =
228 dma_alloc_coherent(NULL, self->tx_buff.truesize,
229 &self->tx_buff_dma, GFP_KERNEL);
230 if (self->tx_buff.head == NULL) {
231 err = -ENOMEM;
232 goto err_out2;
233 }
234 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
235
236 self->rx_buff.in_frame = FALSE;
237 self->rx_buff.state = OUTSIDE_FRAME;
238 self->tx_buff.data = self->tx_buff.head;
239 self->rx_buff.data = self->rx_buff.head;
240 self->netdev = dev;
241
242 dev->netdev_ops = &w83977_netdev_ops;
243
244 err = register_netdev(dev);
245 if (err) {
246 IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
247 goto err_out3;
248 }
249 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
250
251 /* Need to store self somewhere */
252 dev_self[i] = self;
253
254 return 0;
255err_out3:
256 dma_free_coherent(NULL, self->tx_buff.truesize,
257 self->tx_buff.head, self->tx_buff_dma);
258err_out2:
259 dma_free_coherent(NULL, self->rx_buff.truesize,
260 self->rx_buff.head, self->rx_buff_dma);
261err_out1:
262 free_netdev(dev);
263err_out:
264 release_region(iobase, CHIP_IO_EXTENT);
265 return err;
266}
267
268/*
269 * Function w83977af_close (self)
270 *
271 * Close driver instance
272 *
273 */
274static int w83977af_close(struct w83977af_ir *self)
275{
276 int iobase;
277
278 IRDA_DEBUG(0, "%s()\n", __func__ );
279
280 iobase = self->io.fir_base;
281
282#ifdef CONFIG_USE_W977_PNP
283 /* enter PnP configuration mode */
284 w977_efm_enter(efio);
285
286 w977_select_device(W977_DEVICE_IR, efio);
287
288 /* Deactivate device */
289 w977_write_reg(0x30, 0x00, efio);
290
291 w977_efm_exit(efio);
292#endif /* CONFIG_USE_W977_PNP */
293
294 /* Remove netdevice */
295 unregister_netdev(self->netdev);
296
297 /* Release the PORT that this driver is using */
298 IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
299 __func__ , self->io.fir_base);
300 release_region(self->io.fir_base, self->io.fir_ext);
301
302 if (self->tx_buff.head)
303 dma_free_coherent(NULL, self->tx_buff.truesize,
304 self->tx_buff.head, self->tx_buff_dma);
305
306 if (self->rx_buff.head)
307 dma_free_coherent(NULL, self->rx_buff.truesize,
308 self->rx_buff.head, self->rx_buff_dma);
309
310 free_netdev(self->netdev);
311
312 return 0;
313}
314
315static int w83977af_probe(int iobase, int irq, int dma)
316{
317 int version;
318 int i;
319
320 for (i=0; i < 2; i++) {
321 IRDA_DEBUG( 0, "%s()\n", __func__ );
322#ifdef CONFIG_USE_W977_PNP
323 /* Enter PnP configuration mode */
324 w977_efm_enter(efbase[i]);
325
326 w977_select_device(W977_DEVICE_IR, efbase[i]);
327
328 /* Configure PnP port, IRQ, and DMA channel */
329 w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
330 w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
331
332 w977_write_reg(0x70, irq, efbase[i]);
333#ifdef CONFIG_ARCH_NETWINDER
334 /* Netwinder uses 1 higher than Linux */
335 w977_write_reg(0x74, dma+1, efbase[i]);
336#else
337 w977_write_reg(0x74, dma, efbase[i]);
338#endif /* CONFIG_ARCH_NETWINDER */
339 w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
340
341 /* Set append hardware CRC, enable IR bank selection */
342 w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
343
344 /* Activate device */
345 w977_write_reg(0x30, 0x01, efbase[i]);
346
347 w977_efm_exit(efbase[i]);
348#endif /* CONFIG_USE_W977_PNP */
349 /* Disable Advanced mode */
350 switch_bank(iobase, SET2);
351		outb(0x00, iobase+2);
352
353 /* Turn on UART (global) interrupts */
354 switch_bank(iobase, SET0);
355 outb(HCR_EN_IRQ, iobase+HCR);
356
357 /* Switch to advanced mode */
358 switch_bank(iobase, SET2);
359 outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
360
361 /* Set default IR-mode */
362 switch_bank(iobase, SET0);
363 outb(HCR_SIR, iobase+HCR);
364
365 /* Read the Advanced IR ID */
366 switch_bank(iobase, SET3);
367 version = inb(iobase+AUID);
368
369		/* The high nibble of the version should be 0x1 */
370 if (0x10 == (version & 0xf0)) {
371 efio = efbase[i];
372
373 /* Set FIFO size to 32 */
374 switch_bank(iobase, SET2);
375 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
376
377 /* Set FIFO threshold to TX17, RX16 */
378 switch_bank(iobase, SET0);
379 outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
380 UFR_EN_FIFO,iobase+UFR);
381
382 /* Receiver frame length */
383 switch_bank(iobase, SET4);
384 outb(2048 & 0xff, iobase+6);
385 outb((2048 >> 8) & 0x1f, iobase+7);
386
387 /*
388 * Init HP HSDL-1100 transceiver.
389 *
390			 * Set IRX_MSL since we have 2 receive paths, IRRX
391			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
392			 * be an input pin used for IRRXH
393 *
394 * IRRX pin 37 connected to receiver
395 * IRTX pin 38 connected to transmitter
396 * FIRRX pin 39 connected to receiver (IRSL0)
397 * CIRRX pin 40 connected to pin 37
398 */
399 switch_bank(iobase, SET7);
400 outb(0x40, iobase+7);
401
402 IRDA_MESSAGE("W83977AF (IR) driver loaded. "
403 "Version: 0x%02x\n", version);
404
405 return 0;
406 } else {
407			/* Try next extended function register address */
408			IRDA_DEBUG(0, "%s(), Wrong chip version\n", __func__);
409 }
410 }
411 return -1;
412}
413
414static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
415{
416 int ir_mode = HCR_SIR;
417 int iobase;
418 __u8 set;
419
420 iobase = self->io.fir_base;
421
422 /* Update accounting for new speed */
423 self->io.speed = speed;
424
425 /* Save current bank */
426 set = inb(iobase+SSR);
427
428 /* Disable interrupts */
429 switch_bank(iobase, SET0);
430 outb(0, iobase+ICR);
431
432 /* Select Set 2 */
433 switch_bank(iobase, SET2);
434 outb(0x00, iobase+ABHL);
435
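	/* The ABLL values below look like divisors of the 115200 baud base
	 * rate: 115200 / 9600 = 12 = 0x0c, 115200 / 19200 = 6, and so on,
	 * with ABHL held at 0x00. */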
436 switch (speed) {
437 case 9600: outb(0x0c, iobase+ABLL); break;
438 case 19200: outb(0x06, iobase+ABLL); break;
439 case 38400: outb(0x03, iobase+ABLL); break;
440 case 57600: outb(0x02, iobase+ABLL); break;
441 case 115200: outb(0x01, iobase+ABLL); break;
442 case 576000:
443 ir_mode = HCR_MIR_576;
444 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
445 break;
446 case 1152000:
447 ir_mode = HCR_MIR_1152;
448 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
449 break;
450 case 4000000:
451 ir_mode = HCR_FIR;
452 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
453 break;
454 default:
455 ir_mode = HCR_FIR;
456 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
457 break;
458 }
459
460 /* Set speed mode */
461 switch_bank(iobase, SET0);
462 outb(ir_mode, iobase+HCR);
463
464 /* set FIFO size to 32 */
465 switch_bank(iobase, SET2);
466 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
467
468 /* set FIFO threshold to TX17, RX16 */
469 switch_bank(iobase, SET0);
470 outb(0x00, iobase+UFR); /* Reset */
471 outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
472 outb(0xa7, iobase+UFR);
473
474 netif_wake_queue(self->netdev);
475
476 /* Enable some interrupts so we can receive frames */
477 switch_bank(iobase, SET0);
478 if (speed > PIO_MAX_SPEED) {
479 outb(ICR_EFSFI, iobase+ICR);
480 w83977af_dma_receive(self);
481 } else
482 outb(ICR_ERBRI, iobase+ICR);
483
484 /* Restore SSR */
485 outb(set, iobase+SSR);
486}
487
488/*
489 * Function w83977af_hard_xmit (skb, dev)
490 *
491 * Sets up a DMA transfer to send the current frame.
492 *
493 */
494static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
495 struct net_device *dev)
496{
497 struct w83977af_ir *self;
498 __s32 speed;
499 int iobase;
500 __u8 set;
501 int mtt;
502
503 self = netdev_priv(dev);
504
505 iobase = self->io.fir_base;
506
507 IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
508 (int) skb->len);
509
510 /* Lock transmit buffer */
511 netif_stop_queue(dev);
512
513 /* Check if we need to change the speed */
514 speed = irda_get_next_speed(skb);
515 if ((speed != self->io.speed) && (speed != -1)) {
516 /* Check for empty frame */
517 if (!skb->len) {
518 w83977af_change_speed(self, speed);
519 dev_kfree_skb(skb);
520 return NETDEV_TX_OK;
521 } else
522 self->new_speed = speed;
523 }
524
525 /* Save current set */
526 set = inb(iobase+SSR);
527
528 /* Decide if we should use PIO or DMA transfer */
529 if (self->io.speed > PIO_MAX_SPEED) {
530 self->tx_buff.data = self->tx_buff.head;
531 skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
532 self->tx_buff.len = skb->len;
533
534 mtt = irda_get_mtt(skb);
535 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
536 if (mtt)
537 udelay(mtt);
538
539 /* Enable DMA interrupt */
540 switch_bank(iobase, SET0);
541 outb(ICR_EDMAI, iobase+ICR);
542 w83977af_dma_write(self, iobase);
543 } else {
544 self->tx_buff.data = self->tx_buff.head;
545 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
546 self->tx_buff.truesize);
547
548 /* Add interrupt on tx low level (will fire immediately) */
549 switch_bank(iobase, SET0);
550 outb(ICR_ETXTHI, iobase+ICR);
551 }
552 dev_kfree_skb(skb);
553
554 /* Restore set register */
555 outb(set, iobase+SSR);
556
557 return NETDEV_TX_OK;
558}
559
560/*
561 * Function w83977af_dma_write (self, iobase)
562 *
563 * Send frame using DMA
564 *
565 */
566static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
567{
568 __u8 set;
569#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
570 unsigned long flags;
571 __u8 hcr;
572#endif
573 IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
574
575 /* Save current set */
576 set = inb(iobase+SSR);
577
578 /* Disable DMA */
579 switch_bank(iobase, SET0);
580 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
581
582 /* Choose transmit DMA channel */
583 switch_bank(iobase, SET2);
584 outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
585#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
586 spin_lock_irqsave(&self->lock, flags);
587
588 disable_dma(self->io.dma);
589 clear_dma_ff(self->io.dma);
590 set_dma_mode(self->io.dma, DMA_MODE_READ);
591 set_dma_addr(self->io.dma, self->tx_buff_dma);
592 set_dma_count(self->io.dma, self->tx_buff.len);
593#else
594 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
595 DMA_MODE_WRITE);
596#endif
597 self->io.direction = IO_XMIT;
598
599 /* Enable DMA */
600 switch_bank(iobase, SET0);
601#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
602 hcr = inb(iobase+HCR);
603 outb(hcr | HCR_EN_DMA, iobase+HCR);
604 enable_dma(self->io.dma);
605 spin_unlock_irqrestore(&self->lock, flags);
606#else
607 outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
608#endif
609
610 /* Restore set register */
611 outb(set, iobase+SSR);
612}
613
614/*
615 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
616 *
617 *    Fill the transmit FIFO with as much of the frame as it will
618 *    accept and return the number of bytes actually written
619 */
620static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
621{
622 int actual = 0;
623 __u8 set;
624
625 IRDA_DEBUG(4, "%s()\n", __func__ );
626
627 /* Save current bank */
628 set = inb(iobase+SSR);
629
630 switch_bank(iobase, SET0);
631 if (!(inb_p(iobase+USR) & USR_TSRE)) {
632 IRDA_DEBUG(4,
633 "%s(), warning, FIFO not empty yet!\n", __func__ );
634
635 fifo_size -= 17;
636 IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
637 __func__ , fifo_size);
638 }
639
640 /* Fill FIFO with current frame */
641 while ((fifo_size-- > 0) && (actual < len)) {
642 /* Transmit next byte */
643 outb(buf[actual++], iobase+TBR);
644 }
645
646 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
647 __func__ , fifo_size, actual, len);
648
649 /* Restore bank */
650 outb(set, iobase+SSR);
651
652 return actual;
653}
654
655/*
656 * Function w83977af_dma_xmit_complete (self)
657 *
658 *    The transfer of a frame is finished, so do the necessary things
659 *
660 *
661 */
662static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
663{
664 int iobase;
665 __u8 set;
666
667 IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
668
669 IRDA_ASSERT(self != NULL, return;);
670
671 iobase = self->io.fir_base;
672
673 /* Save current set */
674 set = inb(iobase+SSR);
675
676 /* Disable DMA */
677 switch_bank(iobase, SET0);
678 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
679
680	/* Check for underrun! */
681 if (inb(iobase+AUDR) & AUDR_UNDR) {
682 IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
683
684 self->netdev->stats.tx_errors++;
685 self->netdev->stats.tx_fifo_errors++;
686
687 /* Clear bit, by writing 1 to it */
688 outb(AUDR_UNDR, iobase+AUDR);
689 } else
690 self->netdev->stats.tx_packets++;
691
692
693 if (self->new_speed) {
694 w83977af_change_speed(self, self->new_speed);
695 self->new_speed = 0;
696 }
697
698 /* Unlock tx_buff and request another frame */
699 /* Tell the network layer, that we want more frames */
700 netif_wake_queue(self->netdev);
701
702 /* Restore set */
703 outb(set, iobase+SSR);
704}
705
706/*
707 * Function w83977af_dma_receive (self)
708 *
709 * Get ready for receiving a frame. The device will initiate a DMA
710 * if it starts to receive a frame.
711 *
712 */
713static int w83977af_dma_receive(struct w83977af_ir *self)
714{
715 int iobase;
716 __u8 set;
717#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
718 unsigned long flags;
719 __u8 hcr;
720#endif
721 IRDA_ASSERT(self != NULL, return -1;);
722
723 IRDA_DEBUG(4, "%s\n", __func__ );
724
725 iobase= self->io.fir_base;
726
727 /* Save current set */
728 set = inb(iobase+SSR);
729
730 /* Disable DMA */
731 switch_bank(iobase, SET0);
732 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
733
734 /* Choose DMA Rx, DMA Fairness, and Advanced mode */
735 switch_bank(iobase, SET2);
736 outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
737 iobase+ADCR1);
738
739 self->io.direction = IO_RECV;
740 self->rx_buff.data = self->rx_buff.head;
741
742#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
743 spin_lock_irqsave(&self->lock, flags);
744
745 disable_dma(self->io.dma);
746 clear_dma_ff(self->io.dma);
747 set_dma_mode(self->io.dma, DMA_MODE_READ);
748 set_dma_addr(self->io.dma, self->rx_buff_dma);
749 set_dma_count(self->io.dma, self->rx_buff.truesize);
750#else
751 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
752 DMA_MODE_READ);
753#endif
754 /*
755 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
756 * important that we don't reset the Tx FIFO since it might not
757 * be finished transmitting yet
758 */
759 switch_bank(iobase, SET0);
760 outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
761 self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
762
763 /* Enable DMA */
764 switch_bank(iobase, SET0);
765#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
766 hcr = inb(iobase+HCR);
767 outb(hcr | HCR_EN_DMA, iobase+HCR);
768 enable_dma(self->io.dma);
769 spin_unlock_irqrestore(&self->lock, flags);
770#else
771 outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
772#endif
773 /* Restore set */
774 outb(set, iobase+SSR);
775
776 return 0;
777}
778
779/*
780 * Function w83977af_dma_receive_complete (self)
781 *
782 * Finished with receiving a frame
783 *
784 */
785static int w83977af_dma_receive_complete(struct w83977af_ir *self)
786{
787 struct sk_buff *skb;
788 struct st_fifo *st_fifo;
789 int len;
790 int iobase;
791 __u8 set;
792 __u8 status;
793
794 IRDA_DEBUG(4, "%s\n", __func__ );
795
796 st_fifo = &self->st_fifo;
797
798 iobase = self->io.fir_base;
799
800 /* Save current set */
801 set = inb(iobase+SSR);
802
803 iobase = self->io.fir_base;
804
805 /* Read status FIFO */
806 switch_bank(iobase, SET5);
807 while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
808 st_fifo->entries[st_fifo->tail].status = status;
809
810 st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
811 st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
812
813 st_fifo->tail++;
814 st_fifo->len++;
815 }
816
817 while (st_fifo->len) {
818 /* Get first entry */
819 status = st_fifo->entries[st_fifo->head].status;
820 len = st_fifo->entries[st_fifo->head].len;
821 st_fifo->head++;
822 st_fifo->len--;
823
824 /* Check for errors */
825 if (status & FS_FO_ERR_MSK) {
826 if (status & FS_FO_LST_FR) {
827 /* Add number of lost frames to stats */
828 self->netdev->stats.rx_errors += len;
829 } else {
830 /* Skip frame */
831 self->netdev->stats.rx_errors++;
832
833 self->rx_buff.data += len;
834
835 if (status & FS_FO_MX_LEX)
836 self->netdev->stats.rx_length_errors++;
837
838 if (status & FS_FO_PHY_ERR)
839 self->netdev->stats.rx_frame_errors++;
840
841 if (status & FS_FO_CRC_ERR)
842 self->netdev->stats.rx_crc_errors++;
843 }
844 /* The errors below can be reported in both cases */
845 if (status & FS_FO_RX_OV)
846 self->netdev->stats.rx_fifo_errors++;
847
848 if (status & FS_FO_FSF_OV)
849 self->netdev->stats.rx_fifo_errors++;
850
851 } else {
852 /* Check if we have transferred all data to memory */
853 switch_bank(iobase, SET0);
854 if (inb(iobase+USR) & USR_RDR) {
855 udelay(80); /* Should be enough!? */
856 }
857
858 skb = dev_alloc_skb(len+1);
859 if (skb == NULL) {
860 printk(KERN_INFO
861 "%s(), memory squeeze, dropping frame.\n", __func__);
862 /* Restore set register */
863 outb(set, iobase+SSR);
864
865 return FALSE;
866 }
867
868 /* Align to 20 bytes */
869 skb_reserve(skb, 1);
870
871 /* Copy frame without CRC */
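			/* (16-bit CRC below 4 Mb/s, 32-bit CRC at FIR) */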
872 if (self->io.speed < 4000000) {
873 skb_put(skb, len-2);
874 skb_copy_to_linear_data(skb,
875 self->rx_buff.data,
876 len - 2);
877 } else {
878 skb_put(skb, len-4);
879 skb_copy_to_linear_data(skb,
880 self->rx_buff.data,
881 len - 4);
882 }
883
884 /* Move to next frame */
885 self->rx_buff.data += len;
886 self->netdev->stats.rx_packets++;
887
888 skb->dev = self->netdev;
889 skb_reset_mac_header(skb);
890 skb->protocol = htons(ETH_P_IRDA);
891 netif_rx(skb);
892 }
893 }
894 /* Restore set register */
895 outb(set, iobase+SSR);
896
897 return TRUE;
898}
899
900/*
901 * Function w83977af_pio_receive (self)
902 *
903 * Receive all data in receiver FIFO
904 *
905 */
906static void w83977af_pio_receive(struct w83977af_ir *self)
907{
908 __u8 byte = 0x00;
909 int iobase;
910
911 IRDA_DEBUG(4, "%s()\n", __func__ );
912
913 IRDA_ASSERT(self != NULL, return;);
914
915 iobase = self->io.fir_base;
916
917 /* Receive all characters in Rx FIFO */
918 do {
919 byte = inb(iobase+RBR);
920 async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
921 byte);
922 } while (inb(iobase+USR) & USR_RDR); /* Data available */
923}
924
925/*
926 * Function w83977af_sir_interrupt (self, isr)
927 *
928 * Handle SIR interrupt
929 *
930 */
931static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
932{
933 int actual;
934 __u8 new_icr = 0;
935 __u8 set;
936 int iobase;
937
938 IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
939
940 iobase = self->io.fir_base;
941 /* Transmit FIFO low on data */
942 if (isr & ISR_TXTH_I) {
943 /* Write data left in transmit buffer */
944 actual = w83977af_pio_write(self->io.fir_base,
945 self->tx_buff.data,
946 self->tx_buff.len,
947 self->io.fifo_size);
948
949 self->tx_buff.data += actual;
950 self->tx_buff.len -= actual;
951
952 self->io.direction = IO_XMIT;
953
954 /* Check if finished */
955 if (self->tx_buff.len > 0) {
956 new_icr |= ICR_ETXTHI;
957 } else {
958 set = inb(iobase+SSR);
959 switch_bank(iobase, SET0);
960 outb(AUDR_SFEND, iobase+AUDR);
961 outb(set, iobase+SSR);
962
963 self->netdev->stats.tx_packets++;
964
965 /* Feed me more packets */
966 netif_wake_queue(self->netdev);
967 new_icr |= ICR_ETBREI;
968 }
969 }
970 /* Check if transmission has completed */
971 if (isr & ISR_TXEMP_I) {
972 /* Check if we need to change the speed? */
973 if (self->new_speed) {
974 IRDA_DEBUG(2,
975 "%s(), Changing speed!\n", __func__ );
976 w83977af_change_speed(self, self->new_speed);
977 self->new_speed = 0;
978 }
979
980 /* Turn around and get ready to receive some data */
981 self->io.direction = IO_RECV;
982 new_icr |= ICR_ERBRI;
983 }
984
985 /* Rx FIFO threshold or timeout */
986 if (isr & ISR_RXTH_I) {
987 w83977af_pio_receive(self);
988
989 /* Keep receiving */
990 new_icr |= ICR_ERBRI;
991 }
992 return new_icr;
993}
994
995/*
996 * Function w83977af_fir_interrupt (self, isr)
997 *
998 * Handle MIR/FIR interrupt
999 *
1000 */
1001static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
1002{
1003 __u8 new_icr = 0;
1004 __u8 set;
1005 int iobase;
1006
1007 iobase = self->io.fir_base;
1008 set = inb(iobase+SSR);
1009
1010 /* End of frame detected in FIFO */
1011 if (isr & (ISR_FEND_I|ISR_FSF_I)) {
1012 if (w83977af_dma_receive_complete(self)) {
1013
1014 /* Wait for next status FIFO interrupt */
1015 new_icr |= ICR_EFSFI;
1016 } else {
1017 /* DMA not finished yet */
1018
1019 /* Set timer value, resolution 1 ms */
1020 switch_bank(iobase, SET4);
1021 outb(0x01, iobase+TMRL); /* 1 ms */
1022 outb(0x00, iobase+TMRH);
1023
1024 /* Start timer */
1025 outb(IR_MSL_EN_TMR, iobase+IR_MSL);
1026
1027 new_icr |= ICR_ETMRI;
1028 }
1029 }
1030 /* Timer finished */
1031 if (isr & ISR_TMR_I) {
1032 /* Disable timer */
1033 switch_bank(iobase, SET4);
1034 outb(0, iobase+IR_MSL);
1035
1036 /* Clear timer event */
1037 /* switch_bank(iobase, SET0); */
1038/* outb(ASCR_CTE, iobase+ASCR); */
1039
1040 /* Check if this is a TX timer interrupt */
1041 if (self->io.direction == IO_XMIT) {
1042 w83977af_dma_write(self, iobase);
1043
1044 new_icr |= ICR_EDMAI;
1045 } else {
1046 /* Check if DMA has now finished */
1047 w83977af_dma_receive_complete(self);
1048
1049 new_icr |= ICR_EFSFI;
1050 }
1051 }
1052 /* Finished with DMA */
1053 if (isr & ISR_DMA_I) {
1054 w83977af_dma_xmit_complete(self);
1055
1056 /* Check if there are more frames to be transmitted */
1057 /* if (irda_device_txqueue_empty(self)) { */
1058
1059 /* Prepare for receive
1060 *
1061 * ** Netwinder Tx DMA likes that we do this anyway **
1062 */
1063 w83977af_dma_receive(self);
1064 new_icr = ICR_EFSFI;
1065 /* } */
1066 }
1067
1068 /* Restore set */
1069 outb(set, iobase+SSR);
1070
1071 return new_icr;
1072}
1073
1074/*
1075 * Function w83977af_interrupt (irq, dev_id)
1076 *
1077 * An interrupt from the chip has arrived. Time to do some work
1078 *
1079 */
1080static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
1081{
1082 struct net_device *dev = dev_id;
1083 struct w83977af_ir *self;
1084 __u8 set, icr, isr;
1085 int iobase;
1086
1087 self = netdev_priv(dev);
1088
1089 iobase = self->io.fir_base;
1090
1091 /* Save current bank */
1092 set = inb(iobase+SSR);
1093 switch_bank(iobase, SET0);
1094
1095 icr = inb(iobase+ICR);
1096 isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */
1097
1098 outb(0, iobase+ICR); /* Disable interrupts */
1099
1100 if (isr) {
1101 /* Dispatch interrupt handler for the current speed */
1102 if (self->io.speed > PIO_MAX_SPEED )
1103 icr = w83977af_fir_interrupt(self, isr);
1104 else
1105 icr = w83977af_sir_interrupt(self, isr);
1106 }
1107
1108 outb(icr, iobase+ICR); /* Restore (new) interrupts */
1109 outb(set, iobase+SSR); /* Restore bank register */
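	/* IRQ_RETVAL(isr) is IRQ_HANDLED if any enabled interrupt was pending */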
1110 return IRQ_RETVAL(isr);
1111}
1112
1113/*
1114 * Function w83977af_is_receiving (self)
1115 *
1116 *    Return TRUE if we are currently receiving a frame
1117 *
1118 */
1119static int w83977af_is_receiving(struct w83977af_ir *self)
1120{
1121 int status = FALSE;
1122 int iobase;
1123 __u8 set;
1124
1125 IRDA_ASSERT(self != NULL, return FALSE;);
1126
1127 if (self->io.speed > 115200) {
1128 iobase = self->io.fir_base;
1129
1130 /* Check if rx FIFO is not empty */
1131 set = inb(iobase+SSR);
1132 switch_bank(iobase, SET2);
1133 if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
1134 /* We are receiving something */
1135 status = TRUE;
1136 }
1137 outb(set, iobase+SSR);
1138 } else
1139 status = (self->rx_buff.state != OUTSIDE_FRAME);
1140
1141 return status;
1142}
1143
1144/*
1145 * Function w83977af_net_open (dev)
1146 *
1147 * Start the device
1148 *
1149 */
1150static int w83977af_net_open(struct net_device *dev)
1151{
1152 struct w83977af_ir *self;
1153 int iobase;
1154 char hwname[32];
1155 __u8 set;
1156
1157 IRDA_DEBUG(0, "%s()\n", __func__ );
1158
1159 IRDA_ASSERT(dev != NULL, return -1;);
1160 self = netdev_priv(dev);
1161
1162 IRDA_ASSERT(self != NULL, return 0;);
1163
1164 iobase = self->io.fir_base;
1165
1166 if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
1167 (void *) dev)) {
1168 return -EAGAIN;
1169 }
1170 /*
1171 * Always allocate the DMA channel after the IRQ,
1172 * and clean up on failure.
1173 */
1174 if (request_dma(self->io.dma, dev->name)) {
1175		free_irq(self->io.irq, dev);
1176 return -EAGAIN;
1177 }
1178
1179 /* Save current set */
1180 set = inb(iobase+SSR);
1181
1182 /* Enable some interrupts so we can receive frames again */
1183 switch_bank(iobase, SET0);
1184 if (self->io.speed > 115200) {
1185 outb(ICR_EFSFI, iobase+ICR);
1186 w83977af_dma_receive(self);
1187 } else
1188 outb(ICR_ERBRI, iobase+ICR);
1189
1190 /* Restore bank register */
1191 outb(set, iobase+SSR);
1192
1193 /* Ready to play! */
1194 netif_start_queue(dev);
1195
1196 /* Give self a hardware name */
1197 sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
1198
1199 /*
1200 * Open new IrLAP layer instance, now that everything should be
1201 * initialized properly
1202 */
1203 self->irlap = irlap_open(dev, &self->qos, hwname);
1204
1205 return 0;
1206}
1207
1208/*
1209 * Function w83977af_net_close (dev)
1210 *
1211 * Stop the device
1212 *
1213 */
1214static int w83977af_net_close(struct net_device *dev)
1215{
1216 struct w83977af_ir *self;
1217 int iobase;
1218 __u8 set;
1219
1220 IRDA_DEBUG(0, "%s()\n", __func__ );
1221
1222 IRDA_ASSERT(dev != NULL, return -1;);
1223
1224 self = netdev_priv(dev);
1225
1226 IRDA_ASSERT(self != NULL, return 0;);
1227
1228 iobase = self->io.fir_base;
1229
1230 /* Stop device */
1231 netif_stop_queue(dev);
1232
1233 /* Stop and remove instance of IrLAP */
1234 if (self->irlap)
1235 irlap_close(self->irlap);
1236 self->irlap = NULL;
1237
1238 disable_dma(self->io.dma);
1239
1240 /* Save current set */
1241 set = inb(iobase+SSR);
1242
1243 /* Disable interrupts */
1244 switch_bank(iobase, SET0);
1245 outb(0, iobase+ICR);
1246
1247 free_irq(self->io.irq, dev);
1248 free_dma(self->io.dma);
1249
1250 /* Restore bank register */
1251 outb(set, iobase+SSR);
1252
1253 return 0;
1254}
1255
1256/*
1257 * Function w83977af_net_ioctl (dev, rq, cmd)
1258 *
1259 * Process IOCTL commands for this device
1260 *
1261 */
1262static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1263{
1264 struct if_irda_req *irq = (struct if_irda_req *) rq;
1265 struct w83977af_ir *self;
1266 unsigned long flags;
1267 int ret = 0;
1268
1269 IRDA_ASSERT(dev != NULL, return -1;);
1270
1271 self = netdev_priv(dev);
1272
1273 IRDA_ASSERT(self != NULL, return -1;);
1274
1275 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
1276
1277 spin_lock_irqsave(&self->lock, flags);
1278
1279 switch (cmd) {
1280 case SIOCSBANDWIDTH: /* Set bandwidth */
1281 if (!capable(CAP_NET_ADMIN)) {
1282 ret = -EPERM;
1283 goto out;
1284 }
1285 w83977af_change_speed(self, irq->ifr_baudrate);
1286 break;
1287 case SIOCSMEDIABUSY: /* Set media busy */
1288 if (!capable(CAP_NET_ADMIN)) {
1289 ret = -EPERM;
1290 goto out;
1291 }
1292 irda_device_set_media_busy(self->netdev, TRUE);
1293 break;
1294 case SIOCGRECEIVING: /* Check if we are receiving right now */
1295 irq->ifr_receiving = w83977af_is_receiving(self);
1296 break;
1297 default:
1298 ret = -EOPNOTSUPP;
1299 }
1300out:
1301 spin_unlock_irqrestore(&self->lock, flags);
1302 return ret;
1303}
1304
1305MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
1306MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
1307MODULE_LICENSE("GPL");
1308
1309
1310module_param(qos_mtt_bits, int, 0);
1311MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
1312module_param_array(io, int, NULL, 0);
1313MODULE_PARM_DESC(io, "Base I/O addresses");
1314module_param_array(irq, int, NULL, 0);
1315MODULE_PARM_DESC(irq, "IRQ lines");
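
/*
 * Example usage (illustrative; assumes the module is built as w83977af_ir
 * and that the values match your hardware):
 *
 *	modprobe w83977af_ir io=0x180 irq=11 qos_mtt_bits=0x07
 *
 * io and irq each take up to four comma-separated values, one per chip.
 */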
1316
1317/*
1318 * Function init_module (void)
1319 *
1320 *    Module entry point; see w83977af_init() above
1321 *
1322 */
1323module_init(w83977af_init);
1324
1325/*
1326 * Function cleanup_module (void)
1327 *
1328 *    Module exit point; see w83977af_cleanup() above
1329 *
1330 */
1331module_exit(w83977af_cleanup);
1/*********************************************************************
2 *
3 * Filename: w83977af_ir.c
4 * Version: 1.0
5 * Description: FIR driver for the Winbond W83977AF Super I/O chip
6 * Status: Experimental.
7 * Author: Paul VanderSpek
8 * Created at: Wed Nov 4 11:46:16 1998
9 * Modified at: Fri Jan 28 12:10:59 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 * Copyright (c) 1998-1999 Rebel.com
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
21 * warranty for any of this software. This material is provided "AS-IS"
22 * and at no charge.
23 *
24 * If you find bugs in this file, its very likely that the same bug
25 * will also be in pc87108.c since the implementations are quite
26 * similar.
27 *
28 * Notice that all functions that needs to access the chip in _any_
29 * way, must save BSR register on entry, and restore it on exit.
30 * It is _very_ important to follow this policy!
31 *
32 * __u8 bank;
33 *
34 * bank = inb( iobase+BSR);
35 *
36 * do_your_stuff_here();
37 *
38 * outb( bank, iobase+BSR);
39 *
40 ********************************************************************/
41
42#include <linux/module.h>
43#include <linux/kernel.h>
44#include <linux/types.h>
45#include <linux/skbuff.h>
46#include <linux/netdevice.h>
47#include <linux/ioport.h>
48#include <linux/delay.h>
49#include <linux/init.h>
50#include <linux/interrupt.h>
51#include <linux/rtnetlink.h>
52#include <linux/dma-mapping.h>
53#include <linux/gfp.h>
54
55#include <asm/io.h>
56#include <asm/dma.h>
57#include <asm/byteorder.h>
58
59#include <net/irda/irda.h>
60#include <net/irda/wrapper.h>
61#include <net/irda/irda_device.h>
62#include "w83977af.h"
63#include "w83977af_ir.h"
64
65#define CONFIG_USE_W977_PNP /* Currently needed */
66#define PIO_MAX_SPEED 115200
67
68static char *driver_name = "w83977af_ir";
69static int qos_mtt_bits = 0x07; /* 1 ms or more */
70
71#define CHIP_IO_EXTENT 8
72
73static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
74#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
75static unsigned int irq[] = { 6, 0, 0, 0 };
76#else
77static unsigned int irq[] = { 11, 0, 0, 0 };
78#endif
79static unsigned int dma[] = { 1, 0, 0, 0 };
80static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
81static unsigned int efio = W977_EFIO_BASE;
82
83static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
84
85/* Some prototypes */
86static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
87 unsigned int dma);
88static int w83977af_close(struct w83977af_ir *self);
89static int w83977af_probe(int iobase, int irq, int dma);
90static int w83977af_dma_receive(struct w83977af_ir *self);
91static int w83977af_dma_receive_complete(struct w83977af_ir *self);
92static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
93 struct net_device *dev);
94static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
95static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
96static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
97static int w83977af_is_receiving(struct w83977af_ir *self);
98
99static int w83977af_net_open(struct net_device *dev);
100static int w83977af_net_close(struct net_device *dev);
101static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
102
103/*
104 * Function w83977af_init ()
105 *
106 * Initialize chip. Just try to find out how many chips we are dealing with
107 * and where they are
108 */
109static int __init w83977af_init(void)
110{
111 int i;
112
113 for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
114 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
115 return 0;
116 }
117 return -ENODEV;
118}
119
120/*
121 * Function w83977af_cleanup ()
122 *
123 * Close all configured chips
124 *
125 */
126static void __exit w83977af_cleanup(void)
127{
128 int i;
129
130 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
131 if (dev_self[i])
132 w83977af_close(dev_self[i]);
133 }
134}
135
136static const struct net_device_ops w83977_netdev_ops = {
137 .ndo_open = w83977af_net_open,
138 .ndo_stop = w83977af_net_close,
139 .ndo_start_xmit = w83977af_hard_xmit,
140 .ndo_do_ioctl = w83977af_net_ioctl,
141};
142
143/*
144 * Function w83977af_open (iobase, irq)
145 *
146 * Open driver instance
147 *
148 */
149static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
150 unsigned int dma)
151{
152 struct net_device *dev;
153 struct w83977af_ir *self;
154 int err;
155
156 /* Lock the port that we need */
157 if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
158 pr_debug("%s(), can't get iobase of 0x%03x\n",
159 __func__ , iobase);
160 return -ENODEV;
161 }
162
163 if (w83977af_probe(iobase, irq, dma) == -1) {
164 err = -1;
165 goto err_out;
166 }
167 /*
168 * Allocate new instance of the driver
169 */
170 dev = alloc_irdadev(sizeof(struct w83977af_ir));
171 if (dev == NULL) {
172 printk( KERN_ERR "IrDA: Can't allocate memory for "
173 "IrDA control block!\n");
174 err = -ENOMEM;
175 goto err_out;
176 }
177
178 self = netdev_priv(dev);
179 spin_lock_init(&self->lock);
180
181
182 /* Initialize IO */
183 self->io.fir_base = iobase;
184 self->io.irq = irq;
185 self->io.fir_ext = CHIP_IO_EXTENT;
186 self->io.dma = dma;
187 self->io.fifo_size = 32;
188
189 /* Initialize QoS for this device */
190 irda_init_max_qos_capabilies(&self->qos);
191
192 /* The only value we must override it the baudrate */
193
194 /* FIXME: The HP HDLS-1100 does not support 1152000! */
195 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
196 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
197
198 /* The HP HDLS-1100 needs 1 ms according to the specs */
199 self->qos.min_turn_time.bits = qos_mtt_bits;
200 irda_qos_bits_to_value(&self->qos);
201
202 /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
203 self->rx_buff.truesize = 14384;
204 self->tx_buff.truesize = 4000;
205
206 /* Allocate memory if needed */
207 self->rx_buff.head =
208 dma_zalloc_coherent(NULL, self->rx_buff.truesize,
209 &self->rx_buff_dma, GFP_KERNEL);
210 if (self->rx_buff.head == NULL) {
211 err = -ENOMEM;
212 goto err_out1;
213 }
214
215 self->tx_buff.head =
216 dma_zalloc_coherent(NULL, self->tx_buff.truesize,
217 &self->tx_buff_dma, GFP_KERNEL);
218 if (self->tx_buff.head == NULL) {
219 err = -ENOMEM;
220 goto err_out2;
221 }
222
223 self->rx_buff.in_frame = FALSE;
224 self->rx_buff.state = OUTSIDE_FRAME;
225 self->tx_buff.data = self->tx_buff.head;
226 self->rx_buff.data = self->rx_buff.head;
227 self->netdev = dev;
228
229 dev->netdev_ops = &w83977_netdev_ops;
230
231 err = register_netdev(dev);
232 if (err) {
233 net_err_ratelimited("%s(), register_netdevice() failed!\n",
234 __func__);
235 goto err_out3;
236 }
237 net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
238
239 /* Need to store self somewhere */
240 dev_self[i] = self;
241
242 return 0;
243err_out3:
244 dma_free_coherent(NULL, self->tx_buff.truesize,
245 self->tx_buff.head, self->tx_buff_dma);
246err_out2:
247 dma_free_coherent(NULL, self->rx_buff.truesize,
248 self->rx_buff.head, self->rx_buff_dma);
249err_out1:
250 free_netdev(dev);
251err_out:
252 release_region(iobase, CHIP_IO_EXTENT);
253 return err;
254}
255
256/*
257 * Function w83977af_close (self)
258 *
259 * Close driver instance
260 *
261 */
262static int w83977af_close(struct w83977af_ir *self)
263{
264 int iobase;
265
266 iobase = self->io.fir_base;
267
268#ifdef CONFIG_USE_W977_PNP
269 /* enter PnP configuration mode */
270 w977_efm_enter(efio);
271
272 w977_select_device(W977_DEVICE_IR, efio);
273
274 /* Deactivate device */
275 w977_write_reg(0x30, 0x00, efio);
276
277 w977_efm_exit(efio);
278#endif /* CONFIG_USE_W977_PNP */
279
280 /* Remove netdevice */
281 unregister_netdev(self->netdev);
282
283 /* Release the PORT that this driver is using */
284 pr_debug("%s(), Releasing Region %03x\n",
285 __func__ , self->io.fir_base);
286 release_region(self->io.fir_base, self->io.fir_ext);
287
288 if (self->tx_buff.head)
289 dma_free_coherent(NULL, self->tx_buff.truesize,
290 self->tx_buff.head, self->tx_buff_dma);
291
292 if (self->rx_buff.head)
293 dma_free_coherent(NULL, self->rx_buff.truesize,
294 self->rx_buff.head, self->rx_buff_dma);
295
296 free_netdev(self->netdev);
297
298 return 0;
299}
300
301static int w83977af_probe(int iobase, int irq, int dma)
302{
303 int version;
304 int i;
305
306 for (i=0; i < 2; i++) {
307#ifdef CONFIG_USE_W977_PNP
308 /* Enter PnP configuration mode */
309 w977_efm_enter(efbase[i]);
310
311 w977_select_device(W977_DEVICE_IR, efbase[i]);
312
313 /* Configure PnP port, IRQ, and DMA channel */
314 w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
315 w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
316
317 w977_write_reg(0x70, irq, efbase[i]);
318#ifdef CONFIG_ARCH_NETWINDER
319 /* Netwinder uses 1 higher than Linux */
320 w977_write_reg(0x74, dma+1, efbase[i]);
321#else
322 w977_write_reg(0x74, dma, efbase[i]);
323#endif /* CONFIG_ARCH_NETWINDER */
324 w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
325
326 /* Set append hardware CRC, enable IR bank selection */
327 w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
328
329 /* Activate device */
330 w977_write_reg(0x30, 0x01, efbase[i]);
331
332 w977_efm_exit(efbase[i]);
333#endif /* CONFIG_USE_W977_PNP */
334 /* Disable Advanced mode */
335 switch_bank(iobase, SET2);
336 outb(iobase+2, 0x00);
337
338 /* Turn on UART (global) interrupts */
339 switch_bank(iobase, SET0);
340 outb(HCR_EN_IRQ, iobase+HCR);
341
342 /* Switch to advanced mode */
343 switch_bank(iobase, SET2);
344 outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
345
346 /* Set default IR-mode */
347 switch_bank(iobase, SET0);
348 outb(HCR_SIR, iobase+HCR);
349
350 /* Read the Advanced IR ID */
351 switch_bank(iobase, SET3);
352 version = inb(iobase+AUID);
353
354 /* Should be 0x1? */
355 if (0x10 == (version & 0xf0)) {
356 efio = efbase[i];
357
358 /* Set FIFO size to 32 */
359 switch_bank(iobase, SET2);
360 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
361
362 /* Set FIFO threshold to TX17, RX16 */
363 switch_bank(iobase, SET0);
364 outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
365 UFR_EN_FIFO,iobase+UFR);
366
367 /* Receiver frame length */
368 switch_bank(iobase, SET4);
369 outb(2048 & 0xff, iobase+6);
370 outb((2048 >> 8) & 0x1f, iobase+7);
371
372 /*
373 * Init HP HSDL-1100 transceiver.
374 *
375 * Set IRX_MSL since we have 2 * receive paths IRRX,
376 * and IRRXH. Clear IRSL0D since we want IRSL0 * to
377 * be a input pin used for IRRXH
378 *
379 * IRRX pin 37 connected to receiver
380 * IRTX pin 38 connected to transmitter
381 * FIRRX pin 39 connected to receiver (IRSL0)
382 * CIRRX pin 40 connected to pin 37
383 */
384 switch_bank(iobase, SET7);
385 outb(0x40, iobase+7);
386
387 net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
388 version);
389
390 return 0;
391 } else {
392 /* Try next extented function register address */
393 pr_debug("%s(), Wrong chip version", __func__);
394 }
395 }
396 return -1;
397}
398
399static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
400{
401 int ir_mode = HCR_SIR;
402 int iobase;
403 __u8 set;
404
405 iobase = self->io.fir_base;
406
407 /* Update accounting for new speed */
408 self->io.speed = speed;
409
410 /* Save current bank */
411 set = inb(iobase+SSR);
412
413 /* Disable interrupts */
414 switch_bank(iobase, SET0);
415 outb(0, iobase+ICR);
416
417 /* Select Set 2 */
418 switch_bank(iobase, SET2);
419 outb(0x00, iobase+ABHL);
420
421 switch (speed) {
422 case 9600: outb(0x0c, iobase+ABLL); break;
423 case 19200: outb(0x06, iobase+ABLL); break;
424 case 38400: outb(0x03, iobase+ABLL); break;
425 case 57600: outb(0x02, iobase+ABLL); break;
426 case 115200: outb(0x01, iobase+ABLL); break;
427 case 576000:
428 ir_mode = HCR_MIR_576;
429 pr_debug("%s(), handling baud of 576000\n", __func__);
430 break;
431 case 1152000:
432 ir_mode = HCR_MIR_1152;
433 pr_debug("%s(), handling baud of 1152000\n", __func__);
434 break;
435 case 4000000:
436 ir_mode = HCR_FIR;
437 pr_debug("%s(), handling baud of 4000000\n", __func__);
438 break;
439 default:
440 ir_mode = HCR_FIR;
441 pr_debug("%s(), unknown baud rate of %d\n", __func__ , speed);
442 break;
443 }
444
445 /* Set speed mode */
446 switch_bank(iobase, SET0);
447 outb(ir_mode, iobase+HCR);
448
449 /* set FIFO size to 32 */
450 switch_bank(iobase, SET2);
451 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
452
453 /* set FIFO threshold to TX17, RX16 */
454 switch_bank(iobase, SET0);
455 outb(0x00, iobase+UFR); /* Reset */
456 outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
457 outb(0xa7, iobase+UFR);
458
459 netif_wake_queue(self->netdev);
460
461 /* Enable some interrupts so we can receive frames */
462 switch_bank(iobase, SET0);
463 if (speed > PIO_MAX_SPEED) {
464 outb(ICR_EFSFI, iobase+ICR);
465 w83977af_dma_receive(self);
466 } else
467 outb(ICR_ERBRI, iobase+ICR);
468
469 /* Restore SSR */
470 outb(set, iobase+SSR);
471}
472
473/*
474 * Function w83977af_hard_xmit (skb, dev)
475 *
476 * Sets up a DMA transfer to send the current frame.
477 *
478 */
479static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
480 struct net_device *dev)
481{
482 struct w83977af_ir *self;
483 __s32 speed;
484 int iobase;
485 __u8 set;
486 int mtt;
487
488 self = netdev_priv(dev);
489
490 iobase = self->io.fir_base;
491
492 pr_debug("%s(%ld), skb->len=%d\n", __func__ , jiffies,
493 (int)skb->len);
494
495 /* Lock transmit buffer */
496 netif_stop_queue(dev);
497
498 /* Check if we need to change the speed */
499 speed = irda_get_next_speed(skb);
500 if ((speed != self->io.speed) && (speed != -1)) {
501 /* Check for empty frame */
502 if (!skb->len) {
503 w83977af_change_speed(self, speed);
504 dev_kfree_skb(skb);
505 return NETDEV_TX_OK;
506 } else
507 self->new_speed = speed;
508 }
509
510 /* Save current set */
511 set = inb(iobase+SSR);
512
513 /* Decide if we should use PIO or DMA transfer */
514 if (self->io.speed > PIO_MAX_SPEED) {
515 self->tx_buff.data = self->tx_buff.head;
516 skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
517 self->tx_buff.len = skb->len;
518
519 mtt = irda_get_mtt(skb);
520 pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
521 if (mtt)
522 udelay(mtt);
523
524 /* Enable DMA interrupt */
525 switch_bank(iobase, SET0);
526 outb(ICR_EDMAI, iobase+ICR);
527 w83977af_dma_write(self, iobase);
528 } else {
529 self->tx_buff.data = self->tx_buff.head;
530 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
531 self->tx_buff.truesize);
532
533 /* Add interrupt on tx low level (will fire immediately) */
534 switch_bank(iobase, SET0);
535 outb(ICR_ETXTHI, iobase+ICR);
536 }
537 dev_kfree_skb(skb);
538
539 /* Restore set register */
540 outb(set, iobase+SSR);
541
542 return NETDEV_TX_OK;
543}
544
545/*
546 * Function w83977af_dma_write (self, iobase)
547 *
548 * Send frame using DMA
549 *
550 */
551static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
552{
553 __u8 set;
554 pr_debug("%s(), len=%d\n", __func__ , self->tx_buff.len);
555
556 /* Save current set */
557 set = inb(iobase+SSR);
558
559 /* Disable DMA */
560 switch_bank(iobase, SET0);
561 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
562
563 /* Choose transmit DMA channel */
564 switch_bank(iobase, SET2);
565 outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
566 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
567 DMA_MODE_WRITE);
568 self->io.direction = IO_XMIT;
569
570 /* Enable DMA */
571 switch_bank(iobase, SET0);
572 outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
573
574 /* Restore set register */
575 outb(set, iobase+SSR);
576}
577
578/*
579 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
580 *
581 *
582 *
583 */
584static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
585{
586 int actual = 0;
587 __u8 set;
588
589 /* Save current bank */
590 set = inb(iobase+SSR);
591
592 switch_bank(iobase, SET0);
593 if (!(inb_p(iobase+USR) & USR_TSRE)) {
594 pr_debug("%s(), warning, FIFO not empty yet!\n", __func__);
595
596 fifo_size -= 17;
597 pr_debug("%s(), %d bytes left in tx fifo\n",
598 __func__ , fifo_size);
599 }
600
601 /* Fill FIFO with current frame */
602 while ((fifo_size-- > 0) && (actual < len)) {
603 /* Transmit next byte */
604 outb(buf[actual++], iobase+TBR);
605 }
606
607 pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
608 __func__ , fifo_size, actual, len);
609
610 /* Restore bank */
611 outb(set, iobase+SSR);
612
613 return actual;
614}
615
616/*
617 * Function w83977af_dma_xmit_complete (self)
618 *
619 * The transfer of a frame in finished. So do the necessary things
620 *
621 *
622 */
623static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
624{
625 int iobase;
626 __u8 set;
627
628 pr_debug("%s(%ld)\n", __func__ , jiffies);
629
630 IRDA_ASSERT(self != NULL, return;);
631
632 iobase = self->io.fir_base;
633
634 /* Save current set */
635 set = inb(iobase+SSR);
636
637 /* Disable DMA */
638 switch_bank(iobase, SET0);
639 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
640
641 /* Check for underrun! */
642 if (inb(iobase+AUDR) & AUDR_UNDR) {
643 pr_debug("%s(), Transmit underrun!\n", __func__);
644
645 self->netdev->stats.tx_errors++;
646 self->netdev->stats.tx_fifo_errors++;
647
648 /* Clear bit, by writing 1 to it */
649 outb(AUDR_UNDR, iobase+AUDR);
650 } else
651 self->netdev->stats.tx_packets++;
652
653
654 if (self->new_speed) {
655 w83977af_change_speed(self, self->new_speed);
656 self->new_speed = 0;
657 }
658
659 /* Unlock tx_buff and request another frame */
660 /* Tell the network layer, that we want more frames */
661 netif_wake_queue(self->netdev);
662
663 /* Restore set */
664 outb(set, iobase+SSR);
665}
666
667/*
668 * Function w83977af_dma_receive (self)
669 *
670 * Get ready for receiving a frame. The device will initiate a DMA
671 * if it starts to receive a frame.
672 *
673 */
674static int w83977af_dma_receive(struct w83977af_ir *self)
675{
676 int iobase;
677 __u8 set;
678#ifdef CONFIG_ARCH_NETWINDER
679 unsigned long flags;
680 __u8 hcr;
681#endif
682 IRDA_ASSERT(self != NULL, return -1;);
683
684 pr_debug("%s\n", __func__);
685
686 iobase= self->io.fir_base;
687
688 /* Save current set */
689 set = inb(iobase+SSR);
690
691 /* Disable DMA */
692 switch_bank(iobase, SET0);
693 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
694
695 /* Choose DMA Rx, DMA Fairness, and Advanced mode */
696 switch_bank(iobase, SET2);
697 outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
698 iobase+ADCR1);
699
700 self->io.direction = IO_RECV;
701 self->rx_buff.data = self->rx_buff.head;
702
703#ifdef CONFIG_ARCH_NETWINDER
704 spin_lock_irqsave(&self->lock, flags);
705
706 disable_dma(self->io.dma);
707 clear_dma_ff(self->io.dma);
708 set_dma_mode(self->io.dma, DMA_MODE_READ);
709 set_dma_addr(self->io.dma, self->rx_buff_dma);
710 set_dma_count(self->io.dma, self->rx_buff.truesize);
711#else
712 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
713 DMA_MODE_READ);
714#endif
715 /*
716 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
717 * important that we don't reset the Tx FIFO since it might not
718 * be finished transmitting yet
719 */
720 switch_bank(iobase, SET0);
721 outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
722 self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
723
724 /* Enable DMA */
725 switch_bank(iobase, SET0);
726#ifdef CONFIG_ARCH_NETWINDER
727 hcr = inb(iobase+HCR);
728 outb(hcr | HCR_EN_DMA, iobase+HCR);
729 enable_dma(self->io.dma);
730 spin_unlock_irqrestore(&self->lock, flags);
731#else
732 outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
733#endif
734 /* Restore set */
735 outb(set, iobase+SSR);
736
737 return 0;
738}
739
740/*
741 * Function w83977af_receive_complete (self)
742 *
743 * Finished with receiving a frame
744 *
745 */
746static int w83977af_dma_receive_complete(struct w83977af_ir *self)
747{
748 struct sk_buff *skb;
749 struct st_fifo *st_fifo;
750 int len;
751 int iobase;
752 __u8 set;
753 __u8 status;
754
755 pr_debug("%s\n", __func__);
756
757 st_fifo = &self->st_fifo;
758
759 iobase = self->io.fir_base;
760
761 /* Save current set */
762 set = inb(iobase+SSR);
763
764 iobase = self->io.fir_base;
765
766 /* Read status FIFO */
767 switch_bank(iobase, SET5);
768 while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
769 st_fifo->entries[st_fifo->tail].status = status;
770
771 st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
772 st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
773
774 st_fifo->tail++;
775 st_fifo->len++;
776 }
777
	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->netdev->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->netdev->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->netdev->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->netdev->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->netdev->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->netdev->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->netdev->stats.rx_fifo_errors++;
		} else {
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase+USR) & USR_RDR)
				udelay(80); /* Should be enough!? */

			skb = dev_alloc_skb(len+1);
			if (skb == NULL) {
				printk(KERN_INFO
				       "%s(), memory squeeze, dropping frame.\n", __func__);
				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;
			}

			/* Align to 20 bytes */
			skb_reserve(skb, 1);

			/* Copy frame without CRC */
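			/* SIR and MIR frames end with a 16-bit CRC, FIR frames with a 32-bit CRC */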
			if (self->io.speed < 4000000) {
				skb_put(skb, len - 2);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 2);
			} else {
				skb_put(skb, len - 4);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 4);
			}

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_packets++;

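			/* Deliver the frame to the network stack */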
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
	}
	/* Restore set register */
	outb(set, iobase+SSR);

	return TRUE;
}

/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
	__u8 byte = 0x00;
	int iobase;

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RBR);
		async_unwrap_char(self->netdev, &self->netdev->stats,
				  &self->rx_buff, byte);
	} while (inb(iobase+USR) & USR_RDR); /* Data available */
}

/*
 * Function w83977af_sir_interrupt (self, isr)
 *
 *    Handle SIR interrupt
 *
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	pr_debug("%s(), isr=%#x\n", __func__, isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;
		} else {
			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->netdev->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed? */
		if (self->new_speed) {
			pr_debug("%s(), Changing speed!\n", __func__);
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
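	/* The interrupt handler writes new_icr back to ICR to re-enable these sources */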
	return new_icr;
}

/*
 * Function w83977af_fir_interrupt (self, isr)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
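	/* Save current set */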
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {
			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
		/* outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/* Prepare for receive
		 *
		 * ** Netwinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
		/* } */
	}

	/* Restore set */
	outb(set, iobase+SSR);

	return new_icr;
}

/*
 * Function w83977af_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct w83977af_ir *self;
	__u8 set, icr, isr;
	int iobase;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase+SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase+ICR);
	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */

	outb(0, iobase+ICR); /* Disable interrupts */

	if (isr) {
		/* Dispatch interrupt handler for the current speed */
		if (self->io.speed > PIO_MAX_SPEED)
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);
	}

	outb(icr, iobase+ICR); /* Restore (new) interrupts */
	outb(set, iobase+SSR); /* Restore bank register */
	return IRQ_RETVAL(isr);
}

/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
	int status = FALSE;
	int iobase;
	__u8 set;

	IRDA_ASSERT(self != NULL, return FALSE;);

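	/* FIR/MIR: check the chip Rx FIFO; SIR: check the async unwrap state */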
	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if rx FIFO is not empty */
		set = inb(iobase+SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */
			status = TRUE;
		}
		outb(set, iobase+SSR);
	} else {
		status = (self->rx_buff.state != OUTSIDE_FRAME);
	}

	return status;
}

/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 *
 */
static int w83977af_net_open(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	char hwname[32];
	__u8 set;

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

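	/* Grab the interrupt line first; give up with -EAGAIN if it is already in use */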
	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
			(void *) dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else {
		outb(ICR_ERBRI, iobase+ICR);
	}

	/* Restore bank register */
	outb(set, iobase+SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	return 0;
}

/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 *
 */
static int w83977af_net_close(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	__u8 set;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

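	/* Stop any DMA transfer in progress before freeing the channel */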
	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct w83977af_ir *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return -1;);

	pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		w83977af_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");

module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");

/*
 * Function init_module (void)
 *
 *    Initialize the W83977AF module
 *
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *    Clean up the W83977AF module
 *
 */
module_exit(w83977af_cleanup);