   1/*********************************************************************
   2 *
   3 * Filename:      w83977af_ir.c
   4 * Version:       1.0
   5 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
   6 * Status:        Experimental.
   7 * Author:        Paul VanderSpek
   8 * Created at:    Wed Nov  4 11:46:16 1998
   9 * Modified at:   Fri Jan 28 12:10:59 2000
  10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
  11 *
  12 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
  13 *     Copyright (c) 1998-1999 Rebel.com
  14 *
  15 *     This program is free software; you can redistribute it and/or
  16 *     modify it under the terms of the GNU General Public License as
  17 *     published by the Free Software Foundation; either version 2 of
  18 *     the License, or (at your option) any later version.
  19 *
  20 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
  21 *     warranty for any of this software. This material is provided "AS-IS"
  22 *     and at no charge.
  23 *
  24 *     If you find bugs in this file, it's very likely that the same bug
  25 *     will also be in pc87108.c since the implementations are quite
  26 *     similar.
  27 *
  28 *     Notice that all functions that need to access the chip in _any_
  29 *     way must save the BSR register on entry and restore it on exit.
  30 *     It is _very_ important to follow this policy!
  31 *
  32 *         __u8 bank;
  33 *
  34 *         bank = inb( iobase+BSR);
  35 *
  36 *         do_your_stuff_here();
  37 *
  38 *         outb( bank, iobase+BSR);
  39 *
  40 ********************************************************************/
  41
  42#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  43
  44#include <linux/module.h>
  45#include <linux/kernel.h>
  46#include <linux/types.h>
  47#include <linux/skbuff.h>
  48#include <linux/netdevice.h>
  49#include <linux/ioport.h>
  50#include <linux/delay.h>
  51#include <linux/init.h>
  52#include <linux/interrupt.h>
  53#include <linux/rtnetlink.h>
  54#include <linux/dma-mapping.h>
  55#include <linux/gfp.h>
  56
  57#include <asm/io.h>
  58#include <asm/dma.h>
  59#include <asm/byteorder.h>
  60
  61#include <net/irda/irda.h>
  62#include <net/irda/wrapper.h>
  63#include <net/irda/irda_device.h>
  64#include "w83977af.h"
  65#include "w83977af_ir.h"
  66
  67#define CONFIG_USE_W977_PNP        /* Currently needed */
  68#define PIO_MAX_SPEED       115200
  69
  70static char *driver_name = "w83977af_ir";
  71static int  qos_mtt_bits = 0x07;   /* 1 ms or more */
  72
  73#define CHIP_IO_EXTENT 8
  74
  75static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
  76#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
  77static unsigned int irq[] = { 6, 0, 0, 0 };
  78#else
  79static unsigned int irq[] = { 11, 0, 0, 0 };
  80#endif
  81static unsigned int dma[] = { 1, 0, 0, 0 };
  82static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
  83static unsigned int efio = W977_EFIO_BASE;
  84
  85static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
  86
  87/* Some prototypes */
  88static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
  89			  unsigned int dma);
  90static int  w83977af_close(struct w83977af_ir *self);
  91static int  w83977af_probe(int iobase, int irq, int dma);
  92static int  w83977af_dma_receive(struct w83977af_ir *self);
  93static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
  94static netdev_tx_t  w83977af_hard_xmit(struct sk_buff *skb,
  95				       struct net_device *dev);
  96static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
  97static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
  98static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
  99static int  w83977af_is_receiving(struct w83977af_ir *self);
 100
 101static int  w83977af_net_open(struct net_device *dev);
 102static int  w83977af_net_close(struct net_device *dev);
 103static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 104
 105/*
 106 * Function w83977af_init ()
 107 *
 108 *    Initialize chip. Just try to find out how many chips we are dealing with
 109 *    and where they are
 110 */
 111static int __init w83977af_init(void)
 112{
 113	int i;
 114
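     	/* Unused io[] slots stay at ~0, which fails the < 2000 test and ends the probe loop */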
 115	for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
 116		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
 117			return 0;
 118	}
 119	return -ENODEV;
 120}
 121
 122/*
 123 * Function w83977af_cleanup ()
 124 *
 125 *    Close all configured chips
 126 *
 127 */
 128static void __exit w83977af_cleanup(void)
 129{
 130	int i;
 131
 132	for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
 133		if (dev_self[i])
 134			w83977af_close(dev_self[i]);
 135	}
 136}
 137
 138static const struct net_device_ops w83977_netdev_ops = {
 139	.ndo_open       = w83977af_net_open,
 140	.ndo_stop       = w83977af_net_close,
 141	.ndo_start_xmit = w83977af_hard_xmit,
 142	.ndo_do_ioctl   = w83977af_net_ioctl,
 143};
 144
 145/*
  146 * Function w83977af_open (i, iobase, irq, dma)
 147 *
 148 *    Open driver instance
 149 *
 150 */
 151static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
 152			 unsigned int dma)
 153{
 154	struct net_device *dev;
 155	struct w83977af_ir *self;
 156	int err;
 157
 158	/* Lock the port that we need */
 159	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
 160		pr_debug("%s: can't get iobase of 0x%03x\n",
 161			 __func__, iobase);
 162		return -ENODEV;
 163	}
 164
 165	if (w83977af_probe(iobase, irq, dma) == -1) {
 166		err = -1;
 167		goto err_out;
 168	}
 169	/*
 170	 *  Allocate new instance of the driver
 171	 */
 172	dev = alloc_irdadev(sizeof(struct w83977af_ir));
 173	if (!dev) {
 174		pr_err("IrDA: Can't allocate memory for IrDA control block!\n");
 175		err = -ENOMEM;
 176		goto err_out;
 177	}
 178
 179	self = netdev_priv(dev);
 180	spin_lock_init(&self->lock);
 181
 182	/* Initialize IO */
 183	self->io.fir_base = iobase;
 184	self->io.irq = irq;
 185	self->io.fir_ext = CHIP_IO_EXTENT;
 186	self->io.dma = dma;
 187	self->io.fifo_size = 32;
 188
 189	/* Initialize QoS for this device */
 190	irda_init_max_qos_capabilies(&self->qos);
 191
  192	/* The only value we must override is the baudrate */
 193
  194	/* FIXME: The HP HSDL-1100 does not support 1152000! */
 195	self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
 196		IR_115200 | IR_576000 | IR_1152000 | (IR_4000000 << 8);
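     	/* (the 4 Mb/s bit belongs in the high byte of the 16-bit baud_rate field, hence the << 8) */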
 197
  198	/* The HP HSDL-1100 needs 1 ms according to the specs */
 199	self->qos.min_turn_time.bits = qos_mtt_bits;
 200	irda_qos_bits_to_value(&self->qos);
 201
 202	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
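     	/* e.g. (2048 + 6) * 7 + 6 = 14384 for 2048-byte frames and a window size of 7 */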
 203	self->rx_buff.truesize = 14384;
 204	self->tx_buff.truesize = 4000;
 205
 206	/* Allocate memory if needed */
 207	self->rx_buff.head =
 208		dma_zalloc_coherent(NULL, self->rx_buff.truesize,
 209				    &self->rx_buff_dma, GFP_KERNEL);
 210	if (!self->rx_buff.head) {
 211		err = -ENOMEM;
 212		goto err_out1;
 213	}
 214
 215	self->tx_buff.head =
 216		dma_zalloc_coherent(NULL, self->tx_buff.truesize,
 217				    &self->tx_buff_dma, GFP_KERNEL);
 218	if (!self->tx_buff.head) {
 219		err = -ENOMEM;
 220		goto err_out2;
 221	}
 222
 223	self->rx_buff.in_frame = FALSE;
 224	self->rx_buff.state = OUTSIDE_FRAME;
 225	self->tx_buff.data = self->tx_buff.head;
 226	self->rx_buff.data = self->rx_buff.head;
 227	self->netdev = dev;
 228
 229	dev->netdev_ops	= &w83977_netdev_ops;
 230
 231	err = register_netdev(dev);
 232	if (err) {
  233		net_err_ratelimited("%s: register_netdev() failed!\n",
 234				    __func__);
 235		goto err_out3;
 236	}
 237	net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
 238
 239	/* Need to store self somewhere */
 240	dev_self[i] = self;
 241
 242	return 0;
 243err_out3:
 244	dma_free_coherent(NULL, self->tx_buff.truesize,
 245			  self->tx_buff.head, self->tx_buff_dma);
 246err_out2:
 247	dma_free_coherent(NULL, self->rx_buff.truesize,
 248			  self->rx_buff.head, self->rx_buff_dma);
 249err_out1:
 250	free_netdev(dev);
 251err_out:
 252	release_region(iobase, CHIP_IO_EXTENT);
 253	return err;
 254}
 255
 256/*
 257 * Function w83977af_close (self)
 258 *
 259 *    Close driver instance
 260 *
 261 */
 262static int w83977af_close(struct w83977af_ir *self)
 263{
 264	int iobase;
 265
 266	iobase = self->io.fir_base;
 267
 268#ifdef CONFIG_USE_W977_PNP
 269	/* enter PnP configuration mode */
 270	w977_efm_enter(efio);
 271
 272	w977_select_device(W977_DEVICE_IR, efio);
 273
 274	/* Deactivate device */
 275	w977_write_reg(0x30, 0x00, efio);
 276
 277	w977_efm_exit(efio);
 278#endif /* CONFIG_USE_W977_PNP */
 279
 280	/* Remove netdevice */
 281	unregister_netdev(self->netdev);
 282
 283	/* Release the PORT that this driver is using */
 284	pr_debug("%s: Releasing Region %03x\n", __func__, self->io.fir_base);
 285	release_region(self->io.fir_base, self->io.fir_ext);
 286
 287	if (self->tx_buff.head)
 288		dma_free_coherent(NULL, self->tx_buff.truesize,
 289				  self->tx_buff.head, self->tx_buff_dma);
 290
 291	if (self->rx_buff.head)
 292		dma_free_coherent(NULL, self->rx_buff.truesize,
 293				  self->rx_buff.head, self->rx_buff_dma);
 294
 295	free_netdev(self->netdev);
 296
 297	return 0;
 298}
 299
 300static int w83977af_probe(int iobase, int irq, int dma)
 301{
 302	int version;
 303	int i;
 304
 305	for (i = 0; i < 2; i++) {
 306#ifdef CONFIG_USE_W977_PNP
 307		/* Enter PnP configuration mode */
 308		w977_efm_enter(efbase[i]);
 309
 310		w977_select_device(W977_DEVICE_IR, efbase[i]);
 311
 312		/* Configure PnP port, IRQ, and DMA channel */
 313		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
 314		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
 315
 316		w977_write_reg(0x70, irq, efbase[i]);
 317#ifdef CONFIG_ARCH_NETWINDER
  318		/* The NetWinder numbers DMA channels one higher than Linux */
 319		w977_write_reg(0x74, dma + 1, efbase[i]);
 320#else
 321		w977_write_reg(0x74, dma, efbase[i]);
 322#endif /* CONFIG_ARCH_NETWINDER */
 323		w977_write_reg(0x75, 0x04, efbase[i]);/* Disable Tx DMA */
 324
 325		/* Set append hardware CRC, enable IR bank selection */
 326		w977_write_reg(0xf0, APEDCRC | ENBNKSEL, efbase[i]);
 327
 328		/* Activate device */
 329		w977_write_reg(0x30, 0x01, efbase[i]);
 330
 331		w977_efm_exit(efbase[i]);
 332#endif /* CONFIG_USE_W977_PNP */
 333		/* Disable Advanced mode */
 334		switch_bank(iobase, SET2);
  335		outb(0x00, iobase + 2);
 336
 337		/* Turn on UART (global) interrupts */
 338		switch_bank(iobase, SET0);
 339		outb(HCR_EN_IRQ, iobase + HCR);
 340
 341		/* Switch to advanced mode */
 342		switch_bank(iobase, SET2);
 343		outb(inb(iobase + ADCR1) | ADCR1_ADV_SL, iobase + ADCR1);
 344
 345		/* Set default IR-mode */
 346		switch_bank(iobase, SET0);
 347		outb(HCR_SIR, iobase + HCR);
 348
 349		/* Read the Advanced IR ID */
 350		switch_bank(iobase, SET3);
 351		version = inb(iobase + AUID);
 352
 353		/* Should be 0x1? */
 354		if (0x10 == (version & 0xf0)) {
 355			efio = efbase[i];
 356
 357			/* Set FIFO size to 32 */
 358			switch_bank(iobase, SET2);
 359			outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
 360
 361			/* Set FIFO threshold to TX17, RX16 */
 362			switch_bank(iobase, SET0);
 363			outb(UFR_RXTL | UFR_TXTL | UFR_TXF_RST | UFR_RXF_RST |
 364			     UFR_EN_FIFO, iobase + UFR);
 365
 366			/* Receiver frame length */
 367			switch_bank(iobase, SET4);
 368			outb(2048 & 0xff, iobase + 6);
 369			outb((2048 >> 8) & 0x1f, iobase + 7);
 370
 371			/*
 372			 * Init HP HSDL-1100 transceiver.
 373			 *
  374			 * Set IRX_MSL since we have two receive paths, IRRX
  375			 * and IRRXH. Clear IRSL0D since we want IRSL0 to be
  376			 * an input pin used for IRRXH
 377			 *
 378			 *   IRRX  pin 37 connected to receiver
 379			 *   IRTX  pin 38 connected to transmitter
 380			 *   FIRRX pin 39 connected to receiver      (IRSL0)
 381			 *   CIRRX pin 40 connected to pin 37
 382			 */
 383			switch_bank(iobase, SET7);
 384			outb(0x40, iobase + 7);
 385
 386			net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
 387					     version);
 388
 389			return 0;
 390		} else {
  391			/* Try next extended function register address */
 392			pr_debug("%s: Wrong chip version\n", __func__);
 393		}
 394	}
 395	return -1;
 396}
 397
 398static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
 399{
 400	int ir_mode = HCR_SIR;
 401	int iobase;
 402	__u8 set;
 403
 404	iobase = self->io.fir_base;
 405
 406	/* Update accounting for new speed */
 407	self->io.speed = speed;
 408
 409	/* Save current bank */
 410	set = inb(iobase + SSR);
 411
 412	/* Disable interrupts */
 413	switch_bank(iobase, SET0);
 414	outb(0, iobase + ICR);
 415
 416	/* Select Set 2 */
 417	switch_bank(iobase, SET2);
 418	outb(0x00, iobase + ABHL);
 419
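     	/* ABLL gets the divisor from 115200 baud: 115200/9600 = 0x0c, /19200 = 0x06, ..., /115200 = 0x01 */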
 420	switch (speed) {
 421	case 9600:   outb(0x0c, iobase + ABLL); break;
 422	case 19200:  outb(0x06, iobase + ABLL); break;
 423	case 38400:  outb(0x03, iobase + ABLL); break;
 424	case 57600:  outb(0x02, iobase + ABLL); break;
 425	case 115200: outb(0x01, iobase + ABLL); break;
 426	case 576000:
 427		ir_mode = HCR_MIR_576;
 428		pr_debug("%s: handling baud of 576000\n", __func__);
 429		break;
 430	case 1152000:
 431		ir_mode = HCR_MIR_1152;
 432		pr_debug("%s: handling baud of 1152000\n", __func__);
 433		break;
 434	case 4000000:
 435		ir_mode = HCR_FIR;
 436		pr_debug("%s: handling baud of 4000000\n", __func__);
 437		break;
 438	default:
 439		ir_mode = HCR_FIR;
 440		pr_debug("%s: unknown baud rate of %d\n", __func__, speed);
 441		break;
 442	}
 443
 444	/* Set speed mode */
 445	switch_bank(iobase, SET0);
 446	outb(ir_mode, iobase + HCR);
 447
 448	/* set FIFO size to 32 */
 449	switch_bank(iobase, SET2);
 450	outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
 451
 452	/* set FIFO threshold to TX17, RX16 */
 453	switch_bank(iobase, SET0);
 454	outb(0x00, iobase + UFR);        /* Reset */
 455	outb(UFR_EN_FIFO, iobase + UFR); /* First we must enable FIFO */
 456	outb(0xa7, iobase + UFR);
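     	/* 0xa7 presumably re-asserts the RXTL/TXTL thresholds with the FIFO enabled, as in w83977af_probe() */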
 457
 458	netif_wake_queue(self->netdev);
 459
 460	/* Enable some interrupts so we can receive frames */
 461	switch_bank(iobase, SET0);
 462	if (speed > PIO_MAX_SPEED) {
 463		outb(ICR_EFSFI, iobase + ICR);
 464		w83977af_dma_receive(self);
 465	} else {
 466		outb(ICR_ERBRI, iobase + ICR);
 467	}
 468
 469	/* Restore SSR */
 470	outb(set, iobase + SSR);
 471}
 472
 473/*
 474 * Function w83977af_hard_xmit (skb, dev)
 475 *
 476 *    Sets up a DMA transfer to send the current frame.
 477 *
 478 */
 479static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
 480				      struct net_device *dev)
 481{
 482	struct w83977af_ir *self;
 483	__s32 speed;
 484	int iobase;
 485	__u8 set;
 486	int mtt;
 487
 488	self = netdev_priv(dev);
 489
 490	iobase = self->io.fir_base;
 491
 492	pr_debug("%s: %ld, skb->len=%d\n", __func__, jiffies, (int)skb->len);
 493
 494	/* Lock transmit buffer */
 495	netif_stop_queue(dev);
 496
 497	/* Check if we need to change the speed */
 498	speed = irda_get_next_speed(skb);
 499	if ((speed != self->io.speed) && (speed != -1)) {
 500		/* Check for empty frame */
 501		if (!skb->len) {
 502			w83977af_change_speed(self, speed);
 503			dev_kfree_skb(skb);
 504			return NETDEV_TX_OK;
 505		}
 506		self->new_speed = speed;
 507	}
 508
 509	/* Save current set */
 510	set = inb(iobase + SSR);
 511
 512	/* Decide if we should use PIO or DMA transfer */
 513	if (self->io.speed > PIO_MAX_SPEED) {
 514		self->tx_buff.data = self->tx_buff.head;
 515		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
 516		self->tx_buff.len = skb->len;
 517
 518		mtt = irda_get_mtt(skb);
 519		pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt);
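     		/* mtt is in microseconds: use mdelay() for 1 ms or more, udelay() for shorter waits */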
 520		if (mtt > 1000)
 521			mdelay(mtt / 1000);
 522		else if (mtt)
 523			udelay(mtt);
 524
 525		/* Enable DMA interrupt */
 526		switch_bank(iobase, SET0);
 527		outb(ICR_EDMAI, iobase + ICR);
 528		w83977af_dma_write(self, iobase);
 529	} else {
 530		self->tx_buff.data = self->tx_buff.head;
 531		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
 532						   self->tx_buff.truesize);
 533
 534		/* Add interrupt on tx low level (will fire immediately) */
 535		switch_bank(iobase, SET0);
 536		outb(ICR_ETXTHI, iobase + ICR);
 537	}
 538	dev_kfree_skb(skb);
 539
 540	/* Restore set register */
 541	outb(set, iobase + SSR);
 542
 543	return NETDEV_TX_OK;
 544}
 545
 546/*
 547 * Function w83977af_dma_write (self, iobase)
 548 *
 549 *    Send frame using DMA
 550 *
 551 */
 552static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
 553{
 554	__u8 set;
 555
 556	pr_debug("%s: len=%d\n", __func__, self->tx_buff.len);
 557
 558	/* Save current set */
 559	set = inb(iobase + SSR);
 560
 561	/* Disable DMA */
 562	switch_bank(iobase, SET0);
 563	outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
 564
 565	/* Choose transmit DMA channel  */
 566	switch_bank(iobase, SET2);
 567	outb(ADCR1_D_CHSW | /*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase + ADCR1);
 568	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
 569		       DMA_MODE_WRITE);
 570	self->io.direction = IO_XMIT;
 571
 572	/* Enable DMA */
 573	switch_bank(iobase, SET0);
 574	outb(inb(iobase + HCR) | HCR_EN_DMA | HCR_TX_WT, iobase + HCR);
 575
 576	/* Restore set register */
 577	outb(set, iobase + SSR);
 578}
 579
 580/*
 581 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 582 *
  583 *    Fill the transmit FIFO with frame data using programmed I/O
 584 *
 585 */
 586static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
 587{
 588	int actual = 0;
 589	__u8 set;
 590
 591	/* Save current bank */
 592	set = inb(iobase + SSR);
 593
 594	switch_bank(iobase, SET0);
 595	if (!(inb_p(iobase + USR) & USR_TSRE)) {
 596		pr_debug("%s: warning, FIFO not empty yet!\n", __func__);
 597
 598		fifo_size -= 17;
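     		/* assume up to 17 bytes (the TX threshold) may still be queued in the FIFO */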
 599		pr_debug("%s: %d bytes left in tx fifo\n", __func__, fifo_size);
 600	}
 601
 602	/* Fill FIFO with current frame */
 603	while ((fifo_size-- > 0) && (actual < len)) {
 604		/* Transmit next byte */
 605		outb(buf[actual++], iobase + TBR);
 606	}
 607
 608	pr_debug("%s: fifo_size %d ; %d sent of %d\n",
 609		 __func__, fifo_size, actual, len);
 610
 611	/* Restore bank */
 612	outb(set, iobase + SSR);
 613
 614	return actual;
 615}
 616
 617/*
 618 * Function w83977af_dma_xmit_complete (self)
 619 *
  620 *    The transfer of a frame is finished, so do the necessary bookkeeping
 621 *
 622 *
 623 */
 624static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
 625{
 626	int iobase;
 627	__u8 set;
 628
 629	pr_debug("%s: %ld\n", __func__, jiffies);
 630
 631	IRDA_ASSERT(self, return;);
 632
 633	iobase = self->io.fir_base;
 634
 635	/* Save current set */
 636	set = inb(iobase + SSR);
 637
 638	/* Disable DMA */
 639	switch_bank(iobase, SET0);
 640	outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
 641
 642	/* Check for underrun! */
 643	if (inb(iobase + AUDR) & AUDR_UNDR) {
 644		pr_debug("%s: Transmit underrun!\n", __func__);
 645
 646		self->netdev->stats.tx_errors++;
 647		self->netdev->stats.tx_fifo_errors++;
 648
 649		/* Clear bit, by writing 1 to it */
 650		outb(AUDR_UNDR, iobase + AUDR);
 651	} else {
 652		self->netdev->stats.tx_packets++;
 653	}
 654
 655	if (self->new_speed) {
 656		w83977af_change_speed(self, self->new_speed);
 657		self->new_speed = 0;
 658	}
 659
 660	/* Unlock tx_buff and request another frame */
 661	/* Tell the network layer, that we want more frames */
 662	netif_wake_queue(self->netdev);
 663
 664	/* Restore set */
 665	outb(set, iobase + SSR);
 666}
 667
 668/*
 669 * Function w83977af_dma_receive (self)
 670 *
 671 *    Get ready for receiving a frame. The device will initiate a DMA
 672 *    if it starts to receive a frame.
 673 *
 674 */
 675static int w83977af_dma_receive(struct w83977af_ir *self)
 676{
 677	int iobase;
 678	__u8 set;
 679#ifdef CONFIG_ARCH_NETWINDER
 680	unsigned long flags;
 681	__u8 hcr;
 682#endif
 683	IRDA_ASSERT(self, return -1;);
 684
 685	pr_debug("%s\n", __func__);
 686
 687	iobase = self->io.fir_base;
 688
 689	/* Save current set */
 690	set = inb(iobase + SSR);
 691
 692	/* Disable DMA */
 693	switch_bank(iobase, SET0);
 694	outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
 695
 696	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
 697	switch_bank(iobase, SET2);
 698	outb((inb(iobase + ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/ | ADCR1_ADV_SL,
 699	     iobase + ADCR1);
 700
 701	self->io.direction = IO_RECV;
 702	self->rx_buff.data = self->rx_buff.head;
 703
 704#ifdef CONFIG_ARCH_NETWINDER
 705	spin_lock_irqsave(&self->lock, flags);
 706
 707	disable_dma(self->io.dma);
 708	clear_dma_ff(self->io.dma);
 709	set_dma_mode(self->io.dma, DMA_MODE_READ);
 710	set_dma_addr(self->io.dma, self->rx_buff_dma);
 711	set_dma_count(self->io.dma, self->rx_buff.truesize);
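     	/* open-coded DMA setup: enable_dma() is deferred until HCR_EN_DMA is set below */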
 712#else
 713	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
 714		       DMA_MODE_READ);
 715#endif
 716	/*
  717	 * Reset Rx FIFO. This will also flush the ST_FIFO; it's very
 718	 * important that we don't reset the Tx FIFO since it might not
 719	 * be finished transmitting yet
 720	 */
 721	switch_bank(iobase, SET0);
 722	outb(UFR_RXTL | UFR_TXTL | UFR_RXF_RST | UFR_EN_FIFO, iobase + UFR);
 723	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
 724
 725	/* Enable DMA */
 726	switch_bank(iobase, SET0);
 727#ifdef CONFIG_ARCH_NETWINDER
 728	hcr = inb(iobase + HCR);
 729	outb(hcr | HCR_EN_DMA, iobase + HCR);
 730	enable_dma(self->io.dma);
 731	spin_unlock_irqrestore(&self->lock, flags);
 732#else
 733	outb(inb(iobase + HCR) | HCR_EN_DMA, iobase + HCR);
 734#endif
 735	/* Restore set */
 736	outb(set, iobase + SSR);
 737
 738	return 0;
 739}
 740
 741/*
  742 * Function w83977af_dma_receive_complete (self)
 743 *
 744 *    Finished with receiving a frame
 745 *
 746 */
 747static int w83977af_dma_receive_complete(struct w83977af_ir *self)
 748{
 749	struct sk_buff *skb;
 750	struct st_fifo *st_fifo;
 751	int len;
 752	int iobase;
 753	__u8 set;
 754	__u8 status;
 755
 756	pr_debug("%s\n", __func__);
 757
 758	st_fifo = &self->st_fifo;
 759
 760	iobase = self->io.fir_base;
 761
 762	/* Save current set */
 763	set = inb(iobase + SSR);
 764
 765	iobase = self->io.fir_base;
 766
 767	/* Read status FIFO */
 768	switch_bank(iobase, SET5);
 769	while ((status = inb(iobase + FS_FO)) & FS_FO_FSFDR) {
 770		st_fifo->entries[st_fifo->tail].status = status;
 771
 772		st_fifo->entries[st_fifo->tail].len  = inb(iobase + RFLFL);
 773		st_fifo->entries[st_fifo->tail].len |= inb(iobase + RFLFH) << 8;
 774
 775		st_fifo->tail++;
 776		st_fifo->len++;
 777	}
 778
 779	while (st_fifo->len) {
 780		/* Get first entry */
 781		status = st_fifo->entries[st_fifo->head].status;
 782		len    = st_fifo->entries[st_fifo->head].len;
 783		st_fifo->head++;
 784		st_fifo->len--;
 785
 786		/* Check for errors */
 787		if (status & FS_FO_ERR_MSK) {
 788			if (status & FS_FO_LST_FR) {
 789				/* Add number of lost frames to stats */
 790				self->netdev->stats.rx_errors += len;
 791			} else {
 792				/* Skip frame */
 793				self->netdev->stats.rx_errors++;
 794
 795				self->rx_buff.data += len;
 796
 797				if (status & FS_FO_MX_LEX)
 798					self->netdev->stats.rx_length_errors++;
 799
 800				if (status & FS_FO_PHY_ERR)
 801					self->netdev->stats.rx_frame_errors++;
 802
 803				if (status & FS_FO_CRC_ERR)
 804					self->netdev->stats.rx_crc_errors++;
 805			}
 806			/* The errors below can be reported in both cases */
 807			if (status & FS_FO_RX_OV)
 808				self->netdev->stats.rx_fifo_errors++;
 809
 810			if (status & FS_FO_FSF_OV)
 811				self->netdev->stats.rx_fifo_errors++;
 812
 813		} else {
 814			/* Check if we have transferred all data to memory */
 815			switch_bank(iobase, SET0);
 816			if (inb(iobase + USR) & USR_RDR)
 817				udelay(80); /* Should be enough!? */
 818
 819			skb = dev_alloc_skb(len + 1);
 820			if (!skb)  {
 821				pr_info("%s: memory squeeze, dropping frame\n",
 822					__func__);
 823				/* Restore set register */
 824				outb(set, iobase + SSR);
 825
 826				return FALSE;
 827			}
 828
 829			/*  Align to 20 bytes */
 830			skb_reserve(skb, 1);
 831
 832			/* Copy frame without CRC */
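     			/* (MIR and slower use a 16-bit FCS, 4 Mb/s FIR a 32-bit FCS) */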
 833			if (self->io.speed < 4000000) {
 834				skb_put(skb, len - 2);
 835				skb_copy_to_linear_data(skb,
 836							self->rx_buff.data,
 837							len - 2);
 838			} else {
 839				skb_put(skb, len - 4);
 840				skb_copy_to_linear_data(skb,
 841							self->rx_buff.data,
 842							len - 4);
 843			}
 844
 845			/* Move to next frame */
 846			self->rx_buff.data += len;
 847			self->netdev->stats.rx_packets++;
 848
 849			skb->dev = self->netdev;
 850			skb_reset_mac_header(skb);
 851			skb->protocol = htons(ETH_P_IRDA);
 852			netif_rx(skb);
 853		}
 854	}
 855	/* Restore set register */
 856	outb(set, iobase + SSR);
 857
 858	return TRUE;
 859}
 860
 861/*
  862 * Function w83977af_pio_receive (self)
 863 *
 864 *    Receive all data in receiver FIFO
 865 *
 866 */
 867static void w83977af_pio_receive(struct w83977af_ir *self)
 868{
 869	__u8 byte = 0x00;
 870	int iobase;
 871
 872	IRDA_ASSERT(self, return;);
 873
 874	iobase = self->io.fir_base;
 875
 876	/*  Receive all characters in Rx FIFO */
 877	do {
 878		byte = inb(iobase + RBR);
 879		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
 880				  byte);
 881	} while (inb(iobase + USR) & USR_RDR); /* Data available */
 882}
 883
 884/*
  885 * Function w83977af_sir_interrupt (self, isr)
 886 *
 887 *    Handle SIR interrupt
 888 *
 889 */
 890static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
 891{
 892	int actual;
 893	__u8 new_icr = 0;
 894	__u8 set;
 895	int iobase;
 896
 897	pr_debug("%s: isr=%#x\n", __func__, isr);
 898
 899	iobase = self->io.fir_base;
 900	/* Transmit FIFO low on data */
 901	if (isr & ISR_TXTH_I) {
 902		/* Write data left in transmit buffer */
 903		actual = w83977af_pio_write(self->io.fir_base,
 904					    self->tx_buff.data,
 905					    self->tx_buff.len,
 906					    self->io.fifo_size);
 907
 908		self->tx_buff.data += actual;
 909		self->tx_buff.len  -= actual;
 910
 911		self->io.direction = IO_XMIT;
 912
 913		/* Check if finished */
 914		if (self->tx_buff.len > 0) {
 915			new_icr |= ICR_ETXTHI;
 916		} else {
 917			set = inb(iobase + SSR);
 918			switch_bank(iobase, SET0);
 919			outb(AUDR_SFEND, iobase + AUDR);
 920			outb(set, iobase + SSR);
 921
 922			self->netdev->stats.tx_packets++;
 923
 924			/* Feed me more packets */
 925			netif_wake_queue(self->netdev);
 926			new_icr |= ICR_ETBREI;
 927		}
 928	}
 929	/* Check if transmission has completed */
 930	if (isr & ISR_TXEMP_I) {
 931		/* Check if we need to change the speed? */
 932		if (self->new_speed) {
 933			pr_debug("%s: Changing speed!\n", __func__);
 934			w83977af_change_speed(self, self->new_speed);
 935			self->new_speed = 0;
 936		}
 937
 938		/* Turn around and get ready to receive some data */
 939		self->io.direction = IO_RECV;
 940		new_icr |= ICR_ERBRI;
 941	}
 942
 943	/* Rx FIFO threshold or timeout */
 944	if (isr & ISR_RXTH_I) {
 945		w83977af_pio_receive(self);
 946
 947		/* Keep receiving */
 948		new_icr |= ICR_ERBRI;
 949	}
 950	return new_icr;
 951}
 952
 953/*
  954 * Function w83977af_fir_interrupt (self, isr)
 955 *
 956 *    Handle MIR/FIR interrupt
 957 *
 958 */
 959static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
 960{
 961	__u8 new_icr = 0;
 962	__u8 set;
 963	int iobase;
 964
 965	iobase = self->io.fir_base;
 966	set = inb(iobase + SSR);
 967
 968	/* End of frame detected in FIFO */
 969	if (isr & (ISR_FEND_I | ISR_FSF_I)) {
 970		if (w83977af_dma_receive_complete(self)) {
 971			/* Wait for next status FIFO interrupt */
 972			new_icr |= ICR_EFSFI;
 973		} else {
 974			/* DMA not finished yet */
 975
 976			/* Set timer value, resolution 1 ms */
 977			switch_bank(iobase, SET4);
 978			outb(0x01, iobase + TMRL); /* 1 ms */
 979			outb(0x00, iobase + TMRH);
 980
 981			/* Start timer */
 982			outb(IR_MSL_EN_TMR, iobase + IR_MSL);
 983
 984			new_icr |= ICR_ETMRI;
 985		}
 986	}
 987	/* Timer finished */
 988	if (isr & ISR_TMR_I) {
 989		/* Disable timer */
 990		switch_bank(iobase, SET4);
 991		outb(0, iobase + IR_MSL);
 992
 993		/* Clear timer event */
 994		/* switch_bank(iobase, SET0); */
 995/*		outb(ASCR_CTE, iobase+ASCR); */
 996
 997		/* Check if this is a TX timer interrupt */
 998		if (self->io.direction == IO_XMIT) {
 999			w83977af_dma_write(self, iobase);
1000
1001			new_icr |= ICR_EDMAI;
1002		} else {
1003			/* Check if DMA has now finished */
1004			w83977af_dma_receive_complete(self);
1005
1006			new_icr |= ICR_EFSFI;
1007		}
1008	}
1009	/* Finished with DMA */
1010	if (isr & ISR_DMA_I) {
1011		w83977af_dma_xmit_complete(self);
1012
1013		/* Check if there are more frames to be transmitted */
1014		/* if (irda_device_txqueue_empty(self)) { */
1015
1016		/* Prepare for receive
1017		 *
1018		 * ** Netwinder Tx DMA likes that we do this anyway **
1019		 */
1020		w83977af_dma_receive(self);
1021		new_icr = ICR_EFSFI;
1022		/* } */
1023	}
1024
1025	/* Restore set */
1026	outb(set, iobase + SSR);
1027
1028	return new_icr;
1029}
1030
1031/*
 1032 * Function w83977af_interrupt (irq, dev_id)
1033 *
1034 *    An interrupt from the chip has arrived. Time to do some work
1035 *
1036 */
1037static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
1038{
1039	struct net_device *dev = dev_id;
1040	struct w83977af_ir *self;
1041	__u8 set, icr, isr;
1042	int iobase;
1043
1044	self = netdev_priv(dev);
1045
1046	iobase = self->io.fir_base;
1047
1048	/* Save current bank */
1049	set = inb(iobase + SSR);
1050	switch_bank(iobase, SET0);
1051
1052	icr = inb(iobase + ICR);
 1053	isr = inb(iobase + ISR) & icr; /* Keep only the interrupts we have enabled */
1054
1055	outb(0, iobase + ICR); /* Disable interrupts */
1056
1057	if (isr) {
1058		/* Dispatch interrupt handler for the current speed */
1059		if (self->io.speed > PIO_MAX_SPEED)
1060			icr = w83977af_fir_interrupt(self, isr);
1061		else
1062			icr = w83977af_sir_interrupt(self, isr);
1063	}
1064
1065	outb(icr, iobase + ICR);    /* Restore (new) interrupts */
1066	outb(set, iobase + SSR);    /* Restore bank register */
1067	return IRQ_RETVAL(isr);
1068}
1069
1070/*
1071 * Function w83977af_is_receiving (self)
1072 *
 1073 *    Return TRUE if we are currently receiving a frame
1074 *
1075 */
1076static int w83977af_is_receiving(struct w83977af_ir *self)
1077{
1078	int status = FALSE;
1079	int iobase;
1080	__u8 set;
1081
1082	IRDA_ASSERT(self, return FALSE;);
1083
1084	if (self->io.speed > 115200) {
1085		iobase = self->io.fir_base;
1086
1087		/* Check if rx FIFO is not empty */
1088		set = inb(iobase + SSR);
1089		switch_bank(iobase, SET2);
1090		if ((inb(iobase + RXFDTH) & 0x3f) != 0) {
1091			/* We are receiving something */
1092			status =  TRUE;
1093		}
1094		outb(set, iobase + SSR);
1095	} else {
1096		status = (self->rx_buff.state != OUTSIDE_FRAME);
1097	}
1098
1099	return status;
1100}
1101
1102/*
1103 * Function w83977af_net_open (dev)
1104 *
1105 *    Start the device
1106 *
1107 */
1108static int w83977af_net_open(struct net_device *dev)
1109{
1110	struct w83977af_ir *self;
1111	int iobase;
1112	char hwname[32];
1113	__u8 set;
1114
1115	IRDA_ASSERT(dev, return -1;);
1116	self = netdev_priv(dev);
1117
1118	IRDA_ASSERT(self, return 0;);
1119
1120	iobase = self->io.fir_base;
1121
1122	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
1123			(void *)dev)) {
1124		return -EAGAIN;
1125	}
1126	/*
1127	 * Always allocate the DMA channel after the IRQ,
1128	 * and clean up on failure.
1129	 */
1130	if (request_dma(self->io.dma, dev->name)) {
1131		free_irq(self->io.irq, dev);
1132		return -EAGAIN;
1133	}
1134
1135	/* Save current set */
1136	set = inb(iobase + SSR);
1137
1138	/* Enable some interrupts so we can receive frames again */
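     	/* speeds above 115200 (PIO_MAX_SPEED) use the MIR/FIR DMA receive path */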
1139	switch_bank(iobase, SET0);
1140	if (self->io.speed > 115200) {
1141		outb(ICR_EFSFI, iobase + ICR);
1142		w83977af_dma_receive(self);
1143	} else {
1144		outb(ICR_ERBRI, iobase + ICR);
1145	}
1146
1147	/* Restore bank register */
1148	outb(set, iobase + SSR);
1149
1150	/* Ready to play! */
1151	netif_start_queue(dev);
1152
1153	/* Give self a hardware name */
1154	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
1155
1156	/*
1157	 * Open new IrLAP layer instance, now that everything should be
1158	 * initialized properly
1159	 */
1160	self->irlap = irlap_open(dev, &self->qos, hwname);
1161
1162	return 0;
1163}
1164
1165/*
1166 * Function w83977af_net_close (dev)
1167 *
1168 *    Stop the device
1169 *
1170 */
1171static int w83977af_net_close(struct net_device *dev)
1172{
1173	struct w83977af_ir *self;
1174	int iobase;
1175	__u8 set;
1176
1177	IRDA_ASSERT(dev, return -1;);
1178
1179	self = netdev_priv(dev);
1180
1181	IRDA_ASSERT(self, return 0;);
1182
1183	iobase = self->io.fir_base;
1184
1185	/* Stop device */
1186	netif_stop_queue(dev);
1187
1188	/* Stop and remove instance of IrLAP */
1189	if (self->irlap)
1190		irlap_close(self->irlap);
1191	self->irlap = NULL;
1192
1193	disable_dma(self->io.dma);
1194
1195	/* Save current set */
1196	set = inb(iobase + SSR);
1197
1198	/* Disable interrupts */
1199	switch_bank(iobase, SET0);
1200	outb(0, iobase + ICR);
1201
1202	free_irq(self->io.irq, dev);
1203	free_dma(self->io.dma);
1204
1205	/* Restore bank register */
1206	outb(set, iobase + SSR);
1207
1208	return 0;
1209}
1210
1211/*
1212 * Function w83977af_net_ioctl (dev, rq, cmd)
1213 *
1214 *    Process IOCTL commands for this device
1215 *
1216 */
1217static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1218{
1219	struct if_irda_req *irq = (struct if_irda_req *)rq;
1220	struct w83977af_ir *self;
1221	unsigned long flags;
1222	int ret = 0;
1223
1224	IRDA_ASSERT(dev, return -1;);
1225
1226	self = netdev_priv(dev);
1227
1228	IRDA_ASSERT(self, return -1;);
1229
1230	pr_debug("%s: %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
1231
1232	spin_lock_irqsave(&self->lock, flags);
1233
1234	switch (cmd) {
1235	case SIOCSBANDWIDTH: /* Set bandwidth */
1236		if (!capable(CAP_NET_ADMIN)) {
1237			ret = -EPERM;
1238			goto out;
1239		}
1240		w83977af_change_speed(self, irq->ifr_baudrate);
1241		break;
1242	case SIOCSMEDIABUSY: /* Set media busy */
1243		if (!capable(CAP_NET_ADMIN)) {
1244			ret = -EPERM;
1245			goto out;
1246		}
1247		irda_device_set_media_busy(self->netdev, TRUE);
1248		break;
1249	case SIOCGRECEIVING: /* Check if we are receiving right now */
1250		irq->ifr_receiving = w83977af_is_receiving(self);
1251		break;
1252	default:
1253		ret = -EOPNOTSUPP;
1254	}
1255out:
1256	spin_unlock_irqrestore(&self->lock, flags);
1257	return ret;
1258}
1259
1260MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
1261MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
1262MODULE_LICENSE("GPL");
1263
1264module_param(qos_mtt_bits, int, 0);
 1265MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
1266module_param_array(io, int, NULL, 0);
1267MODULE_PARM_DESC(io, "Base I/O addresses");
1268module_param_array(irq, int, NULL, 0);
1269MODULE_PARM_DESC(irq, "IRQ lines");
1270
1271/*
1272 * Function init_module (void)
1273 *
 1274 *    Module entry point; registers the driver (see w83977af_init)
1275 *
1276 */
1277module_init(w83977af_init);
1278
1279/*
1280 * Function cleanup_module (void)
1281 *
 1282 *    Module exit point; closes all configured chips (see w83977af_cleanup)
1283 *
1284 */
1285module_exit(w83977af_cleanup);