   1/*********************************************************************
   2 *                
   3 * Filename:      w83977af_ir.c
   4 * Version:       1.0
   5 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
   6 * Status:        Experimental.
   7 * Author:        Paul VanderSpek
   8 * Created at:    Wed Nov  4 11:46:16 1998
   9 * Modified at:   Fri Jan 28 12:10:59 2000
  10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
  11 * 
  12 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
  13 *     Copyright (c) 1998-1999 Rebel.com
  14 *      
  15 *     This program is free software; you can redistribute it and/or 
  16 *     modify it under the terms of the GNU General Public License as 
  17 *     published by the Free Software Foundation; either version 2 of 
  18 *     the License, or (at your option) any later version.
  19 *  
  20 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
  21 *     warranty for any of this software. This material is provided "AS-IS"
  22 *     and at no charge.
  23 *     
  24 *     If you find bugs in this file, it's very likely that the same bug
  25 *     will also be in pc87108.c since the implementations are quite
  26 *     similar.
  27 *
  28 *     Notice that all functions that need to access the chip in _any_
  29 *     way must save the BSR register on entry, and restore it on exit.
  30 *     It is _very_ important to follow this policy!
  31 *
  32 *         __u8 bank;
  33 *     
  34 *         bank = inb( iobase+BSR);
  35 *  
  36 *         do_your_stuff_here();
  37 *
  38 *         outb( bank, iobase+BSR);
  39 *
  40 ********************************************************************/
  41
  42#include <linux/module.h>
  43#include <linux/kernel.h>
  44#include <linux/types.h>
  45#include <linux/skbuff.h>
  46#include <linux/netdevice.h>
  47#include <linux/ioport.h>
  48#include <linux/delay.h>
  49#include <linux/init.h>
  50#include <linux/interrupt.h>
  51#include <linux/rtnetlink.h>
  52#include <linux/dma-mapping.h>
  53#include <linux/gfp.h>
  54
  55#include <asm/io.h>
  56#include <asm/dma.h>
  57#include <asm/byteorder.h>
  58
  59#include <net/irda/irda.h>
  60#include <net/irda/wrapper.h>
  61#include <net/irda/irda_device.h>
  62#include "w83977af.h"
  63#include "w83977af_ir.h"
  64
  65#ifdef  CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
  66#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
  67#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
  68#endif
  69#define CONFIG_USE_W977_PNP        /* Currently needed */
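    /* Speeds at or below this limit are driven in PIO (SIR) mode; faster rates use the MIR/FIR DMA path */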
  70#define PIO_MAX_SPEED       115200 
  71
  72static char *driver_name = "w83977af_ir";
  73static int  qos_mtt_bits = 0x07;   /* 1 ms or more */
  74
  75#define CHIP_IO_EXTENT 8
  76
  77static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
  78#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
  79static unsigned int irq[] = { 6, 0, 0, 0 };
  80#else
  81static unsigned int irq[] = { 11, 0, 0, 0 };
  82#endif
  83static unsigned int dma[] = { 1, 0, 0, 0 };
  84static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
  85static unsigned int efio = W977_EFIO_BASE;
  86
  87static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
  88
  89/* Some prototypes */
  90static int  w83977af_open(int i, unsigned int iobase, unsigned int irq, 
  91                          unsigned int dma);
  92static int  w83977af_close(struct w83977af_ir *self);
  93static int  w83977af_probe(int iobase, int irq, int dma);
  94static int  w83977af_dma_receive(struct w83977af_ir *self); 
  95static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
  96static netdev_tx_t  w83977af_hard_xmit(struct sk_buff *skb,
  97					     struct net_device *dev);
  98static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
  99static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
 100static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
 101static int  w83977af_is_receiving(struct w83977af_ir *self);
 102
 103static int  w83977af_net_open(struct net_device *dev);
 104static int  w83977af_net_close(struct net_device *dev);
 105static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 106
 107/*
 108 * Function w83977af_init ()
 109 *
 110 *    Initialize chip. Just try to find out how many chips we are dealing with
 111 *    and where they are
 112 */
 113static int __init w83977af_init(void)
 114{
 115        int i;
 116
 117	IRDA_DEBUG(0, "%s()\n", __func__ );
 118
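     	/* Unused slots in io[] are set to ~0, so stop at the first address that is out of range */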
 119	for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
 120		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
 121			return 0;
 122	}
 123	return -ENODEV;
 124}
 125
 126/*
 127 * Function w83977af_cleanup ()
 128 *
 129 *    Close all configured chips
 130 *
 131 */
 132static void __exit w83977af_cleanup(void)
 133{
 134	int i;
 135
 136        IRDA_DEBUG(4, "%s()\n", __func__ );
 137
 138	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
 139		if (dev_self[i])
 140			w83977af_close(dev_self[i]);
 141	}
 142}
 143
 144static const struct net_device_ops w83977_netdev_ops = {
 145	.ndo_open       = w83977af_net_open,
 146	.ndo_stop       = w83977af_net_close,
 147	.ndo_start_xmit = w83977af_hard_xmit,
 148	.ndo_do_ioctl   = w83977af_net_ioctl,
 149};
 150
 151/*
 152 * Function w83977af_open (iobase, irq)
 153 *
 154 *    Open driver instance
 155 *
 156 */
 157static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
 158			 unsigned int dma)
 159{
 160	struct net_device *dev;
 161        struct w83977af_ir *self;
 162	int err;
 163
 164	IRDA_DEBUG(0, "%s()\n", __func__ );
 165
 166	/* Lock the port that we need */
 167	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
 168		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
 169		      __func__ , iobase);
 170		return -ENODEV;
 171	}
 172
 173	if (w83977af_probe(iobase, irq, dma) == -1) {
 174		err = -1;
 175		goto err_out;
 176	}
 177	/*
 178	 *  Allocate new instance of the driver
 179	 */
 180	dev = alloc_irdadev(sizeof(struct w83977af_ir));
 181	if (dev == NULL) {
 182		printk( KERN_ERR "IrDA: Can't allocate memory for "
 183			"IrDA control block!\n");
 184		err = -ENOMEM;
 185		goto err_out;
 186	}
 187
 188	self = netdev_priv(dev);
 189	spin_lock_init(&self->lock);
 190   
 191
 192	/* Initialize IO */
  193	self->io.fir_base   = iobase;
  194	self->io.irq        = irq;
  195	self->io.fir_ext    = CHIP_IO_EXTENT;
  196	self->io.dma        = dma;
  197	self->io.fifo_size  = 32;
 198
 199	/* Initialize QoS for this device */
 200	irda_init_max_qos_capabilies(&self->qos);
 201	
  202	/* The only value we must override is the baudrate */
 203
  204	/* FIXME: The HP HSDL-1100 does not support 1152000! */
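     	/* IR_4000000 is defined as a second-byte flag of the 16-bit rate field, hence the shift */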
 205	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
 206		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
 207
  208	/* The HP HSDL-1100 needs 1 ms according to the specs */
 209	self->qos.min_turn_time.bits = qos_mtt_bits;
 210	irda_qos_bits_to_value(&self->qos);
 211	
 212	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
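     	/* 14384 = (2048 + 6) * 7 + 6, i.e. a full 7-frame window of 2048 byte frames */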
 213	self->rx_buff.truesize = 14384; 
 214	self->tx_buff.truesize = 4000;
 215	
 216	/* Allocate memory if needed */
 217	self->rx_buff.head =
 218		dma_alloc_coherent(NULL, self->rx_buff.truesize,
 219				   &self->rx_buff_dma, GFP_KERNEL);
 220	if (self->rx_buff.head == NULL) {
 221		err = -ENOMEM;
 222		goto err_out1;
 223	}
 224
 225	memset(self->rx_buff.head, 0, self->rx_buff.truesize);
 226	
 227	self->tx_buff.head =
 228		dma_alloc_coherent(NULL, self->tx_buff.truesize,
 229				   &self->tx_buff_dma, GFP_KERNEL);
 230	if (self->tx_buff.head == NULL) {
 231		err = -ENOMEM;
 232		goto err_out2;
 233	}
 234	memset(self->tx_buff.head, 0, self->tx_buff.truesize);
 235
 236	self->rx_buff.in_frame = FALSE;
 237	self->rx_buff.state = OUTSIDE_FRAME;
 238	self->tx_buff.data = self->tx_buff.head;
 239	self->rx_buff.data = self->rx_buff.head;
 240	self->netdev = dev;
 241
 242	dev->netdev_ops	= &w83977_netdev_ops;
 243
 244	err = register_netdev(dev);
 245	if (err) {
  246		IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
 247		goto err_out3;
 248	}
 249	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
 250
 251	/* Need to store self somewhere */
 252	dev_self[i] = self;
 253	
 254	return 0;
 255err_out3:
 256	dma_free_coherent(NULL, self->tx_buff.truesize,
 257			  self->tx_buff.head, self->tx_buff_dma);
 258err_out2:	
 259	dma_free_coherent(NULL, self->rx_buff.truesize,
 260			  self->rx_buff.head, self->rx_buff_dma);
 261err_out1:
 262	free_netdev(dev);
 263err_out:
 264	release_region(iobase, CHIP_IO_EXTENT);
 265	return err;
 266}
 267
 268/*
 269 * Function w83977af_close (self)
 270 *
 271 *    Close driver instance
 272 *
 273 */
 274static int w83977af_close(struct w83977af_ir *self)
 275{
 276	int iobase;
 277
 278	IRDA_DEBUG(0, "%s()\n", __func__ );
 279
 280        iobase = self->io.fir_base;
 281
 282#ifdef CONFIG_USE_W977_PNP
 283	/* enter PnP configuration mode */
 284	w977_efm_enter(efio);
 285
 286	w977_select_device(W977_DEVICE_IR, efio);
 287
 288	/* Deactivate device */
 289	w977_write_reg(0x30, 0x00, efio);
 290
 291	w977_efm_exit(efio);
 292#endif /* CONFIG_USE_W977_PNP */
 293
 294	/* Remove netdevice */
 295	unregister_netdev(self->netdev);
 296
 297	/* Release the PORT that this driver is using */
 298	IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n", 
 299	      __func__ , self->io.fir_base);
 300	release_region(self->io.fir_base, self->io.fir_ext);
 301
 302	if (self->tx_buff.head)
 303		dma_free_coherent(NULL, self->tx_buff.truesize,
 304				  self->tx_buff.head, self->tx_buff_dma);
 305	
 306	if (self->rx_buff.head)
 307		dma_free_coherent(NULL, self->rx_buff.truesize,
 308				  self->rx_buff.head, self->rx_buff_dma);
 309
 310	free_netdev(self->netdev);
 311
 312	return 0;
 313}
 314
 315static int w83977af_probe(int iobase, int irq, int dma)
 316{
 317  	int version;
 318	int i;
 319  	
 320 	for (i=0; i < 2; i++) {
 321		IRDA_DEBUG( 0, "%s()\n", __func__ );
 322#ifdef CONFIG_USE_W977_PNP
 323 		/* Enter PnP configuration mode */
 324		w977_efm_enter(efbase[i]);
 325  
 326 		w977_select_device(W977_DEVICE_IR, efbase[i]);
 327  
 328 		/* Configure PnP port, IRQ, and DMA channel */
 329 		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
 330 		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
 331  
 332 		w977_write_reg(0x70, irq, efbase[i]);
 333#ifdef CONFIG_ARCH_NETWINDER
 334		/* Netwinder uses 1 higher than Linux */
 335 		w977_write_reg(0x74, dma+1, efbase[i]);
 336#else
 337 		w977_write_reg(0x74, dma, efbase[i]);   
 338#endif /*CONFIG_ARCH_NETWINDER */
 339 		w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */
 340  	
 341 		/* Set append hardware CRC, enable IR bank selection */	
 342 		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
 343  
 344 		/* Activate device */
 345 		w977_write_reg(0x30, 0x01, efbase[i]);
 346  
 347 		w977_efm_exit(efbase[i]);
 348#endif /* CONFIG_USE_W977_PNP */
 349  		/* Disable Advanced mode */
 350  		switch_bank(iobase, SET2);
  351  		outb(0x00, iobase+2);
 352 
 353 		/* Turn on UART (global) interrupts */
 354 		switch_bank(iobase, SET0);
 355  		outb(HCR_EN_IRQ, iobase+HCR);
 356  	
 357  		/* Switch to advanced mode */
 358  		switch_bank(iobase, SET2);
 359  		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
 360  
 361  		/* Set default IR-mode */
 362  		switch_bank(iobase, SET0);
 363  		outb(HCR_SIR, iobase+HCR);
 364  
 365  		/* Read the Advanced IR ID */
 366  		switch_bank(iobase, SET3);
 367  		version = inb(iobase+AUID);
 368  	
  369  		/* Upper nibble of the version should be 0x1 */
 370  		if (0x10 == (version & 0xf0)) {
 371 			efio = efbase[i];
 372 
 373 			/* Set FIFO size to 32 */
 374 			switch_bank(iobase, SET2);
 375 			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);	
 376 	
 377 			/* Set FIFO threshold to TX17, RX16 */
 378 			switch_bank(iobase, SET0);	
 379 			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
 380			     UFR_EN_FIFO,iobase+UFR);
 381 
 382 			/* Receiver frame length */
 383 			switch_bank(iobase, SET4);
 384			outb(2048 & 0xff, iobase+6);
 385			outb((2048 >> 8) & 0x1f, iobase+7);
 386
 387			/* 
 388			 * Init HP HSDL-1100 transceiver. 
 389			 * 
  390			 * Set IRX_MSL since we have two receive paths, IRRX
  391			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
  392			 * be an input pin used for IRRXH.
 393			 *
 394			 *   IRRX  pin 37 connected to receiver 
 395			 *   IRTX  pin 38 connected to transmitter
 396			 *   FIRRX pin 39 connected to receiver      (IRSL0) 
 397			 *   CIRRX pin 40 connected to pin 37
 398			 */
 399			switch_bank(iobase, SET7);
 400			outb(0x40, iobase+7);
 401			
 402			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
 403				     "Version: 0x%02x\n", version);
 404			
 405			return 0;
 406		} else {
  407			/* Try next extended function register address */
 408			IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ );
 409		}
 410  	}   	
 411	return -1;
 412}
 413
 414static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
 415{
 416	int ir_mode = HCR_SIR;
 417	int iobase; 
 418	__u8 set;
 419
 420	iobase = self->io.fir_base;
 421
 422	/* Update accounting for new speed */
 423	self->io.speed = speed;
 424
 425	/* Save current bank */
 426	set = inb(iobase+SSR);
 427
 428	/* Disable interrupts */
 429	switch_bank(iobase, SET0);
 430	outb(0, iobase+ICR);
 431
 432	/* Select Set 2 */
 433	switch_bank(iobase, SET2);
 434	outb(0x00, iobase+ABHL);
 435
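     	/* ABLL holds the divisor of the 115200 baud base rate: 0x0c -> 115200/12 = 9600, down to 0x01 -> 115200 */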
 436	switch (speed) {
 437	case 9600:   outb(0x0c, iobase+ABLL); break;
 438	case 19200:  outb(0x06, iobase+ABLL); break;
 439	case 38400:  outb(0x03, iobase+ABLL); break;
 440	case 57600:  outb(0x02, iobase+ABLL); break;
 441	case 115200: outb(0x01, iobase+ABLL); break;
 442	case 576000:
 443		ir_mode = HCR_MIR_576;
 444		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
 445		break;
 446	case 1152000:
 447		ir_mode = HCR_MIR_1152;
 448		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
 449		break;
 450	case 4000000:
 451		ir_mode = HCR_FIR;
 452		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
 453		break;
 454	default:
 455		ir_mode = HCR_FIR;
  456		IRDA_DEBUG(0, "%s(), unknown baud rate of %u\n", __func__ , speed);
 457		break;
 458	}
 459
 460	/* Set speed mode */
 461	switch_bank(iobase, SET0);
 462	outb(ir_mode, iobase+HCR);
 463
 464	/* set FIFO size to 32 */
 465	switch_bank(iobase, SET2);
 466	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);	
 467	
 468	/* set FIFO threshold to TX17, RX16 */
 469	switch_bank(iobase, SET0);
 470	outb(0x00, iobase+UFR);        /* Reset */
 471	outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
 472	outb(0xa7, iobase+UFR);
 473
 474	netif_wake_queue(self->netdev);
 475		
 476	/* Enable some interrupts so we can receive frames */
 477	switch_bank(iobase, SET0);
 478	if (speed > PIO_MAX_SPEED) {
 479		outb(ICR_EFSFI, iobase+ICR);
 480		w83977af_dma_receive(self);
 481	} else
 482		outb(ICR_ERBRI, iobase+ICR);
 483    	
 484	/* Restore SSR */
 485	outb(set, iobase+SSR);
 486}
 487
 488/*
 489 * Function w83977af_hard_xmit (skb, dev)
 490 *
 491 *    Sets up a DMA transfer to send the current frame.
 492 *
 493 */
 494static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
 495					    struct net_device *dev)
 496{
 497	struct w83977af_ir *self;
 498	__s32 speed;
 499	int iobase;
 500	__u8 set;
 501	int mtt;
 502	
 503	self = netdev_priv(dev);
 504
 505	iobase = self->io.fir_base;
 506
 507	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
 508		   (int) skb->len);
 509	
 510	/* Lock transmit buffer */
 511	netif_stop_queue(dev);
 512	
 513	/* Check if we need to change the speed */
 514	speed = irda_get_next_speed(skb);
 515	if ((speed != self->io.speed) && (speed != -1)) {
 516		/* Check for empty frame */
 517		if (!skb->len) {
 518			w83977af_change_speed(self, speed); 
 519			dev_kfree_skb(skb);
 520			return NETDEV_TX_OK;
 521		} else
 522			self->new_speed = speed;
 523	}
 524
 525	/* Save current set */
 526	set = inb(iobase+SSR);
 527	
 528	/* Decide if we should use PIO or DMA transfer */
 529	if (self->io.speed > PIO_MAX_SPEED) {
 530		self->tx_buff.data = self->tx_buff.head;
 531		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
 532		self->tx_buff.len = skb->len;
 533		
  534		mtt = irda_get_mtt(skb);
  535		IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
  536		if (mtt)
  537			udelay(mtt);
  538
  539		/* Enable DMA interrupt */
  540		switch_bank(iobase, SET0);
  541		outb(ICR_EDMAI, iobase+ICR);
  542		w83977af_dma_write(self, iobase);
 543	} else {
 544		self->tx_buff.data = self->tx_buff.head;
 545		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 
 546						   self->tx_buff.truesize);
 547		
 548		/* Add interrupt on tx low level (will fire immediately) */
 549		switch_bank(iobase, SET0);
 550		outb(ICR_ETXTHI, iobase+ICR);
 551	}
 552	dev_kfree_skb(skb);
 553
 554	/* Restore set register */
 555	outb(set, iobase+SSR);
 556
 557	return NETDEV_TX_OK;
 558}
 559
 560/*
 561 * Function w83977af_dma_write (self, iobase)
 562 *
 563 *    Send frame using DMA
 564 *
 565 */
 566static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
 567{
 568	__u8 set;
 569#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
 570	unsigned long flags;
 571	__u8 hcr;
 572#endif
 573        IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
 574
 575	/* Save current set */
 576	set = inb(iobase+SSR);
 577
 578	/* Disable DMA */
 579	switch_bank(iobase, SET0);
 580	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
 581
 582	/* Choose transmit DMA channel  */ 
 583	switch_bank(iobase, SET2);
 584	outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
 585#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
 586	spin_lock_irqsave(&self->lock, flags);
 587
 588	disable_dma(self->io.dma);
 589	clear_dma_ff(self->io.dma);
 590	set_dma_mode(self->io.dma, DMA_MODE_READ);
 591	set_dma_addr(self->io.dma, self->tx_buff_dma);
 592	set_dma_count(self->io.dma, self->tx_buff.len);
 593#else
 594	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
 595		       DMA_MODE_WRITE);	
 596#endif
 597	self->io.direction = IO_XMIT;
 598	
 599	/* Enable DMA */
 600 	switch_bank(iobase, SET0);
 601#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
 602	hcr = inb(iobase+HCR);
 603	outb(hcr | HCR_EN_DMA, iobase+HCR);
 604	enable_dma(self->io.dma);
 605	spin_unlock_irqrestore(&self->lock, flags);
 606#else	
 607	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
 608#endif
 609
 610	/* Restore set register */
 611	outb(set, iobase+SSR);
 612}
 613
 614/*
 615 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 616 *
  617 *    Fill the transmit FIFO with as much of the frame as it will take (PIO)
 618 *
 619 */
 620static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
 621{
 622	int actual = 0;
 623	__u8 set;
 624	
 625	IRDA_DEBUG(4, "%s()\n", __func__ );
 626
 627	/* Save current bank */
 628	set = inb(iobase+SSR);
 629
 630	switch_bank(iobase, SET0);
 631	if (!(inb_p(iobase+USR) & USR_TSRE)) {
 632		IRDA_DEBUG(4,
 633			   "%s(), warning, FIFO not empty yet!\n", __func__  );
 634
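     		/* The Tx FIFO threshold is programmed to 17 bytes, so assume up to 17 bytes may still be queued */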
 635		fifo_size -= 17;
 636		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n", 
 637			   __func__ , fifo_size);
 638	}
 639
 640	/* Fill FIFO with current frame */
 641	while ((fifo_size-- > 0) && (actual < len)) {
 642		/* Transmit next byte */
 643		outb(buf[actual++], iobase+TBR);
 644	}
 645        
 646	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", 
 647		   __func__ , fifo_size, actual, len);
 648
 649	/* Restore bank */
 650	outb(set, iobase+SSR);
 651
 652	return actual;
 653}
 654
 655/*
 656 * Function w83977af_dma_xmit_complete (self)
 657 *
  658 *    The transfer of a frame is finished, so do the necessary things
 659 *
 660 *    
 661 */
 662static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
 663{
 664	int iobase;
 665	__u8 set;
 666
 667	IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
 668
 669	IRDA_ASSERT(self != NULL, return;);
 670
 671	iobase = self->io.fir_base;
 672
 673	/* Save current set */
 674	set = inb(iobase+SSR);
 675
 676	/* Disable DMA */
 677	switch_bank(iobase, SET0);
 678	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
 679	
  680	/* Check for underrun! */
 681	if (inb(iobase+AUDR) & AUDR_UNDR) {
 682		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
 683		
 684		self->netdev->stats.tx_errors++;
 685		self->netdev->stats.tx_fifo_errors++;
 686
 687		/* Clear bit, by writing 1 to it */
 688		outb(AUDR_UNDR, iobase+AUDR);
 689	} else
 690		self->netdev->stats.tx_packets++;
 691
 692	
 693	if (self->new_speed) {
 694		w83977af_change_speed(self, self->new_speed);
 695		self->new_speed = 0;
 696	}
 697
 698	/* Unlock tx_buff and request another frame */
 699	/* Tell the network layer, that we want more frames */
 700	netif_wake_queue(self->netdev);
 701	
 702	/* Restore set */
 703	outb(set, iobase+SSR);
 704}
 705
 706/*
 707 * Function w83977af_dma_receive (self)
 708 *
 709 *    Get ready for receiving a frame. The device will initiate a DMA
 710 *    if it starts to receive a frame.
 711 *
 712 */
 713static int w83977af_dma_receive(struct w83977af_ir *self)
 714{
 715	int iobase;
 716	__u8 set;
 717#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
 718	unsigned long flags;
 719	__u8 hcr;
 720#endif
 721	IRDA_ASSERT(self != NULL, return -1;);
 722
 723	IRDA_DEBUG(4, "%s\n", __func__ );
 724
 725	iobase= self->io.fir_base;
 726
 727	/* Save current set */
 728	set = inb(iobase+SSR);
 729
 730	/* Disable DMA */
 731	switch_bank(iobase, SET0);
 732	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
 733
 734	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
 735	switch_bank(iobase, SET2);
 736	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
 737	     iobase+ADCR1);
 738
 739	self->io.direction = IO_RECV;
 740	self->rx_buff.data = self->rx_buff.head;
 741
 742#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
 743	spin_lock_irqsave(&self->lock, flags);
 744
 745	disable_dma(self->io.dma);
 746	clear_dma_ff(self->io.dma);
 747	set_dma_mode(self->io.dma, DMA_MODE_READ);
 748	set_dma_addr(self->io.dma, self->rx_buff_dma);
 749	set_dma_count(self->io.dma, self->rx_buff.truesize);
 750#else
 751	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
 752		       DMA_MODE_READ);
 753#endif
 754	/* 
  755	 * Reset Rx FIFO. This will also flush the ST_FIFO; it's very
 756	 * important that we don't reset the Tx FIFO since it might not
 757	 * be finished transmitting yet
 758	 */
 759	switch_bank(iobase, SET0);
 760	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
 761	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
 762	
 763	/* Enable DMA */
 764	switch_bank(iobase, SET0);
 765#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
 766	hcr = inb(iobase+HCR);
 767	outb(hcr | HCR_EN_DMA, iobase+HCR);
 768	enable_dma(self->io.dma);
 769	spin_unlock_irqrestore(&self->lock, flags);
 770#else	
 771	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
 772#endif
 773	/* Restore set */
 774	outb(set, iobase+SSR);
 775
 776	return 0;
 777}
 778
 779/*
 780 * Function w83977af_receive_complete (self)
 781 *
 782 *    Finished with receiving a frame
 783 *
 784 */
 785static int w83977af_dma_receive_complete(struct w83977af_ir *self)
 786{
 787	struct sk_buff *skb;
 788	struct st_fifo *st_fifo;
 789	int len;
 790	int iobase;
 791	__u8 set;
 792	__u8 status;
 793
 794	IRDA_DEBUG(4, "%s\n", __func__ );
 795
 796	st_fifo = &self->st_fifo;
 797
 798	iobase = self->io.fir_base;
 799
 800	/* Save current set */
 801	set = inb(iobase+SSR);
 804
 805	/* Read status FIFO */
 806	switch_bank(iobase, SET5);
 807	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
 808		st_fifo->entries[st_fifo->tail].status = status;
 809		
 810		st_fifo->entries[st_fifo->tail].len  = inb(iobase+RFLFL);
 811		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
 812		
 813		st_fifo->tail++;
 814		st_fifo->len++;
 815	}
 816	
 817	while (st_fifo->len) {
 818		/* Get first entry */
 819		status = st_fifo->entries[st_fifo->head].status;
 820		len    = st_fifo->entries[st_fifo->head].len;
 821		st_fifo->head++;
 822		st_fifo->len--;
 823
 824		/* Check for errors */
 825		if (status & FS_FO_ERR_MSK) {
 826			if (status & FS_FO_LST_FR) {
 827				/* Add number of lost frames to stats */
 828				self->netdev->stats.rx_errors += len;
 829			} else {
 830				/* Skip frame */
 831				self->netdev->stats.rx_errors++;
 832				
 833				self->rx_buff.data += len;
 834				
 835				if (status & FS_FO_MX_LEX)
 836					self->netdev->stats.rx_length_errors++;
 837				
 838				if (status & FS_FO_PHY_ERR) 
 839					self->netdev->stats.rx_frame_errors++;
 840				
 841				if (status & FS_FO_CRC_ERR) 
 842					self->netdev->stats.rx_crc_errors++;
 843			}
 844			/* The errors below can be reported in both cases */
 845			if (status & FS_FO_RX_OV)
 846				self->netdev->stats.rx_fifo_errors++;
 847			
 848			if (status & FS_FO_FSF_OV)
 849				self->netdev->stats.rx_fifo_errors++;
 850			
 851		} else {
 852			/* Check if we have transferred all data to memory */
 853			switch_bank(iobase, SET0);
 854			if (inb(iobase+USR) & USR_RDR) {
 855				udelay(80); /* Should be enough!? */
 856			}
 857						
 858			skb = dev_alloc_skb(len+1);
 859			if (skb == NULL)  {
 860				printk(KERN_INFO
 861				       "%s(), memory squeeze, dropping frame.\n", __func__);
 862				/* Restore set register */
 863				outb(set, iobase+SSR);
 864
 865				return FALSE;
 866			}
 867			
 868			/*  Align to 20 bytes */
 869			skb_reserve(skb, 1); 
 870			
 871			/* Copy frame without CRC */
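     			/* SIR/MIR frames end with a 16-bit CRC, FIR frames with a 32-bit CRC, hence len-2 vs len-4 */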
 872			if (self->io.speed < 4000000) {
 873				skb_put(skb, len-2);
 874				skb_copy_to_linear_data(skb,
 875							self->rx_buff.data,
 876							len - 2);
 877			} else {
 878				skb_put(skb, len-4);
 879				skb_copy_to_linear_data(skb,
 880							self->rx_buff.data,
 881							len - 4);
 882			}
 883
 884			/* Move to next frame */
 885			self->rx_buff.data += len;
 886			self->netdev->stats.rx_packets++;
 887			
 888			skb->dev = self->netdev;
 889			skb_reset_mac_header(skb);
 890			skb->protocol = htons(ETH_P_IRDA);
 891			netif_rx(skb);
 892		}
 893	}
 894	/* Restore set register */
 895	outb(set, iobase+SSR);
 896
 897	return TRUE;
 898}
 899
 900/*
  901 * Function w83977af_pio_receive (self)
 902 *
 903 *    Receive all data in receiver FIFO
 904 *
 905 */
 906static void w83977af_pio_receive(struct w83977af_ir *self) 
 907{
 908	__u8 byte = 0x00;
 909	int iobase;
 910
 911	IRDA_DEBUG(4, "%s()\n", __func__ );
 912
 913	IRDA_ASSERT(self != NULL, return;);
 914	
 915	iobase = self->io.fir_base;
 916	
 917	/*  Receive all characters in Rx FIFO */
 918	do {
 919		byte = inb(iobase+RBR);
 920		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
 921				  byte);
 922	} while (inb(iobase+USR) & USR_RDR); /* Data available */	
 923}
 924
 925/*
  926 * Function w83977af_sir_interrupt (self, isr)
 927 *
 928 *    Handle SIR interrupt
 929 *
 930 */
 931static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
 932{
 933	int actual;
 934	__u8 new_icr = 0;
 935	__u8 set;
 936	int iobase;
 937
 938	IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
 939	
 940	iobase = self->io.fir_base;
 941	/* Transmit FIFO low on data */
 942	if (isr & ISR_TXTH_I) {
 943		/* Write data left in transmit buffer */
 944		actual = w83977af_pio_write(self->io.fir_base, 
 945					    self->tx_buff.data, 
 946					    self->tx_buff.len, 
 947					    self->io.fifo_size);
 948
 949		self->tx_buff.data += actual;
 950		self->tx_buff.len  -= actual;
 951		
 952		self->io.direction = IO_XMIT;
 953
 954		/* Check if finished */
 955		if (self->tx_buff.len > 0) {
 956			new_icr |= ICR_ETXTHI;
 957		} else {
 958			set = inb(iobase+SSR);
 959			switch_bank(iobase, SET0);
 960			outb(AUDR_SFEND, iobase+AUDR);
 961			outb(set, iobase+SSR); 
 962
 963			self->netdev->stats.tx_packets++;
 964
 965			/* Feed me more packets */
 966			netif_wake_queue(self->netdev);
 967			new_icr |= ICR_ETBREI;
 968		}
 969	}
 970	/* Check if transmission has completed */
 971	if (isr & ISR_TXEMP_I) {		
 972		/* Check if we need to change the speed? */
 973		if (self->new_speed) {
 974			IRDA_DEBUG(2,
 975				   "%s(), Changing speed!\n", __func__ );
 976			w83977af_change_speed(self, self->new_speed);
 977			self->new_speed = 0;
 978		}
 979
 980		/* Turn around and get ready to receive some data */
 981		self->io.direction = IO_RECV;
 982		new_icr |= ICR_ERBRI;
 983	}
 984
 985	/* Rx FIFO threshold or timeout */
 986	if (isr & ISR_RXTH_I) {
 987		w83977af_pio_receive(self);
 988
 989		/* Keep receiving */
 990		new_icr |= ICR_ERBRI;
 991	}
 992	return new_icr;
 993}
 994
 995/*
  996 * Function w83977af_fir_interrupt (self, isr)
 997 *
 998 *    Handle MIR/FIR interrupt
 999 *
1000 */
1001static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
1002{
1003	__u8 new_icr = 0;
1004	__u8 set;
1005	int iobase;
1006
1007	iobase = self->io.fir_base;
1008	set = inb(iobase+SSR);
1009	
1010	/* End of frame detected in FIFO */
1011	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
1012		if (w83977af_dma_receive_complete(self)) {
1013			
1014			/* Wait for next status FIFO interrupt */
1015			new_icr |= ICR_EFSFI;
1016		} else {
1017			/* DMA not finished yet */
1018
1019			/* Set timer value, resolution 1 ms */
1020			switch_bank(iobase, SET4);
1021			outb(0x01, iobase+TMRL); /* 1 ms */
1022			outb(0x00, iobase+TMRH);
1023
1024			/* Start timer */
1025			outb(IR_MSL_EN_TMR, iobase+IR_MSL);
1026
1027			new_icr |= ICR_ETMRI;
1028		}
1029	}
1030	/* Timer finished */
1031	if (isr & ISR_TMR_I) {
1032		/* Disable timer */
1033		switch_bank(iobase, SET4);
1034		outb(0, iobase+IR_MSL);
1035
1036		/* Clear timer event */
1037		/* switch_bank(iobase, SET0); */
1038/* 		outb(ASCR_CTE, iobase+ASCR); */
1039
1040		/* Check if this is a TX timer interrupt */
1041		if (self->io.direction == IO_XMIT) {
1042			w83977af_dma_write(self, iobase);
1043
1044			new_icr |= ICR_EDMAI;
1045		} else {
1046			/* Check if DMA has now finished */
1047			w83977af_dma_receive_complete(self);
1048
1049			new_icr |= ICR_EFSFI;
1050		}
1051	}	
1052	/* Finished with DMA */
1053	if (isr & ISR_DMA_I) {
1054		w83977af_dma_xmit_complete(self);
1055
1056		/* Check if there are more frames to be transmitted */
1057		/* if (irda_device_txqueue_empty(self)) { */
1058		
1059		/* Prepare for receive 
1060		 * 
1061		 * ** Netwinder Tx DMA likes that we do this anyway **
1062		 */
1063		w83977af_dma_receive(self);
1064		new_icr = ICR_EFSFI;
1065	       /* } */
1066	}
1067	
1068	/* Restore set */
1069	outb(set, iobase+SSR);
1070
1071	return new_icr;
1072}
1073
1074/*
1075 * Function w83977af_interrupt (irq, dev_id, regs)
1076 *
1077 *    An interrupt from the chip has arrived. Time to do some work
1078 *
1079 */
1080static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
1081{
1082	struct net_device *dev = dev_id;
1083	struct w83977af_ir *self;
1084	__u8 set, icr, isr;
1085	int iobase;
1086
1087	self = netdev_priv(dev);
1088
1089	iobase = self->io.fir_base;
1090
1091	/* Save current bank */
1092	set = inb(iobase+SSR);
1093	switch_bank(iobase, SET0);
1094	
1095	icr = inb(iobase+ICR); 
1096	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */ 
1097
1098	outb(0, iobase+ICR); /* Disable interrupts */
1099	
1100	if (isr) {
1101		/* Dispatch interrupt handler for the current speed */
1102		if (self->io.speed > PIO_MAX_SPEED )
1103			icr = w83977af_fir_interrupt(self, isr);
1104		else
1105			icr = w83977af_sir_interrupt(self, isr);
1106	}
1107
1108	outb(icr, iobase+ICR);    /* Restore (new) interrupts */
1109	outb(set, iobase+SSR);    /* Restore bank register */
1110	return IRQ_RETVAL(isr);
1111}
1112
1113/*
1114 * Function w83977af_is_receiving (self)
1115 *
 1116 *    Return TRUE if we are currently receiving a frame
1117 *
1118 */
1119static int w83977af_is_receiving(struct w83977af_ir *self)
1120{
1121	int status = FALSE;
1122	int iobase;
1123	__u8 set;
1124
1125	IRDA_ASSERT(self != NULL, return FALSE;);
1126
1127	if (self->io.speed > 115200) {
1128		iobase = self->io.fir_base;
1129
1130		/* Check if rx FIFO is not empty */
1131		set = inb(iobase+SSR);
1132		switch_bank(iobase, SET2);
1133		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
1134			/* We are receiving something */
1135			status =  TRUE;
1136		}
1137		outb(set, iobase+SSR);
1138	} else 
1139		status = (self->rx_buff.state != OUTSIDE_FRAME);
1140	
1141	return status;
1142}
1143
1144/*
1145 * Function w83977af_net_open (dev)
1146 *
1147 *    Start the device
1148 *
1149 */
1150static int w83977af_net_open(struct net_device *dev)
1151{
1152	struct w83977af_ir *self;
1153	int iobase;
1154	char hwname[32];
1155	__u8 set;
1156	
1157	IRDA_DEBUG(0, "%s()\n", __func__ );
1158	
1159	IRDA_ASSERT(dev != NULL, return -1;);
1160	self = netdev_priv(dev);
1161	
1162	IRDA_ASSERT(self != NULL, return 0;);
1163	
1164	iobase = self->io.fir_base;
1165
1166	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name, 
1167			(void *) dev)) {
1168		return -EAGAIN;
1169	}
1170	/*
1171	 * Always allocate the DMA channel after the IRQ,
1172	 * and clean up on failure.
1173	 */
1174	if (request_dma(self->io.dma, dev->name)) {
 1175		free_irq(self->io.irq, dev);
1176		return -EAGAIN;
1177	}
1178		
1179	/* Save current set */
1180	set = inb(iobase+SSR);
1181
1182 	/* Enable some interrupts so we can receive frames again */
1183 	switch_bank(iobase, SET0);
1184 	if (self->io.speed > 115200) {
1185 		outb(ICR_EFSFI, iobase+ICR);
1186 		w83977af_dma_receive(self);
1187 	} else
1188 		outb(ICR_ERBRI, iobase+ICR);
1189
1190	/* Restore bank register */
1191	outb(set, iobase+SSR);
1192
1193	/* Ready to play! */
1194	netif_start_queue(dev);
1195	
1196	/* Give self a hardware name */
1197	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
1198
1199	/* 
1200	 * Open new IrLAP layer instance, now that everything should be
1201	 * initialized properly 
1202	 */
1203	self->irlap = irlap_open(dev, &self->qos, hwname);
1204
1205	return 0;
1206}
1207
1208/*
1209 * Function w83977af_net_close (dev)
1210 *
1211 *    Stop the device
1212 *
1213 */
1214static int w83977af_net_close(struct net_device *dev)
1215{
1216	struct w83977af_ir *self;
1217	int iobase;
1218	__u8 set;
1219
1220	IRDA_DEBUG(0, "%s()\n", __func__ );
1221
1222	IRDA_ASSERT(dev != NULL, return -1;);
1223	
1224	self = netdev_priv(dev);
1225	
1226	IRDA_ASSERT(self != NULL, return 0;);
1227	
1228	iobase = self->io.fir_base;
1229
1230	/* Stop device */
1231	netif_stop_queue(dev);
1232	
1233	/* Stop and remove instance of IrLAP */
1234	if (self->irlap)
1235		irlap_close(self->irlap);
1236	self->irlap = NULL;
1237
1238	disable_dma(self->io.dma);
1239
1240	/* Save current set */
1241	set = inb(iobase+SSR);
1242	
1243	/* Disable interrupts */
1244	switch_bank(iobase, SET0);
1245	outb(0, iobase+ICR); 
1246
1247	free_irq(self->io.irq, dev);
1248	free_dma(self->io.dma);
1249
1250	/* Restore bank register */
1251	outb(set, iobase+SSR);
1252
1253	return 0;
1254}
1255
1256/*
1257 * Function w83977af_net_ioctl (dev, rq, cmd)
1258 *
1259 *    Process IOCTL commands for this device
1260 *
1261 */
1262static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1263{
1264	struct if_irda_req *irq = (struct if_irda_req *) rq;
1265	struct w83977af_ir *self;
1266	unsigned long flags;
1267	int ret = 0;
1268
1269	IRDA_ASSERT(dev != NULL, return -1;);
1270
1271	self = netdev_priv(dev);
1272
1273	IRDA_ASSERT(self != NULL, return -1;);
1274
1275	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
1276	
1277	spin_lock_irqsave(&self->lock, flags);
1278
1279	switch (cmd) {
1280	case SIOCSBANDWIDTH: /* Set bandwidth */
1281		if (!capable(CAP_NET_ADMIN)) {
1282			ret = -EPERM;
1283			goto out;
1284		}
1285		w83977af_change_speed(self, irq->ifr_baudrate);
1286		break;
1287	case SIOCSMEDIABUSY: /* Set media busy */
1288		if (!capable(CAP_NET_ADMIN)) {
1289			ret = -EPERM;
1290			goto out;
1291		}
1292		irda_device_set_media_busy(self->netdev, TRUE);
1293		break;
1294	case SIOCGRECEIVING: /* Check if we are receiving right now */
1295		irq->ifr_receiving = w83977af_is_receiving(self);
1296		break;
1297	default:
1298		ret = -EOPNOTSUPP;
1299	}
1300out:
1301	spin_unlock_irqrestore(&self->lock, flags);
1302	return ret;
1303}
1304
1305MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
1306MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
1307MODULE_LICENSE("GPL");
1308
1309
1310module_param(qos_mtt_bits, int, 0);
 1311MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
1312module_param_array(io, int, NULL, 0);
1313MODULE_PARM_DESC(io, "Base I/O addresses");
1314module_param_array(irq, int, NULL, 0);
1315MODULE_PARM_DESC(irq, "IRQ lines");
1316
1317/*
1318 * Function init_module (void)
1319 *
 1320 *    Module entry point; handled by module_init(w83977af_init) below
1321 *
1322 */
1323module_init(w83977af_init);
1324
1325/*
1326 * Function cleanup_module (void)
1327 *
 1328 *    Module exit point; handled by module_exit(w83977af_cleanup) below
1329 *
1330 */
1331module_exit(w83977af_cleanup);